--- /dev/null
+This package, the RabbitMQ server, is licensed under the MPL.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
+
--- /dev/null
+
+The MIT license is as follows:
+
+ "Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this file (the Software), to deal in the
+ Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit
+ persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE."
+
+
+The BSD 2-Clause license is as follows:
+
+ "Redistribution and use in source and binary forms, with or
+ without modification, are permitted provided that the
+ following conditions are met:
+
+ 1. Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the following
+ disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
+
+
+The rest of this package is licensed under the Mozilla Public License 1.1
+Authors and Copyright are as described below:
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+
+
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+This is rabbitmq-server, a message broker implementing AMQP, STOMP and MQTT.
+
+Most of the documentation for RabbitMQ is provided on the RabbitMQ web
+site. You can see documentation for the current version at:
+
+http://www.rabbitmq.com/documentation.html
+
+and for previous versions at:
+
+http://www.rabbitmq.com/previous.html
+
+Man pages are installed with this package. Of particular interest are
+rabbitmqctl(1), to interact with a running RabbitMQ server, and
+rabbitmq-plugins(1), to enable and disable plugins. These should be
+run as the superuser.
+
+An example configuration file is provided in the same directory as
+this README. Copy it to /etc/rabbitmq/rabbitmq.config to use it. The
+RabbitMQ server must be restarted after changing the configuration
+file or enabling or disabling plugins.
--- /dev/null
+rabbitmq-server (3.3.5-1~mos6.1) trusty; urgency=low
+
+ * Pick the source from packages/precise/rabbitmq-server, commit
+ 55fbf9f2223244c363790e84913232adf9fed990.
+ * Bump the version for MOS 6.1
+
+ -- Alexei Sheplyakov <asheplyakov@mirantis.com> Wed, 28 Jan 2015 17:48:06 +0300
+
+rabbitmq-server (3.3.5-1) unstable; urgency=low
+
+ * New Upstream Release
+ * Changed Uploaders from Emile Joubert to Blair Hester
+
+ -- Simon MacMullen <simon@rabbitmq.com> Mon, 11 Aug 2014 12:23:31 +0100
+
+rabbitmq-server (3.3.4-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Tue, 24 Jun 2014 12:50:29 +0100
+
+rabbitmq-server (3.3.3-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Mon, 16 Jun 2014 13:00:00 +0100
+
+rabbitmq-server (3.3.2-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Mon, 09 Jun 2014 10:25:22 +0100
+
+rabbitmq-server (3.3.1-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Tue, 29 Apr 2014 11:49:23 +0100
+
+rabbitmq-server (3.3.0-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Wed, 02 Apr 2014 14:23:14 +0100
+
+rabbitmq-server (3.2.4-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Mon, 03 Mar 2014 14:50:18 +0000
+
+rabbitmq-server (3.2.3-1+mira.1) precise; urgency=low
+
+ * Disable rabbitmq-server autostart
+
+ -- Vladimir Kuklin <vkuklin@mirantis.com> Thu, 10 Jul 2014 14:01:02 +0400
+
+rabbitmq-server (3.2.3-1) unstable; urgency=low
+
+ [ Emile Joubert ]
+ * New Upstream Release
+
+ [ Vladimir Kuklin ]
+ * +mira
+
+ -- Vladimir Kuklin <vvk@vvk-workstation> Thu, 10 Jul 2014 14:00:41 +0400
+
+rabbitmq-server (3.2.2-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Emile Joubert <emile@rabbitmq.com> Tue, 10 Dec 2013 16:08:08 +0000
+
+rabbitmq-server (3.2.0-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Emile Joubert <emile@rabbitmq.com> Wed, 23 Oct 2013 12:44:10 +0100
+
+rabbitmq-server (3.1.5-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Thu, 15 Aug 2013 11:03:13 +0100
+
+rabbitmq-server (3.1.3-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Tim Watson <tim@rabbitmq.com> Tue, 25 Jun 2013 15:01:12 +0100
+
+rabbitmq-server (3.1.2-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Tim Watson <tim@rabbitmq.com> Mon, 24 Jun 2013 11:16:41 +0100
+
+rabbitmq-server (3.1.1-1) unstable; urgency=low
+
+ * Test release
+
+ -- Tim Watson <tim@rabbitmq.com> Mon, 20 May 2013 16:21:20 +0100
+
+rabbitmq-server (3.1.0-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Wed, 01 May 2013 11:57:58 +0100
+
+rabbitmq-server (3.0.1-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Tue, 11 Dec 2012 11:29:55 +0000
+
+rabbitmq-server (3.0.0-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Fri, 16 Nov 2012 14:15:29 +0000
+
+rabbitmq-server (2.7.1-1) natty; urgency=low
+
+ * New Upstream Release
+
+ -- Steve Powell <steve@rabbitmq.com> Fri, 16 Dec 2011 12:12:36 +0000
+
+rabbitmq-server (2.7.0-1) natty; urgency=low
+
+ * New Upstream Release
+
+ -- Steve Powell <steve@rabbitmq.com> Tue, 08 Nov 2011 16:47:50 +0000
+
+rabbitmq-server (2.6.1-1) natty; urgency=low
+
+ * New Upstream Release
+
+ -- Tim <tim@rabbitmq.com> Fri, 09 Sep 2011 14:38:45 +0100
+
+rabbitmq-server (2.6.0-1) natty; urgency=low
+
+ * New Upstream Release
+
+ -- Tim <tim@rabbitmq.com> Fri, 26 Aug 2011 16:29:40 +0100
+
+rabbitmq-server (2.5.1-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Mon, 27 Jun 2011 11:21:49 +0100
+
+rabbitmq-server (2.5.0-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- <jerryk@vmware.com> Thu, 09 Jun 2011 07:20:29 -0700
+
+rabbitmq-server (2.4.1-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Alexandru Scvortov <alexandru@rabbitmq.com> Thu, 07 Apr 2011 16:49:22 +0100
+
+rabbitmq-server (2.4.0-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Alexandru Scvortov <alexandru@rabbitmq.com> Tue, 22 Mar 2011 17:34:31 +0000
+
+rabbitmq-server (2.3.1-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Thu, 03 Feb 2011 12:43:56 +0000
+
+rabbitmq-server (2.3.0-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@rabbitmq.com> Tue, 01 Feb 2011 12:52:16 +0000
+
+rabbitmq-server (2.2.0-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Rob Harrop <rob@rabbitmq.com> Mon, 29 Nov 2010 12:24:48 +0000
+
+rabbitmq-server (2.1.1-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Vlad Alexandru Ionescu <vlad@rabbitmq.com> Tue, 19 Oct 2010 17:20:10 +0100
+
+rabbitmq-server (2.1.0-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Marek Majkowski <marek@rabbitmq.com> Tue, 14 Sep 2010 14:20:17 +0100
+
+rabbitmq-server (2.0.0-1) karmic; urgency=low
+
+ * New Upstream Release
+
+ -- Michael Bridgen <mikeb@rabbitmq.com> Mon, 23 Aug 2010 14:55:39 +0100
+
+rabbitmq-server (1.8.1-1) lucid; urgency=low
+
+ * New Upstream Release
+
+ -- Emile Joubert <emile@rabbitmq.com> Wed, 14 Jul 2010 15:05:24 +0100
+
+rabbitmq-server (1.8.0-1) intrepid; urgency=low
+
+ * New Upstream Release
+
+ -- Matthew Sackman <matthew@rabbitmq.com> Tue, 15 Jun 2010 12:48:48 +0100
+
+rabbitmq-server (1.7.2-1) intrepid; urgency=low
+
+ * New Upstream Release
+
+ -- Matthew Sackman <matthew@lshift.net> Mon, 15 Feb 2010 15:54:47 +0000
+
+rabbitmq-server (1.7.1-1) intrepid; urgency=low
+
+ * New Upstream Release
+
+ -- Matthew Sackman <matthew@lshift.net> Fri, 22 Jan 2010 14:14:29 +0000
+
+rabbitmq-server (1.7.0-1) intrepid; urgency=low
+
+ * New Upstream Release
+
+ -- David Wragg <dpw@lshift.net> Mon, 05 Oct 2009 13:44:41 +0100
+
+rabbitmq-server (1.6.0-1) hardy; urgency=low
+
+ * New Upstream Release
+
+ -- Matthias Radestock <matthias@lshift.net> Tue, 16 Jun 2009 15:02:58 +0100
+
+rabbitmq-server (1.5.5-1) hardy; urgency=low
+
+ * New Upstream Release
+
+ -- Matthias Radestock <matthias@lshift.net> Tue, 19 May 2009 09:57:54 +0100
+
+rabbitmq-server (1.5.4-1) hardy; urgency=low
+
+ * New Upstream Release
+
+ -- Matthias Radestock <matthias@lshift.net> Mon, 06 Apr 2009 09:19:32 +0100
+
+rabbitmq-server (1.5.3-1) hardy; urgency=low
+
+ * New Upstream Release
+
+ -- Tony Garnock-Jones <tonyg@lshift.net> Tue, 24 Feb 2009 18:23:33 +0000
+
+rabbitmq-server (1.5.2-1) hardy; urgency=low
+
+ * New Upstream Release
+
+ -- Tony Garnock-Jones <tonyg@lshift.net> Mon, 23 Feb 2009 16:03:38 +0000
+
+rabbitmq-server (1.5.1-1) hardy; urgency=low
+
+ * New Upstream Release
+
+ -- Simon MacMullen <simon@lshift.net> Mon, 19 Jan 2009 15:46:13 +0000
+
+rabbitmq-server (1.5.0-1) testing; urgency=low
+
+ * New Upstream Release
+
+ -- Matthias Radestock <matthias@lshift.net> Wed, 17 Dec 2008 18:23:47 +0000
+
+rabbitmq-server (1.4.0-1) testing; urgency=low
+
+ * New Upstream Release
+
+ -- Tony Garnock-Jones <tonyg@lshift.net> Thu, 24 Jul 2008 13:21:48 +0100
+
+rabbitmq-server (1.3.0-1) testing; urgency=low
+
+ * New Upstream Release
+
+ -- Adrien Pierard <adrien@lshift.net> Mon, 03 Mar 2008 15:34:38 +0000
+
+rabbitmq-server (1.2.0-2) testing; urgency=low
+
+ * Fixed rabbitmqctl wrapper script
+
+ -- Simon MacMullen <simon@lshift.net> Fri, 05 Oct 2007 11:55:00 +0100
+
+rabbitmq-server (1.2.0-1) testing; urgency=low
+
+ * New upstream release
+
+ -- Simon MacMullen <simon@lshift.net> Wed, 26 Sep 2007 11:49:26 +0100
+
+rabbitmq-server (1.1.1-1) testing; urgency=low
+
+ * New upstream release
+
+ -- Simon MacMullen <simon@lshift.net> Wed, 29 Aug 2007 12:03:15 +0100
+
+rabbitmq-server (1.1.0-alpha-2) testing; urgency=low
+
+ * Fixed erlang-nox dependency
+
+ -- Simon MacMullen <simon@lshift.net> Thu, 02 Aug 2007 11:27:13 +0100
+
+rabbitmq-server (1.1.0-alpha-1) testing; urgency=low
+
+ * New upstream release
+
+ -- Simon MacMullen <simon@lshift.net> Fri, 20 Jul 2007 18:17:33 +0100
+
+rabbitmq-server (1.0.0-alpha-1) unstable; urgency=low
+
+ * Initial release
+
+ -- Tony Garnock-Jones <tonyg@shortstop.lshift.net> Wed, 31 Jan 2007 19:06:33 +0000
+
--- /dev/null
+Source: rabbitmq-server
+Section: net
+Priority: extra
+Maintainer: RabbitMQ Team <packaging@rabbitmq.com>
+Uploaders: Emile Joubert <emile@rabbitmq.com>
+DM-Upload-Allowed: yes
+Build-Depends: cdbs, debhelper (>= 5), erlang-dev, python-simplejson, xmlto, xsltproc, erlang-nox (>= 1:13.b.3), erlang-src (>= 1:13.b.3), unzip, zip
+Standards-Version: 3.9.2
+
+Package: rabbitmq-server
+Architecture: all
+Depends: erlang-nox (>= 1:13.b.3) | esl-erlang, adduser, logrotate, ${misc:Depends}
+Description: AMQP server written in Erlang
+ RabbitMQ is an implementation of AMQP, the emerging standard for high
+ performance enterprise messaging. The RabbitMQ server is a robust and
+ scalable implementation of an AMQP broker.
+Homepage: http://www.rabbitmq.com/
--- /dev/null
+This package was debianized by Tony Garnock-Jones <tonyg@rabbitmq.com> on
+Wed, 3 Jan 2007 15:43:44 +0000.
+
+It was downloaded from http://www.rabbitmq.com/
+
+
+This package, the RabbitMQ server, is licensed under the MPL.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
+
+The files amqp-rabbitmq-0.8.json and amqp-rabbitmq-0.9.1.json are
+"Copyright (C) 2008-2013 GoPivotal, Inc." and are covered by the MIT
+license.
+
+jQuery is "Copyright (c) 2010 John Resig" and is covered by the MIT
+license. It was downloaded from http://jquery.com/
+
+EJS is "Copyright (c) 2007 Edward Benson" and is covered by the MIT
+license. It was downloaded from http://embeddedjs.com/
+
+Sammy is "Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC" and is
+covered by the MIT license. It was downloaded from
+http://code.quirkey.com/sammy/
+
+ExplorerCanvas is "Copyright 2006 Google Inc" and is covered by the
+Apache License version 2.0. It was downloaded from
+http://code.google.com/p/explorercanvas/
+
+Flot is "Copyright (c) 2007-2013 IOLA and Ole Laursen" and is covered
+by the MIT license. It was downloaded from
+http://www.flotcharts.org/
+
+Webmachine is "Copyright (c) Basho Technologies" and is covered by the
+Apache License 2.0. It was downloaded from http://webmachine.basho.com/
+
+Eldap is "Copyright (c) 2010, Torbjorn Tornkvist" and is covered by
+the MIT license. It was downloaded from https://github.com/etnt/eldap
+
+Mochiweb is "Copyright (c) 2007 Mochi Media, Inc." and is covered by
+the MIT license. It was downloaded from
+http://github.com/mochi/mochiweb/
+
+glMatrix is "Copyright (c) 2011, Brandon Jones" and is covered by the
+BSD 2-Clause license. It was downloaded from
+http://code.google.com/p/glmatrix/
+
+
+The MIT license is as follows:
+
+ "Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this file (the Software), to deal in the
+ Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit
+ persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE."
+
+
+The BSD 2-Clause license is as follows:
+
+ "Redistribution and use in source and binary forms, with or
+ without modification, are permitted provided that the
+ following conditions are met:
+
+ 1. Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the following
+ disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
+
+
+The rest of this package is licensed under the Mozilla Public License 1.1
+Authors and Copyright are as described below:
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+
+
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
+
+
+The Debian packaging is (C) 2007-2013, GoPivotal, Inc. and is licensed
+under the MPL 1.1, see above.
+
--- /dev/null
+usr/lib/rabbitmq/bin
+usr/lib/erlang/lib
+usr/sbin
+usr/share/man
+var/lib/rabbitmq/mnesia
+var/log/rabbitmq
+etc/logrotate.d
+etc/rabbitmq
+
--- /dev/null
+#!/bin/sh
+# postinst script for rabbitmq
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+#        * <postinst> `configure' <most-recently-configured-version>
+#        * <old-postinst> `abort-upgrade' <new version>
+#        * <conflictor's-postinst> `abort-remove' `in-favour' <package>
+#          <new-version>
+#        * <postinst> `abort-remove'
+#        * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
+#          <failed-install-package> <version> `removing'
+#          <conflicting-package> <version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+# create rabbitmq group
+if ! getent group rabbitmq >/dev/null; then
+    addgroup --system rabbitmq
+fi
+
+# create rabbitmq user
+if ! getent passwd rabbitmq >/dev/null; then
+    adduser --system --ingroup rabbitmq --home /var/lib/rabbitmq \
+        --no-create-home --gecos "RabbitMQ messaging server" \
+        --disabled-login rabbitmq
+fi
+
+# Ensure the broker's state and log directories are owned by the
+# rabbitmq user on every install/upgrade (runs before the case below,
+# i.e. for all invocation modes).
+chown -R rabbitmq:rabbitmq /var/lib/rabbitmq
+chown -R rabbitmq:rabbitmq /var/log/rabbitmq
+
+case "$1" in
+    configure)
+        # Older packages used /etc/rabbitmq/rabbitmq.conf for environment
+        # settings; it is now rabbitmq-env.conf.  Rename it once, but
+        # never clobber an existing rabbitmq-env.conf.
+        if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
+           [ ! -f /etc/rabbitmq/rabbitmq-env.conf ]; then
+            mv /etc/rabbitmq/rabbitmq.conf /etc/rabbitmq/rabbitmq-env.conf
+        fi
+        ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+        ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+        ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
--- /dev/null
+#!/bin/sh
+# postrm script for rabbitmq
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+#        * <postrm> `remove'
+#        * <postrm> `purge'
+#        * <old-postrm> `upgrade' <new-version>
+#        * <new-postrm> `failed-upgrade' <old-version>
+#        * <new-postrm> `abort-install'
+#        * <new-postrm> `abort-install' <old-version>
+#        * <new-postrm> `abort-upgrade' <old-version>
+#        * <disappearer's-postrm> `disappear' <overwriter>
+#          <overwriter-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+# Files created by plugin (de)activation are not tracked by dpkg, so
+# they must be cleaned up by hand on remove/purge/upgrade.
+remove_plugin_traces() {
+    # Remove traces of plugins
+    rm -rf /var/lib/rabbitmq/plugins-scratch
+}
+
+case "$1" in
+    purge)
+        # Purge deletes all broker state: defaults file, mnesia
+        # database, logs and the configuration directory.
+        rm -f /etc/default/rabbitmq
+        if [ -d /var/lib/rabbitmq ]; then
+            rm -r /var/lib/rabbitmq
+        fi
+        if [ -d /var/log/rabbitmq ]; then
+            rm -r /var/log/rabbitmq
+        fi
+        if [ -d /etc/rabbitmq ]; then
+            rm -r /etc/rabbitmq
+        fi
+        remove_plugin_traces
+        if getent passwd rabbitmq >/dev/null; then
+            # Stop epmd if run by the rabbitmq user
+            pkill -u rabbitmq epmd || :
+        fi
+        ;;
+
+    remove|upgrade)
+        remove_plugin_traces
+        ;;
+
+    failed-upgrade|abort-install|abort-upgrade|disappear)
+        ;;
+
+    *)
+        echo "postrm called with unknown argument \`$1'" >&2
+        exit 1
+        ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
--- /dev/null
+#!/bin/sh
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+# Escape spaces and quotes, because shell is revolting.
+# Build CMDLINE: every argument double-quoted, with embedded double
+# quotes backslash-escaped, so the whole list survives being passed as
+# a single string to `su ... -c` below.
+for arg in "$@" ; do
+    # Escape quotes in parameters, so that they're passed through cleanly.
+    # NOTE(review): <<-END strips leading *tabs* only; the here-doc
+    # lines must stay tab-indented for $arg to pass through verbatim.
+    arg=$(sed -e 's/"/\\"/g' <<-END
+	$arg
+	END
+    )
+    CMDLINE="${CMDLINE} \"${arg}\""
+done
+
+cd /var/lib/rabbitmq
+
+# This one wrapper is installed as rabbitmq-server, rabbitmqctl and
+# rabbitmq-plugins; dispatch on the name it was invoked under.
+SCRIPT=`basename $0`
+
+if [ `id -u` = `id -u rabbitmq` -a "$SCRIPT" = "rabbitmq-server" ] ; then
+    # Already the rabbitmq user starting the server: run it directly,
+    # capturing startup output where the init script expects it.
+    /usr/lib/rabbitmq/bin/rabbitmq-server "$@" > "/var/log/rabbitmq/startup_log" 2> "/var/log/rabbitmq/startup_err"
+elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then
+    # rabbitmq user running any tool, or any user running rabbitmq-plugins.
+    /usr/lib/rabbitmq/bin/${SCRIPT} "$@"
+elif [ `id -u` = 0 ] ; then
+    # root: re-run the tool as the rabbitmq user, via the escaped CMDLINE.
+    su rabbitmq -s /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}"
+else
+    # Any other user: invoke without arguments (prints usage) and fail.
+    /usr/lib/rabbitmq/bin/${SCRIPT}
+    echo
+    echo "Only root or rabbitmq should run ${SCRIPT}"
+    echo
+    exit 1
+fi
--- /dev/null
+# This file is sourced by /etc/init.d/rabbitmq-server. Its primary
+# reason for existing is to allow adjustment of system limits for the
+# rabbitmq-server process.
+#
+# Maximum number of open file handles. This will need to be increased
+# to handle many simultaneous connections. Refer to the system
+# documentation for ulimit (in man bash) for more information.
+#
+# Raise the hard limit first, then set the (lower) soft limit that is
+# actually in effect for the broker process.
+ulimit -H -n 105472
+ulimit -S -n 102400
+
--- /dev/null
+#!/bin/sh
+#
+# rabbitmq-server RabbitMQ broker
+#
+# chkconfig: - 80 05
+# description: Enable AMQP service provided by RabbitMQ
+#
+
+### BEGIN INIT INFO
+# Provides: rabbitmq-server
+# Required-Start: $remote_fs $network
+# Required-Stop: $remote_fs $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Description: RabbitMQ broker
+# Short-Description: Enable AMQP service provided by RabbitMQ broker
+### END INIT INFO
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+NAME=rabbitmq-server
+DAEMON=/usr/sbin/${NAME}
+CONTROL=/usr/sbin/rabbitmqctl
+DESC="message broker"
+USER=rabbitmq
+ROTATE_SUFFIX=
+INIT_LOG_DIR=/var/log/rabbitmq
+PID_FILE=/var/run/rabbitmq/pid
+
+
+test -x $DAEMON || exit 0
+test -x $CONTROL || exit 0
+
+RETVAL=0
+set -e
+
+[ -f /etc/default/${NAME} ] && . /etc/default/${NAME}
+
+. /lib/lsb/init-functions
+. /lib/init/vars.sh
+
+# Create the directory holding the pid file, owned by $USER, unless it
+# already exists.  Expansions are quoted: the previous unquoted forms
+# word-split if PID_FILE or USER ever contain whitespace.
+ensure_pid_dir () {
+    PID_DIR=$(dirname "${PID_FILE}")
+    if [ ! -d "${PID_DIR}" ] ; then
+        mkdir -p "${PID_DIR}"
+        chown -R "${USER}:${USER}" "${PID_DIR}"
+        chmod 755 "${PID_DIR}"
+    fi
+}
+
+remove_pid () {
+ rm -f ${PID_FILE}
+ rmdir `dirname ${PID_FILE}` || :
+}
+
+# Start the broker unless it is already running.
+# Sets RETVAL: 0 = started, 3 = already running, other = start failed.
+start_rabbitmq () {
+    status_rabbitmq quiet
+    if [ $RETVAL != 0 ] ; then
+        RETVAL=0
+        ensure_pid_dir
+        # Drop "set -e" around the commands whose failure we want to
+        # inspect ourselves rather than abort on.
+        set +e
+        RABBITMQ_PID_FILE=$PID_FILE start-stop-daemon --quiet \
+            --chuid rabbitmq --start --exec $DAEMON \
+            --pidfile "$RABBITMQ_PID_FILE" --background
+        # Block until the node is fully booted (or fails to come up).
+        $CONTROL wait $PID_FILE >/dev/null 2>&1
+        RETVAL=$?
+        set -e
+        if [ $RETVAL != 0 ] ; then
+            remove_pid
+        fi
+    else
+        RETVAL=3
+    fi
+}
+
+# Stop a running broker, logging shutdown output under INIT_LOG_DIR.
+# Sets RETVAL: 0 = stopped, 3 = was not running, other = stop failed.
+stop_rabbitmq () {
+    status_rabbitmq quiet
+    if [ $RETVAL = 0 ] ; then
+        set +e
+        $CONTROL stop ${PID_FILE} > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err
+        RETVAL=$?
+        set -e
+        if [ $RETVAL = 0 ] ; then
+            remove_pid
+        fi
+    else
+        RETVAL=3
+    fi
+}
+
+# Query broker status via rabbitmqctl.  Prints the full report unless
+# called with "quiet".  On failure sets RETVAL=3; on success RETVAL is
+# left untouched (callers rely on it already being 0).
+status_rabbitmq() {
+    set +e
+    if [ "$1" != "quiet" ] ; then
+        $CONTROL status 2>&1
+    else
+        $CONTROL status > /dev/null 2>&1
+    fi
+    if [ $? != 0 ] ; then
+        RETVAL=3
+    fi
+    set -e
+}
+
+rotate_logs_rabbitmq() {
+ set +e
+ $CONTROL -q rotate_logs ${ROTATE_SUFFIX}
+ if [ $? != 0 ] ; then
+ RETVAL=1
+ fi
+ set -e
+}
+
+# try-restart semantics: restart only when the broker is currently up,
+# otherwise just warn.
+restart_running_rabbitmq () {
+    status_rabbitmq quiet
+    if [ $RETVAL != 0 ] ; then
+        log_warning_msg "${DESC} not running"
+    else
+        restart_rabbitmq
+    fi
+}
+
+# Restart = stop then start; RETVAL ends up reflecting the start result.
+restart_rabbitmq() {
+    stop_rabbitmq
+    start_rabbitmq
+}
+
+restart_end() {
+ if [ $RETVAL = 0 ] ; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+}
+
+start_stop_end() {
+ case "$RETVAL" in
+ 0)
+ [ -x /sbin/initctl ] && /sbin/initctl emit --no-wait "${NAME}-${1}"
+ log_end_msg 0
+ ;;
+ 3)
+ log_warning_msg "${DESC} already ${1}"
+ log_end_msg 0
+ RETVAL=0
+ ;;
+ *)
+ log_warning_msg "FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\}"
+ log_end_msg 1
+ ;;
+ esac
+}
+
+case "$1" in
+    start)
+        log_daemon_msg "Starting ${DESC}" $NAME
+        start_rabbitmq
+        start_stop_end "running"
+        ;;
+    stop)
+        log_daemon_msg "Stopping ${DESC}" $NAME
+        stop_rabbitmq
+        start_stop_end "stopped"
+        ;;
+    status)
+        # Non-quiet status report; RETVAL becomes 3 if not running.
+        status_rabbitmq
+        ;;
+    rotate-logs)
+        log_action_begin_msg "Rotating log files for ${DESC}: ${NAME}"
+        rotate_logs_rabbitmq
+        log_action_end_msg $RETVAL
+        ;;
+    force-reload|reload|restart)
+        # The broker has no lighter reload mechanism; reload == restart.
+        log_daemon_msg "Restarting ${DESC}" $NAME
+        restart_rabbitmq
+        restart_end
+        ;;
+    try-restart)
+        # Restart only if currently running.
+        log_daemon_msg "Restarting ${DESC}" $NAME
+        restart_running_rabbitmq
+        restart_end
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|status|rotate-logs|restart|condrestart|try-restart|reload|force-reload}" >&2
+        RETVAL=1
+        ;;
+esac
+
+exit $RETVAL
--- /dev/null
+# Rotate the RabbitMQ broker logs weekly, keeping 20 compressed
+# generations; after rotation ask the broker (via the init script) to
+# reopen its log files.
+/var/log/rabbitmq/*.log {
+    weekly
+    missingok
+    rotate 20
+    compress
+    delaycompress
+    notifempty
+    sharedscripts
+    postrotate
+        /etc/init.d/rabbitmq-server rotate-logs > /dev/null
+    endscript
+}
--- /dev/null
+#!/bin/sh
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+##
+## OCF Resource Agent compliant rabbitmq-server resource script.
+##
+
+## OCF instance parameters
+##   OCF_RESKEY_server
+##   OCF_RESKEY_ctl
+##   OCF_RESKEY_nodename
+##   OCF_RESKEY_ip
+##   OCF_RESKEY_port
+##   OCF_RESKEY_config_file
+##   OCF_RESKEY_log_base
+##   OCF_RESKEY_mnesia_base
+##   OCF_RESKEY_server_start_args
+##   OCF_RESKEY_pid_file
+
+#######################################################################
+# Initialization:
+
+# Pull in the OCF shell helpers (ocf_log, ocf_is_probe, OCF_* codes).
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs
+
+#######################################################################
+
+# Defaults; each may be overridden via the matching OCF_RESKEY_* value.
+OCF_RESKEY_server_default="/usr/sbin/rabbitmq-server"
+OCF_RESKEY_ctl_default="/usr/sbin/rabbitmqctl"
+OCF_RESKEY_nodename_default="rabbit@localhost"
+OCF_RESKEY_log_base_default="/var/log/rabbitmq"
+OCF_RESKEY_pid_file_default="/var/run/rabbitmq/pid"
+: ${OCF_RESKEY_server=${OCF_RESKEY_server_default}}
+: ${OCF_RESKEY_ctl=${OCF_RESKEY_ctl_default}}
+: ${OCF_RESKEY_nodename=${OCF_RESKEY_nodename_default}}
+: ${OCF_RESKEY_log_base=${OCF_RESKEY_log_base_default}}
+: ${OCF_RESKEY_pid_file=${OCF_RESKEY_pid_file_default}}
+
+# Emit the OCF resource-agent metadata XML (parameters and supported
+# actions) on stdout.  The here-doc body is output verbatim, so no
+# comments may be added inside it.
+meta_data() {
+    cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="rabbitmq-server">
+<version>1.0</version>
+
+<longdesc lang="en">
+Resource agent for RabbitMQ-server
+</longdesc>
+
+<shortdesc lang="en">Resource agent for RabbitMQ-server</shortdesc>
+
+<parameters>
+<parameter name="server" unique="0" required="0">
+<longdesc lang="en">
+The path to the rabbitmq-server script
+</longdesc>
+<shortdesc lang="en">Path to rabbitmq-server</shortdesc>
+<content type="string" default="${OCF_RESKEY_server_default}" />
+</parameter>
+
+<parameter name="ctl" unique="0" required="0">
+<longdesc lang="en">
+The path to the rabbitmqctl script
+</longdesc>
+<shortdesc lang="en">Path to rabbitmqctl</shortdesc>
+<content type="string" default="${OCF_RESKEY_ctl_default}" />
+</parameter>
+
+<parameter name="nodename" unique="0" required="0">
+<longdesc lang="en">
+The node name for rabbitmq-server
+</longdesc>
+<shortdesc lang="en">Node name</shortdesc>
+<content type="string" default="${OCF_RESKEY_nodename_default}" />
+</parameter>
+
+<parameter name="ip" unique="0" required="0">
+<longdesc lang="en">
+The IP address for rabbitmq-server to listen on
+</longdesc>
+<shortdesc lang="en">IP Address</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="port" unique="0" required="0">
+<longdesc lang="en">
+The IP Port for rabbitmq-server to listen on
+</longdesc>
+<shortdesc lang="en">IP Port</shortdesc>
+<content type="integer" default="" />
+</parameter>
+
+<parameter name="config_file" unique="0" required="0">
+<longdesc lang="en">
+Location of the config file (without the .config suffix)
+</longdesc>
+<shortdesc lang="en">Config file path (without the .config suffix)</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="log_base" unique="0" required="0">
+<longdesc lang="en">
+Location of the directory under which logs will be created
+</longdesc>
+<shortdesc lang="en">Log base path</shortdesc>
+<content type="string" default="${OCF_RESKEY_log_base_default}" />
+</parameter>
+
+<parameter name="mnesia_base" unique="0" required="0">
+<longdesc lang="en">
+Location of the directory under which mnesia will store data
+</longdesc>
+<shortdesc lang="en">Mnesia base path</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="server_start_args" unique="0" required="0">
+<longdesc lang="en">
+Additional arguments provided to the server on startup
+</longdesc>
+<shortdesc lang="en">Server start arguments</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="pid_file" unique="0" required="0">
+<longdesc lang="en">
+Location of the file in which the pid will be stored
+</longdesc>
+<shortdesc lang="en">Pid file path</shortdesc>
+<content type="string" default="${OCF_RESKEY_pid_file_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="600" />
+<action name="stop" timeout="120" />
+<action name="status" timeout="20" interval="10" />
+<action name="monitor" timeout="20" interval="10" />
+<action name="validate-all" timeout="30" />
+<action name="meta-data" timeout="5" />
+</actions>
+</resource-agent>
+END
+}
+
+# Print usage for direct (non-OCF) invocation.  Here-doc output must
+# stay exact, so the body is unindented.
+rabbit_usage() {
+    cat <<END
+usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+RABBITMQ_SERVER=$OCF_RESKEY_server
+RABBITMQ_CTL=$OCF_RESKEY_ctl
+RABBITMQ_NODENAME=$OCF_RESKEY_nodename
+RABBITMQ_NODE_IP_ADDRESS=$OCF_RESKEY_ip
+RABBITMQ_NODE_PORT=$OCF_RESKEY_port
+RABBITMQ_CONFIG_FILE=$OCF_RESKEY_config_file
+RABBITMQ_LOG_BASE=$OCF_RESKEY_log_base
+RABBITMQ_MNESIA_BASE=$OCF_RESKEY_mnesia_base
+RABBITMQ_SERVER_START_ARGS=$OCF_RESKEY_server_start_args
+RABBITMQ_PID_FILE=$OCF_RESKEY_pid_file
+[ ! -z $RABBITMQ_NODENAME ] && NODENAME_ARG="-n $RABBITMQ_NODENAME"
+[ ! -z $RABBITMQ_NODENAME ] && export RABBITMQ_NODENAME
+
+# Create the pid-file directory, owned by rabbitmq, if it is missing.
+# Always returns OCF_SUCCESS.  Expansions quoted against whitespace.
+ensure_pid_dir () {
+    PID_DIR=$(dirname "${RABBITMQ_PID_FILE}")
+    if [ ! -d "${PID_DIR}" ] ; then
+        mkdir -p "${PID_DIR}"
+        chown -R rabbitmq:rabbitmq "${PID_DIR}"
+        chmod 755 "${PID_DIR}"
+    fi
+    return $OCF_SUCCESS
+}
+
+remove_pid () {
+ rm -f ${RABBITMQ_PID_FILE}
+ rmdir `dirname ${RABBITMQ_PID_FILE}` || :
+}
+
+# Export only the RABBITMQ_* settings that were actually configured, so
+# unset parameters fall back to the server's own defaults.  Quoted "-n"
+# tests replace the unquoted `[ ! -z $VAR ]` forms, which break on
+# values containing whitespace.
+export_vars() {
+    [ -n "$RABBITMQ_NODE_IP_ADDRESS" ] && export RABBITMQ_NODE_IP_ADDRESS
+    [ -n "$RABBITMQ_NODE_PORT" ] && export RABBITMQ_NODE_PORT
+    [ -n "$RABBITMQ_CONFIG_FILE" ] && export RABBITMQ_CONFIG_FILE
+    [ -n "$RABBITMQ_LOG_BASE" ] && export RABBITMQ_LOG_BASE
+    [ -n "$RABBITMQ_MNESIA_BASE" ] && export RABBITMQ_MNESIA_BASE
+    [ -n "$RABBITMQ_SERVER_START_ARGS" ] && export RABBITMQ_SERVER_START_ARGS
+    [ -n "$RABBITMQ_PID_FILE" ] && ensure_pid_dir && export RABBITMQ_PID_FILE
+}
+
+rabbit_validate_partial() {
+ if [ ! -x $RABBITMQ_SERVER ]; then
+ ocf_log err "rabbitmq-server server $RABBITMQ_SERVER does not exist or is not executable";
+ exit $OCF_ERR_INSTALLED;
+ fi
+
+ if [ ! -x $RABBITMQ_CTL ]; then
+ ocf_log err "rabbitmq-server ctl $RABBITMQ_CTL does not exist or is not executable";
+ exit $OCF_ERR_INSTALLED;
+ fi
+}
+
+# Full validation: optional path parameters, when set, must point at an
+# existing config file / directories; then run the binary checks.
+# Quoted "-n"/"-d" tests replace unquoted `[ ! -z $VAR ]` forms, which
+# break on values containing whitespace.
+rabbit_validate_full() {
+    if [ -n "$RABBITMQ_CONFIG_FILE" ] && [ ! -e "${RABBITMQ_CONFIG_FILE}.config" ]; then
+        ocf_log err "rabbitmq-server config_file ${RABBITMQ_CONFIG_FILE}.config does not exist or is not a file";
+        exit $OCF_ERR_INSTALLED;
+    fi
+
+    if [ -n "$RABBITMQ_LOG_BASE" ] && [ ! -d "$RABBITMQ_LOG_BASE" ]; then
+        ocf_log err "rabbitmq-server log_base $RABBITMQ_LOG_BASE does not exist or is not a directory";
+        exit $OCF_ERR_INSTALLED;
+    fi
+
+    if [ -n "$RABBITMQ_MNESIA_BASE" ] && [ ! -d "$RABBITMQ_MNESIA_BASE" ]; then
+        ocf_log err "rabbitmq-server mnesia_base $RABBITMQ_MNESIA_BASE does not exist or is not a directory";
+        exit $OCF_ERR_INSTALLED;
+    fi
+
+    rabbit_validate_partial
+
+    return $OCF_SUCCESS
+}
+
+# Return OCF_SUCCESS if the node answers "status", OCF_NOT_RUNNING if
+# it is down (other outcomes abort inside rabbitmqctl_action).
+rabbit_status() {
+    rabbitmqctl_action "status"
+}
+
+rabbit_wait() {
+ rabbitmqctl_action "wait" $1
+}
+
+# Run "rabbitmqctl [-n node] <action...>" silently and translate its
+# exit code into an OCF return value (or abort on anything unexpected).
+rabbitmqctl_action() {
+    local rc
+    local action
+    # Flatten all arguments into one unquoted string; individual
+    # arguments therefore must not contain whitespace of their own.
+    action=$@
+    $RABBITMQ_CTL $NODENAME_ARG $action > /dev/null 2> /dev/null
+    rc=$?
+    case "$rc" in
+        0)
+            ocf_log debug "RabbitMQ server is running normally"
+            return $OCF_SUCCESS
+            ;;
+        2)
+            # Exit code 2 is treated as "target node is down".
+            ocf_log debug "RabbitMQ server is not running"
+            return $OCF_NOT_RUNNING
+            ;;
+        *)
+            ocf_log err "Unexpected return from rabbitmqctl $NODENAME_ARG $action: $rc"
+            exit $OCF_ERR_GENERIC
+    esac
+}
+
+# Start the server in the background and wait for the node to come up.
+# Idempotent: returns OCF_SUCCESS immediately if already running;
+# aborts with OCF_ERR_GENERIC if the node fails to start.
+rabbit_start() {
+    local rc
+
+    if rabbit_status; then
+        ocf_log info "Resource already running."
+        return $OCF_SUCCESS
+    fi
+
+    export_vars
+
+    # setsid detaches the server from this session so it survives the
+    # resource agent exiting.
+    setsid sh -c "$RABBITMQ_SERVER > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err" &
+
+    # Wait for the server to come up.
+    # Let the CRM/LRM time us out if required
+    rabbit_wait $RABBITMQ_PID_FILE
+    rc=$?
+    if [ "$rc" != $OCF_SUCCESS ]; then
+        remove_pid
+        ocf_log info "rabbitmq-server start failed: $rc"
+        exit $OCF_ERR_GENERIC
+    fi
+
+    return $OCF_SUCCESS
+}
+
+# Stop a running server and poll until it is actually down.
+# Returns OCF_SUCCESS when stopped (or already stopped); propagates the
+# rabbitmqctl exit code if the stop command itself fails.
+rabbit_stop() {
+    local rc
+
+    if ! rabbit_status; then
+        ocf_log info "Resource not running."
+        return $OCF_SUCCESS
+    fi
+
+    $RABBITMQ_CTL stop
+    rc=$?
+
+    if [ "$rc" != 0 ]; then
+        ocf_log err "rabbitmq-server stop command failed: $RABBITMQ_CTL stop, $rc"
+        return $rc
+    fi
+
+    # Spin waiting for the server to shut down.
+    # Let the CRM/LRM time us out if required
+    stop_wait=1
+    while [ $stop_wait = 1 ]; do
+        rabbit_status
+        rc=$?
+        if [ "$rc" = $OCF_NOT_RUNNING ]; then
+            remove_pid
+            stop_wait=0
+            break
+        elif [ "$rc" != $OCF_SUCCESS ]; then
+            ocf_log info "rabbitmq-server stop failed: $rc"
+            exit $OCF_ERR_GENERIC
+        fi
+        sleep 1
+    done
+
+    return $OCF_SUCCESS
+}
+
+# Monitor is identical to status for this agent; the function's exit
+# status is that of rabbit_status (an explicit "return $?" is redundant).
+rabbit_monitor() {
+    rabbit_status
+}
+
+# meta-data and usage/help must work without a validated environment.
+case $__OCF_ACTION in
+    meta-data)
+        meta_data
+        exit $OCF_SUCCESS
+        ;;
+    usage|help)
+        rabbit_usage
+        exit $OCF_SUCCESS
+        ;;
+esac
+
+# Probes only need the binaries to exist; real actions validate fully.
+if ocf_is_probe; then
+    rabbit_validate_partial
+else
+    rabbit_validate_full
+fi
+
+case $__OCF_ACTION in
+    start)
+        rabbit_start
+        ;;
+    stop)
+        rabbit_stop
+        ;;
+    status|monitor)
+        rabbit_monitor
+        ;;
+    validate-all)
+        # Validation already ran above; reaching here means it passed.
+        exit $OCF_SUCCESS
+        ;;
+    *)
+        rabbit_usage
+        exit $OCF_ERR_UNIMPLEMENTED
+        ;;
+esac
+
+exit $?
--- /dev/null
+#!/usr/bin/make -f
+# Debian build rules for rabbitmq-server, driven by CDBS.
+
+include /usr/share/cdbs/1/rules/debhelper.mk
+include /usr/share/cdbs/1/class/makefile.mk
+
+# Staging locations for the Erlang application and its scripts.
+RABBIT_LIB=$(DEB_DESTDIR)usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)/
+RABBIT_BIN=$(DEB_DESTDIR)usr/lib/rabbitmq/bin/
+
+DOCDIR=$(DEB_DESTDIR)usr/share/doc/rabbitmq-server/
+DEB_MAKE_INSTALL_TARGET := install TARGET_DIR=$(RABBIT_LIB) SBIN_DIR=$(RABBIT_BIN) DOC_INSTALL_DIR=$(DOCDIR) MAN_DIR=$(DEB_DESTDIR)usr/share/man/
+DEB_MAKE_CLEAN_TARGET:= distclean
+DEB_INSTALL_DOCS_ALL=debian/README
+
+# Do not start the broker automatically at package install time.
+DEB_DH_INSTALLINIT_ARGS="--no-start"
+
+install/rabbitmq-server::
+	mkdir -p $(DOCDIR)
+# Upstream licence/install files are superseded by Debian's own copies.
+	rm $(RABBIT_LIB)LICENSE* $(RABBIT_LIB)INSTALL*
+# All three user-facing commands are the same wrapper script, which
+# dispatches on the name it is invoked under.
+	for script in rabbitmqctl rabbitmq-server rabbitmq-plugins; do \
+		install -p -D -m 0755 debian/rabbitmq-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \
+	done
+# Generate postrm with the versioned library path substituted in.
+	sed -e 's|@RABBIT_LIB@|/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)|g' <debian/postrm.in >debian/postrm
+	install -p -D -m 0755 debian/rabbitmq-server.ocf $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server
+	install -p -D -m 0644 debian/rabbitmq-server.default $(DEB_DESTDIR)etc/default/rabbitmq-server
+
+clean::
+	rm -f plugins-src/rabbitmq-server debian/postrm plugins/README
--- /dev/null
+3.0 (quilt)
--- /dev/null
+version=3
+
+http://www.rabbitmq.com/releases/rabbitmq-server/v(.*)/rabbitmq-server-(\d.*)\.tar\.gz \
+ debian uupdate
--- /dev/null
+Please see http://www.rabbitmq.com/download.html for links to guides
+on installing RabbitMQ.
--- /dev/null
+This package, the RabbitMQ server, is licensed under the MPL.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
+
+The files amqp-rabbitmq-0.8.json and amqp-rabbitmq-0.9.1.json are
+"Copyright (C) 2008-2013 GoPivotal, Inc." and are covered by the MIT
+license.
+
+jQuery is "Copyright (c) 2010 John Resig" and is covered by the MIT
+license. It was downloaded from http://jquery.com/
+
+EJS is "Copyright (c) 2007 Edward Benson" and is covered by the MIT
+license. It was downloaded from http://embeddedjs.com/
+
+Sammy is "Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC" and is
+covered by the MIT license. It was downloaded from
+http://code.quirkey.com/sammy/
+
+ExplorerCanvas is "Copyright 2006 Google Inc" and is covered by the
+Apache License version 2.0. It was downloaded from
+http://code.google.com/p/explorercanvas/
+
+Flot is "Copyright (c) 2007-2013 IOLA and Ole Laursen" and is covered
+by the MIT license. It was downloaded from
+http://www.flotcharts.org/
+
+Webmachine is Copyright (c) Basho Technologies and is covered by the
+Apache License 2.0. It was downloaded from http://webmachine.basho.com/
+
+Eldap is "Copyright (c) 2010, Torbjorn Tornkvist" and is covered by
+the MIT license. It was downloaded from https://github.com/etnt/eldap
+
+Mochiweb is "Copyright (c) 2007 Mochi Media, Inc." and is covered by
+the MIT license. It was downloaded from
+http://github.com/mochi/mochiweb/
+
+glMatrix is "Copyright (c) 2011, Brandon Jones" and is covered by the
+BSD 2-Clause license. It was downloaded from
+http://code.google.com/p/glmatrix/
+
+
+The MIT license is as follows:
+
+ "Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this file (the Software), to deal in the
+ Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit
+ persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE."
+
+
+The BSD 2-Clause license is as follows:
+
+ "Redistribution and use in source and binary forms, with or
+ without modification, are permitted provided that the
+ following conditions are met:
+
+ 1. Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the following
+ disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
+
+
+The rest of this package is licensed under the Mozilla Public License 1.1
+Authors and Copyright are as described below:
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+
+
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear that
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declaratory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A - Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
--- /dev/null
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * http://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
--- /dev/null
+Copyright (c) 2011, Brandon Jones
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the
+ distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+EJS - Embedded JavaScript
+
+Copyright (c) 2007 Edward Benson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
--- /dev/null
+Copyright (c) 2007-2013 IOLA and Ole Laursen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+
+
--- /dev/null
+
+Copyright (c) 2010, Torbjorn Tornkvist
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
--- /dev/null
+Copyright (c) 2011 John Resig, http://jquery.com/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
# ---------------------------------------------------------------------------
# Build configuration. The RABBITMQ_* settings may be overridden from the
# environment or the make command line (?= only assigns when unset).
# ---------------------------------------------------------------------------
TMPDIR ?= /tmp

# Node name and per-node scratch locations used by the run*/test targets.
RABBITMQ_NODENAME ?= rabbit
RABBITMQ_SERVER_START_ARGS ?=
RABBITMQ_MNESIA_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-mnesia
RABBITMQ_PLUGINS_EXPAND_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-plugins-scratch
RABBITMQ_LOG_BASE ?= $(TMPDIR)

# Source tree layout.
DEPS_FILE=deps.mk
SOURCE_DIR=src
EBIN_DIR=ebin
INCLUDE_DIR=include
DOCS_DIR=docs
# rabbit_framing.hrl and the rabbit_framing_amqp_* modules are generated
# (see the codegen rules below), so they are listed explicitly in addition
# to the wildcard matches, which miss them on a clean checkout.
INCLUDES=$(wildcard $(INCLUDE_DIR)/*.hrl) $(INCLUDE_DIR)/rabbit_framing.hrl
SOURCES=$(wildcard $(SOURCE_DIR)/*.erl) $(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl $(USAGES_ERL)
BEAM_TARGETS=$(patsubst $(SOURCE_DIR)/%.erl, $(EBIN_DIR)/%.beam, $(SOURCES))
TARGETS=$(EBIN_DIR)/rabbit.app $(INCLUDE_DIR)/rabbit_framing.hrl $(BEAM_TARGETS) plugins
WEB_URL=http://www.rabbitmq.com/
# Man pages are generated from the DocBook XML sources in $(DOCS_DIR).
MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml))
WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml $(DOCS_DIR)/rabbitmq-echopid.xml)
# Usage modules (rabbit_*_usage.erl) are generated from the ctl/plugins
# man-page XML via usage_xml_to_erl (defined below).
USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-plugins.1.xml
USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML)))
QC_MODULES := rabbit_backing_queue_qc
QC_TRIALS ?= 100
+
# Pick a Python interpreter that can import simplejson (needed by
# codegen.py), trying "python" first and then progressively older
# interpreters. If none works we still fall back to "python" so that
# codegen itself reports the missing module.
ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes)
PYTHON=python
else
ifeq ($(shell python2.6 -c 'import simplejson' 2>/dev/null && echo yes),yes)
PYTHON=python2.6
else
ifeq ($(shell python2.5 -c 'import simplejson' 2>/dev/null && echo yes),yes)
PYTHON=python2.5
else
# Hmm. Missing simplejson?
PYTHON=python
endif
endif
endif
+
# Dialyzer PLTs: BASIC_PLT caches the OTP applications; RABBIT_PLT layers
# this project's beams on top of it.
BASIC_PLT=basic.plt
RABBIT_PLT=rabbit.plt

ifndef USE_SPECS
# our type specs rely on callback specs, which are available in R15B
# upwards.
USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,9]), halt().')
endif

ifndef USE_PROPER_QC
# PropEr needs to be installed for property checking
# http://proper.softlab.ntua.gr/
USE_PROPER_QC:=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
endif

#other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests
# boolean_macro (defined below) turns each "true" flag into a -D define.
ERLC_OPTS=-I $(INCLUDE_DIR) -o $(EBIN_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
+
# version.mk defines VERSION.
include version.mk

# Build plugins only when a plugins-src tree is present next to us.
PLUGINS_SRC_DIR?=$(shell [ -d "plugins-src" ] && echo "plugins-src" || echo )
PLUGINS_DIR=plugins
TARBALL_NAME=rabbitmq-server-$(VERSION)
TARGET_SRC_DIR=dist/$(TARBALL_NAME)

# Prefer a sibling rabbitmq-codegen checkout; fall back to the bundled copy.
SIBLING_CODEGEN_DIR=../rabbitmq-codegen/
AMQP_CODEGEN_DIR=$(shell [ -d $(SIBLING_CODEGEN_DIR) ] && echo $(SIBLING_CODEGEN_DIR) || echo codegen)
AMQP_SPEC_JSON_FILES_0_9_1=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.9.1.json $(AMQP_CODEGEN_DIR)/credit_extension.json
AMQP_SPEC_JSON_FILES_0_8=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.8.json

# erl_call evaluates Erlang expressions on the running node.
ERL_CALL=erl_call -sname $(RABBITMQ_NODENAME) -e

ERL_EBIN=erl -noinput -pa $(EBIN_DIR)
+
# Map a usage man-page XML path to its generated Erlang module, e.g.
# docs/rabbitmqctl.1.xml -> src/rabbit_ctl_usage.erl.
define usage_xml_to_erl
 $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, $(SOURCE_DIR)/rabbit_%_usage.erl, $(subst -,_,$(1))))
endef

# Declare that a generated usage module depends on its XML and the XSL.
define usage_dep
 $(call usage_xml_to_erl, $(1)): $(1) $(DOCS_DIR)/usage.xsl
endef

# Expand to -D$(2) when $(1) is the literal "true", to nothing otherwise.
define boolean_macro
$(if $(filter true,$(1)),-D$(2))
endef
+
# Relative path from SBIN_DIR to the installed sbin directory, used to
# create relocatable wrapper symlinks at install time.
ifneq "$(SBIN_DIR)" ""
ifneq "$(TARGET_DIR)" ""
SCRIPTS_REL_PATH=$(shell ./calculate-relative $(TARGET_DIR)/sbin $(SBIN_DIR))
endif
endif

# Versions prior to this are not supported
NEED_MAKE := 3.80
# $(sort) orders these dotted version strings lexically, which works here:
# if NEED_MAKE is not the smallest element, MAKE_VERSION is too old.
ifneq "$(NEED_MAKE)" "$(firstword $(sort $(NEED_MAKE) $(MAKE_VERSION)))"
$(error Versions of make prior to $(NEED_MAKE) are not supported)
endif

# .DEFAULT_GOAL introduced in 3.81
DEFAULT_GOAL_MAKE := 3.81
ifneq "$(DEFAULT_GOAL_MAKE)" "$(firstword $(sort $(DEFAULT_GOAL_MAKE) $(MAKE_VERSION)))"
.DEFAULT_GOAL=all
endif
+
all: $(TARGETS)

.PHONY: plugins check-xref
ifneq "$(PLUGINS_SRC_DIR)" ""
# Build the plugins from the plugins-src tree into $(PLUGINS_DIR). The
# symlink lets the plugins umbrella locate this server checkout; the
# rabbit_common archive is removed because the broker already ships
# those modules itself.
plugins:
	[ -d "$(PLUGINS_SRC_DIR)/rabbitmq-server" ] || ln -s "$(CURDIR)" "$(PLUGINS_SRC_DIR)/rabbitmq-server"
	mkdir -p $(PLUGINS_DIR)
	PLUGINS_SRC_DIR="" $(MAKE) -C "$(PLUGINS_SRC_DIR)" plugins-dist PLUGINS_DIST_DIR="$(CURDIR)/$(PLUGINS_DIR)" VERSION=$(VERSION)
	echo "Put your EZs here and use rabbitmq-plugins to enable them." > $(PLUGINS_DIR)/README
	rm -f $(PLUGINS_DIR)/rabbit_common*.ez

# add -q to remove printout of warnings....
check-xref: $(BEAM_TARGETS) $(PLUGINS_DIR)
	rm -rf lib
	./check_xref $(PLUGINS_DIR) -q

else
# No plugins source tree: nothing to build and nothing to xref-check.
plugins:
# Not building plugins

check-xref:
	$(info xref checks are disabled)

endif
+
# Regenerate the inter-module dependency file consumed by the include at
# the bottom of this Makefile.
$(DEPS_FILE): $(SOURCES) $(INCLUDES)
	rm -f $@
	echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR)

# rabbit.app is produced from rabbit_app.in with the module list filled in.
$(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app
	escript generate_app $< $@ $(SOURCE_DIR)

$(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE)
	erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $<

# Protocol framing code is generated from the AMQP JSON specs.
# --ignore-conflicts is needed for the combined header because the 0-9-1
# and 0-8 specs define overlapping names.
$(INCLUDE_DIR)/rabbit_framing.hrl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8)
	$(PYTHON) codegen.py --ignore-conflicts header $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@

$(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1)
	$(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_9_1) $@

$(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_8)
	$(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_8) $@

dialyze: $(BEAM_TARGETS) $(BASIC_PLT)
	dialyzer --plt $(BASIC_PLT) --no_native --fullpath \
	  $(BEAM_TARGETS)

# rabbit.plt is used by rabbitmq-erlang-client's dialyze make target
create-plt: $(RABBIT_PLT)

$(RABBIT_PLT): $(BEAM_TARGETS) $(BASIC_PLT)
	dialyzer --plt $(BASIC_PLT) --output_plt $@ --no_native \
	  --add_to_plt $(BEAM_TARGETS)

# Build the OTP-only PLT once; later runs just touch it so it stays newer
# than the beams and the expensive --build_plt step is skipped.
$(BASIC_PLT): $(BEAM_TARGETS)
	if [ -f $@ ]; then \
	  touch $@; \
	else \
	  dialyzer --output_plt $@ --build_plt \
	    --apps erts kernel stdlib compiler sasl os_mon mnesia tools \
	    public_key crypto ssl xmerl; \
	fi
+
# Remove all build products, including the generated framing/usage
# sources and the dependency file, so the next build regenerates
# everything from scratch.
clean:
	rm -f $(EBIN_DIR)/*.beam
	rm -f $(EBIN_DIR)/rabbit.app $(EBIN_DIR)/rabbit.boot $(EBIN_DIR)/rabbit.script $(EBIN_DIR)/rabbit.rel
	rm -f $(PLUGINS_DIR)/*.ez
# Use $(MAKE) for the recursive invocation so command-line overrides,
# the jobserver and the selected make binary propagate (bare "make" did not).
	[ -d "$(PLUGINS_SRC_DIR)" ] && PLUGINS_SRC_DIR="" PRESERVE_CLONE_DIR=1 $(MAKE) -C $(PLUGINS_SRC_DIR) clean || true
	rm -f $(INCLUDE_DIR)/rabbit_framing.hrl $(SOURCE_DIR)/rabbit_framing_amqp_*.erl codegen.pyc
	rm -f $(DOCS_DIR)/*.[0-9].gz $(DOCS_DIR)/*.man.xml $(DOCS_DIR)/*.erl $(USAGES_ERL)
	rm -f $(RABBIT_PLT)
	rm -f $(DEPS_FILE)

# Wipe the node's Mnesia database.
cleandb:
	rm -rf $(RABBITMQ_MNESIA_DIR)/*
+
############ various tasks to interact with RabbitMQ ###################

# Environment common to every target that launches the server scripts.
BASIC_SCRIPT_ENVIRONMENT_SETTINGS=\
	RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \
	RABBITMQ_NODE_PORT="$(RABBITMQ_NODE_PORT)" \
	RABBITMQ_LOG_BASE="$(RABBITMQ_LOG_BASE)" \
	RABBITMQ_MNESIA_DIR="$(RABBITMQ_MNESIA_DIR)" \
	RABBITMQ_PLUGINS_EXPAND_DIR="$(RABBITMQ_PLUGINS_EXPAND_DIR)"

# Start a broker in the foreground with an interactive Erlang shell.
run: all
	$(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
	  RABBITMQ_ALLOW_INPUT=true \
	  RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
	  ./scripts/rabbitmq-server

# Start a broker detached from the terminal.
run-background: all
	$(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
	  RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
	  ./scripts/rabbitmq-server -detached

# Start only the Erlang node (rabbit app not started), interactively.
run-node: all
	$(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
	  RABBITMQ_NODE_ONLY=true \
	  RABBITMQ_ALLOW_INPUT=true \
	  RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
	  ./scripts/rabbitmq-server

# Start only the Erlang node; start-background-node wraps this target in
# nohup, which is why there is no -detached here.
run-background-node: all
	$(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
	  RABBITMQ_NODE_ONLY=true \
	  RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
	  ./scripts/rabbitmq-server

# Run the broker test suite on a running node; succeeds only if the
# suite prints {ok, passed}.
run-tests: all
	OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \
	  echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null
+
# Run each QuickCheck/PropEr module against a running node.
run-qc: all
	$(foreach MOD,$(QC_MODULES),./quickcheck $(RABBITMQ_NODENAME) $(MOD) $(QC_TRIALS))

# Launch a detached node, then block (via the pid file) until its kernel
# application is up.
start-background-node: all
	-rm -f $(RABBITMQ_MNESIA_DIR).pid
	mkdir -p $(RABBITMQ_MNESIA_DIR)
	nohup sh -c "$(MAKE) run-background-node > $(RABBITMQ_MNESIA_DIR)/startup_log 2> $(RABBITMQ_MNESIA_DIR)/startup_err" > /dev/null &
	./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid kernel

# Start/stop the rabbit application on an already-running node.
start-rabbit-on-node: all
	echo "rabbit:start()." | $(ERL_CALL)
	./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid

stop-rabbit-on-node: all
	echo "rabbit:stop()." | $(ERL_CALL)

# Raise/clear a resource alarm on the node; pass SOURCE=<resource name>.
set-resource-alarm: all
	echo "rabbit_alarm:set_alarm({{resource_limit, $(SOURCE), node()}, []})." | \
	$(ERL_CALL)

clear-resource-alarm: all
	echo "rabbit_alarm:clear_alarm({resource_limit, $(SOURCE), node()})." | \
	$(ERL_CALL)

stop-node:
	-$(ERL_CALL) -q

# code coverage will be created for subdirectory "ebin" of COVER_DIR
COVER_DIR=.

start-cover: all
	echo "rabbit_misc:start_cover([\"rabbit\", \"hare\"])." | $(ERL_CALL)
	echo "rabbit_misc:enable_cover([\"$(COVER_DIR)\"])." | $(ERL_CALL)

stop-cover: all
	echo "rabbit_misc:report_cover(), cover:stop()." | $(ERL_CALL)
	cat cover/summary.txt
+
########################################################################

# Build the source tarball and zip under dist/, bundling codegen,
# scripts, docs and (when present) the plugins source tree with a merged
# LICENSE file.
srcdist: distclean
	mkdir -p $(TARGET_SRC_DIR)/codegen
	cp -r ebin src include LICENSE LICENSE-MPL-RabbitMQ INSTALL README $(TARGET_SRC_DIR)
	sed 's/%%VSN%%/$(VERSION)/' $(TARGET_SRC_DIR)/ebin/rabbit_app.in > $(TARGET_SRC_DIR)/ebin/rabbit_app.in.tmp && \
	mv $(TARGET_SRC_DIR)/ebin/rabbit_app.in.tmp $(TARGET_SRC_DIR)/ebin/rabbit_app.in

	cp -r $(AMQP_CODEGEN_DIR)/* $(TARGET_SRC_DIR)/codegen/
	cp codegen.py Makefile generate_app generate_deps calculate-relative $(TARGET_SRC_DIR)

# Freeze the version so the unpacked tarball builds without this repo's
# version.mk machinery.
	echo "VERSION?=${VERSION}" > $(TARGET_SRC_DIR)/version.mk

	cp -r scripts $(TARGET_SRC_DIR)
	cp -r $(DOCS_DIR) $(TARGET_SRC_DIR)
	chmod 0755 $(TARGET_SRC_DIR)/scripts/*

ifneq "$(PLUGINS_SRC_DIR)" ""
	cp -r $(PLUGINS_SRC_DIR) $(TARGET_SRC_DIR)/plugins-src
	rm $(TARGET_SRC_DIR)/LICENSE
	cat packaging/common/LICENSE.head >> $(TARGET_SRC_DIR)/LICENSE
	cat $(AMQP_CODEGEN_DIR)/license_info >> $(TARGET_SRC_DIR)/LICENSE
	find $(PLUGINS_SRC_DIR)/licensing -name "license_info_*" -exec cat '{}' >> $(TARGET_SRC_DIR)/LICENSE \;
	cat packaging/common/LICENSE.tail >> $(TARGET_SRC_DIR)/LICENSE
	find $(PLUGINS_SRC_DIR)/licensing -name "LICENSE-*" -exec cp '{}' $(TARGET_SRC_DIR) \;
	rm -rf $(TARGET_SRC_DIR)/licensing
else
	@echo No plugins source distribution found
endif

	(cd dist; tar -zchf $(TARBALL_NAME).tar.gz $(TARBALL_NAME))
	(cd dist; zip -q -r $(TARBALL_NAME).zip $(TARBALL_NAME))
	rm -rf $(TARGET_SRC_DIR)

# Also remove the dist directory, codegen products and editor droppings.
distclean: clean
	$(MAKE) -C $(AMQP_CODEGEN_DIR) distclean
	rm -rf dist
	find . -regex '.*\(~\|#\|\.swp\|\.dump\)' -exec rm {} \;
+
# xmlto can not read from standard input, so we mess with a tmp file.
# Old xmlto releases (0.0.x up to .18) lack man.indent.verbatims, so the
# stringparam is only passed to newer versions.
%.gz: %.xml $(DOCS_DIR)/examples-to-end.xsl
	xmlto --version | grep -E '^xmlto version 0\.0\.([0-9]|1[1-8])$$' >/dev/null || opt='--stringparam man.indent.verbatims=0' ; \
	  xsltproc --novalid $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \
	  xmlto -o $(DOCS_DIR) $$opt man $<.tmp && \
	  gzip -f $(DOCS_DIR)/`basename $< .xml`
	rm -f $<.tmp

# Use tmp files rather than a pipeline so that we get meaningful errors
# Do not fold the cp into previous line, it's there to stop the file being
# generated but empty if we fail
$(SOURCE_DIR)/%_usage.erl:
	xsltproc --novalid --stringparam modulename "`basename $@ .erl`" \
	  $(DOCS_DIR)/usage.xsl $< > $@.tmp
	sed -e 's/"/\\"/g' -e 's/%QUOTE%/"/g' $@.tmp > $@.tmp2
	fold -s $@.tmp2 > $@.tmp3
	mv $@.tmp3 $@
	rm $@.tmp $@.tmp2

# We rename the file before xmlto sees it since xmlto will use the name of
# the file to make internal links.
%.man.xml: %.xml $(DOCS_DIR)/html-to-website-xml.xsl
	cp $< `basename $< .xml`.xml && \
	  xmlto xhtml-nochunks `basename $< .xml`.xml ; rm `basename $< .xml`.xml
	cat `basename $< .xml`.html | \
	  xsltproc --novalid $(DOCS_DIR)/remove-namespaces.xsl - | \
	  xsltproc --novalid --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \
	  xmllint --format - > $@
	rm `basename $< .xml`.html
+
# Build all man pages plus the website-flavoured XML versions.
docs_all: $(MANPAGES) $(WEB_MANPAGES)

install: install_bin install_docs

# Copy beams, headers and scripts into TARGET_DIR and symlink the
# launcher scripts from SBIN_DIR (relative, via SCRIPTS_REL_PATH).
install_bin: all install_dirs
	cp -r ebin include LICENSE* INSTALL $(TARGET_DIR)

	chmod 0755 scripts/*
	for script in rabbitmq-env rabbitmq-server rabbitmqctl rabbitmq-plugins rabbitmq-defaults; do \
	  cp scripts/$$script $(TARGET_DIR)/sbin; \
	  [ -e $(SBIN_DIR)/$$script ] || ln -s $(SCRIPTS_REL_PATH)/$$script $(SBIN_DIR)/$$script; \
	done

	mkdir -p $(TARGET_DIR)/$(PLUGINS_DIR)
	[ -d "$(PLUGINS_DIR)" ] && cp $(PLUGINS_DIR)/*.ez $(PLUGINS_DIR)/README $(TARGET_DIR)/$(PLUGINS_DIR) || true

# Install the gzipped section 1 and 5 man pages plus the example config.
install_docs: docs_all install_dirs
	for section in 1 5; do \
	  mkdir -p $(MAN_DIR)/man$$section; \
	  for manpage in $(DOCS_DIR)/*.$$section.gz; do \
	    cp $$manpage $(MAN_DIR)/man$$section; \
	  done; \
	done
	cp $(DOCS_DIR)/rabbitmq.config.example $(DOC_INSTALL_DIR)/rabbitmq.config.example

# Check every install destination variable is set (reporting all missing
# ones at once), then create the directories.
install_dirs:
	@ OK=true && \
	  { [ -n "$(TARGET_DIR)" ] || { echo "Please set TARGET_DIR."; OK=false; }; } && \
	  { [ -n "$(SBIN_DIR)" ] || { echo "Please set SBIN_DIR."; OK=false; }; } && \
	  { [ -n "$(MAN_DIR)" ] || { echo "Please set MAN_DIR."; OK=false; }; } && \
	  { [ -n "$(DOC_INSTALL_DIR)" ] || { echo "Please set DOC_INSTALL_DIR."; OK=false; }; } && $$OK

	mkdir -p $(TARGET_DIR)/sbin
	mkdir -p $(SBIN_DIR)
	mkdir -p $(MAN_DIR)
	mkdir -p $(DOC_INSTALL_DIR)
+
# Instantiate the per-usage-module dependency rules declared via usage_dep.
$(foreach XML,$(USAGES_XML),$(eval $(call usage_dep, $(XML))))

# Note that all targets which depend on clean must have clean in their
# name. Also any target that doesn't depend on clean should not have
# clean in its name, unless you know that you don't need any of the
# automatic dependency generation for that target (e.g. cleandb).

# We want to load the dep file if *any* target *doesn't* contain
# "clean" - i.e. if removing all clean-like targets leaves something.

ifeq "$(MAKECMDGOALS)" ""
TESTABLEGOALS:=$(.DEFAULT_GOAL)
else
TESTABLEGOALS:=$(MAKECMDGOALS)
endif

ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" ""
include $(DEPS_FILE)
endif

.PHONY: run-qc
--- /dev/null
+Please see http://www.rabbitmq.com/build-server.html for build instructions.
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+#
+# relpath.py
+# R.Barran 30/08/2004
+# Retrieved from http://code.activestate.com/recipes/302594/
+
+import os
+import sys
+
def relpath(target, base=os.curdir):
    """
    Return a relative path to the target from either the current dir or an optional base dir.
    Base can be a directory specified either as absolute or relative to current dir.

    Raises OSError if target does not exist, if base is not an existing
    directory, or (on Windows-like platforms) if target and base are on
    different drives.
    """

    # Use raise OSError(...) and != rather than the Python-2-only
    # "raise OSError, msg" and "<>" forms, so the script runs unchanged
    # under both Python 2 and Python 3.
    if not os.path.exists(target):
        raise OSError('Target does not exist: ' + target)

    if not os.path.isdir(base):
        raise OSError('Base is not a directory or does not exist: ' + base)

    base_list = (os.path.abspath(base)).split(os.sep)
    target_list = (os.path.abspath(target)).split(os.sep)

    # On the windows platform the target may be on a completely different
    # drive from the base.
    if os.name in ['nt', 'dos', 'os2'] and base_list[0] != target_list[0]:
        raise OSError('Target is on a different drive to base. Target: '
                      + target_list[0].upper() + ', base: ' + base_list[0].upper())

    # Starting from the filepath root, work out how much of the filepath is
    # shared by base and target.
    for i in range(min(len(base_list), len(target_list))):
        if base_list[i] != target_list[i]:
            break
    else:
        # If we broke out of the loop, i is pointing to the first differing
        # path elements.  If we didn't break out of the loop, i is pointing
        # to identical path elements.  Increment i so that in all cases it
        # points to the first differing path elements.
        i += 1

    # Climb out of base with ".." components, then descend into target.
    rel_list = [os.pardir] * (len(base_list) - i) + target_list[i:]
    if len(rel_list) == 0:
        return "."
    return os.path.join(*rel_list)
+
if __name__ == "__main__":
    # Usage: calculate-relative TARGET [BASE]
    # BASE is optional (relpath defaults it to the current directory);
    # previously omitting it crashed with IndexError on sys.argv[2].
    if len(sys.argv) > 2:
        print(relpath(sys.argv[1], sys.argv[2]))
    else:
        print(relpath(sys.argv[1]))
--- /dev/null
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+from __future__ import nested_scopes
+
+import sys
+sys.path.append("../rabbitmq-codegen") # in case we're next to an experimental revision
+sys.path.append("codegen") # in case we're building from a distribution package
+
+from amqp_codegen import *
+import string
+import re
+
+# Coming up with a proper encoding of AMQP tables in JSON is too much
+# hassle at this stage. Given that the only default value we are
+# interested in is for the empty table, we only support that.
+def convertTable(d):
+ if len(d) == 0:
+ return "[]"
+ else:
+ raise Exception('Non-empty table defaults not supported ' + d)
+
+# Maps the Python type of a field's default value (as loaded from the
+# JSON spec) to a function rendering that value as Erlang source text.
+# NOTE: Python 2 module — 'unicode' is the Py2 builtin; strings are
+# rendered as Erlang binaries (<<"...">>).
+erlangDefaultValueTypeConvMap = {
+    bool : lambda x: str(x).lower(),
+    str : lambda x: "<<\"" + x + "\">>",
+    int : lambda x: str(x),
+    float : lambda x: str(x),
+    dict: convertTable,
+    unicode: lambda x: "<<\"" + x.encode("utf-8") + "\">>"
+}
+
+def erlangize(s):
+ s = s.replace('-', '_')
+ s = s.replace(' ', '_')
+ return s
+
+# Quoted Erlang atom for a method, e.g. 'basic.get_ok'.
+AmqpMethod.erlangName = lambda m: "'" + erlangize(m.klass.name) + '.' + erlangize(m.name) + "'"
+
+# Quoted Erlang atom for a class, e.g. 'basic'.
+AmqpClass.erlangName = lambda c: "'" + erlangize(c.name) + "'"
+
+def erlangConstantName(s):
+ return '_'.join(re.split('[- ]', s.upper()))
+
+class PackedMethodBitField:
+    # Accumulator for up to 8 consecutive AMQP 'bit' fields, which are
+    # packed into a single octet on the wire.  'index' is the field
+    # index of the first bit in the octet; 'domain' mimics a real field
+    # object so codegen dispatch on f.domain works uniformly.
+    def __init__(self, index):
+        self.index = index
+        self.domain = 'bit'
+        self.contents = []  # the field objects packed into this octet
+
+    def extend(self, f):
+        # Add one more bit field to this octet.
+        self.contents.append(f)
+
+    def count(self):
+        # Number of bits packed so far.
+        return len(self.contents)
+
+    def full(self):
+        # True when the octet can take no more bits.
+        return self.count() == 8
+
+def multiLineFormat(things, prologue, separator, lineSeparator, epilogue, thingsPerLine = 4):
+ r = [prologue]
+ i = 0
+ for t in things:
+ if i != 0:
+ if i % thingsPerLine == 0:
+ r += [lineSeparator]
+ else:
+ r += [separator]
+ r += [t]
+ i += 1
+ r += [epilogue]
+ return "".join(r)
+
+def prettyType(typeName, subTypes, typesPerLine = 4):
+    """Pretty print a type signature made up of many alternative subtypes"""
+    # Renders an Erlang -type() attribute whose union members are laid
+    # out 'typesPerLine' per line via multiLineFormat().
+    sTs = multiLineFormat(subTypes,
+                          "( ", " | ", "\n | ", " )",
+                          thingsPerLine = typesPerLine)
+    return "-type(%s ::\n %s)." % (typeName, sTs)
+
+def printFileHeader():
+    # Emit the standard "autogenerated, do not edit" banner plus the
+    # MPL 1.1 license block as Erlang '%%' comments, to stdout.
+    print """%% Autogenerated code. Do not edit.
+%%
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%"""
+
+def genErl(spec):
+    # Print (to stdout) the complete body of the generated
+    # rabbit_framing_amqp_* Erlang module for the given AMQP 'spec':
+    # method name/id lookups, method-field and property codecs, type
+    # specs, and the AMQP exception tables.  Output order matters —
+    # each generator emits Erlang clauses ended by ';', and the
+    # trailing catch-all clauses below close each function with '.'.
+    def erlType(domain):
+        # Erlang-ized name of the primitive type behind 'domain'.
+        return erlangize(spec.resolveDomain(domain))
+
+    def fieldTypeList(fields):
+        return '[' + ', '.join([erlType(f.domain) for f in fields]) + ']'
+
+    def fieldNameList(fields):
+        return '[' + ', '.join([erlangize(f.name) for f in fields]) + ']'
+
+    def fieldTempList(fields):
+        # Temporary Erlang variable names F0, F1, ... by field index.
+        return '[' + ', '.join(['F' + str(f.index) for f in fields]) + ']'
+
+    def fieldMapList(fields):
+        # Record-field bindings "name = F<i>" for a record expression.
+        return ', '.join([erlangize(f.name) + " = F" + str(f.index) for f in fields])
+
+    def genLookupMethodName(m):
+        print "lookup_method_name({%d, %d}) -> %s;" % (m.klass.index, m.index, m.erlangName())
+
+    def genLookupClassName(c):
+        print "lookup_class_name(%d) -> %s;" % (c.index, c.erlangName())
+
+    def genMethodId(m):
+        print "method_id(%s) -> {%d, %d};" % (m.erlangName(), m.klass.index, m.index)
+
+    def genMethodHasContent(m):
+        print "method_has_content(%s) -> %s;" % (m.erlangName(), str(m.hasContent).lower())
+
+    def genMethodIsSynchronous(m):
+        # A synchronous method with a 'nowait' flag is only actually
+        # synchronous when the flag is unset, so emit a clause that
+        # inspects the record field at runtime.
+        hasNoWait = "nowait" in fieldNameList(m.arguments)
+        if m.isSynchronous and hasNoWait:
+            print "is_method_synchronous(#%s{nowait = NoWait}) -> not(NoWait);" % (m.erlangName())
+        else:
+            print "is_method_synchronous(#%s{}) -> %s;" % (m.erlangName(), str(m.isSynchronous).lower())
+
+    def genMethodFieldTypes(m):
+        """Not currently used - may be useful in future?"""
+        print "method_fieldtypes(%s) -> %s;" % (m.erlangName(), fieldTypeList(m.arguments))
+
+    def genMethodFieldNames(m):
+        print "method_fieldnames(%s) -> %s;" % (m.erlangName(), fieldNameList(m.arguments))
+
+    def packMethodFields(fields):
+        # Group consecutive 'bit' fields into PackedMethodBitField
+        # chunks of at most 8, mirroring AMQP's on-the-wire packing of
+        # bits into octets; non-bit fields pass through unchanged and
+        # terminate the current bit group.
+        packed = []
+        bitfield = None
+        for f in fields:
+            if erlType(f.domain) == 'bit':
+                if not(bitfield) or bitfield.full():
+                    bitfield = PackedMethodBitField(f.index)
+                    packed.append(bitfield)
+                bitfield.extend(f)
+            else:
+                bitfield = None
+                packed.append(f)
+        return packed
+
+    def methodFieldFragment(f):
+        # Erlang binary-syntax fragment matching/constructing one
+        # (possibly packed) field; returns None for unknown types.
+        type = erlType(f.domain)
+        p = 'F' + str(f.index)
+        if type == 'shortstr':
+            return p+'Len:8/unsigned, '+p+':'+p+'Len/binary'
+        elif type == 'longstr':
+            return p+'Len:32/unsigned, '+p+':'+p+'Len/binary'
+        elif type == 'octet':
+            return p+':8/unsigned'
+        elif type == 'short':
+            return p+':16/unsigned'
+        elif type == 'long':
+            return p+':32/unsigned'
+        elif type == 'longlong':
+            return p+':64/unsigned'
+        elif type == 'timestamp':
+            return p+':64/unsigned'
+        elif type == 'bit':
+            return p+'Bits:8'
+        elif type == 'table':
+            return p+'Len:32/unsigned, '+p+'Tab:'+p+'Len/binary'
+
+    def genFieldPostprocessing(packed, hasContent):
+        # After the binary match: unpack bit fields from their octet,
+        # parse tables, and (for non-content methods only) validate
+        # shortstr fields as UTF-8.
+        for f in packed:
+            type = erlType(f.domain)
+            if type == 'bit':
+                for index in range(f.count()):
+                    print " F%d = ((F%dBits band %d) /= 0)," % \
+                        (f.index + index,
+                         f.index,
+                         1 << index)
+            elif type == 'table':
+                print " F%d = rabbit_binary_parser:parse_table(F%dTab)," % \
+                    (f.index, f.index)
+            # We skip the check on content-bearing methods for
+            # speed. This is a sanity check, not a security thing.
+            elif type == 'shortstr' and not hasContent:
+                print " rabbit_binary_parser:assert_utf8(F%d)," % (f.index)
+            else:
+                pass
+
+    def genMethodRecord(m):
+        print "method_record(%s) -> #%s{};" % (m.erlangName(), m.erlangName())
+
+    def genDecodeMethodFields(m):
+        packedFields = packMethodFields(m.arguments)
+        binaryPattern = ', '.join([methodFieldFragment(f) for f in packedFields])
+        # NOTE(review): restSeparator is computed but never used —
+        # candidate for removal.
+        if binaryPattern:
+            restSeparator = ', '
+        else:
+            restSeparator = ''
+        recordConstructorExpr = '#%s{%s}' % (m.erlangName(), fieldMapList(m.arguments))
+        print "decode_method_fields(%s, <<%s>>) ->" % (m.erlangName(), binaryPattern)
+        genFieldPostprocessing(packedFields, m.hasContent)
+        print " %s;" % (recordConstructorExpr,)
+
+    def genDecodeProperties(c):
+        def presentBin(fields):
+            # Presence bitmap (one bit per property) heading the
+            # property block, padded to 16 bits.
+            ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
+            return '<<' + ps + ', _:%d, R0/binary>>' % (16 - len(fields),)
+        def writePropFieldLine(field):
+            i = str(field.index)
+            if field.domain == 'bit':
+                print " {F%s, R%s} = {P%s =/= 0, R%s}," % \
+                    (i, str(field.index + 1), i, i)
+            else:
+                print " {F%s, R%s} = if P%s =:= 0 -> {undefined, R%s}; true -> ?%s_VAL(R%s, L%s, V%s, X%s) end," % \
+                    (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i, i)
+
+        if len(c.fields) == 0:
+            print "decode_properties(%d, <<>>) ->" % (c.index,)
+        else:
+            print ("decode_properties(%d, %s) ->" %
+                   (c.index, presentBin(c.fields)))
+            for field in c.fields:
+                writePropFieldLine(field)
+            print " <<>> = %s," % ('R' + str(len(c.fields)))
+        print " #'P_%s'{%s};" % (erlangize(c.name), fieldMapList(c.fields))
+
+    def genFieldPreprocessing(packed):
+        # Before the binary construction: pack bit fields into an
+        # octet value and precompute lengths for tables and strings.
+        for f in packed:
+            type = erlType(f.domain)
+            if type == 'bit':
+                print " F%dBits = (%s)," % \
+                    (f.index,
+                     ' bor '.join(['(bitvalue(F%d) bsl %d)' % (x.index, x.index - f.index)
+                                   for x in f.contents]))
+            elif type == 'table':
+                print " F%dTab = rabbit_binary_generator:generate_table(F%d)," % (f.index, f.index)
+                print " F%dLen = size(F%dTab)," % (f.index, f.index)
+            elif type == 'shortstr':
+                print " F%dLen = shortstr_size(F%d)," % (f.index, f.index)
+            elif type == 'longstr':
+                print " F%dLen = size(F%d)," % (f.index, f.index)
+            else:
+                pass
+
+    def genEncodeMethodFields(m):
+        packedFields = packMethodFields(m.arguments)
+        print "encode_method_fields(#%s{%s}) ->" % (m.erlangName(), fieldMapList(m.arguments))
+        genFieldPreprocessing(packedFields)
+        print " <<%s>>;" % (', '.join([methodFieldFragment(f) for f in packedFields]))
+
+    def genEncodeProperties(c):
+        def presentBin(fields):
+            # Presence bitmap for encoding, zero-padded to 16 bits.
+            ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
+            return '<<' + ps + ', 0:%d>>' % (16 - len(fields),)
+        def writePropFieldLine(field):
+            i = str(field.index)
+            if field.domain == 'bit':
+                print " {P%s, R%s} = {F%s =:= 1, R%s}," % \
+                    (i, str(field.index + 1), i, i)
+            else:
+                print " {P%s, R%s} = if F%s =:= undefined -> {0, R%s}; true -> {1, [?%s_PROP(F%s, L%s) | R%s]} end," % \
+                    (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i)
+
+        print "encode_properties(#'P_%s'{%s}) ->" % (erlangize(c.name), fieldMapList(c.fields))
+        if len(c.fields) == 0:
+            print " <<>>;"
+        else:
+            print " R0 = [<<>>],"
+            for field in c.fields:
+                writePropFieldLine(field)
+            print " list_to_binary([%s | lists:reverse(R%s)]);" % \
+                (presentBin(c.fields), str(len(c.fields)))
+
+    def messageConstantClass(cls):
+        # We do this because 0.8 uses "soft error" and 8.1 uses "soft-error".
+        return erlangConstantName(cls)
+
+    def genLookupException(c,v,cls):
+        # Only constants classed as soft/hard errors become
+        # lookup_amqp_exception/1 clauses; unclassed constants are skipped.
+        mCls = messageConstantClass(cls)
+        if mCls == 'SOFT_ERROR': genLookupException1(c,'false')
+        elif mCls == 'HARD_ERROR': genLookupException1(c, 'true')
+        elif mCls == '': pass
+        else: raise Exception('Unknown constant class' + cls)
+
+    def genLookupException1(c,hardErrorBoolStr):
+        n = erlangConstantName(c)
+        print 'lookup_amqp_exception(%s) -> {%s, ?%s, <<"%s">>};' % \
+            (n.lower(), hardErrorBoolStr, n, n)
+
+    def genAmqpException(c,v,cls):
+        n = erlangConstantName(c)
+        print 'amqp_exception(?%s) -> %s;' % \
+            (n, n.lower())
+
+    methods = spec.allMethods()
+
+    printFileHeader()
+    # Module name embeds the protocol version; 8-0 is renamed 0-8 for
+    # historical consistency.
+    module = "rabbit_framing_amqp_%d_%d" % (spec.major, spec.minor)
+    if spec.revision != 0:
+        module = "%s_%d" % (module, spec.revision)
+    if module == "rabbit_framing_amqp_8_0":
+        module = "rabbit_framing_amqp_0_8"
+    print "-module(%s)." % module
+    print """-include("rabbit_framing.hrl").
+
+-export([version/0]).
+-export([lookup_method_name/1]).
+-export([lookup_class_name/1]).
+
+-export([method_id/1]).
+-export([method_has_content/1]).
+-export([is_method_synchronous/1]).
+-export([method_record/1]).
+-export([method_fieldnames/1]).
+-export([decode_method_fields/2]).
+-export([decode_properties/2]).
+-export([encode_method_fields/1]).
+-export([encode_properties/1]).
+-export([lookup_amqp_exception/1]).
+-export([amqp_exception/1]).
+
+"""
+    print "%% Various types"
+    print "-ifdef(use_specs)."
+
+    print """-export_type([amqp_field_type/0, amqp_property_type/0,
+ amqp_table/0, amqp_array/0, amqp_value/0,
+ amqp_method_name/0, amqp_method/0, amqp_method_record/0,
+ amqp_method_field_name/0, amqp_property_record/0,
+ amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
+
+-type(amqp_field_type() ::
+ 'longstr' | 'signedint' | 'decimal' | 'timestamp' |
+ 'table' | 'byte' | 'double' | 'float' | 'long' |
+ 'short' | 'bool' | 'binary' | 'void' | 'array').
+-type(amqp_property_type() ::
+ 'shortstr' | 'longstr' | 'octet' | 'short' | 'long' |
+ 'longlong' | 'timestamp' | 'bit' | 'table').
+
+-type(amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]).
+-type(amqp_array() :: [{amqp_field_type(), amqp_value()}]).
+-type(amqp_value() :: binary() | % longstr
+ integer() | % signedint
+ {non_neg_integer(), non_neg_integer()} | % decimal
+ amqp_table() |
+ amqp_array() |
+ byte() | % byte
+ float() | % double
+ integer() | % long
+ integer() | % short
+ boolean() | % bool
+ binary() | % binary
+ 'undefined' | % void
+ non_neg_integer() % timestamp
+ ).
+"""
+
+    # Union type specs enumerating every method/class in the spec.
+    print prettyType("amqp_method_name()",
+                     [m.erlangName() for m in methods])
+    print prettyType("amqp_method()",
+                     ["{%s, %s}" % (m.klass.index, m.index) for m in methods],
+                     6)
+    print prettyType("amqp_method_record()",
+                     ["#%s{}" % (m.erlangName()) for m in methods])
+    fieldNames = set()
+    for m in methods:
+        fieldNames.update(m.arguments)
+    fieldNames = [erlangize(f.name) for f in fieldNames]
+    print prettyType("amqp_method_field_name()",
+                     fieldNames)
+    print prettyType("amqp_property_record()",
+                     ["#'P_%s'{}" % erlangize(c.name) for c in spec.allClasses()])
+    print prettyType("amqp_exception()",
+                     ["'%s'" % erlangConstantName(c).lower() for (c, v, cls) in spec.constants])
+    print prettyType("amqp_exception_code()",
+                     ["%i" % v for (c, v, cls) in spec.constants])
+    classIds = set()
+    for m in spec.allMethods():
+        classIds.add(m.klass.index)
+    print prettyType("amqp_class_id()",
+                     ["%i" % ci for ci in classIds])
+    print prettyType("amqp_class_name()",
+                     ["%s" % c.erlangName() for c in spec.allClasses()])
+    print "-endif. % use_specs"
+
+    # Static helper functions and the ?*_VAL / ?*_PROP codec macros
+    # used by the generated decode/encode clauses below.
+    print """
+%% Method signatures
+-ifdef(use_specs).
+-spec(version/0 :: () -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
+-spec(lookup_method_name/1 :: (amqp_method()) -> amqp_method_name()).
+-spec(lookup_class_name/1 :: (amqp_class_id()) -> amqp_class_name()).
+-spec(method_id/1 :: (amqp_method_name()) -> amqp_method()).
+-spec(method_has_content/1 :: (amqp_method_name()) -> boolean()).
+-spec(is_method_synchronous/1 :: (amqp_method_record()) -> boolean()).
+-spec(method_record/1 :: (amqp_method_name()) -> amqp_method_record()).
+-spec(method_fieldnames/1 :: (amqp_method_name()) -> [amqp_method_field_name()]).
+-spec(decode_method_fields/2 ::
+ (amqp_method_name(), binary()) -> amqp_method_record() | rabbit_types:connection_exit()).
+-spec(decode_properties/2 :: (non_neg_integer(), binary()) -> amqp_property_record()).
+-spec(encode_method_fields/1 :: (amqp_method_record()) -> binary()).
+-spec(encode_properties/1 :: (amqp_property_record()) -> binary()).
+-spec(lookup_amqp_exception/1 :: (amqp_exception()) -> {boolean(), amqp_exception_code(), binary()}).
+-spec(amqp_exception/1 :: (amqp_exception_code()) -> amqp_exception()).
+-endif. % use_specs
+
+bitvalue(true) -> 1;
+bitvalue(false) -> 0;
+bitvalue(undefined) -> 0.
+
+shortstr_size(S) ->
+ case size(S) of
+ Len when Len =< 255 -> Len;
+ _ -> exit(method_field_shortstr_overflow)
+ end.
+
+-define(SHORTSTR_VAL(R, L, V, X),
+ begin
+ <<L:8/unsigned, V:L/binary, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(LONGSTR_VAL(R, L, V, X),
+ begin
+ <<L:32/unsigned, V:L/binary, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(SHORT_VAL(R, L, V, X),
+ begin
+ <<V:8/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(LONG_VAL(R, L, V, X),
+ begin
+ <<V:32/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(LONGLONG_VAL(R, L, V, X),
+ begin
+ <<V:64/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(OCTET_VAL(R, L, V, X),
+ begin
+ <<V:8/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(TABLE_VAL(R, L, V, X),
+ begin
+ <<L:32/unsigned, V:L/binary, X/binary>> = R,
+ {rabbit_binary_parser:parse_table(V), X}
+ end).
+
+-define(TIMESTAMP_VAL(R, L, V, X),
+ begin
+ <<V:64/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(SHORTSTR_PROP(X, L),
+ begin
+ L = size(X),
+ if L < 256 -> <<L:8, X:L/binary>>;
+ true -> exit(content_properties_shortstr_overflow)
+ end
+ end).
+
+-define(LONGSTR_PROP(X, L),
+ begin
+ L = size(X),
+ <<L:32, X:L/binary>>
+ end).
+
+-define(OCTET_PROP(X, L), <<X:8/unsigned>>).
+-define(SHORT_PROP(X, L), <<X:16/unsigned>>).
+-define(LONG_PROP(X, L), <<X:32/unsigned>>).
+-define(LONGLONG_PROP(X, L), <<X:64/unsigned>>).
+-define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
+
+-define(TABLE_PROP(X, T),
+ begin
+ T = rabbit_binary_generator:generate_table(X),
+ <<(size(T)):32, T/binary>>
+ end).
+"""
+    # version() — 8.0.0 is reported as 0.8.0, matching the module rename.
+    version = "{%d, %d, %d}" % (spec.major, spec.minor, spec.revision)
+    if version == '{8, 0, 0}': version = '{0, 8, 0}'
+    print "version() -> %s." % (version)
+
+    # Emit each generated function's clauses, then a catch-all clause
+    # that terminates the function with '.'.
+    for m in methods: genLookupMethodName(m)
+    print "lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id})."
+
+    for c in spec.allClasses(): genLookupClassName(c)
+    print "lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId})."
+
+    for m in methods: genMethodId(m)
+    print "method_id(Name) -> exit({unknown_method_name, Name})."
+
+    for m in methods: genMethodHasContent(m)
+    print "method_has_content(Name) -> exit({unknown_method_name, Name})."
+
+    for m in methods: genMethodIsSynchronous(m)
+    print "is_method_synchronous(Name) -> exit({unknown_method_name, Name})."
+
+    for m in methods: genMethodRecord(m)
+    print "method_record(Name) -> exit({unknown_method_name, Name})."
+
+    for m in methods: genMethodFieldNames(m)
+    print "method_fieldnames(Name) -> exit({unknown_method_name, Name})."
+
+    for m in methods: genDecodeMethodFields(m)
+    print "decode_method_fields(Name, BinaryFields) ->"
+    print " rabbit_misc:frame_error(Name, BinaryFields)."
+
+    for c in spec.allClasses(): genDecodeProperties(c)
+    print "decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId})."
+
+    for m in methods: genEncodeMethodFields(m)
+    print "encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)})."
+
+    for c in spec.allClasses(): genEncodeProperties(c)
+    print "encode_properties(Record) -> exit({unknown_properties_record, Record})."
+
+    for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
+    print "lookup_amqp_exception(Code) ->"
+    print " rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),"
+    print " {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}."
+
+    for(c,v,cls) in spec.constants: genAmqpException(c,v,cls)
+    print "amqp_exception(_Code) -> undefined."
+
+def genHrl(spec):
+ def fieldNameList(fields):
+ return ', '.join([erlangize(f.name) for f in fields])
+
+ def fieldNameListDefaults(fields):
+ def fillField(field):
+ result = erlangize(f.name)
+ if field.defaultvalue != None:
+ conv_fn = erlangDefaultValueTypeConvMap[type(field.defaultvalue)]
+ result += ' = ' + conv_fn(field.defaultvalue)
+ return result
+ return ', '.join([fillField(f) for f in fields])
+
+ methods = spec.allMethods()
+
+ printFileHeader()
+ print "-define(PROTOCOL_PORT, %d)." % (spec.port)
+
+ for (c,v,cls) in spec.constants:
+ print "-define(%s, %s)." % (erlangConstantName(c), v)
+
+ print "%% Method field records."
+ for m in methods:
+ print "-record(%s, {%s})." % (m.erlangName(), fieldNameListDefaults(m.arguments))
+
+ print "%% Class property records."
+ for c in spec.allClasses():
+ print "-record('P_%s', {%s})." % (erlangize(c.name), fieldNameList(c.fields))
+
+
+def generateErl(specPath):
+    # Load the AMQP spec JSON at 'specPath' and print the framing
+    # module (.erl body) to stdout.
+    genErl(AmqpSpec(specPath))
+
+def generateHrl(specPath):
+    # Load the AMQP spec JSON at 'specPath' and print the framing
+    # header (.hrl body) to stdout.
+    genHrl(AmqpSpec(specPath))
+
+if __name__ == "__main__":
+    # do_main_dict (from amqp_codegen) dispatches on the command-line
+    # mode: "header" generates the .hrl, "body" the .erl.
+    do_main_dict({"header": generateHrl,
+                  "body": generateErl})
+
--- /dev/null
+This package, the RabbitMQ code generation library and associated
+files, is licensed under the MPL. For the MPL, please see
+LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+# Convenience targets for this codegen directory.
+# Declared .PHONY so stray files named "all"/"clean"/"distclean"
+# can never shadow the targets and make them appear up to date.
+.PHONY: all clean distclean
+
+# Default target: this Makefile is menu-driven, so just tell the user
+# to pick an explicit target.
+all:
+	echo "Please select a target from the Makefile."
+
+# Remove compiled Python bytecode.
+clean:
+	rm -f *.pyc
+
+# clean, plus editor backup/autosave droppings (~, #, .swp).
+distclean: clean
+	find . -regex '.*\(~\|#\|\.swp\)' -exec rm {} \;
--- /dev/null
+# Protocol extensions
+
+The `amqp_codegen.py` AMQP specification compiler has recently been
+enhanced to take more than a single specification file, which allows
+AMQP library authors to include extensions to the core protocol
+without needing to modify the core AMQP specification file as
+distributed.
+
+The compiler is invoked with the path to a single "main" specification
+document and zero or more paths to "extension" documents.
+
+The order of the extensions matters: any later class property
+definitions, for instance, are added to the list of definitions in
+order of appearance. In general, composition of extensions with a core
+specification document is therefore non-commutative.
+
+## The main document
+
+Written in the style of a
+[json-shapes](http://github.com/tonyg/json-shapes) schema:
+
+ DomainDefinition = _and(array_of(string()), array_length_equals(2));
+
+ ConstantDefinition = {
+ "name": string(),
+ "value": number(),
+ "class": optional(_or("soft-error", "hard-error"))
+ };
+
+ FieldDefinition = {
+ "name": string(),
+ "type": string(),
+ "default-value": optional(anything())
+ };
+
+ MethodDefinition = {
+ "name": string(),
+ "id": number(),
+ "arguments": array_of(FieldDefinition),
+ "synchronous": optional(boolean()),
+ "content": optional(boolean())
+ };
+
+ ClassDefinition = {
+ "name": string(),
+ "id": number(),
+ "methods": array_of(MethodDefinition),
+ "properties": optional(array_of(FieldDefinition))
+ };
+
+ MainDocument = {
+ "major-version": number(),
+ "minor-version": number(),
+ "revision": optional(number()),
+ "port": number(),
+ "domains": array_of(DomainDefinition),
+ "constants": array_of(ConstantDefinition),
+        "classes": array_of(ClassDefinition)
+    };
+
+Within a `FieldDefinition`, the keyword `domain` can be used instead
+of `type`, but `type` is preferred and `domain` is deprecated.
+
+Type names can either be a defined `domain` name or a built-in name
+from the following list:
+
+ - octet
+ - shortstr
+ - longstr
+ - short
+ - long
+ - longlong
+ - bit
+ - table
+ - timestamp
+
+Method and class IDs must be integers between 0 and 65535,
+inclusive. Note that there is no specific subset of the space reserved
+for experimental or site-local extensions, so be careful not to
+conflict with IDs used by the AMQP core specification.
+
+If the `synchronous` field of a `MethodDefinition` is missing, it is
+assumed to be `false`; the same applies to the `content` field.
+
+A `ConstantDefinition` with a `class` attribute is considered to be an
+error-code definition; otherwise, it is considered to be a
+straightforward numeric constant.
+
+## Extensions
+
+Written in the style of a
+[json-shapes](http://github.com/tonyg/json-shapes) schema, and
+referencing some of the type definitions given above:
+
+ ExtensionDocument = {
+ "extension": anything(),
+ "domains": array_of(DomainDefinition),
+ "constants": array_of(ConstantDefinition),
+ "classes": array_of(ClassDefinition)
+ };
+
+The `extension` keyword is used to describe the extension informally
+for human readers. Typically it will be a dictionary, with members
+such as:
+
+ {
+ "name": "The name of the extension",
+ "version": "1.0",
+ "copyright": "Copyright (C) 1234 Yoyodyne, Inc."
+ }
+
+## Merge behaviour
+
+In the case of conflicts between values specified in the main document
+and in any extension documents, type-specific merge operators are
+invoked.
+
+ - Any doubly-defined domain names are regarded as true
+ conflicts. Otherwise, all the domain definitions from all the main
+ and extension documents supplied to the compiler are merged into a
+ single dictionary.
+
+ - Constant definitions are treated as per domain names above,
+ *mutatis mutandis*.
+
+ - Classes and their methods are a little trickier: if an extension
+ defines a class with the same name as one previously defined, then
+ only the `methods` and `properties` fields of the extension's class
+ definition are attended to.
+
+ - Any doubly-defined method names or property names within a class
+ are treated as true conflicts.
+
+ - Properties defined in an extension are added to the end of the
+ extant property list for the class.
+
+ (Extensions are of course permitted to define brand new classes as
+ well as to extend existing ones.)
+
+ - Any other kind of conflict leads to a raised
+ `AmqpSpecFileMergeConflict` exception.
+
+## Invoking the spec compiler
+
+Your code generation code should invoke `amqp_codegen.do_main_dict`
+with a dictionary of functions as the sole argument. Each will be
+used for generating a separate file. The `do_main_dict` function
+will parse the command-line arguments supplied when python was
+invoked.
+
+The command-line will be parsed as:
+
+ python your_codegen.py <action> <mainspec> [<extspec> ...] <outfile>
+
+where `<action>` is a key into the dictionary supplied to
+`do_main_dict` and is used to select which generation function is
+called. The `<mainspec>` and `<extspec>` arguments are file names of
+specification documents containing expressions in the syntax given
+above. The *final* argument on the command line, `<outfile>`, is the
+name of the source-code file to generate.
+
+Here's a tiny example of the layout of a code generation module that
+uses `amqp_codegen`:
+
+ import amqp_codegen
+
+ def generateHeader(specPath):
+ spec = amqp_codegen.AmqpSpec(specPath)
+ ...
+
+ def generateImpl(specPath):
+ spec = amqp_codegen.AmqpSpec(specPath)
+ ...
+
+ if __name__ == "__main__":
+ amqp_codegen.do_main_dict({"header": generateHeader,
+ "body": generateImpl})
+
+The reasons for allowing more than one action are that
+
+ - many languages have separate "header"-type files (C and Erlang, to
+ name two)
+ - `Makefile`s often require separate rules for generating the two
+ kinds of file, but it's convenient to keep the generation code
+ together in a single python module
+
+The main reason things are laid out this way, however, is simply that
+it's an accident of the history of the code. We may change the API to
+`amqp_codegen` in future to clean things up a little.
--- /dev/null
+{
+ "name": "AMQP",
+ "major-version": 8,
+ "minor-version": 0,
+ "port": 5672,
+ "copyright": [
+ "Copyright (C) 2008-2013 GoPivotal, Inc.\n",
+ "\n",
+ "Permission is hereby granted, free of charge, to any person\n",
+ "obtaining a copy of this file (the \"Software\"), to deal in the\n",
+ "Software without restriction, including without limitation the \n",
+ "rights to use, copy, modify, merge, publish, distribute, \n",
+ "sublicense, and/or sell copies of the Software, and to permit \n",
+ "persons to whom the Software is furnished to do so, subject to \n",
+ "the following conditions:\n",
+ "\n",
+ "The above copyright notice and this permission notice shall be\n",
+ "included in all copies or substantial portions of the Software.\n",
+ "\n",
+ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
+ "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n",
+ "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
+ "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n",
+ "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n",
+ "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
+ "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n",
+ "OTHER DEALINGS IN THE SOFTWARE.\n",
+ "\n",
+ "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n",
+ "\n",
+ "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n",
+ "http://www.twiststandards.org/index.php?option=com_docman&task=cat_view&gid=28&&Itemid=90\n",
+ "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n",
+ "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"],
+
+ "domains": [
+ ["access-ticket", "short"],
+ ["bit", "bit"],
+ ["channel-id", "longstr"],
+ ["class-id", "short"],
+ ["consumer-tag", "shortstr"],
+ ["delivery-tag", "longlong"],
+ ["destination", "shortstr"],
+ ["duration", "longlong"],
+ ["exchange-name", "shortstr"],
+ ["known-hosts", "shortstr"],
+ ["long", "long"],
+ ["longlong", "longlong"],
+ ["longstr", "longstr"],
+ ["method-id", "short"],
+ ["no-ack", "bit"],
+ ["no-local", "bit"],
+ ["octet", "octet"],
+ ["offset", "longlong"],
+ ["path", "shortstr"],
+ ["peer-properties", "table"],
+ ["queue-name", "shortstr"],
+ ["redelivered", "bit"],
+ ["reference", "longstr"],
+ ["reject-code", "short"],
+ ["reject-text", "shortstr"],
+ ["reply-code", "short"],
+ ["reply-text", "shortstr"],
+ ["security-token", "longstr"],
+ ["short", "short"],
+ ["shortstr", "shortstr"],
+ ["table", "table"],
+ ["timestamp", "timestamp"]
+ ],
+
+ "constants": [
+ {"name": "FRAME-METHOD", "value": 1},
+ {"name": "FRAME-HEADER", "value": 2},
+ {"name": "FRAME-BODY", "value": 3},
+ {"name": "FRAME-OOB-METHOD", "value": 4},
+ {"name": "FRAME-OOB-HEADER", "value": 5},
+ {"name": "FRAME-OOB-BODY", "value": 6},
+ {"name": "FRAME-TRACE", "value": 7},
+ {"name": "FRAME-HEARTBEAT", "value": 8},
+ {"name": "FRAME-MIN-SIZE", "value": 4096},
+ {"name": "FRAME-END", "value": 206},
+ {"name": "REPLY-SUCCESS", "value": 200},
+ {"name": "NOT-DELIVERED", "value": 310, "class": "soft-error"},
+ {"name": "CONTENT-TOO-LARGE", "value": 311, "class": "soft-error"},
+ {"name": "NO-ROUTE", "value": 312, "class": "soft-error"},
+ {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"},
+ {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"},
+ {"name": "NOT-FOUND", "value": 404, "class": "soft-error"},
+ {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"},
+ {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"},
+ {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"},
+ {"name": "INVALID-PATH", "value": 402, "class": "hard-error"},
+ {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"},
+ {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"},
+ {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"},
+ {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"},
+ {"name": "UNEXPECTED-FRAME", "value": 505, "class": "hard-error"},
+ {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"},
+ {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"},
+ {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"},
+ {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"}
+ ],
+
+ "classes": [
+ {
+ "id": 10,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "octet", "name": "version-major", "default-value": 0},
+ {"type": "octet", "name": "version-minor", "default-value": 8},
+ {"domain": "peer-properties", "name": "server-properties"},
+ {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "locales", "default-value": "en_US"}],
+ "name": "start",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"domain": "peer-properties", "name": "client-properties"},
+ {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "response"},
+ {"type": "shortstr", "name": "locale", "default-value": "en_US"}],
+ "name": "start-ok"},
+ {"id": 20,
+ "arguments": [{"type": "longstr", "name": "challenge"}],
+ "name": "secure",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "longstr", "name": "response"}],
+ "name": "secure-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune-ok"},
+ {"id": 40,
+ "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"},
+ {"type": "shortstr", "name": "capabilities", "default-value": ""},
+ {"type": "bit", "name": "insist", "default-value": false}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}],
+ "name": "open-ok"},
+ {"id": 50,
+ "arguments": [{"type": "shortstr", "name": "host"},
+ {"type": "shortstr", "name": "known-hosts", "default-value": ""}],
+ "name": "redirect"},
+ {"id": 60,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 61,
+ "arguments": [],
+ "name": "close-ok"}],
+ "name": "connection",
+ "properties": []
+ },
+ {
+ "id": 20,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "out-of-band", "default-value": ""}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "open-ok"},
+ {"id": 20,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "table", "name": "details", "default-value": {}}],
+ "name": "alert"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [],
+ "name": "close-ok"}],
+ "name": "channel"
+ },
+ {
+ "id": 30,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "passive", "default-value": true},
+ {"type": "bit", "name": "active", "default-value": true},
+ {"type": "bit", "name": "write", "default-value": true},
+ {"type": "bit", "name": "read", "default-value": true}],
+ "name": "request",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1}],
+ "name": "request-ok"}],
+ "name": "access"
+ },
+ {
+ "id": 40,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "type", "default-value": "direct"},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "internal", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "delete-ok"}],
+ "name": "exchange"
+ },
+ {
+ "id": 50,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "shortstr", "name": "queue"},
+ {"type": "long", "name": "message-count"},
+ {"type": "long", "name": "consumer-count"}],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "bind",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "bind-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "purge",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "long", "name": "message-count"}],
+ "name": "purge-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "if-empty", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "long", "name": "message-count"}],
+ "name": "delete-ok"},
+ {"id": 50,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "unbind",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "unbind-ok"}
+ ],
+ "name": "queue"
+ },
+ {
+ "id": 60,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"domain": "access-ticket", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "no-ack", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false}],
+ "name": "publish"},
+ {"content": true,
+ "id": 50,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"content": true,
+ "id": 60,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "deliver"},
+ {"id": 70,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "no-ack", "default-value": false}],
+ "name": "get",
+ "synchronous" : true},
+ {"content": true,
+ "id": 71,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"},
+ {"type": "long", "name": "message-count"}],
+ "name": "get-ok"},
+ {"id": 72,
+ "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}],
+ "name": "get-empty"},
+ {"id": 80,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false}],
+ "name": "ack"},
+ {"id": 90,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "reject"},
+ {"id": 100,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover-async"},
+ {"id": 110,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover",
+ "synchronous" : true},
+ {"id": 111,
+ "arguments": [],
+ "name": "recover-ok"}],
+ "name": "basic",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "delivery-mode"},
+ {"type": "octet", "name": "priority"},
+ {"type": "shortstr", "name": "correlation-id"},
+ {"type": "shortstr", "name": "reply-to"},
+ {"type": "shortstr", "name": "expiration"},
+ {"type": "shortstr", "name": "message-id"},
+ {"type": "timestamp", "name": "timestamp"},
+ {"type": "shortstr", "name": "type"},
+ {"type": "shortstr", "name": "user-id"},
+ {"type": "shortstr", "name": "app-id"},
+ {"type": "shortstr", "name": "cluster-id"}]
+ },
+ {
+ "id": 70,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "no-ack", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"id": 40,
+ "arguments": [{"type": "shortstr", "name": "identifier"},
+ {"type": "longlong", "name": "content-size"}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "longlong", "name": "staged-size"}],
+ "name": "open-ok"},
+ {"content": true,
+ "id": 50,
+ "arguments": [],
+ "name": "stage"},
+ {"id": 60,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false},
+ {"type": "shortstr", "name": "identifier"}],
+ "name": "publish"},
+ {"content": true,
+ "id": 70,
+ "arguments": [{"type": "short", "name": "reply-code", "default-value": 200},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"id": 80,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"},
+ {"type": "shortstr", "name": "identifier"}],
+ "name": "deliver"},
+ {"id": 90,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false}],
+ "name": "ack"},
+ {"id": 100,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "reject"}],
+ "name": "file",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "priority"},
+ {"type": "shortstr", "name": "reply-to"},
+ {"type": "shortstr", "name": "message-id"},
+ {"type": "shortstr", "name": "filename"},
+ {"type": "timestamp", "name": "timestamp"},
+ {"type": "shortstr", "name": "cluster-id"}]
+ },
+ {
+ "id": 80,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "long", "name": "consume-rate", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false}],
+ "name": "publish"},
+ {"content": true,
+ "id": 50,
+ "arguments": [{"type": "short", "name": "reply-code", "default-value": 200},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"content": true,
+ "id": 60,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "queue"}],
+ "name": "deliver"}],
+ "name": "stream",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "priority"},
+ {"type": "timestamp", "name": "timestamp"}]
+ },
+ {
+ "id": 90,
+ "methods": [{"id": 10,
+ "arguments": [],
+ "name": "select",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"},
+ {"id": 20,
+ "arguments": [],
+ "name": "commit",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "commit-ok"},
+ {"id": 30,
+ "arguments": [],
+ "name": "rollback",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [],
+ "name": "rollback-ok"}],
+ "name": "tx"
+ },
+ {
+ "id": 100,
+ "methods": [{"id": 10,
+ "arguments": [],
+ "name": "select",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"},
+ {"id": 20,
+ "arguments": [{"type": "shortstr", "name": "dtx-identifier"}],
+ "name": "start",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [], "name": "start-ok"}],
+ "name": "dtx"
+ },
+ {
+ "id": 110,
+ "methods": [{"content": true,
+ "id": 10,
+ "arguments": [{"type": "table", "name": "meta-data"}],
+ "name": "request"}],
+ "name": "tunnel",
+ "properties": [{"type": "table", "name": "headers"},
+ {"type": "shortstr", "name": "proxy-name"},
+ {"type": "shortstr", "name": "data-name"},
+ {"type": "octet", "name": "durable"},
+ {"type": "octet", "name": "broadcast"}]
+ },
+ {
+ "id": 120,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "octet", "name": "integer-1"},
+ {"type": "short", "name": "integer-2"},
+ {"type": "long", "name": "integer-3"},
+ {"type": "longlong", "name": "integer-4"},
+ {"type": "octet", "name": "operation"}],
+ "name": "integer",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "longlong", "name": "result"}],
+ "name": "integer-ok"},
+ {"id": 20,
+ "arguments": [{"type": "shortstr", "name": "string-1"},
+ {"type": "longstr", "name": "string-2"},
+ {"type": "octet", "name": "operation"}],
+ "name": "string",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "longstr", "name": "result"}],
+ "name": "string-ok"},
+ {"id": 30,
+ "arguments": [{"type": "table", "name": "table"},
+ {"type": "octet", "name": "integer-op"},
+ {"type": "octet", "name": "string-op"}],
+ "name": "table",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "longlong", "name": "integer-result"},
+ {"type": "longstr", "name": "string-result"}],
+ "name": "table-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [],
+ "name": "content",
+ "synchronous" : true},
+ {"content": true,
+ "id": 41,
+ "arguments": [{"type": "long", "name": "content-checksum"}],
+ "name": "content-ok"}],
+ "name": "test"
+ }
+ ]
+}
--- /dev/null
+{
+ "name": "AMQP",
+ "major-version": 0,
+ "minor-version": 9,
+ "revision": 1,
+ "port": 5672,
+ "copyright": [
+ "Copyright (C) 2008-2013 GoPivotal, Inc.\n",
+ "\n",
+ "Permission is hereby granted, free of charge, to any person\n",
+ "obtaining a copy of this file (the \"Software\"), to deal in the\n",
+ "Software without restriction, including without limitation the \n",
+ "rights to use, copy, modify, merge, publish, distribute, \n",
+ "sublicense, and/or sell copies of the Software, and to permit \n",
+ "persons to whom the Software is furnished to do so, subject to \n",
+ "the following conditions:\n",
+ "\n",
+ "The above copyright notice and this permission notice shall be\n",
+ "included in all copies or substantial portions of the Software.\n",
+ "\n",
+ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
+ "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n",
+ "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
+ "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n",
+ "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n",
+ "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
+ "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n",
+ "OTHER DEALINGS IN THE SOFTWARE.\n",
+ "\n",
+ "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n",
+ "Updated for 0-9-1 by Tony Garnock-Jones\n",
+ "\n",
+ "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n",
+ "http://www.twiststandards.org/index.php?option=com_docman&task=cat_view&gid=28&&Itemid=90\n",
+ "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n",
+ "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"],
+
+ "domains": [
+ ["bit", "bit"],
+ ["channel-id", "longstr"],
+ ["class-id", "short"],
+ ["consumer-tag", "shortstr"],
+ ["delivery-tag", "longlong"],
+ ["destination", "shortstr"],
+ ["duration", "longlong"],
+ ["exchange-name", "shortstr"],
+ ["long", "long"],
+ ["longlong", "longlong"],
+ ["longstr", "longstr"],
+ ["method-id", "short"],
+ ["no-ack", "bit"],
+ ["no-local", "bit"],
+ ["octet", "octet"],
+ ["offset", "longlong"],
+ ["path", "shortstr"],
+ ["peer-properties", "table"],
+ ["queue-name", "shortstr"],
+ ["redelivered", "bit"],
+ ["reference", "longstr"],
+ ["reject-code", "short"],
+ ["reject-text", "shortstr"],
+ ["reply-code", "short"],
+ ["reply-text", "shortstr"],
+ ["security-token", "longstr"],
+ ["short", "short"],
+ ["shortstr", "shortstr"],
+ ["table", "table"],
+ ["timestamp", "timestamp"]
+ ],
+
+ "constants": [
+ {"name": "FRAME-METHOD", "value": 1},
+ {"name": "FRAME-HEADER", "value": 2},
+ {"name": "FRAME-BODY", "value": 3},
+ {"name": "FRAME-HEARTBEAT", "value": 8},
+ {"name": "FRAME-MIN-SIZE", "value": 4096},
+ {"name": "FRAME-END", "value": 206},
+ {"name": "REPLY-SUCCESS", "value": 200},
+ {"name": "CONTENT-TOO-LARGE", "value": 311, "class": "soft-error"},
+ {"name": "NO-ROUTE", "value": 312, "class": "soft-error"},
+ {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"},
+ {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"},
+ {"name": "NOT-FOUND", "value": 404, "class": "soft-error"},
+ {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"},
+ {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"},
+ {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"},
+ {"name": "INVALID-PATH", "value": 402, "class": "hard-error"},
+ {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"},
+ {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"},
+ {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"},
+ {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"},
+ {"name": "UNEXPECTED-FRAME", "value": 505, "class": "hard-error"},
+ {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"},
+ {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"},
+ {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"},
+ {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"}
+ ],
+
+ "classes": [
+ {
+ "id": 10,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "octet", "name": "version-major", "default-value": 0},
+ {"type": "octet", "name": "version-minor", "default-value": 9},
+ {"domain": "peer-properties", "name": "server-properties"},
+ {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "locales", "default-value": "en_US"}],
+ "name": "start",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"domain": "peer-properties", "name": "client-properties"},
+ {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "response"},
+ {"type": "shortstr", "name": "locale", "default-value": "en_US"}],
+ "name": "start-ok"},
+ {"id": 20,
+ "arguments": [{"type": "longstr", "name": "challenge"}],
+ "name": "secure",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "longstr", "name": "response"}],
+ "name": "secure-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune-ok"},
+ {"id": 40,
+ "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"},
+ {"type": "shortstr", "name": "capabilities", "default-value": ""},
+ {"type": "bit", "name": "insist", "default-value": false}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}],
+ "name": "open-ok"},
+ {"id": 50,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "close-ok"},
+ {"id": 60,
+ "arguments": [{"type": "shortstr", "name": "reason", "default-value": ""}],
+ "name": "blocked"},
+ {"id": 61,
+ "arguments": [],
+ "name": "unblocked"}],
+ "name": "connection",
+ "properties": []
+ },
+ {
+ "id": 20,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "out-of-band", "default-value": ""}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "longstr", "name": "channel-id", "default-value": ""}],
+ "name": "open-ok"},
+ {"id": 20,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [],
+ "name": "close-ok"}],
+ "name": "channel"
+ },
+ {
+ "id": 30,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "passive", "default-value": true},
+ {"type": "bit", "name": "active", "default-value": true},
+ {"type": "bit", "name": "write", "default-value": true},
+ {"type": "bit", "name": "read", "default-value": true}],
+ "name": "request",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1}],
+ "name": "request-ok"}],
+ "name": "access"
+ },
+ {
+ "id": 40,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "type", "default-value": "direct"},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "internal", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "delete-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "destination"},
+ {"type": "shortstr", "name": "source"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "bind",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [],
+ "name": "bind-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "destination"},
+ {"type": "shortstr", "name": "source"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "unbind",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "unbind-ok"}],
+ "name": "exchange"
+ },
+ {
+ "id": 50,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "shortstr", "name": "queue"},
+ {"type": "long", "name": "message-count"},
+ {"type": "long", "name": "consumer-count"}],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "bind",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "bind-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "purge",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "long", "name": "message-count"}],
+ "name": "purge-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "if-empty", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "long", "name": "message-count"}],
+ "name": "delete-ok"},
+ {"id": 50,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "unbind",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "unbind-ok"}
+ ],
+ "name": "queue"
+ },
+ {
+ "id": 60,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"domain": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "no-ack", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false}],
+ "name": "publish"},
+ {"content": true,
+ "id": 50,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"content": true,
+ "id": 60,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "deliver"},
+ {"id": 70,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "no-ack", "default-value": false}],
+ "name": "get",
+ "synchronous" : true},
+ {"content": true,
+ "id": 71,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"},
+ {"type": "long", "name": "message-count"}],
+ "name": "get-ok"},
+ {"id": 72,
+ "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}],
+ "name": "get-empty"},
+ {"id": 80,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false}],
+ "name": "ack"},
+ {"id": 90,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "reject"},
+ {"id": 100,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover-async"},
+ {"id": 110,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover",
+ "synchronous" : true},
+ {"id": 111,
+ "arguments": [],
+ "name": "recover-ok"},
+ {"id": 120,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "nack"}],
+ "name": "basic",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "delivery-mode"},
+ {"type": "octet", "name": "priority"},
+ {"type": "shortstr", "name": "correlation-id"},
+ {"type": "shortstr", "name": "reply-to"},
+ {"type": "shortstr", "name": "expiration"},
+ {"type": "shortstr", "name": "message-id"},
+ {"type": "timestamp", "name": "timestamp"},
+ {"type": "shortstr", "name": "type"},
+ {"type": "shortstr", "name": "user-id"},
+ {"type": "shortstr", "name": "app-id"},
+ {"type": "shortstr", "name": "cluster-id"}]
+ },
+ {
+ "id": 90,
+ "methods": [{"id": 10,
+ "arguments": [],
+ "name": "select",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"},
+ {"id": 20,
+ "arguments": [],
+ "name": "commit",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "commit-ok"},
+ {"id": 30,
+ "arguments": [],
+ "name": "rollback",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [],
+ "name": "rollback-ok"}],
+ "name": "tx"
+ },
+ {
+ "id": 85,
+ "methods": [{"id": 10,
+ "arguments": [
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "select",
+ "synchronous": true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"}],
+ "name": "confirm"
+ }
+ ]
+}
--- /dev/null
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+from __future__ import nested_scopes
+import re
+import sys
+import os
+from optparse import OptionParser
+
# Locate a JSON codec: prefer simplejson (required on Python < 2.6), then
# fall back to the standard-library json module, which ships with Python
# from 2.6 onward (0x20600f0 is sys.hexversion for 2.6.0 final). If
# neither is importable, print installation advice and abort.
try:
    try:
        import simplejson as json
    except ImportError, e:
        if sys.hexversion >= 0x20600f0:
            import json
        else:
            # Too old for stdlib json and simplejson missing: re-raise so
            # the outer handler prints the advice below.
            raise e
except ImportError:
    print >> sys.stderr , " You don't appear to have simplejson.py installed"
    print >> sys.stderr , " (an implementation of a JSON reader and writer in Python)."
    print >> sys.stderr , " You can install it:"
    print >> sys.stderr , " - by running 'apt-get install python-simplejson' on Debian-based systems,"
    print >> sys.stderr , " - by running 'yum install python-simplejson' on Fedora/Red Hat system,"
    print >> sys.stderr , " - by running 'port install py25-simplejson' on Macports on OS X"
    print >> sys.stderr , " (you may need to say 'make PYTHON=python2.5', as well),"
    print >> sys.stderr , " - from sources from 'http://pypi.python.org/pypi/simplejson'"
    print >> sys.stderr , " - simplejson is a standard json library in the Python core since 2.6"
    sys.exit(1)
+
def insert_base_types(d):
    """Seed the domain table d so each primitive AMQP wire type maps to itself."""
    base_types = ('octet', 'shortstr', 'longstr', 'short', 'long',
                  'longlong', 'bit', 'table', 'timestamp')
    d.update((t, t) for t in base_types)
+
class AmqpSpecFileMergeConflict(Exception):
    """Raised when two AMQP spec files define the same thing differently."""
+
+# If ignore_conflicts is true, then we allow acc and new to conflict,
+# with whatever's already in acc winning and new being ignored. If
+# ignore_conflicts is false, acc and new must not conflict.
+
def default_spec_value_merger(key, acc, new, ignore_conflicts):
    """Fallback merger for scalar spec values (version numbers, port, ...).

    Accepts new when nothing has been accumulated yet, when both values
    agree, or when conflicts are being ignored; otherwise reports the
    disagreement via AmqpSpecFileMergeConflict.
    """
    if acc is not None and acc != new and not ignore_conflicts:
        raise AmqpSpecFileMergeConflict(key, acc, new)
    return new
+
def extension_info_merger(key, acc, new, ignore_conflicts):
    """Collect the "extension" stanza of every loaded file, in load order.

    Extensions never conflict, so ignore_conflicts is irrelevant here.
    """
    result = list(acc)
    result.append(new)
    return result
+
def domains_merger(key, acc, new, ignore_conflicts):
    """Merge two "domains" sections (lists of [name, wire-type] pairs).

    Names already present in acc win. A name appearing in both lists is
    either silently dropped from new (ignore_conflicts) or raises
    AmqpSpecFileMergeConflict.
    """
    merged = dict((k, v) for [k, v] in acc)
    for [k, v] in new:
        # 'k in merged' replaces dict.has_key, which is deprecated in
        # Python 2.6+ and removed in Python 3.
        if k in merged:
            if not ignore_conflicts:
                raise AmqpSpecFileMergeConflict(key, acc, new)
        else:
            merged[k] = v

    # items() works on both Python 2 and 3; iteritems() is 2-only.
    return [[k, v] for (k, v) in merged.items()]
+
def merge_dict_lists_by(dict_key, acc, new, ignore_conflicts):
    """Merge two lists of dicts, treating d[dict_key] as each dict's identity.

    Entries already in acc win; a duplicate identity in new is either
    skipped (ignore_conflicts) or raises AmqpSpecFileMergeConflict.
    Returns a new list; acc itself is not mutated.
    """
    acc_index = set(v[dict_key] for v in acc)
    result = list(acc)  # shallow copy
    for v in new:
        if v[dict_key] in acc_index:
            if not ignore_conflicts:
                # BUG FIX: this previously raised with the undefined name
                # 'description', turning every real conflict into a
                # NameError instead of the intended exception.
                raise AmqpSpecFileMergeConflict(dict_key, acc, new)
        else:
            result.append(v)
    return result
+
# Merge two "constants" sections; constants are identified by their "name".
def constants_merger(key, acc, new, ignore_conflicts):
    return merge_dict_lists_by("name", acc, new, ignore_conflicts)
+
# Merge the method lists of one class; methods are identified by "name".
def methods_merger(classname, acc, new, ignore_conflicts):
    return merge_dict_lists_by("name", acc, new, ignore_conflicts)
+
# Merge the content-property lists of one class, identified by "name".
def properties_merger(classname, acc, new, ignore_conflicts):
    return merge_dict_lists_by("name", acc, new, ignore_conflicts)
+
# Merge one class definition in place: extend acc's method and property
# lists with those of new (entries already present in acc win, or raise,
# depending on ignore_conflicts). Mutates acc; returns None.
def class_merger(acc, new, ignore_conflicts):
    acc["methods"] = methods_merger(acc["name"],
                                    acc["methods"],
                                    new["methods"],
                                    ignore_conflicts)
    # "properties" is optional on both sides (e.g. the tx class has none).
    acc["properties"] = properties_merger(acc["name"],
                                          acc.get("properties", []),
                                          new.get("properties", []),
                                          ignore_conflicts)
+
# Merge two "classes" sections. A class present in both is merged
# field-by-field via class_merger, which mutates the dict already held in
# acc (the same object the result list shares); classes only in new are
# appended. Returns a new list.
def classes_merger(key, acc, new, ignore_conflicts):
    acc_dict = dict((v["name"], v) for v in acc)
    result = list(acc) # shallow copy
    for w in new:
        if w["name"] in acc_dict:
            class_merger(acc_dict[w["name"]], w, ignore_conflicts)
        else:
            result.append(w)
    return result
+
# Spec-section name -> (merger function, seed value) dispatch table used by
# merge_load_specs; sections without an entry fall back to
# default_spec_value_merger with a None seed.
mergers = {
    "extension": (extension_info_merger, []),
    "domains": (domains_merger, []),
    "constants": (constants_merger, []),
    "classes": (classes_merger, []),
}
+
def merge_load_specs(filenames, ignore_conflicts):
    """Load the JSON spec files named in filenames and merge them, in order,
    into a single spec dict.

    Each top-level key is combined by its entry in the mergers table
    (falling back to default_spec_value_merger), so earlier files win on
    conflicts unless ignore_conflicts is set.
    """
    spec = {}
    for filename in filenames:
        handle = open(filename)
        try:
            doc = json.load(handle)
        finally:
            # Close each file promptly; the original kept every handle
            # open until the end and leaked them all if json.load raised.
            handle.close()
        for (key, value) in doc.items():  # iteritems() is Python-2-only
            (merger, default_value) = mergers.get(key, (default_spec_value_merger, None))
            spec[key] = merger(key, spec.get(key, default_value), value, ignore_conflicts)
    return spec
+
class AmqpSpec:
    """The merged AMQP specification: protocol version/port, domain table,
    protocol constants and the list of AMQP classes."""

    # Slight wart: use a class member rather than change the ctor signature
    # to avoid breaking everyone else's code.
    ignore_conflicts = False

    def __init__(self, filenames):
        self.spec = merge_load_specs(filenames, AmqpSpec.ignore_conflicts)

        self.major = self.spec['major-version']
        self.minor = self.spec['minor-version']
        # dict.has_key is deprecated in Python 2.6+ and gone in Python 3;
        # ".get() or 0" preserves the original "missing or falsy -> 0"
        # semantics of the has_key/and/or chain.
        self.revision = self.spec.get('revision') or 0
        self.port = self.spec['port']

        # Domain name -> primitive wire type, seeded with the primitives
        # themselves so base types resolve to themselves.
        self.domains = {}
        insert_base_types(self.domains)
        for (domain, wire_type) in self.spec['domains']:
            self.domains[domain] = wire_type

        # (name, value, error-class) triples; error-class is '' for plain
        # constants such as frame type markers.
        self.constants = []
        for d in self.spec['constants']:
            klass = d.get('class', '')
            self.constants.append((d['name'], d['value'], klass))

        self.classes = [AmqpClass(self, element)
                        for element in self.spec['classes']]

    def allClasses(self):
        """All AmqpClass objects, in spec order."""
        return self.classes

    def allMethods(self):
        """Every method of every class, flattened into one list."""
        return [m for c in self.classes for m in c.allMethods()]

    def resolveDomain(self, n):
        """Map a domain name to its primitive wire type."""
        return self.domains[n]
+
class AmqpEntity:
    """Base class for spec entities (classes, methods, fields).

    Keeps the raw JSON element and exposes its mandatory 'name' key.
    """
    def __init__(self, element):
        self.name = element['name']
        self.element = element
+
class AmqpClass(AmqpEntity):
    """A single AMQP class (e.g. 'basic'): its methods and, optionally,
    its content properties."""

    def __init__(self, spec, element):
        """Build the class from its JSON element.

        spec    -- the owning AmqpSpec
        element -- JSON dict with 'name', 'id', 'methods' and an optional
                   'properties' list
        """
        AmqpEntity.__init__(self, element)
        self.spec = spec
        self.index = int(self.element['id'])

        self.methods = [AmqpMethod(self, method_element)
                        for method_element in self.element['methods']]

        # True when at least one method of this class carries content.
        self.hasContentProperties = any(method.hasContent
                                        for method in self.methods)

        # Content properties; 'properties' is optional in the JSON spec.
        self.fields = [AmqpField(self, e, index)
                       for (index, e)
                       in enumerate(self.element.get('properties', []))]

    def allMethods(self):
        """Return the AmqpMethod objects belonging to this class."""
        return self.methods

    def __repr__(self):
        return 'AmqpClass("' + self.name + '")'
+
class AmqpMethod(AmqpEntity):
    """A single AMQP method (e.g. 'basic.publish') within a class."""

    def __init__(self, klass, element):
        """Build the method from its JSON element.

        klass   -- the owning AmqpClass
        element -- JSON dict with 'name', 'id', 'arguments' and optional
                   'synchronous' / 'content' flags
        """
        AmqpEntity.__init__(self, element)
        self.klass = klass
        self.index = int(self.element['id'])
        # Both flags are optional in the JSON spec and default to False.
        self.isSynchronous = self.element.get('synchronous', False)
        self.hasContent = self.element.get('content', False)
        self.arguments = [AmqpField(self, argument, index)
                          for (index, argument)
                          in enumerate(element['arguments'])]

    def __repr__(self):
        return ('AmqpMethod("' + self.klass.name + "." + self.name + '" '
                + repr(self.arguments) + ')')
+
class AmqpField(AmqpEntity):
    """A single method argument or class content property."""

    def __init__(self, method, element, index):
        """Build the field from its JSON element.

        method  -- the owning AmqpMethod (or AmqpClass for properties)
        element -- JSON dict with 'name' and either 'type' or 'domain',
                   plus an optional 'default-value'
        index   -- zero-based position within the owner's field list
        """
        AmqpEntity.__init__(self, element)
        self.method = method
        self.index = index

        # Extensions name a primitive 'type' directly; the core spec refers
        # to a named 'domain'.  Exactly one must be present (KeyError
        # otherwise, as before).
        if 'type' in self.element:
            self.domain = self.element['type']
        else:
            self.domain = self.element['domain']

        # Optional; None means "no default declared".
        self.defaultvalue = self.element.get('default-value')

    def __repr__(self):
        return 'AmqpField("' + self.name + '")'
+
def do_main(header_fn, body_fn):
    """Back-compat entry point: wrap the two generator functions in the
    dispatch-table form understood by do_main_dict."""
    do_main_dict({"body": body_fn, "header": header_fn})
+
def do_main_dict(funcDict):
    """Command-line driver: <function> <spec.json>... <output-file>.

    Looks up <function> in funcDict and invokes it with the list of spec
    file names, with stdout redirected into the output file.  Supports a
    --ignore-conflicts flag, which is stored on AmqpSpec for spec merging.
    Exits with status 1 on bad arguments or an unknown function name.
    """
    def usage():
        # sys.stderr.write instead of the Python-2-only `print >>` syntax.
        sys.stderr.write("Usage:\n")
        sys.stderr.write("  %s <function> <path_to_amqp_spec.json>..."
                         " <path_to_output_file>\n" % (sys.argv[0],))
        sys.stderr.write("  where <function> is one of %s\n"
                         % ", ".join(funcDict.keys()))

    def execute(fn, amqp_specs, out_file):
        # Redirect stdout into the output file while fn runs; remove any
        # partial output if fn failed so no broken file is left behind.
        stdout = sys.stdout
        f = open(out_file, 'w')
        success = False
        try:
            sys.stdout = f
            fn(amqp_specs)
            success = True
        finally:
            sys.stdout = stdout
            f.close()
            if not success:
                os.remove(out_file)

    parser = OptionParser()
    parser.add_option("--ignore-conflicts", action="store_true",
                      dest="ignore_conflicts", default=False)
    (options, args) = parser.parse_args()

    if len(args) < 3:
        usage()
        sys.exit(1)
    else:
        function = args[0]
        sources = args[1:-1]
        dest = args[-1]
        AmqpSpec.ignore_conflicts = options.ignore_conflicts
        if function in funcDict:
            execute(funcDict[function], sources, dest)
        else:
            usage()
            sys.exit(1)
--- /dev/null
+{
+ "extension": {
+ "name": "credit",
+ "version": "0.1",
+ "status": [
+ "This extension is used internally by the broker and plugins. ",
+ "It is NOT intended to be used by regular clients over the ",
+ "network. This extension is subject to change without notice; ",
+ "hence you are strongly discouraged from building clients ",
+ "which use it."],
+ "copyright": [
+ "Copyright (C) 2008-2013 GoPivotal, Inc.\n",
+ "\n",
+ "Permission is hereby granted, free of charge, to any person\n",
+ "obtaining a copy of this file (the \"Software\"), to deal in the\n",
+ "Software without restriction, including without limitation the \n",
+ "rights to use, copy, modify, merge, publish, distribute, \n",
+ "sublicense, and/or sell copies of the Software, and to permit \n",
+ "persons to whom the Software is furnished to do so, subject to \n",
+ "the following conditions:\n",
+ "\n",
+ "The above copyright notice and this permission notice shall be\n",
+ "included in all copies or substantial portions of the Software.\n",
+ "\n",
+ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
+ "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n",
+ "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
+ "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n",
+ "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n",
+ "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
+ "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n",
+ "OTHER DEALINGS IN THE SOFTWARE.\n"]
+ },
+
+ "classes": [
+ {
+ "id": 60,
+ "methods": [{"id": 200,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "long", "name": "credit"},
+ {"type": "bit", "name": "drain"}],
+ "name": "credit",
+ "synchronous" : true},
+ {"id": 201,
+ "arguments": [{"type": "long", "name": "available"}],
+ "name": "credit-ok"},
+ {"id": 202,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "long", "name": "credit-drained"}],
+ "name": "credit-drained"}],
+ "name": "basic"
+ }
+ ]
+}
--- /dev/null
+{
+ "extension": {
+ "name": "demo",
+ "version": "1.0",
+ "copyright": "Copyright (C) 2009-2013 GoPivotal, Inc."
+ },
+ "domains": [
+ ["foo-domain", "shortstr"]
+ ],
+ "constants": [
+ {"name": "FOO-CONSTANT", "value": 121212}
+ ],
+ "classes": [
+ {"name": "demo",
+ "id": 555,
+ "methods": [{"name": "one", "id": 1, "arguments": []}]}
+ ]
+}
--- /dev/null
+The files amqp-rabbitmq-0.8.json and amqp-rabbitmq-0.9.1.json are
+"Copyright (C) 2008-2013 GoPivotal, Inc." and are covered by the MIT
+license.
+
--- /dev/null
+<?xml version='1.0'?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'>
+
+<!-- Prepare a DocBook refentry for man-page output: strip the inline
+     example markup and gather all examples into a trailing Examples
+     refsect1 (see the "/refentry" template below). -->
+
+<xsl:output doctype-public="-//OASIS//DTD DocBook XML V4.5//EN"
+ doctype-system="http://www.docbook.org/xml/4.5/docbookx.dtd"
+ indent="yes"
+/>
+
+<!-- Don't copy examples through in place -->
+<xsl:template match="*[@role='example-prefix']"/>
+<xsl:template match="*[@role='example']"/>
+
+<!-- Copy everything through (with lower priority) -->
+<xsl:template match="@*|node()">
+ <xsl:copy><xsl:apply-templates select="@*|node()"/></xsl:copy>
+</xsl:template>
+
+<!-- Copy the root node, and add examples at the end -->
+<xsl:template match="/refentry">
+<refentry lang="en">
+<xsl:for-each select="*">
+ <xsl:copy><xsl:apply-templates select="@*|node()"/></xsl:copy>
+</xsl:for-each>
+ <refsect1>
+ <title>Examples</title>
+<xsl:if test="//screen[@role='example']">
+ <variablelist>
+<xsl:for-each select="//screen[@role='example']">
+ <varlistentry>
+ <term><command><xsl:copy-of select="text()"/></command></term>
+ <listitem>
+ <!-- Pick up the explanatory para that immediately follows this screen -->
+ <xsl:copy-of select="following-sibling::para[@role='example' and preceding-sibling::screen[1] = current()]"/>
+ </listitem>
+ </varlistentry>
+</xsl:for-each>
+ </variablelist>
+</xsl:if>
+<!--
+We need to handle multiline examples separately, since not using a
+variablelist leads to slightly less nice formatting (the explanation doesn't get
+indented)
+-->
+<xsl:for-each select="//screen[@role='example-multiline']">
+<screen><emphasis role="bold"><xsl:copy-of select="text()"/></emphasis></screen>
+<xsl:copy-of select="following-sibling::para[@role='example']"/>
+</xsl:for-each>
+ </refsect1>
+</refentry>
+</xsl:template>
+
+<!--
+ We show all the subcommands using XML that looks like this:
+
+ <term>
+ <cmdsynopsis>
+ <command>list_connections</command>
+ <arg choice="opt">
+ <replaceable>connectioninfoitem</replaceable>
+ ...
+ </arg>
+ </cmdsynopsis>
+ </term>
+
+ However, while DocBook renders this sensibly for HTML, for some reason it
+ doesn't show anything inside <cmdsynopsis> at all for man pages. I think what
+ we're doing is semantically correct so this is a bug in DocBook. The following
+ rules essentially do what DocBook does when <cmdsynopsis> is not inside a
+ <term>.
+-->
+
+<xsl:template match="term/cmdsynopsis">
+ <xsl:apply-templates mode="docbook-bug"/>
+</xsl:template>
+
+<xsl:template match="command" mode="docbook-bug">
+ <emphasis role="bold"><xsl:apply-templates mode="docbook-bug"/></emphasis>
+</xsl:template>
+
+<xsl:template match="arg[@choice='opt']" mode="docbook-bug">
+ [<xsl:apply-templates mode="docbook-bug"/>]
+</xsl:template>
+
+<xsl:template match="arg[@choice='req']" mode="docbook-bug">
+ {<xsl:apply-templates mode="docbook-bug"/>}
+</xsl:template>
+
+<xsl:template match="replaceable" mode="docbook-bug">
+ <emphasis><xsl:apply-templates mode="docbook-bug"/></emphasis>
+</xsl:template>
+
+</xsl:stylesheet>
+
--- /dev/null
+<?xml version='1.0'?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:doc="http://www.rabbitmq.com/namespaces/ad-hoc/doc"
+ xmlns="http://www.w3.org/1999/xhtml"
+ version='1.0'>
+
+<!-- Wrap DocBook-generated XHTML in the site's page chrome and revert
+     selected DocBook constructs to the ad-hoc doc: schema (see below). -->
+
+<!-- Path to the original DocBook source; used via document($original)
+     to recover the page title and man volume number. -->
+<xsl:param name="original"/>
+
+<xsl:output method="xml" />
+
+<!-- Copy every element through -->
+<xsl:template match="*">
+ <xsl:element name="{name()}" namespace="http://www.w3.org/1999/xhtml">
+ <xsl:apply-templates select="@*|node()"/>
+ </xsl:element>
+</xsl:template>
+
+<!-- Copy attributes through unchanged -->
+<xsl:template match="@*">
+ <xsl:copy/>
+</xsl:template>
+
+<!-- Copy the root node, and munge the outer part of the page -->
+<xsl:template match="/html">
+<xsl:processing-instruction name="xml-stylesheet">type="text/xml" href="page.xsl"</xsl:processing-instruction>
+<html xmlns:doc="http://www.rabbitmq.com/namespaces/ad-hoc/doc" xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <title><xsl:value-of select="document($original)/refentry/refnamediv/refname"/><xsl:if test="document($original)/refentry/refmeta/manvolnum">(<xsl:value-of select="document($original)/refentry/refmeta/manvolnum"/>)</xsl:if> manual page</title>
+ </head>
+ <body show-in-this-page="true">
+ <xsl:choose>
+ <!-- Pages with a man volume number are manual pages; others are plain docs -->
+ <xsl:when test="document($original)/refentry/refmeta/manvolnum">
+ <p>
+ This is the manual page for
+ <code><xsl:value-of select="document($original)/refentry/refnamediv/refname"/>(<xsl:value-of select="document($original)/refentry/refmeta/manvolnum"/>)</code>.
+ </p>
+ <p>
+ <a href="../manpages.html">See a list of all manual pages</a>.
+ </p>
+ </xsl:when>
+ <xsl:otherwise>
+ <p>
+ This is the documentation for
+ <code><xsl:value-of select="document($original)/refentry/refnamediv/refname"/></code>.
+ </p>
+ </xsl:otherwise>
+ </xsl:choose>
+ <p>
+ For more general documentation, please see the
+ <a href="../admin-guide.html">administrator's guide</a>.
+ </p>
+
+ <xsl:apply-templates select="body/div[@class='refentry']"/>
+ </body>
+</html>
+</xsl:template>
+
+<!-- Specific instructions to revert the DocBook HTML to be more like our ad-hoc XML schema -->
+
+<xsl:template match="div[@class='refsect1'] | div[@class='refnamediv'] | div[@class='refsynopsisdiv']">
+ <doc:section name="{h2}">
+ <xsl:apply-templates select="node()"/>
+ </doc:section>
+</xsl:template>
+
+<xsl:template match="div[@class='refsect2']">
+ <doc:subsection name="{h3}">
+ <xsl:apply-templates select="node()"/>
+ </doc:subsection>
+</xsl:template>
+
+<xsl:template match="h2 | h3">
+ <doc:heading>
+ <xsl:apply-templates select="node()"/>
+ </doc:heading>
+</xsl:template>
+
+<xsl:template match="pre[@class='screen']">
+ <pre class="sourcecode">
+ <xsl:apply-templates select="node()"/>
+ </pre>
+</xsl:template>
+
+<xsl:template match="div[@class='cmdsynopsis']">
+ <div class="cmdsynopsis" id="{p/code[@class='command']}">
+ <xsl:apply-templates select="node()"/>
+ </div>
+</xsl:template>
+
+</xsl:stylesheet>
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
+<refentry lang="en">
+ <refentryinfo>
+ <productname>RabbitMQ Server</productname>
+ <authorgroup>
+ <corpauthor>The RabbitMQ Team <<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>></corpauthor>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>rabbitmq-echopid.bat</refentrytitle>
+ <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>rabbitmq-echopid.bat</refname>
+ <refpurpose>return the process id of the Erlang runtime hosting RabbitMQ</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>rabbitmq-echopid.bat</command>
+ <arg choice="req">sname</arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>
+ RabbitMQ is an implementation of AMQP, the emerging
+ standard for high performance enterprise messaging. The
+ RabbitMQ server is a robust and scalable implementation of
+ an AMQP broker.
+ </para>
+ <para>
+ Running <command>rabbitmq-echopid</command> will attempt to
+ discover and echo the process id (PID) of the Erlang runtime
+ process (erl.exe) that is hosting RabbitMQ. To allow erl.exe
+ time to start up and load RabbitMQ, the script will wait for
+ ten seconds before timing out if a suitable PID cannot be
+ found.
+ </para>
+ <para>
+ If a PID is discovered, the script will echo it to stdout
+ before exiting with a ERRORLEVEL of 0. If no PID is
+ discovered before the timeout, nothing is written to stdout
+ and the script exits setting ERRORLEVEL to 1.
+ </para>
+ <para>
+ Note that this script only exists on Windows due to the need
+ to wait for erl.exe and possibly time-out. To obtain the PID
+ on Unix set RABBITMQ_PID_FILE before starting
+ rabbitmq-server and do not use "-detached".
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Options</title>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><arg choice="req">sname</arg></cmdsynopsis></term>
+ <listitem>
+ <para role="usage">
+The short-name form of the RabbitMQ node name.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
+<refentry lang="en">
+ <refentryinfo>
+ <productname>RabbitMQ Server</productname>
+ <authorgroup>
+ <corpauthor>The RabbitMQ Team <<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>></corpauthor>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>rabbitmq-env.conf</refentrytitle>
+ <manvolnum>5</manvolnum>
+ <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>rabbitmq-env.conf</refname>
+ <refpurpose>default settings for RabbitMQ AMQP server</refpurpose>
+ </refnamediv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>
+<filename>/etc/rabbitmq/rabbitmq-env.conf</filename> contains variable settings that override the
+defaults built in to the RabbitMQ startup scripts.
+ </para>
+ <para>
+The file is interpreted by the system shell, and so should consist of
+a sequence of shell environment variable definitions. Normal shell
+syntax is permitted (since the file is sourced using the shell "."
+operator), including line comments starting with "#".
+ </para>
+ <para>
+In order of preference, the startup scripts get their values from the
+environment, from <filename>/etc/rabbitmq/rabbitmq-env.conf</filename> and finally from the
+built-in default values. For example, for the <envar>RABBITMQ_NODENAME</envar>
+setting,
+ </para>
+ <para>
+ <envar>RABBITMQ_NODENAME</envar>
+ </para>
+ <para>
+from the environment is checked first. If it is absent or equal to the
+empty string, then
+ </para>
+ <para>
+ <envar>NODENAME</envar>
+ </para>
+ <para>
+from <filename>/etc/rabbitmq/rabbitmq-env.conf</filename> is checked. If it is also absent
+or set equal to the empty string then the default value from the
+startup script is used.
+ </para>
+ <para>
+The variable names in /etc/rabbitmq/rabbitmq-env.conf are always equal to the
+environment variable names, with the <envar>RABBITMQ_</envar> prefix removed:
+<envar>RABBITMQ_NODE_PORT</envar> from the environment becomes <envar>NODE_PORT</envar> in the
+<filename>/etc/rabbitmq/rabbitmq-env.conf</filename> file, etc.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example-multiline">
+# I am a complete /etc/rabbitmq/rabbitmq-env.conf file.
+# Comment lines start with a hash character.
+# This is a /bin/sh script file - use ordinary envt var syntax
+NODENAME=hare
+ </screen>
+ <para role="example">
+ This is an example of a complete
+ <filename>/etc/rabbitmq/rabbitmq-env.conf</filename> file that overrides the default Erlang
+ node name from "rabbit" to "hare".
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>See also</title>
+ <para>
+ <citerefentry><refentrytitle>rabbitmq-server</refentrytitle><manvolnum>1</manvolnum></citerefentry>
+ <citerefentry><refentrytitle>rabbitmqctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
+<!--
+ There is some extra magic in this document besides the usual DocBook semantics
+ to allow us to derive manpages, HTML and usage messages from the same source
+ document.
+
+ Examples need to be moved to the end for man pages. To this end, <para>s and
+ <screen>s with role="example" will be moved, and with role="example-prefix"
+ will be removed.
+
+ The usage messages are more involved. We have some magic in usage.xsl to pull
+ out the command synopsis, global option and subcommand synopses. We also pull
+ out <para>s with role="usage".
+
+ Finally we construct lists of possible values for subcommand options, if the
+ subcommand's <varlistentry> has role="usage-has-option-list". The option which
+ takes the values should be marked with role="usage-option-list".
+-->
+
+<refentry lang="en">
+ <refentryinfo>
+ <productname>RabbitMQ Server</productname>
+ <authorgroup>
+ <corpauthor>The RabbitMQ Team <<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>></corpauthor>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>rabbitmq-plugins</refentrytitle>
+ <manvolnum>1</manvolnum>
+ <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>rabbitmq-plugins</refname>
+ <refpurpose>command line tool for managing RabbitMQ broker plugins</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>rabbitmq-plugins</command>
+ <arg choice="req"><replaceable>command</replaceable></arg>
+ <arg choice="opt" rep="repeat"><replaceable>command options</replaceable></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>
+ <command>rabbitmq-plugins</command> is a command line tool for managing
+ RabbitMQ broker plugins. It allows one to enable, disable and browse
+ plugins. It must be run by a user with write permissions to the RabbitMQ
+ configuration directory.
+ </para>
+ <para>
+ Some plugins depend on others to work
+ correctly. <command>rabbitmq-plugins</command> traverses these
+ dependencies and enables all required plugins. Plugins listed on
+ the <command>rabbitmq-plugins</command> command line are marked as
+ explicitly enabled; dependent plugins are marked as implicitly
+ enabled. Implicitly enabled plugins are automatically disabled again
+ when they are no longer required.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Commands</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>list</command> <arg choice="opt">-v</arg> <arg choice="opt">-m</arg> <arg choice="opt">-E</arg> <arg choice="opt">-e</arg> <arg choice="opt"><replaceable>pattern</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>-v</term>
+ <listitem><para>Show all plugin details (verbose).</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>-m</term>
+ <listitem><para>Show only plugin names (minimal).</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>-E</term>
+ <listitem><para>Show only explicitly enabled
+ plugins.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>-e</term>
+ <listitem><para>Show only explicitly or implicitly
+ enabled plugins.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>pattern</term>
+ <listitem><para>Pattern to filter the plugin names by.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Lists all plugins, their versions, dependencies and
+ descriptions. Each plugin is prefixed with a status
+ indicator - [ ] to indicate that the plugin is not
+ enabled, [E] to indicate that it is explicitly enabled,
+ [e] to indicate that it is implicitly enabled, and [!] to
+ indicate that it is enabled but missing and thus not
+ operational.
+ </para>
+ <para>
+ If the optional pattern is given, only plugins whose
+ name matches <command>pattern</command> are shown.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmq-plugins list</screen>
+ <para role="example">
+ This command lists all plugins, on one line each.
+ </para>
+ <screen role="example">rabbitmq-plugins list -v </screen>
+ <para role="example">
+ This command lists all plugins.
+ </para>
+ <screen role="example">rabbitmq-plugins list -v management</screen>
+ <para role="example">
+ This command lists all plugins whose name contains "management".
+ </para>
+ <screen role="example">rabbitmq-plugins list -e rabbit</screen>
+ <para role="example">
+ This command lists all implicitly or explicitly enabled
+ RabbitMQ plugins.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>enable</command> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>plugin</term>
+ <listitem><para>One or more plugins to enable.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Enables the specified plugins and all their
+ dependencies.
+ </para>
+
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmq-plugins enable rabbitmq_shovel rabbitmq_management</screen>
+ <para role="example">
+ This command enables the <command>shovel</command> and
+ <command>management</command> plugins and all their
+ dependencies.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>disable</command> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>plugin</term>
+ <listitem><para>One or more plugins to disable.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Disables the specified plugins and all plugins that
+ depend on them.
+ </para>
+
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmq-plugins disable amqp_client</screen>
+ <para role="example">
+ This command disables <command>amqp_client</command> and
+ all plugins that depend on it.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ </refsect1>
+
+</refentry>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
+<refentry lang="en">
+ <refentryinfo>
+ <productname>RabbitMQ Server</productname>
+ <authorgroup>
+ <corpauthor>The RabbitMQ Team <<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>></corpauthor>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>rabbitmq-server</refentrytitle>
+ <manvolnum>1</manvolnum>
+ <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>rabbitmq-server</refname>
+ <refpurpose>start RabbitMQ AMQP server</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>rabbitmq-server</command>
+ <arg choice="opt">-detached</arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>
+ RabbitMQ is an implementation of AMQP, the emerging standard for high
+performance enterprise messaging. The RabbitMQ server is a robust and
+scalable implementation of an AMQP broker.
+ </para>
+ <para>
+Running rabbitmq-server in the foreground displays a banner message,
+and reports on progress in the startup sequence, concluding with the
+message "broker running", indicating that the RabbitMQ broker has been
+started successfully. To shut down the server, just terminate the
+process or use rabbitmqctl(1).
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Environment</title>
+ <variablelist>
+
+ <varlistentry>
+ <term>RABBITMQ_MNESIA_BASE</term>
+ <listitem>
+ <para>
+Defaults to <filename>/var/lib/rabbitmq/mnesia</filename>. Set this to the directory where
+Mnesia database files should be placed.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_LOG_BASE</term>
+ <listitem>
+ <para>
+Defaults to <filename>/var/log/rabbitmq</filename>. Log files generated by the server will
+be placed in this directory.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_NODENAME</term>
+ <listitem>
+ <para>
+Defaults to rabbit. This can be useful if you want to run more than
+one node per machine - <envar>RABBITMQ_NODENAME</envar> should be unique per
+erlang-node-and-machine combination. See the
+<ulink url="http://www.rabbitmq.com/clustering.html#single-machine">clustering on a single
+machine guide</ulink> for details.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_NODE_IP_ADDRESS</term>
+ <listitem>
+ <para>
+By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
+available. Set this if you only want to bind to one network interface
+or address family.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_NODE_PORT</term>
+ <listitem>
+ <para>
+Defaults to 5672.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Options</title>
+ <variablelist>
+ <varlistentry>
+ <term>-detached</term>
+ <listitem>
+ <para>
+ Start the server process in the background. Note that this will
+ cause the pid not to be written to the pid file.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmq-server -detached</screen>
+ <para role="example">
+ Runs RabbitMQ AMQP server in the background.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>See also</title>
+ <para>
+ <citerefentry><refentrytitle>rabbitmq-env.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>
+ <citerefentry><refentrytitle>rabbitmqctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+</refentry>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
+<refentry lang="en">
+ <refentryinfo>
+ <productname>RabbitMQ Server</productname>
+ <authorgroup>
+ <corpauthor>The RabbitMQ Team <<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>></corpauthor>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>rabbitmq-service.bat</refentrytitle>
+ <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>rabbitmq-service.bat</refname>
+ <refpurpose>manage RabbitMQ AMQP service</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>rabbitmq-service.bat</command>
+ <arg choice="opt">command</arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>
+ RabbitMQ is an implementation of AMQP, the emerging standard for high
+performance enterprise messaging. The RabbitMQ server is a robust and
+scalable implementation of an AMQP broker.
+ </para>
+ <para>
+Running <command>rabbitmq-service</command> allows the RabbitMQ broker to be run as a
+service on NT/2000/2003/XP/Vista® environments. The RabbitMQ broker
+service can be started and stopped using the Windows® services
+applet.
+ </para>
+ <para>
+By default the service will run in the authentication context of the
+local system account. It is therefore necessary to synchronise Erlang
+cookies between the local system account (typically
+<filename>C:\WINDOWS\.erlang.cookie</filename>) and the account that will be used to
+run <command>rabbitmqctl</command>.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Commands</title>
+ <variablelist>
+
+ <varlistentry>
+ <term>help</term>
+ <listitem>
+ <para>
+Display usage information.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>install</term>
+ <listitem>
+ <para>
+Install the service. The service will not be started.
+Subsequent invocations will update the service parameters if
+relevant environment variables were modified or if the active
+plugins were changed.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>remove</term>
+ <listitem>
+ <para>
+Remove the service. If the service is running then it will
+automatically be stopped before being removed. No files will be
+deleted as a consequence and <command>rabbitmq-server</command> will remain operable.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>start</term>
+ <listitem>
+ <para>
+Start the service. The service must have been correctly installed
+beforehand.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stop</term>
+ <listitem>
+ <para>
+Stop the service. The service must be running for this command to
+have any effect.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>disable</term>
+ <listitem>
+ <para>
+Disable the service. This is the equivalent of setting the startup
+type to <code>Disabled</code> using the service control panel.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>enable</term>
+ <listitem>
+ <para>
+Enable the service. This is the equivalent of setting the startup
+type to <code>Automatic</code> using the service control panel.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Environment</title>
+ <variablelist>
+
+ <varlistentry>
+ <term>RABBITMQ_SERVICENAME</term>
+ <listitem>
+ <para>
+Defaults to RabbitMQ.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_BASE</term>
+ <listitem>
+ <para>
+Defaults to the application data directory of the current user.
+This is the location of log and database directories.
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_NODENAME</term>
+ <listitem>
+ <para>
+Defaults to rabbit. This can be useful if you want to run more than
+one node per machine - <envar>RABBITMQ_NODENAME</envar> should be unique per
+erlang-node-and-machine combination. See the
+<ulink url="http://www.rabbitmq.com/clustering.html#single-machine">clustering on a single
+machine guide</ulink> for details.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_NODE_IP_ADDRESS</term>
+ <listitem>
+ <para>
+By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
+available. Set this if you only want to bind to one network interface
+or address family.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_NODE_PORT</term>
+ <listitem>
+ <para>
+Defaults to 5672.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ERLANG_SERVICE_MANAGER_PATH</term>
+ <listitem>
+ <para>
+Defaults to <filename>C:\Program Files\erl5.5.5\erts-5.5.5\bin</filename>
+(or <filename>C:\Program Files (x86)\erl5.5.5\erts-5.5.5\bin</filename> for 64-bit
+environments). This is the installation location of the Erlang service
+manager.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>RABBITMQ_CONSOLE_LOG</term>
+ <listitem>
+ <para>
+Set this variable to <code>new</code> or <code>reuse</code> to have the console
+output from the server redirected to a file named <code>SERVICENAME</code>.debug
+in the application data directory of the user that installed the service.
+Under Vista this will be <filename>C:\Users\username\AppData\Roaming\SERVICENAME</filename>.
+Under previous versions of Windows this will be
+<filename>C:\Documents and Settings\username\Application Data\SERVICENAME</filename>.
+If <code>RABBITMQ_CONSOLE_LOG</code> is set to <code>new</code> then a new file will be
+created each time the service starts. If <code>RABBITMQ_CONSOLE_LOG</code> is
+set to <code>reuse</code> then the file will be overwritten each time the
+service starts. The default behaviour when <code>RABBITMQ_CONSOLE_LOG</code> is
+not set or set to a value other than <code>new</code> or <code>reuse</code> is to discard
+the server output.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
--- /dev/null
+%% -*- mode: erlang -*-
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Sample Configuration File.
+%%
+%% See http://www.rabbitmq.com/configure.html for details.
+%% ----------------------------------------------------------------------------
+[
+ {rabbit,
+ [%%
+ %% Network Connectivity
+ %% ====================
+ %%
+
+ %% By default, RabbitMQ will listen on all interfaces, using
+ %% the standard (reserved) AMQP port.
+ %%
+ %% {tcp_listeners, [5672]},
+
+ %% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
+ %% For example, to listen only on localhost for both IPv4 and IPv6:
+ %%
+ %% {tcp_listeners, [{"127.0.0.1", 5672},
+ %% {"::1", 5672}]},
+
+ %% SSL listeners are configured in the same fashion as TCP listeners,
+ %% including the option to control the choice of interface.
+ %%
+ %% {ssl_listeners, [5671]},
+
+ %% Log levels (currently just used for connection logging).
+ %% One of 'info', 'warning', 'error' or 'none', in decreasing order
+ %% of verbosity. Defaults to 'info'.
+ %%
+ %% {log_levels, [{connection, info}]},
+
+ %% Set to 'true' to perform reverse DNS lookups when accepting a
+ %% connection. Hostnames will then be shown instead of IP addresses
+ %% in rabbitmqctl and the management plugin.
+ %%
+ %% {reverse_dns_lookups, true},
+
+ %%
+ %% Security / AAA
+ %% ==============
+ %%
+
+ %% The default "guest" user is only permitted to access the server
+ %% via a loopback interface (e.g. localhost).
+ %% {loopback_users, [<<"guest">>]},
+ %%
+ %% Uncomment the following line if you want to allow access to the
+ %% guest user from anywhere on the network.
+ %% {loopback_users, []},
+
+ %% Configuring SSL.
+ %% See http://www.rabbitmq.com/ssl.html for full documentation.
+ %%
+ %% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
+ %% {certfile, "/path/to/server/cert.pem"},
+ %% {keyfile, "/path/to/server/key.pem"},
+ %% {verify, verify_peer},
+ %% {fail_if_no_peer_cert, false}]},
+
+ %% Choose the available SASL mechanism(s) to expose.
+ %% The two default (built in) mechanisms are 'PLAIN' and
+ %% 'AMQPLAIN'. Additional mechanisms can be added via
+ %% plugins.
+ %%
+ %% See http://www.rabbitmq.com/authentication.html for more details.
+ %%
+ %% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+
+ %% Select an authentication database to use. RabbitMQ comes bundled
+ %% with a built-in auth-database, based on mnesia.
+ %%
+ %% {auth_backends, [rabbit_auth_backend_internal]},
+
+ %% Configurations supporting the rabbitmq_auth_mechanism_ssl and
+ %% rabbitmq_auth_backend_ldap plugins.
+ %%
+ %% NB: These options require that the relevant plugin is enabled.
+ %% See http://www.rabbitmq.com/plugins.html for further details.
+
+ %% The RabbitMQ-auth-mechanism-ssl plugin makes it possible to
+ %% authenticate a user based on the client's SSL certificate.
+ %%
+ %% To use auth-mechanism-ssl, add to or replace the auth_mechanisms
+ %% list with the entry 'EXTERNAL'.
+ %%
+ %% {auth_mechanisms, ['EXTERNAL']},
+
+ %% The rabbitmq_auth_backend_ldap plugin allows the broker to
+ %% perform authentication and authorisation by deferring to an
+ %% external LDAP server.
+ %%
+ %% For more information about configuring the LDAP backend, see
+ %% http://www.rabbitmq.com/ldap.html.
+ %%
+ %% Enable the LDAP auth backend by adding to or replacing the
+ %% auth_backends entry:
+ %%
+ %% {auth_backends, [rabbit_auth_backend_ldap]},
+
+ %% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
+ %% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
 %% configuration section later in this file and the README in
+ %% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
+ %% details.
+ %%
+ %% To use the SSL cert's CN instead of its DN as the username
+ %%
+ %% {ssl_cert_login_from, common_name},
+
+ %%
+ %% Default User / VHost
+ %% ====================
+ %%
+
+ %% On first start RabbitMQ will create a vhost and a user. These
+ %% config items control what gets created. See
+ %% http://www.rabbitmq.com/access-control.html for further
+ %% information about vhosts and access control.
+ %%
+ %% {default_vhost, <<"/">>},
+ %% {default_user, <<"guest">>},
+ %% {default_pass, <<"guest">>},
+ %% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+
+ %% Tags for default user
+ %%
+ %% For more details about tags, see the documentation for the
+ %% Management Plugin at http://www.rabbitmq.com/management.html.
+ %%
+ %% {default_user_tags, [administrator]},
+
+ %%
+ %% Additional network and protocol related configuration
+ %% =====================================================
+ %%
+
+ %% Set the default AMQP heartbeat delay (in seconds).
+ %%
+ %% {heartbeat, 600},
+
+ %% Set the max permissible size of an AMQP frame (in bytes).
+ %%
+ %% {frame_max, 131072},
+
+ %% Set the max permissible number of channels per connection.
+ %% 0 means "no limit".
+ %%
+ %% {channel_max, 128},
+
+ %% Customising Socket Options.
+ %%
+ %% See (http://www.erlang.org/doc/man/inet.html#setopts-2) for
+ %% further documentation.
+ %%
+ %% {tcp_listen_options, [binary,
+ %% {packet, raw},
+ %% {reuseaddr, true},
+ %% {backlog, 128},
+ %% {nodelay, true},
+ %% {exit_on_close, false}]},
+
+ %%
+ %% Resource Limits & Flow Control
+ %% ==============================
+ %%
+ %% See http://www.rabbitmq.com/memory.html for full details.
+
+ %% Memory-based Flow Control threshold.
+ %%
+ %% {vm_memory_high_watermark, 0.4},
+
+ %% Fraction of the high watermark limit at which queues start to
 %% page messages out to disc in order to free up memory.
+ %%
+ %% {vm_memory_high_watermark_paging_ratio, 0.5},
+
+ %% Set disk free limit (in bytes). Once free disk space reaches this
+ %% lower bound, a disk alarm will be set - see the documentation
+ %% listed above for more details.
+ %%
+ %% {disk_free_limit, 50000000},
+
+ %% Alternatively, we can set a limit relative to total available RAM.
+ %%
+ %% {disk_free_limit, {mem_relative, 1.0}},
+
+ %%
+ %% Misc/Advanced Options
+ %% =====================
+ %%
+ %% NB: Change these only if you understand what you are doing!
+ %%
+
+ %% To announce custom properties to clients on connection:
+ %%
+ %% {server_properties, []},
+
+ %% How to respond to cluster partitions.
+ %% See http://www.rabbitmq.com/partitions.html for further details.
+ %%
+ %% {cluster_partition_handling, ignore},
+
+ %% Make clustering happen *automatically* at startup - only applied
+ %% to nodes that have just been reset or started for the first time.
+ %% See http://www.rabbitmq.com/clustering.html#auto-config for
+ %% further details.
+ %%
+ %% {cluster_nodes, {['rabbit@my.host.com'], disc}},
+
+ %% Set (internal) statistics collection granularity.
+ %%
+ %% {collect_statistics, none},
+
+ %% Statistics collection interval (in milliseconds).
+ %%
+ %% {collect_statistics_interval, 5000},
+
+ %% Explicitly enable/disable hipe compilation.
+ %%
+ %% {hipe_compile, true}
+
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% Advanced Erlang Networking/Clustering Options.
+ %%
+ %% See http://www.rabbitmq.com/clustering.html for details
+ %% ----------------------------------------------------------------------------
+ {kernel,
+ [%% Sets the net_kernel tick time.
+ %% Please see http://erlang.org/doc/man/kernel_app.html and
+ %% http://www.rabbitmq.com/nettick.html for further details.
+ %%
+ %% {net_ticktime, 60}
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ Management Plugin
+ %%
+ %% See http://www.rabbitmq.com/management.html for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_management,
+ [%% Pre-Load schema definitions from the following JSON file. See
+ %% http://www.rabbitmq.com/management.html#load-definitions
+ %%
+ %% {load_definitions, "/path/to/schema.json"},
+
+ %% Log all requests to the management HTTP API to a file.
+ %%
+ %% {http_log_dir, "/path/to/access.log"},
+
+ %% Change the port on which the HTTP listener listens,
+ %% specifying an interface for the web server to bind to.
+ %% Also set the listener to use SSL and provide SSL options.
+ %%
+ %% {listener, [{port, 12345},
+ %% {ip, "127.0.0.1"},
+ %% {ssl, true},
+ %% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"},
+ %% {certfile, "/path/to/cert.pem"},
+ %% {keyfile, "/path/to/key.pem"}]}]},
+
+ %% Configure how long aggregated data (such as message rates and queue
+ %% lengths) is retained. Please read the plugin's documentation in
+ %% https://www.rabbitmq.com/management.html#configuration for more
+ %% details.
+ %%
+ %% {sample_retention_policies,
+ %% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]},
+ %% {basic, [{60, 5}, {3600, 60}]},
+ %% {detailed, [{10, 5}]}]}
+ ]},
+
+ {rabbitmq_management_agent,
+ [%% Misc/Advanced Options
+ %%
+ %% NB: Change these only if you understand what you are doing!
+ %%
+ %% {force_fine_statistics, true}
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ Shovel Plugin
+ %%
+ %% See http://www.rabbitmq.com/shovel.html for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_shovel,
+ [{shovels,
+ [%% A named shovel worker.
+ %% {my_first_shovel,
+ %% [
+
+ %% List the source broker(s) from which to consume.
+ %%
+ %% {sources,
+ %% [%% URI(s) and pre-declarations for all source broker(s).
+ %% {brokers, ["amqp://user:password@host.domain/my_vhost"]},
+ %% {declarations, []}
+ %% ]},
+
+ %% List the destination broker(s) to publish to.
+ %% {destinations,
+ %% [%% A singular version of the 'brokers' element.
+ %% {broker, "amqp://"},
+ %% {declarations, []}
+ %% ]},
+
+ %% Name of the queue to shovel messages from.
+ %%
+ %% {queue, <<"your-queue-name-goes-here">>},
+
+ %% Optional prefetch count.
+ %%
+ %% {prefetch_count, 10},
+
+ %% when to acknowledge messages:
+ %% - no_ack: never (auto)
+ %% - on_publish: after each message is republished
+ %% - on_confirm: when the destination broker confirms receipt
+ %%
+ %% {ack_mode, on_confirm},
+
+ %% Overwrite fields of the outbound basic.publish.
+ %%
+ %% {publish_fields, [{exchange, <<"my_exchange">>},
+ %% {routing_key, <<"from_shovel">>}]},
+
+ %% Static list of basic.properties to set on re-publication.
+ %%
+ %% {publish_properties, [{delivery_mode, 2}]},
+
+ %% The number of seconds to wait before attempting to
+ %% reconnect in the event of a connection failure.
+ %%
+ %% {reconnect_delay, 2.5}
+
+ %% ]} %% End of my_first_shovel
+ ]}
+ %% Rather than specifying some values per-shovel, you can specify
+ %% them for all shovels here.
+ %%
+ %% {defaults, [{prefetch_count, 0},
+ %% {ack_mode, on_confirm},
+ %% {publish_fields, []},
+ %% {publish_properties, [{delivery_mode, 2}]},
+ %% {reconnect_delay, 2.5}]}
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ Stomp Adapter
+ %%
+ %% See http://www.rabbitmq.com/stomp.html for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_stomp,
+ [%% Network Configuration - the format is generally the same as for the broker
+
+ %% Listen only on localhost (ipv4 & ipv6) on a specific port.
+ %% {tcp_listeners, [{"127.0.0.1", 61613},
+ %% {"::1", 61613}]},
+
+ %% Listen for SSL connections on a specific port.
+ %% {ssl_listeners, [61614]},
+
+ %% Additional SSL options
+
+ %% Extract a name from the client's certificate when using SSL.
+ %%
+ %% {ssl_cert_login, true},
+
+ %% Set a default user name and password. This is used as the default login
+ %% whenever a CONNECT frame omits the login and passcode headers.
+ %%
+ %% Please note that setting this will allow clients to connect without
+ %% authenticating!
+ %%
+ %% {default_user, [{login, "guest"},
+ %% {passcode, "guest"}]},
+
 %% If a default user is configured, or you have configured the use of SSL client
+ %% certificate based authentication, you can choose to allow clients to
+ %% omit the CONNECT frame entirely. If set to true, the client is
+ %% automatically connected as the default user or user supplied in the
+ %% SSL certificate whenever the first frame sent on a session is not a
+ %% CONNECT frame.
+ %%
+ %% {implicit_connect, true}
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ MQTT Adapter
+ %%
+ %% See http://hg.rabbitmq.com/rabbitmq-mqtt/file/stable/README.md for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_mqtt,
+ [%% Set the default user name and password. Will be used as the default login
+ %% if a connecting client provides no other login details.
+ %%
+ %% Please note that setting this will allow clients to connect without
+ %% authenticating!
+ %%
+ %% {default_user, <<"guest">>},
+ %% {default_pass, <<"guest">>},
+
+ %% Enable anonymous access. If this is set to false, clients MUST provide
+ %% login information in order to connect. See the default_user/default_pass
+ %% configuration elements for managing logins without authentication.
+ %%
+ %% {allow_anonymous, true},
+
 %% If you have multiple vhosts, specify the one to which the
+ %% adapter connects.
+ %%
+ %% {vhost, <<"/">>},
+
+ %% Specify the exchange to which messages from MQTT clients are published.
+ %%
+ %% {exchange, <<"amq.topic">>},
+
+ %% Specify TTL (time to live) to control the lifetime of non-clean sessions.
+ %%
+ %% {subscription_ttl, 1800000},
+
+ %% Set the prefetch count (governing the maximum number of unacknowledged
+ %% messages that will be delivered).
+ %%
+ %% {prefetch, 10},
+
+ %% TCP/SSL Configuration (as per the broker configuration).
+ %%
+ %% {tcp_listeners, [1883]},
+ %% {ssl_listeners, []},
+
+ %% TCP/Socket options (as per the broker configuration).
+ %%
+ %% {tcp_listen_options, [binary,
+ %% {packet, raw},
+ %% {reuseaddr, true},
+ %% {backlog, 128},
+ %% {nodelay, true}]}
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ AMQP 1.0 Support
+ %%
+ %% See http://hg.rabbitmq.com/rabbitmq-amqp1.0/file/default/README.md
+ %% for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_amqp1_0,
+ [%% Connections that are not authenticated with SASL will connect as this
+ %% account. See the README for more information.
+ %%
+ %% Please note that setting this will allow clients to connect without
+ %% authenticating!
+ %%
+ %% {default_user, "guest"},
+
+ %% Enable protocol strict mode. See the README for more information.
+ %%
+ %% {protocol_strict_mode, false}
+ ]},
+
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ LDAP Plugin
+ %%
+ %% See http://www.rabbitmq.com/ldap.html for details.
+ %%
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_auth_backend_ldap,
+ [%%
+ %% Connecting to the LDAP server(s)
+ %% ================================
+ %%
+
+ %% Specify servers to bind to. You *must* set this in order for the plugin
+ %% to work properly.
+ %%
+ %% {servers, ["your-server-name-goes-here"]},
+
+ %% Connect to the LDAP server using SSL
+ %%
+ %% {use_ssl, false},
+
+ %% Specify the LDAP port to connect to
+ %%
+ %% {port, 389},
+
+ %% LDAP connection timeout, in milliseconds or 'infinity'
+ %%
+ %% {timeout, infinity},
+
+ %% Enable logging of LDAP queries.
+ %% One of
+ %% - false (no logging is performed)
+ %% - true (verbose logging of the logic used by the plugin)
+ %% - network (as true, but additionally logs LDAP network traffic)
+ %%
+ %% Defaults to false.
+ %%
+ %% {log, false},
+
+ %%
+ %% Authentication
+ %% ==============
+ %%
+
+ %% Pattern to convert the username given through AMQP to a DN before
+ %% binding
+ %%
+ %% {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
+
+ %% Alternatively, you can convert a username to a Distinguished
+ %% Name via an LDAP lookup after binding. See the documentation for
+ %% full details.
+
+ %% When converting a username to a dn via a lookup, set these to
+ %% the name of the attribute that represents the user name, and the
+ %% base DN for the lookup query.
+ %%
+ %% {dn_lookup_attribute, "userPrincipalName"},
+ %% {dn_lookup_base, "DC=gopivotal,DC=com"},
+
+ %% Controls how to bind for authorisation queries and also to
+ %% retrieve the details of users logging in without presenting a
+ %% password (e.g., SASL EXTERNAL).
+ %% One of
+ %% - as_user (to bind as the authenticated user - requires a password)
+ %% - anon (to bind anonymously)
+ %% - {UserDN, Password} (to bind with a specified user name and password)
+ %%
+ %% Defaults to 'as_user'.
+ %%
+ %% {other_bind, as_user},
+
+ %%
+ %% Authorisation
+ %% =============
+ %%
+
+ %% The LDAP plugin can perform a variety of queries against your
+ %% LDAP server to determine questions of authorisation. See
+ %% http://www.rabbitmq.com/ldap.html#authorisation for more
+ %% information.
+
+ %% Set the query to use when determining vhost access
+ %%
+ %% {vhost_access_query, {in_group,
+ %% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+ %% Set the query to use when determining resource (e.g., queue) access
+ %%
+ %% {resource_access_query, {constant, true}},
+
+ %% Set queries to determine which tags a user has
+ %%
+ %% {tag_queries, []}
+ ]}
+].
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
+<!--
+ There is some extra magic in this document besides the usual DocBook semantics
+ to allow us to derive manpages, HTML and usage messages from the same source
+ document.
+
+ Examples need to be moved to the end for man pages. To this end, <para>s and
+ <screen>s with role="example" will be moved, and with role="example-prefix"
+ will be removed.
+
+ The usage messages are more involved. We have some magic in usage.xsl to pull
+ out the command synopsis, global option and subcommand synopses. We also pull
+ out <para>s with role="usage".
+
+ Finally we construct lists of possible values for subcommand options, if the
+ subcommand's <varlistentry> has role="usage-has-option-list". The option which
+ takes the values should be marked with role="usage-option-list".
+-->
+
+<refentry lang="en">
+ <refentryinfo>
+ <productname>RabbitMQ Server</productname>
+ <authorgroup>
+ <corpauthor>The RabbitMQ Team <<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>></corpauthor>
+ </authorgroup>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>rabbitmqctl</refentrytitle>
+ <manvolnum>1</manvolnum>
+ <refmiscinfo class="manual">RabbitMQ Service</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>rabbitmqctl</refname>
+ <refpurpose>command line tool for managing a RabbitMQ broker</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>rabbitmqctl</command>
+ <arg choice="opt">-n <replaceable>node</replaceable></arg>
+ <arg choice="opt">-q</arg>
+ <arg choice="req"><replaceable>command</replaceable></arg>
+ <arg choice="opt" rep="repeat"><replaceable>command options</replaceable></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+ <para>
+ RabbitMQ is an implementation of AMQP, the emerging standard for high
+ performance enterprise messaging. The RabbitMQ server is a robust and
+ scalable implementation of an AMQP broker.
+ </para>
+ <para>
+ <command>rabbitmqctl</command> is a command line tool for managing a
+ RabbitMQ broker. It performs all actions by connecting to one of the
+ broker's nodes.
+ </para>
+ <para>
+ Diagnostic information is displayed if the broker was not
+ running, could not be reached, or rejected the connection due to
+ mismatching Erlang cookies.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Options</title>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><arg choice="opt">-n <replaceable>node</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para role="usage">
+ Default node is "rabbit@server", where server is the local host. On
+ a host named "server.example.com", the node name of the RabbitMQ
+ Erlang node will usually be rabbit@server (unless RABBITMQ_NODENAME
+ has been set to some non-default value at broker startup time). The
+ output of <command>hostname -s</command> is usually the correct suffix to use after the
+ "@" sign. See rabbitmq-server(1) for details of configuring the
+ RabbitMQ broker.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><arg choice="opt">-q</arg></cmdsynopsis></term>
+ <listitem>
+ <para role="usage">
+ Quiet output mode is selected with the "-q" flag. Informational
+ messages are suppressed when quiet mode is in effect.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Commands</title>
+
+ <refsect2>
+ <title>Application and Cluster Management</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>stop</command> <arg choice="opt"><replaceable>pid_file</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Stops the Erlang node on which RabbitMQ is running. To
+ restart the node follow the instructions for <citetitle>Running
+ the Server</citetitle> in the <ulink url="http://www.rabbitmq.com/install.html">installation
+ guide</ulink>.
+ </para>
+ <para>
+ If a <option>pid_file</option> is specified, also waits
+ for the process specified there to terminate. See the
+ description of the <option>wait</option> command below
+ for details on this file.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl stop</screen>
+ <para role="example">
+ This command instructs the RabbitMQ node to terminate.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="stop_app">
+ <term><cmdsynopsis><command>stop_app</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Stops the RabbitMQ application, leaving the Erlang node
+ running.
+ </para>
+ <para>
+ This command is typically run prior to performing other
+ management actions that require the RabbitMQ application
+ to be stopped, e.g. <link
+ linkend="reset"><command>reset</command></link>.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl stop_app</screen>
+ <para role="example">
+ This command instructs the RabbitMQ node to stop the
+ RabbitMQ application.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>start_app</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Starts the RabbitMQ application.
+ </para>
+ <para>
+ This command is typically run after performing other
+ management actions that required the RabbitMQ application
+ to be stopped, e.g. <link
+ linkend="reset"><command>reset</command></link>.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl start_app</screen>
+ <para role="example">
+ This command instructs the RabbitMQ node to start the
+ RabbitMQ application.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>wait</command> <arg choice="req"><replaceable>pid_file</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Wait for the RabbitMQ application to start.
+ </para>
+ <para>
+ This command will wait for the RabbitMQ application to
+ start at the node. It will wait for the pid file to
+ be created, then for a process with a pid specified in the
+ pid file to start, and then for the RabbitMQ application
+ to start in that process. It will fail if the process
+ terminates without starting the RabbitMQ application.
+ </para>
+ <para>
+ A suitable pid file is created by
+ the <command>rabbitmq-server</command> script. By
+ default this is located in the Mnesia directory. Modify
+ the <command>RABBITMQ_PID_FILE</command> environment
+ variable to change the location.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl wait /var/run/rabbitmq/pid</screen>
+ <para role="example">
+ This command will return when the RabbitMQ node has
+ started up.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="reset">
+ <term><cmdsynopsis><command>reset</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Return a RabbitMQ node to its virgin state.
+ </para>
+ <para>
+ Removes the node from any cluster it belongs to, removes
+ all data from the management database, such as configured
+ users and vhosts, and deletes all persistent
+ messages.
+ </para>
+ <para>
+ For <command>reset</command> and <command>force_reset</command> to
+ succeed the RabbitMQ application must have been stopped,
+ e.g. with <link linkend="stop_app"><command>stop_app</command></link>.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl reset</screen>
+ <para role="example">
+ This command resets the RabbitMQ node.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>force_reset</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Forcefully return a RabbitMQ node to its virgin state.
+ </para>
+ <para>
+ The <command>force_reset</command> command differs from
+ <command>reset</command> in that it resets the node
+ unconditionally, regardless of the current management
+ database state and cluster configuration. It should only
+ be used as a last resort if the database or cluster
+ configuration has been corrupted.
+ </para>
+ <para>
+ For <command>reset</command> and <command>force_reset</command> to
+ succeed the RabbitMQ application must have been stopped,
+ e.g. with <link linkend="stop_app"><command>stop_app</command></link>.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl force_reset</screen>
+ <para role="example">
+ This command resets the RabbitMQ node.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>rotate_logs</command> <arg choice="req"><replaceable>suffix</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Instruct the RabbitMQ node to rotate the log files.
+ </para>
+ <para>
+ The RabbitMQ broker appends the contents of its log
+ files to files with names composed of the original name
+ and the suffix, and then resumes logging to freshly
+ created files at the original location. I.e. effectively
+ the current log contents are moved to the end of the
+ suffixed files.
+ </para>
+ <para>
+ When the target files do not exist they are created.
+ When no <option>suffix</option> is specified, the empty
+ log files are simply created at the original location;
+ no rotation takes place.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl rotate_logs .1</screen>
+ <para role="example">
+ This command instructs the RabbitMQ node to append the contents
+ of the log files to files with names consisting of the original logs'
+ names and ".1" suffix, e.g. rabbit@mymachine.log.1 and
+ rabbit@mymachine-sasl.log.1. Finally, logging resumes to
+ fresh files at the old locations.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>Cluster management</title>
+
+ <variablelist>
+ <varlistentry id="join_cluster">
+ <term><cmdsynopsis><command>join_cluster</command> <arg choice="req"><replaceable>clusternode</replaceable></arg> <arg choice="opt">--ram</arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>clusternode</term>
+ <listitem><para>Node to cluster with.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><arg choice="opt">--ram</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ If provided, the node will join the cluster as a RAM node.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Instruct the node to become a member of the cluster that the
+ specified node is in. Before clustering, the node is reset, so be
+ careful when using this command. For this command to succeed the
+ RabbitMQ application must have been stopped, e.g. with <link
+ linkend="stop_app"><command>stop_app</command></link>.
+ </para>
+ <para>
+ Cluster nodes can be of two types: disc or RAM. Disc nodes
+ replicate data in RAM and on disc, thus providing redundancy in
+ the event of node failure and recovery from global events such
+ as power failure across all nodes. RAM nodes replicate data in
+ RAM only (with the exception of queue contents, which can reside
+ on disc if the queue is persistent or too big to fit in memory)
+ and are mainly used for scalability. RAM nodes are more
+ performant only when managing resources (e.g. adding/removing
+ queues, exchanges, or bindings). A cluster must always have at
+ least one disc node, and usually should have more than one.
+ </para>
+ <para>
+ The node will be a disc node by default. If you wish to
+ create a RAM node, provide the <command>--ram</command> flag.
+ </para>
+ <para>
          After executing the <command>join_cluster</command> command, whenever
+ the RabbitMQ application is started on the current node it will
+ attempt to connect to the nodes that were in the cluster when the
+ node went down.
+ </para>
+ <para>
+ To leave a cluster, <command>reset</command> the node. You can
+ also remove nodes remotely with the
+ <command>forget_cluster_node</command> command.
+ </para>
+ <para>
+ For more details see the <ulink
+ url="http://www.rabbitmq.com/clustering.html">clustering
+ guide</ulink>.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl join_cluster hare@elena --ram</screen>
+ <para role="example">
+ This command instructs the RabbitMQ node to join the cluster that
+ <command>hare@elena</command> is part of, as a ram node.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>cluster_status</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Displays all the nodes in the cluster grouped by node type,
+ together with the currently running nodes.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl cluster_status</screen>
+ <para role="example">
+ This command displays the nodes in the cluster.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>change_cluster_node_type</command> <arg choice="req">disc | ram</arg></cmdsynopsis>
+ </term>
+ <listitem>
+ <para>
+ Changes the type of the cluster node. The node must be stopped for
+ this operation to succeed, and when turning a node into a RAM node
+ the node must not be the only disc node in the cluster.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl change_cluster_node_type disc</screen>
+ <para role="example">
+ This command will turn a RAM node into a disc node.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>forget_cluster_node</command> <arg choice="req"><replaceable>clusternode</replaceable></arg> <arg choice="opt">--offline</arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><arg choice="opt">--offline</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Enables node removal from an offline node. This is only
+ useful in the situation where all the nodes are offline and
+ the last node to go down cannot be brought online, thus
+ preventing the whole cluster from starting. It should not be
+ used in any other circumstances since it can lead to
+ inconsistencies.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Removes a cluster node remotely. The node that is being removed
+ must be offline, while the node we are removing from must be
+ online, except when using the <command>--offline</command> flag.
+ </para>
+ <para>
+ When using the <command>--offline</command> flag the node you
+ connect to will become the canonical source for cluster metadata
+ (e.g. which queues exist), even if it was not before. Therefore
+ you should use this command on the last node to shut down if
+ at all possible.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl -n hare@mcnulty forget_cluster_node rabbit@stringer</screen>
+ <para role="example">
+ This command will remove the node
+ <command>rabbit@stringer</command> from the node
+ <command>hare@mcnulty</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>update_cluster_nodes</command> <arg choice="req">clusternode</arg></cmdsynopsis>
+ </term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>clusternode</term>
+ <listitem>
+ <para>
+ The node to consult for up to date information.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Instructs an already clustered node to contact
+ <command>clusternode</command> to cluster when waking up. This is
+ different from <command>join_cluster</command> since it does not
+ join any cluster - it checks that the node is already in a cluster
+ with <command>clusternode</command>.
+ </para>
+ <para>
+ The need for this command is motivated by the fact that clusters
+ can change while a node is offline. Consider the situation in
+ which node A and B are clustered. A goes down, C clusters with B,
+ and then B leaves the cluster. When A wakes up, it'll try to
+ contact B, but this will fail since B is not in the cluster
+ anymore. <command>update_cluster_nodes -n A C</command> will solve
+ this situation.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>sync_queue</command> <arg choice="req">queue</arg></cmdsynopsis>
+ </term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>queue</term>
+ <listitem>
+ <para>
+ The name of the queue to synchronise.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Instructs a mirrored queue with unsynchronised slaves to
+ synchronise itself. The queue will block while
+ synchronisation takes place (all publishers to and
+ consumers from the queue will block). The queue must be
+ mirrored for this command to succeed.
+ </para>
+ <para>
+ Note that unsynchronised queues from which messages are
+ being drained will become synchronised eventually. This
+ command is primarily useful for queues which are not
+ being drained.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>cancel_sync_queue</command> <arg choice="req">queue</arg></cmdsynopsis>
+ </term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>queue</term>
+ <listitem>
+ <para>
+ The name of the queue to cancel synchronisation for.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Instructs a synchronising mirrored queue to stop
+ synchronising itself.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>set_cluster_name</command> <arg choice="req">name</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Sets the cluster name. The cluster name is announced to
+ clients on connection, and used by the federation and
+ shovel plugins to record where a message has been. The
+ cluster name is by default derived from the hostname of
+ the first node in the cluster, but can be changed.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl set_cluster_name london</screen>
+ <para role="example">
+ This sets the cluster name to "london".
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>User management</title>
+ <para>
+ Note that <command>rabbitmqctl</command> manages the RabbitMQ
+ internal user database. Users from any alternative
+ authentication backend will not be visible
+ to <command>rabbitmqctl</command>.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>add_user</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>password</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user to create.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>password</term>
+ <listitem><para>The password the created user will use to log in to the broker.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl add_user tonyg changeit</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to create a
+ (non-administrative) user named <command>tonyg</command> with
+ (initial) password
+ <command>changeit</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>delete_user</command> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user to delete.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl delete_user tonyg</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to delete the
+ user named <command>tonyg</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>change_password</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>newpassword</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user whose password is to be changed.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>newpassword</term>
+ <listitem><para>The new password for the user.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl change_password tonyg newpass</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to change the
+ password for the user named <command>tonyg</command> to
+ <command>newpass</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>clear_password</command> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user whose password is to be cleared.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl clear_password tonyg</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to clear the
+ password for the user named
+ <command>tonyg</command>. This user now cannot log in with a password (but may be able to through e.g. SASL EXTERNAL if configured).
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>set_user_tags</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>tag</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user whose tags are to
+ be set.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>tag</term>
+ <listitem><para>Zero, one or more tags to set. Any
+ existing tags will be removed.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl set_user_tags tonyg administrator</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to ensure the user
+ named <command>tonyg</command> is an administrator. This has no
+ effect when the user logs in via AMQP, but can be used to permit
+ the user to manage users, virtual hosts and permissions when the
+ user logs in via some other means (for example with the
+ management plugin).
+ </para>
+ <screen role="example">rabbitmqctl set_user_tags tonyg</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to remove any
+ tags from the user named <command>tonyg</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>list_users</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Lists users. Each result row will contain the user name
+ followed by a list of the tags set for that user.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl list_users</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to list all
+ users.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>Access control</title>
+ <para>
+ Note that <command>rabbitmqctl</command> manages the RabbitMQ
+ internal user database. Permissions for users from any
+ alternative authorisation backend will not be visible
+ to <command>rabbitmqctl</command>.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>add_vhost</command> <arg choice="req"><replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhostpath</term>
+ <listitem><para>The name of the virtual host entry to create.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Creates a virtual host.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl add_vhost test</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to create a new
+ virtual host called <command>test</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>delete_vhost</command> <arg choice="req"><replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhostpath</term>
+ <listitem><para>The name of the virtual host entry to delete.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Deletes a virtual host.
+ </para>
+ <para>
+ Deleting a virtual host deletes all its exchanges,
+ queues, bindings, user permissions, parameters and policies.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl delete_vhost test</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to delete the
+ virtual host called <command>test</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry role="usage-has-option-list">
+ <term><cmdsynopsis><command>list_vhosts</command> <arg choice="opt" role="usage-option-list"><replaceable>vhostinfoitem</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Lists virtual hosts.
+ </para>
+ <para>
+ The <command>vhostinfoitem</command> parameter is used to indicate which
+ virtual host information items to include in the results. The column order in the
+ results will match the order of the parameters.
+ <command>vhostinfoitem</command> can take any value from
+ the list that follows:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>The name of the virtual host with non-ASCII characters escaped as in C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>tracing</term>
+ <listitem><para>Whether tracing is enabled for this virtual host.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ If no <command>vhostinfoitem</command>s are specified
+ then the vhost name is displayed.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl list_vhosts name tracing</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to list all
+ virtual hosts.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>set_permissions</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>user</replaceable></arg> <arg choice="req"><replaceable>conf</replaceable></arg> <arg choice="req"><replaceable>write</replaceable></arg> <arg choice="req"><replaceable>read</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhostpath</term>
+ <listitem><para>The name of the virtual host to which to grant the user access, defaulting to <command>/</command>.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>user</term>
+ <listitem><para>The name of the user to grant access to the specified virtual host.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>conf</term>
+ <listitem><para>A regular expression matching resource names for which the user is granted configure permissions.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>write</term>
+ <listitem><para>A regular expression matching resource names for which the user is granted write permissions.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>read</term>
+ <listitem><para>A regular expression matching resource names for which the user is granted read permissions.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Sets user permissions.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl set_permissions -p /myvhost tonyg "^tonyg-.*" ".*" ".*"</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to grant the
+ user named <command>tonyg</command> access to the virtual host
+ called <command>/myvhost</command>, with configure permissions
+ on all resources whose names start with "tonyg-", and
+ write and read permissions on all resources.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>clear_permissions</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhostpath</term>
+ <listitem><para>The name of the virtual host to which to deny the user access, defaulting to <command>/</command>.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user to deny access to the specified virtual host.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Clears user permissions.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl clear_permissions -p /myvhost tonyg</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to deny the
+ user named <command>tonyg</command> access to the virtual host
+ called <command>/myvhost</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>list_permissions</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhostpath</term>
+ <listitem><para>The name of the virtual host for which to list the users that have been granted access to it, and their permissions. Defaults to <command>/</command>.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Lists permissions in a virtual host.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl list_permissions -p /myvhost</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to list all
+ the users which have been granted access to the virtual
+ host called <command>/myvhost</command>, and the
+ permissions they have for operations on resources in
+ that virtual host. Note that an empty string means no
+ permissions granted.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>list_user_permissions</command> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>username</term>
+ <listitem><para>The name of the user for which to list the permissions.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Lists user permissions.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl list_user_permissions tonyg</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to list all the
+ virtual hosts to which the user named <command>tonyg</command>
+ has been granted access, and the permissions the user has
+ for operations on resources in these virtual hosts.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>Parameter Management</title>
+ <para>
+ Certain features of RabbitMQ (such as the federation plugin)
+ are controlled by dynamic,
+ cluster-wide <emphasis>parameters</emphasis>. Each parameter
+ consists of a component name, a name and a value, and is
+ associated with a virtual host. The component name and name are
+ strings, and the value is an Erlang term. Parameters can be
+ set, cleared and listed. In general you should refer to the
+ documentation for the feature in question to see how to set
+ parameters.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>set_parameter</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>component_name</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg> <arg choice="req"><replaceable>value</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Sets a parameter.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>component_name</term>
+ <listitem><para>
+ The name of the component for which the
+ parameter is being set.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>
+ The name of the parameter being set.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>value</term>
+ <listitem><para>
+ The value for the parameter, as a
+ JSON term. In most shells you are very likely to
+ need to quote this.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl set_parameter federation local_username '"guest"'</screen>
+ <para role="example">
+ This command sets the parameter <command>local_username</command> for the <command>federation</command> component in the default virtual host to the JSON term <command>"guest"</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>clear_parameter</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>component_name</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Clears a parameter.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>component_name</term>
+ <listitem><para>
+ The name of the component for which the
+ parameter is being cleared.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>
+ The name of the parameter being cleared.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl clear_parameter federation local_username</screen>
+ <para role="example">
+ This command clears the parameter <command>local_username</command> for the <command>federation</command> component in the default virtual host.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>list_parameters</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Lists all parameters for a virtual host.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl list_parameters</screen>
+ <para role="example">
+ This command lists all parameters in the default virtual host.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>Policy Management</title>
+ <para>
+ Policies are used to control and modify the behaviour of queues
+ and exchanges on a cluster-wide basis. Policies apply within a
+ given vhost, and consist of a name, pattern, definition and an
+ optional priority. Policies can be set, cleared and listed.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>set_policy</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt">--priority <replaceable>priority</replaceable></arg> <arg choice="opt">--apply-to <replaceable>apply-to</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg> <arg choice="req"><replaceable>pattern</replaceable></arg> <arg choice="req"><replaceable>definition</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Sets a policy.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>
+ The name of the policy.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>pattern</term>
+ <listitem><para>
+ The regular expression which, when it matches a given resource, causes the policy to apply.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>definition</term>
+ <listitem><para>
+ The definition of the policy, as a
+ JSON term. In most shells you are very likely to
+ need to quote this.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>priority</term>
+ <listitem><para>
+ The priority of the policy as an integer. Higher numbers indicate greater precedence. The default is 0.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>apply-to</term>
+ <listitem><para>
+ Which types of object this policy should apply to - "queues", "exchanges" or "all". The default is "all".
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl set_policy federate-me "^amq." '{"federation-upstream-set":"all"}'</screen>
+ <para role="example">
+ This command sets the policy <command>federate-me</command> in the default virtual host so that built-in exchanges are federated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>clear_policy</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Clears a policy.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>
+ The name of the policy being cleared.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl clear_policy federate-me</screen>
+ <para role="example">
+ This command clears the <command>federate-me</command> policy in the default virtual host.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>list_policies</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Lists all policies for a virtual host.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl list_policies</screen>
+ <para role="example">
+ This command lists all policies in the default virtual host.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>Server Status</title>
+ <para>
+ The server status queries interrogate the server and return a list of
+ results with tab-delimited columns. Some queries (<command>list_queues</command>,
+ <command>list_exchanges</command>, <command>list_bindings</command>, and
+ <command>list_consumers</command>) accept an
+ optional <command>vhost</command> parameter. This parameter, if present, must be
+ specified immediately after the query.
+ </para>
+ <para role="usage">
+ The list_queues, list_exchanges and list_bindings commands accept an
+ optional virtual host parameter for which to display results. The
+ default value is "/".
+ </para>
+
+ <variablelist>
+ <varlistentry role="usage-has-option-list">
+ <term><cmdsynopsis><command>list_queues</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>queueinfoitem</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Returns queue details. Queue details of the <command>/</command> virtual host
+ are returned if the "-p" flag is absent. The "-p" flag can be used to
+ override this default.
+ </para>
+ <para>
+ The <command>queueinfoitem</command> parameter is used to indicate which queue
+ information items to include in the results. The column order in the
+ results will match the order of the parameters.
+ <command>queueinfoitem</command> can take any value from the list
+ that follows:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>The name of the queue with non-ASCII characters escaped as in C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>durable</term>
+ <listitem><para>Whether or not the queue survives server restarts.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>auto_delete</term>
+ <listitem><para>Whether the queue will be deleted automatically when no longer used.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>arguments</term>
+ <listitem><para>Queue arguments.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>policy</term>
+ <listitem><para>Policy name applying to the queue.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>pid</term>
+ <listitem><para>Id of the Erlang process associated with the queue.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>owner_pid</term>
+ <listitem><para>Id of the Erlang process representing the connection
+ which is the exclusive owner of the queue. Empty if the
+ queue is non-exclusive.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>exclusive_consumer_pid</term>
+ <listitem><para>Id of the Erlang process representing the channel of the
+ exclusive consumer subscribed to this queue. Empty if
+ there is no exclusive consumer.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>exclusive_consumer_tag</term>
+ <listitem><para>Consumer tag of the exclusive consumer subscribed to
+ this queue. Empty if there is no exclusive consumer.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>messages_ready</term>
+ <listitem><para>Number of messages ready to be delivered to clients.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>messages_unacknowledged</term>
+ <listitem><para>Number of messages delivered to clients but not yet acknowledged.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>messages</term>
+ <listitem><para>Sum of ready and unacknowledged messages
+ (queue depth).</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>consumers</term>
+ <listitem><para>Number of consumers.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>consumer_utilisation</term>
+ <listitem><para>Fraction of the time (between 0.0 and 1.0)
+ that the queue is able to immediately deliver messages to
+ consumers. This can be less than 1.0 if consumers are limited
+ by network congestion or prefetch count.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>memory</term>
+ <listitem><para>Bytes of memory consumed by the Erlang process associated with the
+ queue, including stack, heap and internal structures.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>slave_pids</term>
+ <listitem><para>If the queue is mirrored, this gives the IDs of the current slaves.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>synchronised_slave_pids</term>
+ <listitem><para>If the queue is mirrored, this gives the IDs of
+ the current slaves which are synchronised with the master -
+ i.e. those which could take over from the master without
+ message loss.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>status</term>
+ <listitem><para>The status of the queue. Normally
+ 'running', but may be "{syncing, MsgCount}" if the queue is
+ synchronising.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ If no <command>queueinfoitem</command>s are specified then queue name and depth are
+ displayed.
+ </para>
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl list_queues -p /myvhost messages consumers</screen>
+ <para role="example">
+ This command displays the depth and number of consumers for each
+ queue of the virtual host named <command>/myvhost</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry role="usage-has-option-list">
+ <term><cmdsynopsis><command>list_exchanges</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>exchangeinfoitem</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Returns exchange details. Exchange details of the <command>/</command> virtual host
+ are returned if the "-p" flag is absent. The "-p" flag can be used to
+ override this default.
+ </para>
+ <para>
+ The <command>exchangeinfoitem</command> parameter is used to indicate which
+ exchange information items to include in the results. The column order in the
+ results will match the order of the parameters.
+ <command>exchangeinfoitem</command> can take any value from the list
+ that follows:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>The name of the exchange with non-ASCII characters escaped as in C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>type</term>
+ <listitem><para>The exchange type (such as
+ [<command>direct</command>,
+ <command>topic</command>, <command>headers</command>,
+ <command>fanout</command>]).</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>durable</term>
+ <listitem><para>Whether or not the exchange survives server restarts.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>auto_delete</term>
+ <listitem><para>Whether the exchange will be deleted automatically when no longer used.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>internal</term>
+ <listitem><para>Whether the exchange is internal, i.e. cannot be directly published to by a client.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>arguments</term>
+ <listitem><para>Exchange arguments.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>policy</term>
+ <listitem><para>Policy name for applying to the exchange.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ If no <command>exchangeinfoitem</command>s are specified then
+ exchange name and type are displayed.
+ </para>
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl list_exchanges -p /myvhost name type</screen>
+ <para role="example">
+ This command displays the name and type for each
+ exchange of the virtual host named <command>/myvhost</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry role="usage-has-option-list">
+ <term><cmdsynopsis><command>list_bindings</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>bindinginfoitem</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Returns binding details. By default the bindings for
+ the <command>/</command> virtual host are returned. The
+ "-p" flag can be used to override this default.
+ </para>
+ <para>
+ The <command>bindinginfoitem</command> parameter is used
+ to indicate which binding information items to include
+ in the results. The column order in the results will
+ match the order of the parameters.
+ <command>bindinginfoitem</command> can take any value
+ from the list that follows:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>source_name</term>
+ <listitem><para>The name of the source of messages to
+ which the binding is attached. With non-ASCII
+ characters escaped as in C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>source_kind</term>
+ <listitem><para>The kind of the source of messages to
+ which the binding is attached. Currently always
+ exchange. With non-ASCII characters escaped as in
+ C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>destination_name</term>
+ <listitem><para>The name of the destination of
+ messages to which the binding is attached. With
+ non-ASCII characters escaped as in
+ C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>destination_kind</term>
+ <listitem><para>The kind of the destination of
+ messages to which the binding is attached. With
+ non-ASCII characters escaped as in
+ C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>routing_key</term>
+ <listitem><para>The binding's routing key, with
+ non-ASCII characters escaped as in C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>arguments</term>
+ <listitem><para>The binding's arguments.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ If no <command>bindinginfoitem</command>s are specified then
+ all above items are displayed.
+ </para>
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl list_bindings -p /myvhost exchange_name queue_name</screen>
+ <para role="example">
+ This command displays the exchange name and queue name
+ of the bindings in the virtual host
+ named <command>/myvhost</command>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="list_connections" role="usage-has-option-list">
+ <term><cmdsynopsis><command>list_connections</command> <arg choice="opt" role="usage-option-list"><replaceable>connectioninfoitem</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Returns TCP/IP connection statistics.
+ </para>
+ <para>
+ The <command>connectioninfoitem</command> parameter is used to indicate
+ which connection information items to include in the results. The
+ column order in the results will match the order of the parameters.
+ <command>connectioninfoitem</command> can take any value from the list
+ that follows:
+ </para>
+
+ <variablelist>
+ <varlistentry>
+ <term>pid</term>
+ <listitem><para>Id of the Erlang process associated with the connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>Readable name for the connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>port</term>
+ <listitem><para>Server port.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>host</term>
+ <listitem><para>Server hostname obtained via reverse
+ DNS, or its IP address if reverse DNS failed or was
+ not enabled.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>peer_port</term>
+ <listitem><para>Peer port.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>peer_host</term>
+ <listitem><para>Peer hostname obtained via reverse
+ DNS, or its IP address if reverse DNS failed or was
+ not enabled.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>ssl</term>
+ <listitem><para>Boolean indicating whether the
+ connection is secured with SSL.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>ssl_protocol</term>
+ <listitem><para>SSL protocol
+ (e.g. tlsv1)</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>ssl_key_exchange</term>
+ <listitem><para>SSL key exchange algorithm
+ (e.g. rsa)</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>ssl_cipher</term>
+ <listitem><para>SSL cipher algorithm
+ (e.g. aes_256_cbc)</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>ssl_hash</term>
+ <listitem><para>SSL hash function
+ (e.g. sha)</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>peer_cert_subject</term>
+ <listitem><para>The subject of the peer's SSL
+ certificate, in RFC4514 form.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>peer_cert_issuer</term>
+ <listitem><para>The issuer of the peer's SSL
+ certificate, in RFC4514 form.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>peer_cert_validity</term>
+ <listitem><para>The period for which the peer's SSL
+ certificate is valid.</para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>state</term>
+ <listitem><para>Connection state (one of [<command>starting</command>, <command>tuning</command>,
+ <command>opening</command>, <command>running</command>, <command>flow</command>, <command>blocking</command>, <command>blocked</command>, <command>closing</command>, <command>closed</command>]).</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>channels</term>
+ <listitem><para>Number of channels using the connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>protocol</term>
+ <listitem><para>Version of the AMQP protocol in use (currently one of <command>{0,9,1}</command> or <command>{0,8,0}</command>). Note that if a client requests an AMQP 0-9 connection, we treat it as AMQP 0-9-1.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>auth_mechanism</term>
+ <listitem><para>SASL authentication mechanism used, such as <command>PLAIN</command>.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>user</term>
+ <listitem><para>Username associated with the connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>vhost</term>
+ <listitem><para>Virtual host name with non-ASCII characters escaped as in C.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>timeout</term>
+ <listitem><para>Connection timeout / negotiated heartbeat interval, in seconds.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>frame_max</term>
+ <listitem><para>Maximum frame size (bytes).</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>channel_max</term>
+ <listitem><para>Maximum number of channels on this connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>client_properties</term>
+ <listitem><para>Informational properties transmitted by the client
+ during connection establishment.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>recv_oct</term>
+ <listitem><para>Octets received.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>recv_cnt</term>
+ <listitem><para>Packets received.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>send_oct</term>
+ <listitem><para>Octets sent.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>send_cnt</term>
+ <listitem><para>Packets sent.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>send_pend</term>
+ <listitem><para>Send queue size.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ If no <command>connectioninfoitem</command>s are
+ specified then user, peer host, peer port, time since
+ flow control and memory block state are displayed.
+ </para>
+
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl list_connections send_pend port</screen>
+ <para role="example">
+ This command displays the send queue size and server port for each
+ connection.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry role="usage-has-option-list">
+ <term><cmdsynopsis><command>list_channels</command> <arg choice="opt" role="usage-option-list"><replaceable>channelinfoitem</replaceable> ...</arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Returns information on all current channels, the logical
+ containers executing most AMQP commands. This includes
+ channels that are part of ordinary AMQP connections, and
+ channels created by various plug-ins and other extensions.
+ </para>
+ <para>
+ The <command>channelinfoitem</command> parameter is used to
+ indicate which channel information items to include in the
+ results. The column order in the results will match the
+ order of the parameters.
+ <command>channelinfoitem</command> can take any value from the list
+ that follows:
+ </para>
+
+ <variablelist>
+ <varlistentry>
+ <term>pid</term>
+ <listitem><para>Id of the Erlang process associated with the connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>connection</term>
+ <listitem><para>Id of the Erlang process associated with the connection
+ to which the channel belongs.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>name</term>
+ <listitem><para>Readable name for the channel.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>number</term>
+ <listitem><para>The number of the channel, which uniquely identifies it within
+ a connection.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>user</term>
+ <listitem><para>Username associated with the channel.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>vhost</term>
+ <listitem><para>Virtual host in which the channel operates.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>transactional</term>
+ <listitem><para>True if the channel is in transactional mode, false otherwise.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>confirm</term>
+ <listitem><para>True if the channel is in confirm mode, false otherwise.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>consumer_count</term>
+ <listitem><para>Number of logical AMQP consumers retrieving messages via
+ the channel.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>messages_unacknowledged</term>
+ <listitem><para>Number of messages delivered via this channel but not
+ yet acknowledged.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>messages_uncommitted</term>
+ <listitem><para>Number of messages received in an as yet
+ uncommitted transaction.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>acks_uncommitted</term>
+ <listitem><para>Number of acknowledgements received in an as yet
+ uncommitted transaction.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>messages_unconfirmed</term>
+ <listitem><para>Number of published messages not yet
+ confirmed. On channels not in confirm mode, this
+ remains 0.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>prefetch_count</term>
+ <listitem><para>QoS prefetch limit for new consumers, 0 if unlimited.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>global_prefetch_count</term>
+ <listitem><para>QoS prefetch limit for the entire channel, 0 if unlimited.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ If no <command>channelinfoitem</command>s are specified then pid,
+ user, consumer_count, and messages_unacknowledged are assumed.
+ </para>
+
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl list_channels connection messages_unacknowledged</screen>
+ <para role="example">
+ This command displays the connection process and count
+ of unacknowledged messages for each channel.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>list_consumers</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ List consumers, i.e. subscriptions to a queue's message
+ stream. Each line printed shows, separated by tab
+ characters, the name of the queue subscribed to, the id of
+ the channel process via which the subscription was created
+ and is managed, the consumer tag which uniquely identifies
+ the subscription within a channel, a boolean
+ indicating whether acknowledgements are expected for
+ messages delivered to this consumer, an integer indicating
+ the prefetch limit (with 0 meaning 'none'), and any arguments
+ for this consumer.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>status</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Displays broker status information such as the running
+ applications on the current Erlang node, RabbitMQ and
+ Erlang versions, OS name, memory and file descriptor
+ statistics. (See the <command>cluster_status</command>
+ command to find out which nodes are clustered and
+ running.)
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl status</screen>
+ <para role="example">
+ This command displays information about the RabbitMQ
+ broker.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>environment</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Display the name and value of each variable in the
+ application environment.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>report</command></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Generate a server status report containing a
+ concatenation of all server status information for
+ support purposes. The output should be redirected to a
+ file when accompanying a support request.
+ </para>
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl report > server_report.txt</screen>
+ <para role="example">
+ This command creates a server report which may be
+ attached to a support request email.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>eval</command> <arg choice="req"><replaceable>expr</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Evaluate an arbitrary Erlang expression.
+ </para>
+ <para role="example-prefix">
+ For example:
+ </para>
+ <screen role="example">rabbitmqctl eval 'node().'</screen>
+ <para role="example">
+ This command returns the name of the node to which rabbitmqctl has connected.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+
+ <refsect2>
+ <title>Miscellaneous</title>
+ <variablelist>
+ <varlistentry>
+ <term><cmdsynopsis><command>close_connection</command> <arg choice="req"><replaceable>connectionpid</replaceable></arg> <arg choice="req"><replaceable>explanation</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>connectionpid</term>
+ <listitem><para>Id of the Erlang process associated with the connection to close.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>explanation</term>
+ <listitem><para>Explanation string.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Instruct the broker to close the connection associated
+ with the Erlang process id <option>connectionpid</option> (see also the
+ <link linkend="list_connections"><command>list_connections</command></link>
+ command), passing the <option>explanation</option> string to the
+ connected client as part of the AMQP connection shutdown
+ protocol.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl close_connection "&lt;rabbit@tanto.4262.0&gt;" "go away"</screen>
+ <para role="example">
+ This command instructs the RabbitMQ broker to close the
+ connection associated with the Erlang process
+ id <command>&lt;rabbit@tanto.4262.0&gt;</command>, passing the
+ explanation <command>go away</command> to the connected client.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>trace_on</command> <arg choice="opt">-p <replaceable>vhost</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhost</term>
+ <listitem><para>The name of the virtual host for which to start tracing.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Starts tracing.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>trace_off</command> <arg choice="opt">-p <replaceable>vhost</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>vhost</term>
+ <listitem><para>The name of the virtual host for which to stop tracing.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ Stops tracing.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>set_vm_memory_high_watermark</command> <arg choice="req"><replaceable>fraction</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>fraction</term>
+ <listitem><para>
+ The new memory threshold fraction at which flow
+ control is triggered, as a floating point number
+ greater than or equal to 0.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect2>
+ </refsect1>
+
+</refentry>
--- /dev/null
+<?xml version='1.0'?>
+<!--
+  Re-emits a document with all namespaces stripped: every element is
+  recreated using only its local name (in no namespace) and every
+  attribute is copied through unchanged; text nodes pass through via
+  the XSLT built-in rules.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:doc="http://www.rabbitmq.com/namespaces/ad-hoc/doc"
+ xmlns="http://www.w3.org/1999/xhtml"
+ version='1.0'>
+
+<xsl:output method="xml" />
+
+ <!-- Copy every element through with local name only -->
+ <xsl:template match="*">
+ <xsl:element name="{local-name()}" namespace="">
+ <xsl:apply-templates select="@*|node()"/>
+ </xsl:element>
+ </xsl:template>
+
+ <!-- Copy every attribute through -->
+ <xsl:template match="@*"><xsl:copy/></xsl:template>
+</xsl:stylesheet>
--- /dev/null
+<?xml version='1.0'?>
+<!--
+  Renders a DocBook refentry (e.g. a command's man page) as an Erlang
+  module exporting usage/0, which returns the command-line usage text.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'>
+
+<!-- Name of the Erlang module to generate -->
+<xsl:param name="modulename"/>
+
+<xsl:output method="text"
+ encoding="UTF-8"
+ indent="no"/>
+<!-- Drop inter-element whitespace from the input, but keep it inside
+ command synopses where it separates arguments. -->
+<xsl:strip-space elements="*"/>
+<xsl:preserve-space elements="cmdsynopsis arg" />
+
+<!-- Generate the whole usage module: the -module/-export header, a
+ "Usage:" line built from the refentry's cmdsynopsis, then option and
+ command summaries. %QUOTE% is a placeholder token, presumably
+ replaced by a quote character in a later build step - confirm. -->
+<xsl:template match="/">
+<!-- Pull out cmdsynopsis to show the command usage line. -->%% Generated, do not edit!
+-module(<xsl:value-of select="$modulename" />).
+-export([usage/0]).
+usage() -> %QUOTE%Usage:
+<xsl:value-of select="refentry/refsynopsisdiv/cmdsynopsis/command"/>
+<xsl:text> </xsl:text>
+<xsl:for-each select="refentry/refsynopsisdiv/cmdsynopsis/arg">
+ <xsl:apply-templates select="." />
+ <xsl:text> </xsl:text>
+</xsl:for-each>
+
+<!-- Separator between the synopsis and the sections below. -->
+<xsl:text> </xsl:text>
+
+<!-- List options (any variable list in a section called "Options"). -->
+<xsl:for-each select=".//*[title='Options']/variablelist">
+ <xsl:if test="position() = 1"> Options: </xsl:if>
+ <xsl:for-each select="varlistentry">
+ <xsl:text> </xsl:text>
+ <xsl:for-each select=".//term">
+ <xsl:value-of select="."/>
+ <xsl:if test="not(position() = last())">, </xsl:if>
+ </xsl:for-each><xsl:text> </xsl:text>
+ </xsl:for-each>
+</xsl:for-each>
+
+<!-- Any paragraphs which have been marked as role="usage" (principally for global flags). -->
+<xsl:text> </xsl:text>
+<xsl:for-each select=".//*[title='Options']//para[@role='usage']">
+<xsl:value-of select="normalize-space(.)"/><xsl:text> </xsl:text>
+</xsl:for-each>
+
+<!-- List commands (any first-level variable list in a section called "Commands"). -->
+<xsl:for-each select=".//*[title='Commands']/variablelist | .//*[title='Commands']/refsect2/variablelist">
+ <xsl:if test="position() = 1">Commands: </xsl:if>
+ <xsl:for-each select="varlistentry">
+ <xsl:text> </xsl:text>
+ <xsl:apply-templates select="term"/>
+ <xsl:text> </xsl:text>
+ </xsl:for-each>
+ <xsl:text> </xsl:text>
+</xsl:for-each>
+
+<!-- Per-command usage details (option lists and usage paras), rendered
+ by the command-usage mode templates below. -->
+<xsl:apply-templates select=".//*[title='Commands']/refsect2" mode="command-usage" />
+%QUOTE%.
+</xsl:template>
+
+<!-- Option lists in command usage: emits "<arg> must be a member of the
+ list [x, y, ...]." for each command flagged with
+ role="usage-has-option-list". The angle brackets around the argument
+ name must be written as &lt;/&gt; entities: raw < and > are not
+ well-formed in XML element content. -->
+<xsl:template match="varlistentry[@role='usage-has-option-list']" mode="command-usage">&lt;<xsl:value-of select="term/cmdsynopsis/arg[@role='usage-option-list']/replaceable"/>&gt; must be a member of the list [<xsl:for-each select="listitem/variablelist/varlistentry"><xsl:apply-templates select="term"/><xsl:if test="not(position() = last())">, </xsl:if></xsl:for-each>].<xsl:text> </xsl:text></xsl:template>
+
+<!-- Usage paras in command usage: paragraphs marked role="usage" are
+ copied into the per-command text with whitespace normalised. -->
+<xsl:template match="para[@role='usage']" mode="command-usage">
+<xsl:value-of select="normalize-space(.)"/><xsl:text> </xsl:text>
+</xsl:template>
+
+<!-- Don't show anything else in command usage: suppress all other text
+ so only the explicitly matched elements above contribute output. -->
+<xsl:template match="text()" mode="command-usage"/>
+
<![CDATA[+<!-- Optional arguments render in square brackets. -->
+<xsl:template match="arg[@choice='opt']">[<xsl:apply-templates/>]</xsl:template>
+<!-- Replaceable (user-supplied) values render in angle brackets; the
+ brackets must be written as entities since raw < and > are not
+ well-formed in XML element content. -->
+<xsl:template match="replaceable">&lt;<xsl:value-of select="."/>&gt;</xsl:template>]]>
+
+</xsl:stylesheet>
--- /dev/null
+%% Application resource file for the rabbit (RabbitMQ server) OTP
+%% application: metadata, start module and default configuration.
+{application, rabbit, %% -*- erlang -*-
+ [{description, "RabbitMQ"},
+ {id, "RabbitMQ"},
+ {vsn, "3.3.5"},
+ {modules, []}, %% left empty here; filled in at build time (see the app-generating escript)
+ {registered, [rabbit_amqqueue_sup,
+ rabbit_log,
+ rabbit_node_monitor,
+ rabbit_router,
+ rabbit_sup,
+ rabbit_tcp_client_sup,
+ rabbit_direct_client_sup]},
+ {applications, [kernel, stdlib, sasl, mnesia, os_mon, xmerl]},
+%% we also depend on crypto, public_key and ssl but they shouldn't be
+%% in here as we don't actually want to start them automatically
+ {mod, {rabbit, []}},
+ {env, [{tcp_listeners, [5672]},
+ {ssl_listeners, []},
+ {ssl_options, []},
+ {vm_memory_high_watermark, 0.4},
+ {vm_memory_high_watermark_paging_ratio, 0.5},
+ {disk_free_limit, 50000000}, %% 50MB
+ {msg_store_index_module, rabbit_msg_store_ets_index},
+ {backing_queue_module, rabbit_variable_queue},
+ %% 0 ("no limit") would make a better default, but that
+ %% breaks the QPid Java client
+ {frame_max, 131072},
+ {channel_max, 0},
+ {heartbeat, 580}, %% heartbeat interval (presumably seconds -- confirm)
+ {msg_store_file_size_limit, 16777216},
+ {queue_index_max_journal_entries, 65536},
+ {default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {default_user_tags, [administrator]},
+ {default_vhost, <<"/">>},
+ {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+ {loopback_users, [<<"guest">>]},
+ {cluster_nodes, {[], disc}},
+ {server_properties, []},
+ {collect_statistics, none},
+ {collect_statistics_interval, 5000},
+ {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+ {auth_backends, [rabbit_auth_backend_internal]},
+ {delegate_count, 16},
+ {trace_vhosts, []},
+ {log_levels, [{connection, info}]},
+ {ssl_cert_login_from, distinguished_name},
+ {reverse_dns_lookups, false},
+ {cluster_partition_handling, ignore},
+ {tcp_listen_options, [binary,
+ {packet, raw},
+ {reuseaddr, true},
+ {backlog, 128},
+ {nodelay, true},
+ {linger, {true, 0}},
+ {exit_on_close, false}]},
+ {halt_on_upgrade_failure, true},
+ {hipe_compile, false},
+ %% see bug 24513 for how this list was created
+ {hipe_modules,
+ [rabbit_reader, rabbit_channel, gen_server2, rabbit_exchange,
+ rabbit_command_assembler, rabbit_framing_amqp_0_9_1, rabbit_basic,
+ rabbit_event, lists, queue, priority_queue, rabbit_router,
+ rabbit_trace, rabbit_misc, rabbit_binary_parser,
+ rabbit_exchange_type_direct, rabbit_guid, rabbit_net,
+ rabbit_amqqueue_process, rabbit_variable_queue,
+ rabbit_binary_generator, rabbit_writer, delegate, gb_sets, lqueue,
+ sets, orddict, rabbit_amqqueue, rabbit_limiter, gb_trees,
+ rabbit_queue_index, rabbit_exchange_decorator, gen, dict, ordsets,
+ file_handle_cache, rabbit_msg_store, array,
+ rabbit_msg_store_ets_index, rabbit_msg_file,
+ rabbit_exchange_type_fanout, rabbit_exchange_type_topic, mnesia,
+ mnesia_lib, rpc, mnesia_tm, qlc, sofs, proplists, credit_flow,
+ pmon, ssl_connection, tls_connection, ssl_record, tls_record,
+ gen_fsm, ssl]},
+ {ssl_apps, [asn1, crypto, public_key, ssl]}
+ ]}]}.
--- /dev/null
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+%% Usage: <script> InFile OutFile SrcDir...
+%%
+%% Reads an application resource file (InFile); if its `modules' entry
+%% is the empty list, replaces it with the modules corresponding to the
+%% .erl files found in SrcDirs; writes the result to OutFile.
+main([InFile, OutFile | SrcDirs]) ->
+    %% One module name per .erl file in any of the source directories.
+    Modules = [list_to_atom(filename:basename(F, ".erl")) ||
+                  SrcDir <- SrcDirs,
+                  F <- filelib:wildcard("*.erl", SrcDir)],
+    {ok, [{application, Application, Properties}]} = file:consult(InFile),
+    NewProperties =
+        case proplists:get_value(modules, Properties) of
+            %% Only fill in the module list when it was left empty.
+            [] -> lists:keyreplace(modules, 1, Properties,
+                                   {modules, Modules});
+            _  -> Properties
+        end,
+    %% Assert on the write so a failure (e.g. unwritable OutFile)
+    %% crashes the script instead of being silently ignored.
+    ok = file:write_file(
+           OutFile,
+           io_lib:format("~p.~n", [{application, Application, NewProperties}])).
--- /dev/null
+#!/usr/bin/env escript
+%% -*- erlang -*-
+-mode(compile).
+
+%% We expect the list of Erlang source and header files to arrive on
+%% stdin, with the entries colon-separated.
+%% Write a Makefile dependency fragment (TargetFile): one rule per .erl
+%% file read from stdin, making EbinDir/Module.beam depend on the source
+%% file, on the beams of any behaviours it uses and on any listed
+%% headers it includes (see detect_deps/5 below).
+main([TargetFile, EbinDir]) ->
+ %% stdin supplies the colon-separated list of .erl and .hrl paths.
+ ErlsAndHrls = [ string:strip(S,left) ||
+ S <- string:tokens(io:get_line(""), ":\n")],
+ ErlFiles = [F || F <- ErlsAndHrls, lists:suffix(".erl", F)],
+ %% Set of module names we compile ourselves, so behaviour deps are
+ %% only recorded for behaviours defined in this source tree.
+ Modules = sets:from_list(
+ [list_to_atom(filename:basename(FileName, ".erl")) ||
+ FileName <- ErlFiles]),
+ HrlFiles = [F || F <- ErlsAndHrls, lists:suffix(".hrl", F)],
+ IncludeDirs = lists:usort([filename:dirname(Path) || Path <- HrlFiles]),
+ Headers = sets:from_list(HrlFiles),
+ %% Map each source path to its set of dependencies.
+ Deps = lists:foldl(
+ fun (Path, Deps1) ->
+ dict:store(Path, detect_deps(IncludeDirs, EbinDir,
+ Modules, Headers, Path),
+ Deps1)
+ end, dict:new(), ErlFiles),
+ {ok, Hdl} = file:open(TargetFile, [write, delayed_write]),
+ dict:fold(
+ %% NOTE(review): detect_deps/5 returns a sets set, never the list
+ %% [], so this first clause appears unreachable; modules with no
+ %% deps are still handled correctly by the clause below (their
+ %% sets:fold writes nothing). Confirm before relying on it.
+ fun (_Path, [], ok) ->
+ ok;
+ (Path, Dep, ok) ->
+ Module = filename:basename(Path, ".erl"),
+ %% "ebin/Module.beam: Module.erl dep1 dep2 ...\n"
+ ok = file:write(Hdl, [EbinDir, "/", Module, ".beam: ",
+ Path]),
+ ok = sets:fold(fun (E, ok) -> file:write(Hdl, [" ", E]) end,
+ ok, Dep),
+ file:write(Hdl, ["\n"])
+ end, ok, Deps),
+ %% Regenerate the deps file whenever this script itself changes.
+ ok = file:write(Hdl, [TargetFile, ": ", escript:script_name(), "\n"]),
+ ok = file:sync(Hdl),
+ ok = file:close(Hdl).
+
+%% Parse Path with epp and collect its build-time dependencies as a set:
+%% the EbinDir beam of any behaviour it declares (only when that
+%% behaviour is one of Modules, i.e. built from this tree), and any
+%% included header that is one of Headers. epp reports each include as
+%% a 'file' attribute in the expanded forms.
+detect_deps(IncludeDirs, EbinDir, Modules, Headers, Path) ->
+ %% use_specs is predefined as a macro so -ifdef(use_specs) sections
+ %% in the sources are parsed too.
+ {ok, Forms} = epp:parse_file(Path, IncludeDirs, [{use_specs, true}]),
+ lists:foldl(
+ %% Both the British and American attribute spellings are accepted.
+ fun ({attribute, _LineNumber, Attribute, Behaviour}, Deps)
+ when Attribute =:= behaviour orelse Attribute =:= behavior ->
+ case sets:is_element(Behaviour, Modules) of
+ true -> sets:add_element(
+ [EbinDir, "/", atom_to_list(Behaviour), ".beam"],
+ Deps);
+ false -> Deps
+ end;
+ ({attribute, _LineNumber, file, {FileName, _LineNumber1}}, Deps) ->
+ case sets:is_element(FileName, Headers) of
+ true -> sets:add_element(FileName, Deps);
+ false -> Deps
+ end;
+ (_Form, Deps) ->
+ Deps
+ end, sets:new(), Forms).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-ifdef(use_specs).
+
+%% Result common to the callbacks below: 'ok' to carry on, {stop,
+%% Reason} to stop, or {become, Module, Args} -- presumably to swap in
+%% a different callback module; confirm against the implementing module.
+-type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}).
+%% Opaque argument term threaded through to every callback.
+-type(args() :: any()).
+%% A list of group member pids.
+-type(members() :: [pid()]).
+
+%% Callback specs for the behaviour; the callback module itself is
+%% defined elsewhere.
+-spec(joined/2 :: (args(), members()) -> callback_result()).
+-spec(members_changed/3 :: (args(), members(), members()) -> callback_result()).
+-spec(handle_msg/3 :: (args(), pid(), any()) -> callback_result()).
+-spec(terminate/2 :: (args(), term()) -> any()).
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% An authenticated user as passed around the running broker.
+-record(user, {username,
+ tags,
+ auth_backend, %% Module this user came from
+ impl %% Scratch space for that module
+ }).
+
+%% A user as stored by the internal auth backend; only a password hash
+%% is kept, not the password itself.
+-record(internal_user, {username, password_hash, tags}).
+%% Per-user, per-vhost permissions: configure/write/read patterns.
+-record(permission, {configure, write, read}).
+%% Composite key: a user within a virtual host.
+-record(user_vhost, {username, virtual_host}).
+%% A #user_vhost{} key together with its #permission{}.
+-record(user_permission, {user_vhost, permission}).
+
+%% A virtual host; 'dummy' presumably pads the record for mnesia (cf.
+%% the note on #route{} below) -- confirm.
+-record(vhost, {virtual_host, dummy}).
+
+%% Message content: decoded and/or encoded properties plus the payload.
+-record(content,
+ {class_id,
+ properties, %% either 'none', or a decoded record/tuple
+ properties_bin, %% either 'none', or an encoded properties binary
+ %% Note: at most one of properties and properties_bin can be
+ %% 'none' at once.
+ protocol, %% The protocol under which properties_bin was encoded
+ payload_fragments_rev %% list of binaries, in reverse order (!)
+ }).
+
+%% Fully-qualified name of a broker resource: vhost, kind and name.
+-record(resource, {virtual_host, kind, name}).
+
+%% Exchange metadata.
+-record(exchange, {name, type, durable, auto_delete, internal, arguments,
+ scratches, policy, decorators}).
+%% A per-exchange serial number.
+-record(exchange_serial, {name, next}).
+
+%% Queue metadata, including the pids of its (mirror) slaves.
+-record(amqqueue, {name, durable, auto_delete, exclusive_owner = none,
+ arguments, pid, slave_pids, sync_slave_pids, policy,
+ gm_pids, decorators}).
+
+%% mnesia doesn't like unary records, so we add a dummy 'value' field
+-record(route, {binding, value = const}).
+-record(reverse_route, {reverse_binding, value = const}).
+
+%% A binding from a source to a destination; #reverse_binding{} holds
+%% the same fields keyed by destination for reverse lookup.
+-record(binding, {source, key, destination, args = []}).
+-record(reverse_binding, {destination, key, source, args = []}).
+
+%% Trie structures -- presumably backing the topic exchange's routing
+%% table; confirm against rabbit_exchange_type_topic.
+-record(topic_trie_node, {trie_node, edge_count, binding_count}).
+-record(topic_trie_edge, {trie_edge, node_id}).
+-record(topic_trie_binding, {trie_binding, value = const}).
+
+%% Keys for the trie records above.
+-record(trie_node, {exchange_name, node_id}).
+-record(trie_edge, {exchange_name, node_id, word}).
+-record(trie_binding, {exchange_name, node_id, destination, arguments}).
+
+%% A network listener on a node: protocol, address and port.
+-record(listener, {node, protocol, host, ip_address, port}).
+
+-record(runtime_parameters, {key, value}).
+
+%% A message as published: routing information plus its #content{}.
+-record(basic_message, {exchange_name, routing_keys = [], content, id,
+ is_persistent}).
+
+%% Pairs the underlying TCP socket with its SSL wrapper.
+-record(ssl_socket, {tcp, ssl}).
+%% A message in flight from a sender, with its publish flags.
+-record(delivery, {mandatory, confirm, sender, message, msg_seq_no}).
+%% A protocol error: name, human-readable explanation and the method
+%% involved (if any).
+-record(amqp_error, {name, explanation = "", method = none}).
+
+%% An internal event notification.
+-record(event, {type, props, reference = undefined, timestamp}).
+
+%% Per-message properties tracked alongside the message.
+-record(message_properties, {expiry, needs_confirming = false}).
+
+%% Metadata describing an installed plugin.
+-record(plugin, {name, %% atom()
+ version, %% string()
+ description, %% string()
+ type, %% 'ez' or 'dir'
+ dependencies, %% [{atom(), string()}]
+ location}). %% string()
+
+%%----------------------------------------------------------------------------
+%% Constants and convenience macros
+%%----------------------------------------------------------------------------
+
+-define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2014 GoPivotal, Inc.").
+-define(INFORMATION_MESSAGE, "Licensed under the MPL. See http://www.rabbitmq.com/").
+%% Minimum Erlang runtime system (ERTS) version required.
+-define(ERTS_MINIMUM, "5.6.3").
+
+%% EMPTY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1
+%% - 1 byte of frame type
+%% - 2 bytes of channel number
+%% - 4 bytes of frame payload length
+%% - 1 byte of payload trailer FRAME_END byte
+%% See rabbit_binary_generator:check_empty_frame_size/0, an assertion
+%% called at startup.
+-define(EMPTY_FRAME_SIZE, 8).
+
+%% 2^32 - 1, the largest usable wait value.
+-define(MAX_WAIT, 16#ffffffff).
+
+%% Hibernation backoff parameters -- presumably milliseconds; confirm
+%% against the gen_server2 users of these values.
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+%% NOTE(review): the semantics of this {2000, 500} pair are not visible
+%% here -- confirm against credit_flow before documenting further.
+-define(CREDIT_DISC_BOUND, {2000, 500}).
+
+%% This is dictated by `erlang:send_after' on which we depend to implement TTL.
+%% (2^32 - 1 milliseconds, the maximum timer value.)
+-define(MAX_EXPIRY_TIMER, 4294967295).
+
+-define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
+%% DELETED_HEADER names the routing header removed before delivery
+%% (BCC, presumably so recipients don't see it) -- confirm against the
+%% routing code.
+-define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
+-define(DELETED_HEADER, <<"BCC">>).
+
+%% Trying to send a term across a cluster larger than 2^31 bytes will
+%% cause the VM to exit with "Absurdly large distribution output data
+%% buffer". So we limit the max message size, leaving leeway for the
+%% #basic_message{} and #content{} wrapping the message body.
+%% NOTE(review): 2147383648 = 2^31 - 10^5 (~100KB of leeway), not
+%% 2^31 - 10^6 (1MB) as the original comment claimed; confirm whether
+%% the value or the intended leeway is wrong.
+-define(MAX_MSG_SIZE, 2147383648).
+
+%% First number is maximum size in bytes before we start to
+%% truncate. The following 4-tuple is:
+%%
+%% 1) Maximum size of printable lists and binaries.
+%% 2) Maximum size of any structural term.
+%% 3) Amount to decrease 1) every time we descend while truncating.
+%% 4) Amount to decrease 2) every time we descend while truncating.
+%%
+%% Whole thing feeds into truncate:log_event/2.
+-define(LOG_TRUNC, {100000, {2000, 100, 50, 5}}).
+
+%% Record this process's descriptive name (for diagnostics) via
+%% rabbit_misc:store_proc_name/2.
+-define(store_proc_name(N), rabbit_misc:store_proc_name(?MODULE, N)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-include("rabbit.hrl").
+
+%% Specs/types are only compiled in when 'use_specs' is defined —
+%% presumably to keep the header usable on Erlang releases without
+%% full spec support (TODO confirm).
+-ifdef(use_specs).
+
+-type(msg() :: any()).
+
+-endif.
+
+%% Location entry for message 'msg_id': which 'file' it lives in, at
+%% what 'offset' and 'total_size', plus its reference count.
+-record(msg_location, {msg_id, ref_count, file, offset, total_size}).
--- /dev/null
+# Refuse to do anything when invoked without an explicit target.
+.PHONY: default
+default:
+ @echo No default target && false
+
+# Every repository making up the public umbrella: broker, codegen,
+# clients, test suite, third-party wrappers and plugins.
+REPOS:= \
+ rabbitmq-server \
+ rabbitmq-codegen \
+ rabbitmq-java-client \
+ rabbitmq-dotnet-client \
+ rabbitmq-test \
+ cowboy-wrapper \
+ eldap-wrapper \
+ mochiweb-wrapper \
+ rabbitmq-amqp1.0 \
+ rabbitmq-auth-backend-ldap \
+ rabbitmq-auth-mechanism-ssl \
+ rabbitmq-consistent-hash-exchange \
+ rabbitmq-erlang-client \
+ rabbitmq-federation \
+ rabbitmq-federation-management \
+ rabbitmq-management \
+ rabbitmq-management-agent \
+ rabbitmq-management-visualiser \
+ rabbitmq-metronome \
+ rabbitmq-web-dispatch \
+ rabbitmq-mqtt \
+ rabbitmq-shovel \
+ rabbitmq-shovel-management \
+ rabbitmq-stomp \
+ rabbitmq-toke \
+ rabbitmq-tracing \
+ rabbitmq-web-stomp \
+ rabbitmq-web-stomp-examples \
+ sockjs-erlang-wrapper \
+ toke \
+ webmachine-wrapper
+
+# Mercurial branch used by the named_update / up_c targets.
+BRANCH:=default
+
+# Base URL to clone missing repositories from: derived from this
+# checkout's own 'default' hg path when available, otherwise the
+# public rabbitmq.com hosting.
+HG_CORE_REPOBASE:=$(shell dirname `hg paths default 2>/dev/null` 2>/dev/null)
+ifndef HG_CORE_REPOBASE
+HG_CORE_REPOBASE:=http://hg.rabbitmq.com/
+endif
+
+# Version stamped onto packages; expected to be overridden on release.
+VERSION:=0.0.0
+
+#----------------------------------
+
+# Top-level entry points. The real work is delegated to
+# all-packages.mk, which knows about every package in the umbrella.
+# None of these produce a file named after themselves, so declare
+# them phony. All recursive invocations use $(MAKE) (never bare
+# 'make') so the jobserver, command-line flags and the selected make
+# binary propagate to the sub-make.
+.PHONY: all test release clean check-xref plugins-dist plugins-srcdist
+
+all:
+ $(MAKE) -f all-packages.mk all-packages VERSION=$(VERSION)
+
+test:
+ $(MAKE) -f all-packages.mk test-all-packages VERSION=$(VERSION)
+
+release:
+ $(MAKE) -f all-packages.mk all-releasable VERSION=$(VERSION)
+
+clean:
+ $(MAKE) -f all-packages.mk clean-all-packages
+
+check-xref:
+ $(MAKE) -f all-packages.mk check-xref-packages
+
+# Populate PLUGINS_DIST_DIR with the releasable plugin artefacts.
+plugins-dist: release
+ rm -rf $(PLUGINS_DIST_DIR)
+ mkdir -p $(PLUGINS_DIST_DIR)
+ $(MAKE) -f all-packages.mk copy-releasable VERSION=$(VERSION) PLUGINS_DIST_DIR=$(PLUGINS_DIST_DIR)
+
+# Build a source distribution of the plugins under
+# PLUGINS_SRC_DIST_DIR, then clean it and drop the server tree.
+plugins-srcdist:
+ rm -rf $(PLUGINS_SRC_DIST_DIR)
+ mkdir -p $(PLUGINS_SRC_DIST_DIR)/licensing
+
+ rsync -a --exclude '.hg*' rabbitmq-erlang-client $(PLUGINS_SRC_DIST_DIR)/
+ touch $(PLUGINS_SRC_DIST_DIR)/rabbitmq-erlang-client/.srcdist_done
+
+ rsync -a --exclude '.hg*' rabbitmq-server $(PLUGINS_SRC_DIST_DIR)/
+ touch $(PLUGINS_SRC_DIST_DIR)/rabbitmq-server/.srcdist_done
+
+ $(MAKE) -f all-packages.mk copy-srcdist VERSION=$(VERSION) PLUGINS_SRC_DIST_DIR=$(PLUGINS_SRC_DIST_DIR)
+ cp Makefile *.mk generate* $(PLUGINS_SRC_DIST_DIR)/
+ echo "This is the released version of rabbitmq-public-umbrella. \
+You can clone the full version with: hg clone http://hg.rabbitmq.com/rabbitmq-public-umbrella" > $(PLUGINS_SRC_DIST_DIR)/README
+
+ PRESERVE_CLONE_DIR=1 $(MAKE) -C $(PLUGINS_SRC_DIST_DIR) clean
+ rm -rf $(PLUGINS_SRC_DIST_DIR)/rabbitmq-server
+
+#----------------------------------
+# Convenience aliases
+
+# Short Mercurial-style abbreviations for the longer targets below.
+.PHONY: co
+co: checkout
+
+.PHONY: ci
+ci: checkin
+
+.PHONY: up
+up: update
+
+.PHONY: st
+st: status
+
+.PHONY: up_c
+up_c: named_update
+
+#----------------------------------
+
+# Clone any repository that is not yet present. Note the defaulted
+# HG_CORE_REPOBASE already ends in '/', so the URL may contain a
+# double slash — apparently tolerated by hg.
+$(REPOS):
+ hg clone $(HG_CORE_REPOBASE)/$@
+
+# 'checkout' just ensures every repository has been cloned.
+.PHONY: checkout
+checkout: $(REPOS)
+
+#----------------------------------
+# Subrepository management
+
+
+# Generate one phony per-repo target.
+# $(1) is the target
+# $(2) is the target dependency. Can use % to get current REPO
+# $(3) is the target body. Can use % to get current REPO
+define repo_target
+
+.PHONY: $(1)
+$(1): $(2)
+ $(3)
+
+endef
+
+# Instantiate repo_target for every repo, producing <repo>+<suffix>
+# targets; patsubst substitutes the repo name for any % in the
+# dependency and body templates.
+# $(1) is the list of repos
+# $(2) is the suffix
+# $(3) is the target dependency. Can use % to get current REPO
+# $(4) is the target body. Can use % to get current REPO
+define repo_targets
+$(foreach REPO,$(1),$(call repo_target,$(REPO)+$(2),\
+ $(patsubst %,$(3),$(REPO)),$(patsubst %,$(4),$(REPO))))
+endef
+
+# Do not allow status to fork with -j otherwise output will be garbled
+.PHONY: status
+status: checkout
+ $(foreach DIR,. $(REPOS), \
+ (cd $(DIR); OUT=$$(hg st -mad); \
+ if \[ ! -z "$$OUT" \]; then echo "\n$(DIR):\n$$OUT"; fi) &&) true
+
+.PHONY: pull
+pull: $(foreach DIR,. $(REPOS),$(DIR)+pull)
+
+$(eval $(call repo_targets,. $(REPOS),pull,| %,(cd % && hg pull)))
+
+# update depends on pull for the same repo (the %+pull dependency).
+.PHONY: update
+update: $(foreach DIR,. $(REPOS),$(DIR)+update)
+
+$(eval $(call repo_targets,. $(REPOS),update,%+pull,(cd % && hg up)))
+
+.PHONY: named_update
+named_update: $(foreach DIR,. $(REPOS),$(DIR)+named_update)
+
+$(eval $(call repo_targets,. $(REPOS),named_update,%+pull,\
+ (cd % && hg up -C $(BRANCH))))
+
+# Tag every repo with $(TAG), supplied on the command line.
+.PHONY: tag
+tag: $(foreach DIR,. $(REPOS),$(DIR)+tag)
+
+$(eval $(call repo_targets,. $(REPOS),tag,| %,(cd % && hg tag $(TAG))))
+
+.PHONY: push
+push: $(foreach DIR,. $(REPOS),$(DIR)+push)
+
+# "|| true" since hg push fails if there are no changes
+$(eval $(call repo_targets,. $(REPOS),push,| %,(cd % && hg push -f || true)))
+
+.PHONY: checkin
+checkin: $(foreach DIR,. $(REPOS),$(DIR)+checkin)
+
+$(eval $(call repo_targets,. $(REPOS),checkin,| %,(cd % && hg ci)))
--- /dev/null
+This is the released version of rabbitmq-public-umbrella. You can clone the full version with: hg clone http://hg.rabbitmq.com/rabbitmq-public-umbrella
--- /dev/null
+# Root of the umbrella tree (this directory).
+UMBRELLA_BASE_DIR:=.
+
+include common.mk
+
+# Chain test targets so test-all-packages is safe under 'make -j'
+# (see chain_test in common.mk).
+CHAIN_TESTS:=true
+
+# Pull in all the packages
+$(foreach PACKAGE_MK,$(wildcard */package.mk),$(eval $(call do_package,$(call canonical_path,$(patsubst %/,%,$(dir $(PACKAGE_MK)))))))
+
+# ...and the non-integrated ones
+$(foreach V,$(.VARIABLES),$(if $(filter NON_INTEGRATED_%,$(filter-out NON_INTEGRATED_DEPS_%,$V)),$(eval $(call do_package,$(subst NON_INTEGRATED_,,$V)))))
+
+# Run every package's tests, serialised through the chain built up by
+# chain_test.
+test-all-packages: $(CHAINED_TESTS)
--- /dev/null
+# Various global definitions
+
+# UMBRELLA_BASE_DIR should be set to the path of the
+# rabbitmq-public-umbrella directory before this file is included.
+
+# Make version check: sort the running and required versions and fail
+# unless the required one sorts first (i.e. the running make is at
+# least as new). NOTE(review): 'sort -t. -n' is a simplistic version
+# comparison; it happens to work for the GNU make versions in
+# circulation, but is not a general version sort.
+REQUIRED_MAKE_VERSION:=3.81
+ifneq ($(shell ( echo "$(MAKE_VERSION)" ; echo "$(REQUIRED_MAKE_VERSION)" ) | sort -t. -n | head -1),$(REQUIRED_MAKE_VERSION))
+$(error GNU make version $(REQUIRED_MAKE_VERSION) required)
+endif
+
+# This is the standard trick for making pattern substitution work
+# (amongst others) when the replacement needs to include a comma.
+COMMA:=,
+
+# Global settings that can be overridden on the command line
+
+# These ones are expected to be passed down to the sub-makes invoked
+# for non-integrated packages
+VERSION ?= 0.0.0
+ERL ?= erl
+ERL_OPTS ?=
+ERLC ?= erlc
+ERLC_OPTS ?= -Wall +debug_info
+TMPDIR ?= /tmp
+
+NODENAME ?= rabbit-test
+ERL_CALL ?= erl_call
+ERL_CALL_OPTS ?= -sname $(NODENAME) -e
+
+# Where we put all the files produced when running tests.
+TEST_TMPDIR=$(TMPDIR)/rabbitmq-test
+
+# Callable functions
+
+# Convert a package name to the corresponding erlang app name
+# (dashes become underscores).
+define package_to_app_name
+$(subst -,_,$(1))
+endef
+
+# If the variable named $(1) holds a non-empty value, return it.
+# Otherwise, set the variable to $(2) and return that value.
+define memoize
+$(if $($(1)),$($(1)),$(eval $(1):=$(2))$(2))
+endef
+
+# Return a canonical form for the path in $(1)
+#
+# Absolute path names can be a bit verbose. This provides a way to
+# canonicalize path names with more concise results.
+# (The first caller's spelling of a given real path wins, via
+# memoize keyed on the realpath.)
+define canonical_path
+$(call memoize,SHORT_$(realpath $(1)),$(1))
+endef
+
+# Convert a package name to a path name
+define package_to_path
+$(call canonical_path,$(UMBRELLA_BASE_DIR)/$(1))
+endef
+
+# Produce a cp command to copy from $(1) to $(2), unless $(1) is
+# empty, in which case do nothing.
+#
+# The optional $(3) gives a suffix to append to the command, if a
+# command is produced.
+define copy
+$(if $(1),cp -r $(1) $(2)$(if $(3), $(3)))
+endef
+
+# Produce the makefile fragment for the package with path in $(1), if
+# it hasn't already been visited. The path should have been
+# canonicalized via canonical_path.
+define do_package
+# Have we already visited this package? If so, skip it
+ifndef DONE_$(1)
+PACKAGE_DIR:=$(1)
+include $(UMBRELLA_BASE_DIR)/do-package.mk
+endif
+endef
+
+# This is used to chain test rules, so that test-all-packages works in
+# the presence of 'make -j'
+# (Expands to the tests chained so far — making $(1) depend on them —
+# then appends $(1) to the chain.)
+define chain_test
+$(if $(CHAIN_TESTS),$(CHAINED_TESTS)$(eval CHAINED_TESTS+=$(1)))
+endef
+
+# Mark the non-integrated repos: these are built by their own
+# makefiles rather than the umbrella machinery.
+NON_INTEGRATED_$(call package_to_path,rabbitmq-server):=true
+NON_INTEGRATED_$(call package_to_path,rabbitmq-erlang-client):=true
+NON_INTEGRATED_$(call package_to_path,rabbitmq-java-client):=true
+NON_INTEGRATED_$(call package_to_path,rabbitmq-dotnet-client):=true
+NON_INTEGRATED_DEPS_$(call package_to_path,rabbitmq-erlang-client):=rabbitmq-server
+
+# Where the coverage package lives
+COVERAGE_PATH:=$(call package_to_path,coverage)
+
+# Where the rabbitmq-server package lives
+RABBITMQ_SERVER_PATH=$(call package_to_path,rabbitmq-server)
+
+# Cleaning support: when no goals were given, test the default goal
+# instead.
+ifndef MAKECMDGOALS
+TESTABLEGOALS:=$(.DEFAULT_GOAL)
+else
+TESTABLEGOALS:=$(MAKECMDGOALS)
+endif
+
+# The CLEANING variable can be used to determine whether the top-level
+# goal is cleaning related. In particular, it can be used to prevent
+# including generated files when cleaning, which might otherwise
+# trigger undesirable activity.
+# (The nested patsubsts strip goals matching 'clean%' or '%clean'; if
+# nothing is left, every goal was cleaning-related.)
+ifeq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" ""
+CLEANING:=true
+endif
+
+# Include a generated makefile fragment
+#
+# Note that this includes using "-include", and thus make will proceed
+# even if an error occurs while the fragment is being re-made (we
+# don't use "include" because it will produce a superfluous error
+# message when the fragment is re-made because it doesn't exist).
+# Thus you should also list the fragment as a dependency of any rules
+# that will refer to the contents of the fragment.
+define safe_include
+ifndef CLEANING
+-include $(1)
+
+# If we fail to make the fragment, make will just loop trying to
+# create it. So we have to explicitly catch that case.
+$$(if $$(MAKE_RESTARTS),$$(if $$(wildcard $(1)),,$$(error Failed to produce $(1))))
+
+endif
+endef
+
+# This is not the make default, but it is a good idea
+.DELETE_ON_ERROR:
+
+# Declarations for global targets. Double-colon rules so that
+# do-package.mk fragments can each contribute their own recipes.
+.PHONY: all-releasable copy-releasable copy-srcdist all-packages clean-all-packages
+all-releasable::
+copy-releasable::
+copy-srcdist::
+all-packages::
+clean-all-packages::
+check-xref-packages::
--- /dev/null
+From c2303fb756eeb8bd92dc04764970a43f59940208 Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:48:41 +0000
+Subject: [PATCH 1/7] R12 - Fake iodata() type
+
+---
+ include/http.hrl | 2 +-
+ src/cowboy_http.erl | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/include/http.hrl b/include/http.hrl
+index c66f2b0..c98f873 100644
+--- a/include/http.hrl
++++ b/include/http.hrl
+@@ -47,7 +47,7 @@
+ %% Response.
+ resp_state = waiting :: locked | waiting | chunks | done,
+ resp_headers = [] :: cowboy_http:headers(),
+- resp_body = <<>> :: iodata() | {non_neg_integer(),
++ resp_body = <<>> :: cowboy_http:fake_iodata() | {non_neg_integer(),
+ fun(() -> {sent, non_neg_integer()})},
+
+ %% Functions.
+diff --git a/src/cowboy_http.erl b/src/cowboy_http.erl
+index 32b0ca9..95a7334 100644
+--- a/src/cowboy_http.erl
++++ b/src/cowboy_http.erl
+@@ -46,7 +46,8 @@
+ | 'Expires' | 'Last-Modified' | 'Accept-Ranges' | 'Set-Cookie'
+ | 'Set-Cookie2' | 'X-Forwarded-For' | 'Cookie' | 'Keep-Alive'
+ | 'Proxy-Connection' | binary().
+--type headers() :: [{header(), iodata()}].
++-type fake_iodata() :: iolist() | binary().
++-type headers() :: [{header(), fake_iodata()}].
+ -type status() :: non_neg_integer() | binary().
+
+ -export_type([method/0, uri/0, version/0, header/0, headers/0, status/0]).
+--
+1.7.0.4
+
--- /dev/null
+From 257e64326ad786d19328d343da0ff7d29adbae4e Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:51:30 +0000
+Subject: [PATCH 2/7] R12 - drop all references to boolean() type
+
+---
+ src/cowboy_cookies.erl | 8 --------
+ src/cowboy_http.erl | 1 -
+ src/cowboy_http_protocol.erl | 3 +--
+ src/cowboy_http_req.erl | 2 --
+ src/cowboy_http_static.erl | 5 -----
+ src/cowboy_http_websocket.erl | 2 +-
+ 6 files changed, 2 insertions(+), 19 deletions(-)
+
+diff --git a/src/cowboy_cookies.erl b/src/cowboy_cookies.erl
+index 6818a86..7f5ab60 100644
+--- a/src/cowboy_cookies.erl
++++ b/src/cowboy_cookies.erl
+@@ -112,7 +112,6 @@ cookie(Key, Value, Options) when is_binary(Key)
+ %% Internal.
+
+ %% @doc Check if a character is a white space character.
+--spec is_whitespace(char()) -> boolean().
+ is_whitespace($\s) -> true;
+ is_whitespace($\t) -> true;
+ is_whitespace($\r) -> true;
+@@ -120,7 +119,6 @@ is_whitespace($\n) -> true;
+ is_whitespace(_) -> false.
+
+ %% @doc Check if a character is a seperator.
+--spec is_separator(char()) -> boolean().
+ is_separator(C) when C < 32 -> true;
+ is_separator($\s) -> true;
+ is_separator($\t) -> true;
+@@ -144,7 +142,6 @@ is_separator($}) -> true;
+ is_separator(_) -> false.
+
+ %% @doc Check if a binary has an ASCII seperator character.
+--spec has_seperator(binary()) -> boolean().
+ has_seperator(<<>>) ->
+ false;
+ has_seperator(<<$/, Rest/binary>>) ->
+@@ -228,7 +225,6 @@ read_quoted(<<C, Rest/binary>>, Acc) ->
+ read_quoted(Rest, <<Acc/binary, C>>).
+
+ %% @doc Drop characters while a function returns true.
+--spec binary_dropwhile(fun((char()) -> boolean()), binary()) -> binary().
+ binary_dropwhile(_F, <<"">>) ->
+ <<"">>;
+ binary_dropwhile(F, String) ->
+@@ -246,8 +242,6 @@ skip_whitespace(String) ->
+ binary_dropwhile(fun is_whitespace/1, String).
+
+ %% @doc Split a binary when the current character causes F to return true.
+--spec binary_splitwith(fun((char()) -> boolean()), binary(), binary())
+- -> {binary(), binary()}.
+ binary_splitwith(_F, Head, <<>>) ->
+ {Head, <<>>};
+ binary_splitwith(F, Head, Tail) ->
+@@ -260,8 +254,6 @@ binary_splitwith(F, Head, Tail) ->
+ end.
+
+ %% @doc Split a binary with a function returning true or false on each char.
+--spec binary_splitwith(fun((char()) -> boolean()), binary())
+- -> {binary(), binary()}.
+ binary_splitwith(F, String) ->
+ binary_splitwith(F, <<>>, String).
+
+diff --git a/src/cowboy_http.erl b/src/cowboy_http.erl
+index 95a7334..d7261c8 100644
+--- a/src/cowboy_http.erl
++++ b/src/cowboy_http.erl
+@@ -755,7 +755,6 @@ urlencode(Bin, Opts) ->
+ Upper = proplists:get_value(upper, Opts, false),
+ urlencode(Bin, <<>>, Plus, Upper).
+
+--spec urlencode(binary(), binary(), boolean(), boolean()) -> binary().
+ urlencode(<<C, Rest/binary>>, Acc, P=Plus, U=Upper) ->
+ if C >= $0, C =< $9 -> urlencode(Rest, <<Acc/binary, C>>, P, U);
+ C >= $A, C =< $Z -> urlencode(Rest, <<Acc/binary, C>>, P, U);
+diff --git a/src/cowboy_http_protocol.erl b/src/cowboy_http_protocol.erl
+index baee081..b80745f 100644
+--- a/src/cowboy_http_protocol.erl
++++ b/src/cowboy_http_protocol.erl
+@@ -55,7 +55,7 @@
+ max_line_length :: integer(),
+ timeout :: timeout(),
+ buffer = <<>> :: binary(),
+- hibernate = false :: boolean(),
++ hibernate = false,
+ loop_timeout = infinity :: timeout(),
+ loop_timeout_ref :: undefined | reference()
+ }).
+@@ -440,7 +440,6 @@ format_header(Field) when byte_size(Field) =< 20; byte_size(Field) > 32 ->
+ format_header(Field) ->
+ format_header(Field, true, <<>>).
+
+--spec format_header(binary(), boolean(), binary()) -> binary().
+ format_header(<<>>, _Any, Acc) ->
+ Acc;
+ %% Replicate a bug in OTP for compatibility reasons when there's a - right
+diff --git a/src/cowboy_http_req.erl b/src/cowboy_http_req.erl
+index 92d96ad..d729d6c 100644
+--- a/src/cowboy_http_req.erl
++++ b/src/cowboy_http_req.erl
+@@ -515,13 +515,11 @@ set_resp_body_fun(StreamLen, StreamFun, Req) ->
+
+
+ %% @doc Return whether the given header has been set for the response.
+--spec has_resp_header(cowboy_http:header(), #http_req{}) -> boolean().
+ has_resp_header(Name, #http_req{resp_headers=RespHeaders}) ->
+ NameBin = header_to_binary(Name),
+ lists:keymember(NameBin, 1, RespHeaders).
+
+ %% @doc Return whether a body has been set for the response.
+--spec has_resp_body(#http_req{}) -> boolean().
+ has_resp_body(#http_req{resp_body={Length, _}}) ->
+ Length > 0;
+ has_resp_body(#http_req{resp_body=RespBody}) ->
+diff --git a/src/cowboy_http_static.erl b/src/cowboy_http_static.erl
+index 0ee996a..d370046 100644
+--- a/src/cowboy_http_static.erl
++++ b/src/cowboy_http_static.erl
+@@ -207,8 +207,6 @@ allowed_methods(Req, State) ->
+ {['GET', 'HEAD'], Req, State}.
+
+ %% @private
+--spec malformed_request(#http_req{}, #state{}) ->
+- {boolean(), #http_req{}, #state{}}.
+ malformed_request(Req, #state{filepath=error}=State) ->
+ {true, Req, State};
+ malformed_request(Req, State) ->
+@@ -216,8 +214,6 @@ malformed_request(Req, State) ->
+
+
+ %% @private Check if the resource exists under the document root.
+--spec resource_exists(#http_req{}, #state{}) ->
+- {boolean(), #http_req{}, #state{}}.
+ resource_exists(Req, #state{fileinfo={error, _}}=State) ->
+ {false, Req, State};
+ resource_exists(Req, #state{fileinfo={ok, Fileinfo}}=State) ->
+@@ -227,7 +223,6 @@ resource_exists(Req, #state{fileinfo={ok, Fileinfo}}=State) ->
+ %% @private
+ %% Access to a file resource is forbidden if it exists and the local node does
+ %% not have permission to read it. Directory listings are always forbidden.
+--spec forbidden(#http_req{}, #state{}) -> {boolean(), #http_req{}, #state{}}.
+ forbidden(Req, #state{fileinfo={_, #file_info{type=directory}}}=State) ->
+ {true, Req, State};
+ forbidden(Req, #state{fileinfo={error, eacces}}=State) ->
+diff --git a/src/cowboy_http_websocket.erl b/src/cowboy_http_websocket.erl
+index 0f0204c..5f59891 100644
+--- a/src/cowboy_http_websocket.erl
++++ b/src/cowboy_http_websocket.erl
+@@ -54,7 +54,7 @@
+ timeout = infinity :: timeout(),
+ timeout_ref = undefined :: undefined | reference(),
+ messages = undefined :: undefined | {atom(), atom(), atom()},
+- hibernate = false :: boolean(),
++ hibernate = false,
+ eop :: undefined | tuple(), %% hixie-76 specific.
+ origin = undefined :: undefined | binary() %% hixie-76 specific.
+ }).
+--
+1.7.0.4
+
--- /dev/null
+From 4db80ab7bacf04502ad2d29d4760e04a6d787a83 Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:52:23 +0000
+Subject: [PATCH 3/7] R12: drop all references to reference() type
+
+---
+ src/cowboy_http_protocol.erl | 2 +-
+ src/cowboy_http_websocket.erl | 2 +-
+ src/cowboy_listener.erl | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/cowboy_http_protocol.erl b/src/cowboy_http_protocol.erl
+index b80745f..0183785 100644
+--- a/src/cowboy_http_protocol.erl
++++ b/src/cowboy_http_protocol.erl
+@@ -57,7 +57,7 @@
+ buffer = <<>> :: binary(),
+ hibernate = false,
+ loop_timeout = infinity :: timeout(),
+- loop_timeout_ref :: undefined | reference()
++ loop_timeout_ref
+ }).
+
+ %% API.
+diff --git a/src/cowboy_http_websocket.erl b/src/cowboy_http_websocket.erl
+index 5f59891..5100213 100644
+--- a/src/cowboy_http_websocket.erl
++++ b/src/cowboy_http_websocket.erl
+@@ -52,7 +52,7 @@
+ opts :: any(),
+ challenge = undefined :: undefined | binary() | {binary(), binary()},
+ timeout = infinity :: timeout(),
+- timeout_ref = undefined :: undefined | reference(),
++ timeout_ref = undefined,
+ messages = undefined :: undefined | {atom(), atom(), atom()},
+ hibernate = false,
+ eop :: undefined | tuple(), %% hixie-76 specific.
+diff --git a/src/cowboy_listener.erl b/src/cowboy_listener.erl
+index c19d079..86e87f1 100644
+--- a/src/cowboy_listener.erl
++++ b/src/cowboy_listener.erl
+@@ -23,8 +23,8 @@
+
+ -record(state, {
+ req_pools = [] :: [{atom(), non_neg_integer()}],
+- reqs_table :: ets:tid(),
+- queue = [] :: [{pid(), reference()}]
++ reqs_table,
++ queue = []
+ }).
+
+ %% API.
+--
+1.7.0.4
+
--- /dev/null
+From dfb750f491208a8e30cab0fa701dd866d60734b8 Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:53:08 +0000
+Subject: [PATCH 4/7] R12: drop references to iodata() type
+
+---
+ src/cowboy_http_req.erl | 6 ------
+ 1 files changed, 0 insertions(+), 6 deletions(-)
+
+diff --git a/src/cowboy_http_req.erl b/src/cowboy_http_req.erl
+index d729d6c..64e757c 100644
+--- a/src/cowboy_http_req.erl
++++ b/src/cowboy_http_req.erl
+@@ -478,8 +478,6 @@ set_resp_cookie(Name, Value, Options, Req) ->
+ set_resp_header(HeaderName, HeaderValue, Req).
+
+ %% @doc Add a header to the response.
+--spec set_resp_header(cowboy_http:header(), iodata(), #http_req{})
+- -> {ok, #http_req{}}.
+ set_resp_header(Name, Value, Req=#http_req{resp_headers=RespHeaders}) ->
+ NameBin = header_to_binary(Name),
+ {ok, Req#http_req{resp_headers=[{NameBin, Value}|RespHeaders]}}.
+@@ -489,7 +487,6 @@ set_resp_header(Name, Value, Req=#http_req{resp_headers=RespHeaders}) ->
+ %% The body set here is ignored if the response is later sent using
+ %% anything other than reply/2 or reply/3. The response body is expected
+ %% to be a binary or an iolist.
+--spec set_resp_body(iodata(), #http_req{}) -> {ok, #http_req{}}.
+ set_resp_body(Body, Req) ->
+ {ok, Req#http_req{resp_body=Body}}.
+
+@@ -537,8 +534,6 @@ reply(Status, Headers, Req=#http_req{resp_body=Body}) ->
+ reply(Status, Headers, Body, Req).
+
+ %% @doc Send a reply to the client.
+--spec reply(cowboy_http:status(), cowboy_http:headers(), iodata(), #http_req{})
+- -> {ok, #http_req{}}.
+ reply(Status, Headers, Body, Req=#http_req{socket=Socket,
+ transport=Transport, connection=Connection, pid=ReqPid,
+ method=Method, resp_state=waiting, resp_headers=RespHeaders}) ->
+@@ -586,7 +581,6 @@ chunked_reply(Status, Headers, Req=#http_req{socket=Socket,
+ %% @doc Send a chunk of data.
+ %%
+ %% A chunked reply must have been initiated before calling this function.
+--spec chunk(iodata(), #http_req{}) -> ok | {error, atom()}.
+ chunk(_Data, #http_req{socket=_Socket, transport=_Transport, method='HEAD'}) ->
+ ok;
+ chunk(Data, #http_req{socket=Socket, transport=Transport, resp_state=chunks}) ->
+--
+1.7.0.4
+
--- /dev/null
+From c7aef1d044a1e83fcd6be7a83b2c763c0366d4f8 Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:53:36 +0000
+Subject: [PATCH 5/7] R12: drop references to Default:any() type
+
+---
+ src/cowboy_http_req.erl | 8 --------
+ 1 files changed, 0 insertions(+), 8 deletions(-)
+
+diff --git a/src/cowboy_http_req.erl b/src/cowboy_http_req.erl
+index 64e757c..c884f5a 100644
+--- a/src/cowboy_http_req.erl
++++ b/src/cowboy_http_req.erl
+@@ -147,8 +147,6 @@ qs_val(Name, Req) when is_binary(Name) ->
+
+ %% @doc Return the query string value for the given key, or a default if
+ %% missing.
+--spec qs_val(binary(), #http_req{}, Default)
+- -> {binary() | true | Default, #http_req{}} when Default::any().
+ qs_val(Name, Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
+ urldecode={URLDecFun, URLDecArg}}, Default) when is_binary(Name) ->
+ QsVals = parse_qs(RawQs, fun(Bin) -> URLDecFun(Bin, URLDecArg) end),
+@@ -180,8 +178,6 @@ binding(Name, Req) when is_atom(Name) ->
+
+ %% @doc Return the binding value for the given key obtained when matching
+ %% the host and path against the dispatch list, or a default if missing.
+--spec binding(atom(), #http_req{}, Default)
+- -> {binary() | Default, #http_req{}} when Default::any().
+ binding(Name, Req, Default) when is_atom(Name) ->
+ case lists:keyfind(Name, 1, Req#http_req.bindings) of
+ {Name, Value} -> {Value, Req};
+@@ -200,8 +196,6 @@ header(Name, Req) when is_atom(Name) orelse is_binary(Name) ->
+ header(Name, Req, undefined).
+
+ %% @doc Return the header value for the given key, or a default if missing.
+--spec header(atom() | binary(), #http_req{}, Default)
+- -> {binary() | Default, #http_req{}} when Default::any().
+ header(Name, Req, Default) when is_atom(Name) orelse is_binary(Name) ->
+ case lists:keyfind(Name, 1, Req#http_req.headers) of
+ {Name, Value} -> {Value, Req};
+@@ -313,8 +307,6 @@ cookie(Name, Req) when is_binary(Name) ->
+
+ %% @doc Return the cookie value for the given key, or a default if
+ %% missing.
+--spec cookie(binary(), #http_req{}, Default)
+- -> {binary() | true | Default, #http_req{}} when Default::any().
+ cookie(Name, Req=#http_req{cookies=undefined}, Default) when is_binary(Name) ->
+ case header('Cookie', Req) of
+ {undefined, Req2} ->
+--
+1.7.0.4
+
--- /dev/null
+From 81106c53b80f5d0fa441b893048bbdc6c9e2c4f0 Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:54:31 +0000
+Subject: [PATCH 6/7] Use erlang:integer_to_list and lists:max instead of bifs
+
+---
+ src/cowboy_http_req.erl | 2 +-
+ src/cowboy_http_static.erl | 2 +-
+ src/cowboy_multipart.erl | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/src/cowboy_http_req.erl b/src/cowboy_http_req.erl
+index c884f5a..bf4ac7a 100644
+--- a/src/cowboy_http_req.erl
++++ b/src/cowboy_http_req.erl
+@@ -576,7 +576,7 @@ chunked_reply(Status, Headers, Req=#http_req{socket=Socket,
+ chunk(_Data, #http_req{socket=_Socket, transport=_Transport, method='HEAD'}) ->
+ ok;
+ chunk(Data, #http_req{socket=Socket, transport=Transport, resp_state=chunks}) ->
+- Transport:send(Socket, [integer_to_list(iolist_size(Data), 16),
++ Transport:send(Socket, [erlang:integer_to_list(iolist_size(Data), 16),
+ <<"\r\n">>, Data, <<"\r\n">>]).
+
+ %% @doc Send an upgrade reply.
+diff --git a/src/cowboy_http_static.erl b/src/cowboy_http_static.erl
+index d370046..da3bd33 100644
+--- a/src/cowboy_http_static.erl
++++ b/src/cowboy_http_static.erl
+@@ -412,7 +412,7 @@ attr_etag_function(Args, Attrs) ->
+
+ -spec attr_etag_function([etagarg()], [fileattr()], [binary()]) -> binary().
+ attr_etag_function(_Args, [], Acc) ->
+- list_to_binary(integer_to_list(erlang:crc32(Acc), 16));
++ list_to_binary(erlang:integer_to_list(erlang:crc32(Acc), 16));
+ attr_etag_function(Args, [H|T], Acc) ->
+ {_, Value} = lists:keyfind(H, 1, Args),
+ attr_etag_function(Args, T, [term_to_binary(Value)|Acc]).
+diff --git a/src/cowboy_multipart.erl b/src/cowboy_multipart.erl
+index b7aeb54..c9b5b6c 100644
+--- a/src/cowboy_multipart.erl
++++ b/src/cowboy_multipart.erl
+@@ -105,7 +105,7 @@ parse_boundary_eol(Bin, Pattern) ->
+ cowboy_http:whitespace(Rest, Fun);
+ nomatch ->
+ % CRLF not found in the given binary.
+- RestStart = max(byte_size(Bin) - 1, 0),
++ RestStart = lists:max([byte_size(Bin) - 1, 0]),
+ <<_:RestStart/binary, Rest/binary>> = Bin,
+ more(Rest, fun (NewBin) -> parse_boundary_eol(NewBin, Pattern) end)
+ end.
+@@ -175,7 +175,7 @@ skip(Bin, Pattern = {P, PSize}) ->
+ parse_boundary_tail(Rest, Pattern);
+ nomatch ->
+ % Boundary not found, need more data.
+- RestStart = max(byte_size(Bin) - PSize + 1, 0),
++ RestStart = lists:max([byte_size(Bin) - PSize + 1, 0]),
+ <<_:RestStart/binary, Rest/binary>> = Bin,
+ more(Rest, fun (NewBin) -> skip(NewBin, Pattern) end)
+ end.
+--
+1.7.0.4
+
--- /dev/null
+From 547731d5490b36f1239a99e6c4acc1964e724a6e Mon Sep 17 00:00:00 2001
+From: Marek Majkowski <majek04@gmail.com>
+Date: Thu, 26 Jan 2012 12:54:49 +0000
+Subject: [PATCH 7/7] R12 - type definitions must be ordered
+
+---
+ src/cowboy_multipart.erl | 10 +++++-----
+ 1 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/src/cowboy_multipart.erl b/src/cowboy_multipart.erl
+index c9b5b6c..0bd123a 100644
+--- a/src/cowboy_multipart.erl
++++ b/src/cowboy_multipart.erl
+@@ -15,15 +15,15 @@
+ %% @doc Multipart parser.
+ -module(cowboy_multipart).
+
+--type part_parser() :: parser(more(part_result())).
++-type part_parser() :: any().
+ -type parser(T) :: fun((binary()) -> T).
+ -type more(T) :: T | {more, parser(T)}.
+--type part_result() :: headers() | eof.
+--type headers() :: {headers, http_headers(), body_cont()}.
++-type part_result() :: any().
++-type headers() :: any().
+ -type http_headers() :: [{atom() | binary(), binary()}].
+--type body_cont() :: cont(more(body_result())).
++-type body_cont() :: any().
+ -type cont(T) :: fun(() -> T).
+--type body_result() :: {body, binary(), body_cont()} | end_of_part().
++-type body_result() :: any().
+ -type end_of_part() :: {end_of_part, cont(more(part_result()))}.
+ -type disposition() :: {binary(), [{binary(), binary()}]}.
+
+--
+1.7.0.4
+
--- /dev/null
+diff --git a/src/cowboy_http_req.erl b/src/cowboy_http_req.erl
+index 92d96ad..dd772df 100644
+--- a/src/cowboy_http_req.erl
++++ b/src/cowboy_http_req.erl
+@@ -288,6 +282,11 @@ parse_header(Name, Req, Default) when Name =:= 'Upgrade' ->
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
+ end);
++parse_header(Name, Req, Default) when Name =:= <<"sec-websocket-protocol">> ->
++ parse_header(Name, Req, Default,
++ fun (Value) ->
++ cowboy_http:nonempty_list(Value, fun cowboy_http:token/2)
++ end);
+ parse_header(Name, Req, Default) ->
+ {Value, Req2} = header(Name, Req, Default),
+ {undefined, Value, Req2}.
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Cowboy requires R14
--- /dev/null
+# Travis CI configuration: run 'make tests' against several
+# Erlang/OTP releases.
+language: erlang
+otp_release:
+ - R15B
+ - R14B04
+ - R14B03
+ - R14B02
+script: "make tests"
--- /dev/null
+Cowboy is available thanks to the work of:
+
+Loïc Hoguin
+Anthony Ramine
+Magnus Klaar
+Paul Oliver
+Steven Gravell
+Tom Burdick
+Hunter Morris
+Yurii Rashkovskii
+Ali Sabil
+Hans Ulrich Niedermann
+Jesper Louis Andersen
+Mathieu Lecarme
+Max Lapshin
+Michiel Hakvoort
+Ori Bar
+Alisdair Sullivan
--- /dev/null
+CHANGELOG
+=========
+
+0.4.0
+-----
+
+* Set the cowboy_listener process priority to high
+
+ As it is the central process used by all incoming requests
+ we need to set its priority to high to avoid timeouts that
+ would happen otherwise when reaching a huge number of
+ concurrent requests.
+
+* Add cowboy:child_spec/6 for embedding in other applications
+
+* Add cowboy_http_rest, an experimental REST protocol support
+
+ Based on the Webmachine diagram and documentation. It is a
+ new implementation, not a port, therefore a few changes have
+ been made. However all the callback names are the same and
+ should behave similarly to Webmachine.
+
+ There is currently no documentation other than the Webmachine
+ resource documentation and the comments found in cowboy_http_rest,
+ which itself should be fairly easy to read and understand.
+
+* Add cowboy_http_static, an experimental static file handler
+
+ Makes use of the aforementioned REST protocol support to
+ deliver files with proper content type and cache headers.
+
+ Note that this uses the new file:sendfile support when
+ appropriate, which currently requires the VM to be started
+ with the +A option defined, else errors may randomly appear.
+
+* Add cowboy_bstr module for binary strings related functions
+
+* Add cowboy_http module for HTTP parsing functions
+
+ This module so far contains various functions for HTTP header
+ parsing along with URL encoding and decoding.
+
+* Remove quoted from the default dependencies
+
+ This should make Cowboy much easier to compile and use by default.
+ It is of course still possible to use quoted as your URL decoding
+ library in Cowboy thanks to the newly added urldecode option.
+
+* Fix supervisor spec for non dynamic modules to allow upgrades to complete
+
+* Add cowboy:accept_ack/1 for a cleaner handling of the shoot message
+
+ Before, when the listener accepted a connection, the newly created
+ process was waiting for a message containing the atom 'shoot' before
+ proceeding. This has been replaced by the cowboy:accept_ack/1 function.
+
+ This function should be used where 'shoot' was received because the
+ contents of the message have changed (and could change again in the
+ distant future).
+
+* Update binary parsing expressions to avoid HiPE crashes
+
+ More specifically, /bits was replaced by /binary.
+
+* Rename the type cowboy_dispatcher:path_tokens/0 to tokens/0
+
+* Remove the cowboy_clock:date/0, time/0 and datetime/0 types
+
+ The calendar module exports those same types properly since R14B04.
+
+* Add cacertfile configuration option to cowboy_ssl_transport
+
+* Add cowboy_protocol behaviour
+
+* Remove -Wbehaviours dialyzer option unavailable in R15B
+
+* Many tests and specs improvements
+
+### cowboy_http_req
+
+* Fix a crash when reading the request body
+
+* Add parse_header/2 and parse_header/3
+
+ The following headers can now be semantically parsed: Connection, Accept,
+ Accept-Charset, Accept-Encoding, Accept-Language, Content-Length,
+ Content-Type, If-Match, If-None-Match, If-Modified-Since,
+ If-Unmodified-Since, Upgrade
+
+* Add set_resp_header/3, set_resp_cookie/4 and set_resp_body/2
+
+ These functions allow handlers to set response headers and body
+ without having to reply directly.
+
+* Add set_resp_body_fun/3
+
+ This function allows handlers to stream the body of the response
+ using the given fun. The size of the response must be known beforehand.
+
+* Add transport/1 to obtain the transport and socket for the request
+
+ This allows handlers to have low-level socket access in those cases
+ where they do need it, like when streaming a response body with
+ set_resp_body_fun/3.
+
+* Add peer_addr/1
+
+ This function tries to guess the real peer IP based on the HTTP
+ headers received.
+
+* Add meta/2 and meta/3 to save useful protocol information
+
+ Currently used to save the Websocket protocol version currently used,
+ and to save request information in the REST protocol handler.
+
+* Add reply/2 and reply/3 aliases to reply/4
+
+* Add upgrade_reply/3 for protocol upgrades
+
+### cowboy_http_protocol
+
+* Add the {urldecode, fun urldecode/2} option
+
+ Added when quoted was removed from the default build. Can be used to
+ tell Cowboy to use quoted or any other URL decoding routine.
+
+* Add the max_keepalive option
+
+* Add the max_line_length option
+
+* Allow HTTP handlers to stop during init/3
+
+ To do so they can return {shutdown, Req, State}.
+
+* Add loops support in HTTP handlers for proper long-polling support
+
+ A loop can be entered by returning either of {loop, Req, State},
+ {loop, Req, State, hibernate}, {loop, Req, State, Timeout} or
+ {loop, Req, State, Timeout, hibernate} from init/3.
+
+ Loops are useful when we cannot reply immediately and instead
+ are waiting for an Erlang message to be able to complete the request,
+ as would typically be done for long-polling.
+
+ Loop support in the protocol means that timeouts and hibernating
+ are well tested and handled so you can use those options without
+ worrying. It is recommended to set the timeout option.
+
+ When a loop is started, handle/2 will never be called so it does
+ not need to be defined. When the request process receives an Erlang
+ message, it will call the info/3 function with the message as the
+ first argument.
+
+ Like in OTP, you do need to set timeout and hibernate again when
+ returning from info/3 to enable them until the next call.
+
+* Fix the sending of 500 errors when handlers crash
+
+ Now we send an error response when no response has been sent,
+ and do nothing more than close the connection if anything
+ did get sent.
+
+* Fix a crash when the server is sent HTTP responses
+
+* Fix HTTP timeouts handling when the Request-Line wasn't received
+
+* Fix the handling of the max number of empty lines between requests
+
+* Fix the handling of HEAD requests
+
+* Fix HTTP/1.0 Host header handling
+
+* Reply status 400 if we receive an unexpected value or error for headers
+
+* Properly close when the application sends "Connection: close" header
+
+* Close HTTP connections on all errors
+
+* Improve the error message for HTTP handlers
+
+### cowboy_http_websocket
+
+* Add websocket support for all versions up to RFC 6455
+
+ Support isn't perfect yet according to the specifications, but
+ is working against all currently known client implementations.
+
+* Allow websocket_init/3 to return with the hibernate option set
+
+* Add {shutdown, Req} return value to websocket_init/3 to fail an upgrade
+
+* Fix websocket timeout handling
+
+* Fix error messages: wrong callback name was reported on error
+
+* Fix byte-by-byte websocket handling
+
+* Fix an issue when using hixie-76 with certain proxies
+
+* Fix a crash in the hixie-76 handshake
+
+* Fix the handshake when SSL is used on port 443
+
+* Fix a crash in the handshake when cowboy_http_req:compact/1 is used
+
+* Fix handshake when a query string is present
+
+* Fix a crash when the Upgrade header contains more than one token
+
+0.2.0
+-----
+
+* Initial release.
--- /dev/null
+Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--- /dev/null
+# See LICENSE for licensing information.
+
+DIALYZER = dialyzer
+REBAR = rebar
+
+all: app
+
+app: deps
+ @$(REBAR) compile
+
+deps:
+ @$(REBAR) get-deps
+
+clean:
+ @$(REBAR) clean
+ rm -f test/*.beam
+ rm -f erl_crash.dump
+
+tests: clean app eunit ct
+
+eunit:
+ @$(REBAR) eunit skip_deps=true
+
+ct:
+ @$(REBAR) ct skip_deps=true
+
+build-plt:
+ @$(DIALYZER) --build_plt --output_plt .cowboy_dialyzer.plt \
+ --apps kernel stdlib sasl inets crypto public_key ssl
+
+dialyze:
+ @$(DIALYZER) --src src --plt .cowboy_dialyzer.plt -Werror_handling \
+ -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+
+docs:
+ @$(REBAR) doc skip_deps=true
--- /dev/null
+Cowboy
+======
+
+Cowboy is a small, fast and modular HTTP server written in Erlang.
+
+Cowboy is also a socket acceptor pool, able to accept connections
+for any kind of TCP protocol.
+
+Goals
+-----
+
+Cowboy aims to provide the following advantages:
+
+* **Small** code base.
+* Damn **fast**.
+* **Modular**: transport and protocol handlers are replaceable.
+* **Binary HTTP** for greater speed and lower memory usage.
+* Easy to **embed** inside another application.
+* Selectively **dispatch** requests to handlers, allowing you to send some
+ requests to your embedded code and others to a FastCGI application in
+ PHP or Ruby.
+* No parameterized module. No process dictionary. **Clean** Erlang code.
+
+The server is currently in early development. Comments and suggestions are
+more than welcome. To contribute, either open bug reports, or fork the project
+and send us pull requests with new or improved functionality. You should
+discuss your plans with us before doing any serious work, though, to avoid
+duplicating efforts.
+
+Quick start
+-----------
+
+* Add Cowboy as a rebar or agner dependency to your application.
+* Start Cowboy and add one or more listeners.
+* Write handlers for your application.
+* Check out [examples](https://github.com/extend/cowboy_examples)!
+
+Getting Started
+---------------
+
+At heart, Cowboy is nothing more than a TCP acceptor pool. All it does is
+accept connections received on a given port, using a given transport,
+like TCP or SSL, and forward them to a request handler for the given
+protocol. Acceptors and request handlers are of course supervised
+automatically.
+
+It just so happens that Cowboy also includes an HTTP protocol handler.
+But Cowboy does nothing by default. You need to explicitly ask Cowboy
+to listen on a port with your chosen transport and protocol handlers.
+To do so, you must start a listener.
+
+A listener is a special kind of supervisor that manages both the
+acceptor pool and the request processes. It is named and can thus be
+started and stopped at will.
+
+An acceptor pool is a pool of processes whose only role is to accept
+new connections. It's good practice to have many of these processes
+as they are very cheap and allow much quicker response when you get
+many connections. Of course, as with everything else, you should
+**benchmark** before you decide what's best for you.
+
+Cowboy includes a TCP transport handler for HTTP and an SSL transport
+handler for HTTPS. The transport handlers can of course be reused for
+other protocols like FTP or IRC.
+
+The HTTP protocol requires one last thing to continue: dispatching rules.
+Don't worry about it right now though and continue reading, it'll all
+be explained.
+
+You can start and stop listeners by calling `cowboy:start_listener/6` and
+`cowboy:stop_listener/1` respectively.
+
+The following example demonstrates the startup of a very simple listener.
+
+``` erlang
+application:start(cowboy),
+Dispatch = [
+ %% {Host, list({Path, Handler, Opts})}
+ {'_', [{'_', my_handler, []}]}
+],
+%% Name, NbAcceptors, Transport, TransOpts, Protocol, ProtoOpts
+cowboy:start_listener(my_http_listener, 100,
+ cowboy_tcp_transport, [{port, 8080}],
+ cowboy_http_protocol, [{dispatch, Dispatch}]
+).
+```
+
+This is not enough though, you must also write the my_handler module
+to process the incoming HTTP requests. Of course Cowboy comes with
+predefined handlers for specific tasks but most of the time you'll
+want to write your own handlers for your application.
+
+Following is an example of a "Hello World!" HTTP handler.
+
+``` erlang
+-module(my_handler).
+-export([init/3, handle/2, terminate/2]).
+
+init({tcp, http}, Req, Opts) ->
+ {ok, Req, undefined_state}.
+
+handle(Req, State) ->
+ {ok, Req2} = cowboy_http_req:reply(200, [], <<"Hello World!">>, Req),
+ {ok, Req2, State}.
+
+terminate(Req, State) ->
+ ok.
+```
+
+You can also write handlers that do not reply directly. Instead, such handlers
+will wait for an Erlang message from another process and only reply when
+receiving such message, or timeout if it didn't arrive in time.
+
+This is especially useful for long-polling functionality, as Cowboy will handle
+process hibernation and timeouts properly, preventing mistakes if you were to
+write the code yourself. A handler of that kind can be defined like this:
+
+``` erlang
+-module(my_loop_handler).
+-export([init/3, info/3, terminate/2]).
+
+-define(TIMEOUT, 60000).
+
+init({tcp, http}, Req, Opts) ->
+ {loop, Req, undefined_state, ?TIMEOUT, hibernate}.
+
+info({reply, Body}, Req, State) ->
+ {ok, Req2} = cowboy_http_req:reply(200, [], Body, Req),
+ {ok, Req2, State};
+info(Message, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(Req, State) ->
+ ok.
+```
+
+It is of course possible to combine both types of handlers together as long as
+you return the proper tuple from init/3.
+
+**Note**: versions prior to `0.4.0` used the
+[quoted](https://github.com/klaar/quoted.erl) library instead of the built in
+`cowboy_http:urldecode/2` function. If you want to retain this you must add it
+as a dependency to your application and add the following cowboy_http_protocol
+option:
+
+``` erlang
+ {urldecode, {fun quoted:from_url/2, quoted:make([])}}
+```
+
+Continue reading to learn about dispatch rules and request handling.
+
+Dispatch rules
+--------------
+
+Cowboy allows you to dispatch HTTP requests directly to a specific handler
+based on the hostname and path information from the request. It also lets
+you define static options for the handler directly in the rules.
+
+To match the hostname and path, Cowboy requires a list of tokens. For
+example, to match the "dev-extend.eu" domain name, you must specify
+`[<<"dev-extend">>, <<"eu">>]`. Or, to match the "/path/to/my/resource"
+you must use `[<<"path">>, <<"to">>, <<"my">>, <<"resource">>]`. All the
+tokens must be given as binary.
+
+You can use the special token `'_'` (the atom underscore) to indicate that
+you accept anything in that position. For example if you have both
+"dev-extend.eu" and "dev-extend.fr" domains, you can use the match spec
+`[<<"dev-extend">>, '_']` to match any top level extension.
+
+Finally, you can also match multiple leading segments of the domain name and
+multiple trailing segments of the request path using the atom `'...'` (the atom
+ellipsis) respectively as the first host token or the last path token. For
+example, host rule `['...', <<"dev-extend">>, <<"eu">>]` can match both
+"cowboy.bugs.dev-extend.eu" and "dev-extend.eu" and path rule
+`[<<"projects">>, '...']` can match both "/projects" and
+"/projects/cowboy/issues/42". The host leading segments and the path trailing
+segments can later be retrieved through `cowboy_http_req:host_info/1` and
+`cowboy_http_req:path_info/1`.
+
+Any other atom used as a token will bind the value to this atom when
+matching. To follow on our hostnames example, `[<<"dev-extend">>, ext]`
+would bind the values `<<"eu">>` and `<<"fr">>` to the ext atom, that you
+can later retrieve in your handler by calling `cowboy_http_req:binding/{2,3}`.
+
+You can also accept any match spec by using the atom `'_'` directly instead of
+a list of tokens. Our hello world example above uses this to forward all
+requests to a single handler.
+
+There is currently no way to match multiple tokens at once.
+
+Requests handling
+-----------------
+
+Requests are passed around in the Request variable. Although they are
+defined as a record, it is recommended to access them only through the
+cowboy_http_req module API.
+
+You can retrieve the HTTP method, HTTP version, peer address and port,
+host tokens, raw host, used port, path tokens, raw path, query string
+values, bound values from the dispatch step, header values from the
+request. You can also read the request body, if any, optionally parsing
+it as a query string. Finally, the request allows you to send a response
+to the client.
+
+See the cowboy_http_req module for more information.
+
+Websockets
+----------
+
+The Websocket protocol is built upon the HTTP protocol. It first sends
+an HTTP request for a handshake, performs it and then switches
+to Websocket. Therefore you need to write a standard HTTP handler to
+confirm the handshake should be completed and then the Websocket-specific
+callbacks.
+
+A simple handler doing nothing but sending a repetitive message using
+Websocket would look like this:
+
+``` erlang
+-module(my_ws_handler).
+-export([init/3]).
+-export([websocket_init/3, websocket_handle/3,
+ websocket_info/3, websocket_terminate/3]).
+
+init({tcp, http}, Req, Opts) ->
+ {upgrade, protocol, cowboy_http_websocket}.
+
+websocket_init(TransportName, Req, _Opts) ->
+ erlang:start_timer(1000, self(), <<"Hello!">>),
+ {ok, Req, undefined_state}.
+
+websocket_handle({text, Msg}, Req, State) ->
+ {reply, {text, << "That's what she said! ", Msg/binary >>}, Req, State};
+websocket_handle(_Data, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info({timeout, _Ref, Msg}, Req, State) ->
+ erlang:start_timer(1000, self(), <<"How' you doin'?">>),
+ {reply, {text, Msg}, Req, State};
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+```
+
+Of course you can have an HTTP handler doing both HTTP and Websocket
+handling, but for the sake of this example we're ignoring the HTTP
+part entirely.
+
+As the Websocket protocol is still a draft, the API is subject to change
+regularly when support to the most recent drafts gets added. Features may
+be added, changed or removed before the protocol gets finalized. Cowboy
+tries to implement all drafts transparently and give a single interface to
+handle them all, however.
+
+Using Cowboy with other protocols
+---------------------------------
+
+One of the strengths of Cowboy is of course that you can use it with any
+protocol you want. The only downside is that if it's not HTTP, you'll
+probably have to write the protocol handler yourself.
+
+The only exported function a protocol handler needs is the start_link/4
+function, with arguments ListenerPid, Socket, Transport and Opts. ListenerPid
+is the pid to the listener's gen_server, managing the connections. Socket is of
+course the client socket; Transport is the module name of the chosen transport
+handler and Opts is protocol options defined when starting the listener.
+
+After initializing your protocol, it is recommended to call the
+function cowboy:accept_ack/1 with the ListenerPid as argument,
+as it will ensure Cowboy has been able to fully initialize the socket.
+Anything you do past this point is up to you!
+
+If you need to change some socket options, like enabling raw mode for example,
+you can call the <em>Transport:setopts/2</em> function. It is the protocol's
+responsibility to manage the socket usage; there should be no need for a user
+to specify that kind of option when starting a listener.
+
+You should definitely look at the cowboy_http_protocol module for a great
+example of fast request handling if you need to. Otherwise it's probably
+safe to use `{active, once}` mode and handle everything as it comes.
+
+Note that while you technically can run a protocol handler directly as a
+gen_server or a gen_fsm, it's probably not a good idea, as the only call
+you'll ever receive from Cowboy is the start_link/4 call. On the other
+hand, feel free to write a very basic protocol handler which then forwards
+requests to a gen_server or gen_fsm. By doing so however you must take
+care to supervise their processes as Cowboy only knows about the protocol
+handler itself.
--- /dev/null
+{incl_app, cowboy, details}.
--- /dev/null
+@author Loïc Hoguin <essen@dev-extend.eu>
+@copyright 2011 Loïc Hoguin
+@version HEAD
+@title Small, fast, modular HTTP server.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-record(http_req, {
+ %% Transport.
+ socket = undefined :: undefined | inet:socket(),
+ transport = undefined :: undefined | module(),
+ connection = keepalive :: keepalive | close,
+
+ %% Request.
+ pid = undefined :: pid(),
+ method = 'GET' :: cowboy_http:method(),
+ version = {1, 1} :: cowboy_http:version(),
+ peer = undefined :: undefined | {inet:ip_address(), inet:ip_port()},
+ host = undefined :: undefined | cowboy_dispatcher:tokens(),
+ host_info = undefined :: undefined | cowboy_dispatcher:tokens(),
+ raw_host = undefined :: undefined | binary(),
+ port = undefined :: undefined | inet:ip_port(),
+ path = undefined :: undefined | '*' | cowboy_dispatcher:tokens(),
+ path_info = undefined :: undefined | cowboy_dispatcher:tokens(),
+ raw_path = undefined :: undefined | binary(),
+ qs_vals = undefined :: undefined | list({binary(), binary() | true}),
+ raw_qs = undefined :: undefined | binary(),
+ bindings = undefined :: undefined | cowboy_dispatcher:bindings(),
+ headers = [] :: cowboy_http:headers(),
+ p_headers = [] :: [any()], %% @todo Improve those specs.
+ cookies = undefined :: undefined | [{binary(), binary()}],
+ meta = [] :: [{atom(), any()}],
+
+ %% Request body.
+ body_state = waiting :: waiting | done |
+ {multipart, non_neg_integer(), fun()},
+ buffer = <<>> :: binary(),
+
+ %% Response.
+ resp_state = waiting :: locked | waiting | chunks | done,
+ resp_headers = [] :: cowboy_http:headers(),
+ resp_body = <<>> :: cowboy_http:fake_iodata() | {non_neg_integer(),
+ fun(() -> {sent, non_neg_integer()})},
+
+ %% Functions.
+ urldecode :: {fun((binary(), T) -> binary()), T}
+}).
--- /dev/null
+{cover_enabled, true}.
+{deps, [
+ {proper, "1.0",
+ {git, "git://github.com/manopapad/proper.git", {tag, "v1.0"}}}
+]}.
+{eunit_opts, [verbose, {report, {eunit_surefire, [{dir, "."}]}}]}.
+{erl_opts, [
+%% bin_opt_info,
+%% warn_missing_spec,
+ warnings_as_errors,
+ warn_export_all
+]}.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+{application, cowboy, [
+ {description, "Small, fast, modular HTTP server."},
+ {vsn, "0.5.0"},
+ {modules, []},
+ {registered, [cowboy_clock, cowboy_sup]},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {cowboy_app, []}},
+ {env, []}
+]}.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Cowboy API to start and stop listeners.
+-module(cowboy).
+
+-export([start_listener/6, stop_listener/1, child_spec/6, accept_ack/1]).
+
+%% @doc Start a listener for the given transport and protocol.
+%%
+%% A listener is effectively a pool of <em>NbAcceptors</em> acceptors.
+%% Acceptors accept connections on the given <em>Transport</em> and forward
+%% requests to the given <em>Protocol</em> handler. Both transport and protocol
+%% modules can be given options through the <em>TransOpts</em> and the
+%% <em>ProtoOpts</em> arguments. Available options are documented in the
+%% <em>listen</em> transport function and in the protocol module of your choice.
+%%
+%% All acceptor and request processes are supervised by the listener.
+%%
+%% It is recommended to set a large enough number of acceptors to improve
+%% performance. The exact number depends of course on your hardware, on the
+%% protocol used and on the number of expected simultaneous connections.
+%%
+%% The <em>Transport</em> option <em>max_connections</em> allows you to define
+%% the maximum number of simultaneous connections for this listener. It defaults
+%% to 1024. See <em>cowboy_listener</em> for more details on limiting the number
+%% of connections.
+%%
+%% Although Cowboy includes a <em>cowboy_http_protocol</em> handler, other
+%% handlers can be created for different protocols like IRC, FTP and more.
+%%
+%% <em>Ref</em> can be used to stop the listener later on.
+-spec start_listener(any(), non_neg_integer(), module(), any(), module(), any())
+ -> {ok, pid()}.
+start_listener(Ref, NbAcceptors, Transport, TransOpts, Protocol, ProtoOpts)
+ when is_integer(NbAcceptors) andalso is_atom(Transport)
+ andalso is_atom(Protocol) ->
+ supervisor:start_child(cowboy_sup, child_spec(Ref, NbAcceptors,
+ Transport, TransOpts, Protocol, ProtoOpts)).
+
+%% @doc Stop a listener identified by <em>Ref</em>.
+%% @todo Currently request processes aren't terminated with the listener.
+-spec stop_listener(any()) -> ok | {error, not_found}.
+stop_listener(Ref) ->
+ case supervisor:terminate_child(cowboy_sup, {cowboy_listener_sup, Ref}) of
+ ok ->
+ supervisor:delete_child(cowboy_sup, {cowboy_listener_sup, Ref});
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+%% @doc Return a child spec suitable for embedding.
+%%
+%% When you want to embed cowboy in another application, you can use this
+%% function to create a <em>ChildSpec</em> suitable for use in a supervisor.
+%% The parameters are the same as in <em>start_listener/6</em> but rather
+%% than hooking the listener to the cowboy internal supervisor, it just returns
+%% the spec.
+-spec child_spec(any(), non_neg_integer(), module(), any(), module(), any())
+ -> supervisor:child_spec().
+child_spec(Ref, NbAcceptors, Transport, TransOpts, Protocol, ProtoOpts)
+ when is_integer(NbAcceptors) andalso is_atom(Transport)
+ andalso is_atom(Protocol) ->
+ {{cowboy_listener_sup, Ref}, {cowboy_listener_sup, start_link, [
+ NbAcceptors, Transport, TransOpts, Protocol, ProtoOpts
+ ]}, permanent, 5000, supervisor, [cowboy_listener_sup]}.
+
+%% @doc Acknowledge the accepted connection.
+%%
+%% Effectively used to make sure the socket control has been given to
+%% the protocol process before starting to use it.
+-spec accept_ack(pid()) -> ok.
+accept_ack(ListenerPid) ->
+ receive {shoot, ListenerPid} -> ok end.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @private
+-module(cowboy_acceptor).
+
+-export([start_link/7]). %% API.
+-export([acceptor/7]). %% Internal.
+
+%% API.
+
+-spec start_link(inet:socket(), module(), module(), any(),
+ non_neg_integer(), pid(), pid()) -> {ok, pid()}.
+start_link(LSocket, Transport, Protocol, Opts,
+ MaxConns, ListenerPid, ReqsSup) ->
+ Pid = spawn_link(?MODULE, acceptor,
+ [LSocket, Transport, Protocol, Opts, MaxConns, ListenerPid, ReqsSup]),
+ {ok, Pid}.
+
+%% Internal.
+
+-spec acceptor(inet:socket(), module(), module(), any(),
+ non_neg_integer(), pid(), pid()) -> no_return().
+acceptor(LSocket, Transport, Protocol, Opts, MaxConns, ListenerPid, ReqsSup) ->
+ case Transport:accept(LSocket, 2000) of
+ {ok, CSocket} ->
+ {ok, Pid} = supervisor:start_child(ReqsSup,
+ [ListenerPid, CSocket, Transport, Protocol, Opts]),
+ Transport:controlling_process(CSocket, Pid),
+ {ok, NbConns} = cowboy_listener:add_connection(ListenerPid,
+ default, Pid),
+ Pid ! {shoot, ListenerPid},
+ limit_reqs(ListenerPid, NbConns, MaxConns);
+ {error, timeout} ->
+ ignore;
+ {error, _Reason} ->
+ %% @todo Probably do something here. If the socket was closed,
+ %% we may want to try and listen again on the port?
+ ignore
+ end,
+ ?MODULE:acceptor(LSocket, Transport, Protocol, Opts,
+ MaxConns, ListenerPid, ReqsSup).
+
+-spec limit_reqs(pid(), non_neg_integer(), non_neg_integer()) -> ok.
+limit_reqs(_ListenerPid, NbConns, MaxConns) when NbConns =< MaxConns ->
+ ok;
+limit_reqs(ListenerPid, _NbConns, MaxConns) ->
+ cowboy_listener:wait(ListenerPid, default, MaxConns).
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @private
+-module(cowboy_acceptors_sup).
+-behaviour(supervisor).
+
+-export([start_link/7]). %% API.
+-export([init/1]). %% supervisor.
+
+%% API.
+
+-spec start_link(non_neg_integer(), module(), any(),
+ module(), any(), pid(), pid()) -> {ok, pid()}.
+start_link(NbAcceptors, Transport, TransOpts,
+ Protocol, ProtoOpts, ListenerPid, ReqsPid) ->
+ supervisor:start_link(?MODULE, [NbAcceptors, Transport, TransOpts,
+ Protocol, ProtoOpts, ListenerPid, ReqsPid]).
+
+%% supervisor.
+
+-spec init(list()) -> {ok, {{one_for_one, 10, 10}, list()}}.
+init([NbAcceptors, Transport, TransOpts,
+ Protocol, ProtoOpts, ListenerPid, ReqsPid]) ->
+ {ok, LSocket} = Transport:listen(TransOpts),
+ MaxConns = proplists:get_value(max_connections, TransOpts, 1024),
+ Procs = [{{acceptor, self(), N}, {cowboy_acceptor, start_link, [
+ LSocket, Transport, Protocol, ProtoOpts,
+ MaxConns, ListenerPid, ReqsPid
+ ]}, permanent, brutal_kill, worker, []}
+ || N <- lists:seq(1, NbAcceptors)],
+ {ok, {{one_for_one, 10, 10}, Procs}}.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @private
+-module(cowboy_app).
+-behaviour(application).
+
+-export([start/2, stop/1, profile_output/0]). %% API.
+
+-type application_start_type() :: normal
+ | {takeover, node()} | {failover, node()}.
+
+%% API.
+
%% Application callback: optionally enable eprof profiling (controlled
%% by the application env key 'profile'), then start the top supervisor.
-spec start(application_start_type(), any()) -> {ok, pid()}.
start(_Type, _Args) ->
	consider_profiling(),
	cowboy_sup:start_link().
+
%% Application callback: nothing to clean up on stop.
-spec stop(any()) -> ok.
stop(_State) ->
	ok.
+
%% Stop an ongoing eprof profiling session and dump two analyses:
%% per-process results to "procs.profile" and totals to "total.profile".
%% The call order matters: profiling must be stopped before analysis,
%% and each log file must be set before the matching analyze call.
-spec profile_output() -> ok.
profile_output() ->
	eprof:stop_profiling(),
	eprof:log("procs.profile"),
	eprof:analyze(procs),
	eprof:log("total.profile"),
	eprof:analyze(total).
+
+%% Internal.
+
%% Start eprof profiling of this process when the application env key
%% 'profile' is set to true; otherwise do nothing.
%% NOTE(review): eprof:start_profiling/1 can also return {error, Reason},
%% so the stated return spec assumes the profiler starts successfully --
%% confirm against the eprof documentation for the targeted OTP release.
-spec consider_profiling() -> profiling | not_profiling.
consider_profiling() ->
	case application:get_env(profile) of
		{ok, true} ->
			{ok, _Pid} = eprof:start(),
			eprof:start_profiling([self()]);
		_ ->
			not_profiling
	end.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Binary string manipulation.
+-module(cowboy_bstr).
+
+-export([to_lower/1]). %% Binary strings.
+-export([char_to_lower/1, char_to_upper/1]). %% Characters.
+
%% @doc Convert a binary string to lowercase.
%% Each byte is mapped individually through char_to_lower/1; only the
%% ASCII range [A-Z] is affected, all other bytes pass through unchanged.
-spec to_lower(binary()) -> binary().
to_lower(Bin) ->
	list_to_binary([char_to_lower(C) || <<C>> <= Bin]).
+
%% @doc Convert [A-Z] characters to lowercase.
%% Any character outside [A-Z] is returned unchanged (final clause).
%% @end
%% We gain noticeable speed by matching each value directly.
-spec char_to_lower(char()) -> char().
char_to_lower($A) -> $a;
char_to_lower($B) -> $b;
char_to_lower($C) -> $c;
char_to_lower($D) -> $d;
char_to_lower($E) -> $e;
char_to_lower($F) -> $f;
char_to_lower($G) -> $g;
char_to_lower($H) -> $h;
char_to_lower($I) -> $i;
char_to_lower($J) -> $j;
char_to_lower($K) -> $k;
char_to_lower($L) -> $l;
char_to_lower($M) -> $m;
char_to_lower($N) -> $n;
char_to_lower($O) -> $o;
char_to_lower($P) -> $p;
char_to_lower($Q) -> $q;
char_to_lower($R) -> $r;
char_to_lower($S) -> $s;
char_to_lower($T) -> $t;
char_to_lower($U) -> $u;
char_to_lower($V) -> $v;
char_to_lower($W) -> $w;
char_to_lower($X) -> $x;
char_to_lower($Y) -> $y;
char_to_lower($Z) -> $z;
char_to_lower(Ch) -> Ch.
+
%% @doc Convert [a-z] characters to uppercase.
%% Mirror image of char_to_lower/1; non [a-z] characters pass through.
-spec char_to_upper(char()) -> char().
char_to_upper($a) -> $A;
char_to_upper($b) -> $B;
char_to_upper($c) -> $C;
char_to_upper($d) -> $D;
char_to_upper($e) -> $E;
char_to_upper($f) -> $F;
char_to_upper($g) -> $G;
char_to_upper($h) -> $H;
char_to_upper($i) -> $I;
char_to_upper($j) -> $J;
char_to_upper($k) -> $K;
char_to_upper($l) -> $L;
char_to_upper($m) -> $M;
char_to_upper($n) -> $N;
char_to_upper($o) -> $O;
char_to_upper($p) -> $P;
char_to_upper($q) -> $Q;
char_to_upper($r) -> $R;
char_to_upper($s) -> $S;
char_to_upper($t) -> $T;
char_to_upper($u) -> $U;
char_to_upper($v) -> $V;
char_to_upper($w) -> $W;
char_to_upper($x) -> $X;
char_to_upper($y) -> $Y;
char_to_upper($z) -> $Z;
char_to_upper(Ch) -> Ch.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Date and time related functions.
+%%
+%% While a gen_server process runs in the background to update
+%% the cache of formatted dates every second, all API calls are
+%% local and directly read from the ETS cache table, providing
+%% fast time and date computations.
+-module(cowboy_clock).
+-behaviour(gen_server).
+
+-export([start_link/0, stop/0, rfc1123/0, rfc2109/1]). %% API.
+-export([init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2, code_change/3]). %% gen_server.
+
+-record(state, {
+ universaltime = undefined :: undefined | calendar:datetime(),
+ rfc1123 = <<>> :: binary(),
+ tref = undefined :: undefined | timer:tref()
+}).
+
+-define(SERVER, ?MODULE).
+-define(TABLE, ?MODULE).
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% API.
+
%% @private
%% Start the clock server, registered locally under the module name.
-spec start_link() -> {ok, pid()}.
start_link() ->
	gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
%% @private
%% Synchronously stop the clock server (handled in handle_call/3).
-spec stop() -> stopped.
stop() ->
	gen_server:call(?SERVER, stop).
+
%% @doc Return the current date and time formatted according to RFC-1123.
%%
%% This format is used in the <em>'Date'</em> header sent with HTTP responses.
%% Reads directly from the ETS cache updated by the server every second,
%% so no message round-trip to the gen_server is involved.
-spec rfc1123() -> binary().
rfc1123() ->
	ets:lookup_element(?TABLE, rfc1123, 2).
+
%% @doc Return the given local date and time formatted according to RFC-2109.
%%
%% This format is used in the <em>'Set-Cookie'</em> header sent with
%% HTTP responses.
%% NOTE(review): calendar:local_time_to_universal_time_dst/1 can also
%% return [] for local times that do not exist (DST gap), which would
%% crash here with a case_clause -- same as the original behavior.
-spec rfc2109(calendar:datetime()) -> binary().
rfc2109(LocalTime) ->
	%% Convert to UTC; for ambiguous times (DST overlap) keep the
	%% second element, matching the original selection.
	Universal = case calendar:local_time_to_universal_time_dst(LocalTime) of
		[Utc] -> Utc;
		[_, Utc] -> Utc
	end,
	{{Year, Month, Day}, {Hour, Min, Sec}} = Universal,
	WdayName = weekday(calendar:day_of_the_week({Year, Month, Day})),
	<< WdayName/binary, ", ",
		(pad_int(Day))/binary, " ", (month(Month))/binary, " ",
		(list_to_binary(integer_to_list(Year)))/binary, " ",
		(pad_int(Hour))/binary, ":",
		(pad_int(Min))/binary, ":",
		(pad_int(Sec))/binary, " GMT" >>.
+
+%% gen_server.
+
%% @private
%% Create the protected, named ETS cache table, seed it with the current
%% RFC-1123 date string and schedule an 'update' message every second.
-spec init([]) -> {ok, #state{}}.
init([]) ->
	%% read_concurrency speeds up the direct lookups done by rfc1123/0.
	?TABLE = ets:new(?TABLE, [set, protected,
		named_table, {read_concurrency, true}]),
	T = erlang:universaltime(),
	B = update_rfc1123(<<>>, undefined, T),
	{ok, TRef} = timer:send_interval(1000, update),
	ets:insert(?TABLE, {rfc1123, B}),
	{ok, #state{universaltime=T, rfc1123=B, tref=TRef}}.
+
%% @private
%% 'stop' cancels the update timer and terminates normally; any other
%% call is answered with 'ignored' rather than crashing the server.
-spec handle_call(_, _, State)
	-> {reply, ignored, State} | {stop, normal, stopped, State}.
handle_call(stop, _From, State=#state{tref=TRef}) ->
	{ok, cancel} = timer:cancel(TRef),
	{stop, normal, stopped, State};
handle_call(_Request, _From, State) ->
	{reply, ignored, State}.
+
%% @private
%% No casts are part of this server's protocol; ignore them all.
-spec handle_cast(_, State) -> {noreply, State}.
handle_cast(_Msg, State) ->
	{noreply, State}.
+
%% @private
%% Periodic 'update' tick from timer:send_interval/2: recompute the
%% cached RFC-1123 string incrementally and publish it to the ETS table.
%% All three state fields are rebuilt, so no information is lost.
-spec handle_info(_, State) -> {noreply, State}.
handle_info(update, #state{universaltime=Prev, rfc1123=B1, tref=TRef}) ->
	T = erlang:universaltime(),
	B2 = update_rfc1123(B1, Prev, T),
	ets:insert(?TABLE, {rfc1123, B2}),
	{noreply, #state{universaltime=T, rfc1123=B2, tref=TRef}};
handle_info(_Info, State) ->
	{noreply, State}.
+
%% @private
%% Nothing to clean up; the ETS table dies with the owning process.
-spec terminate(_, _) -> ok.
terminate(_Reason, _State) ->
	ok.
+
%% @private
%% State record layout is unchanged across upgrades; pass it through.
-spec code_change(_, State, _) -> {ok, State}.
code_change(_OldVsn, State, _Extra) ->
	{ok, State}.
+
+%% Internal.
+
%% Incrementally refresh a cached RFC-1123 date string
%% ("Www, DD Mmm YYYY HH:MM:SS GMT") given the previous and current
%% universal times. Each clause keeps the longest unchanged binary
%% prefix and re-renders only the suffix; clause order is significant,
%% going from "nothing changed" to "everything changed".
-spec update_rfc1123(binary(), undefined | calendar:datetime(),
	calendar:datetime()) -> binary().
%% Same second: the cached string is still valid.
update_rfc1123(Bin, Now, Now) ->
	Bin;
%% Same minute: keep the first 23 bytes ("Www, DD Mmm YYYY HH:MM:"),
%% re-render only the seconds.
update_rfc1123(<< Keep:23/binary, _/bits >>,
		{Date, {H, M, _}}, {Date, {H, M, S}}) ->
	<< Keep/binary, (pad_int(S))/binary, " GMT" >>;
%% Same hour: keep 20 bytes (through "HH:"), re-render MM:SS.
update_rfc1123(<< Keep:20/binary, _/bits >>,
		{Date, {H, _, _}}, {Date, {H, M, S}}) ->
	<< Keep/binary, (pad_int(M))/binary, $:, (pad_int(S))/binary, " GMT" >>;
%% Same day: keep the 17-byte date prefix, re-render the whole time.
update_rfc1123(<< Keep:17/binary, _/bits >>, {Date, _}, {Date, {H, M, S}}) ->
	<< Keep/binary, (pad_int(H))/binary, $:, (pad_int(M))/binary,
		$:, (pad_int(S))/binary, " GMT" >>;
%% Same year and month: skip the stale "Www, DD" (7 bytes), keep the
%% " Mmm YYYY " part (10 bytes), re-render weekday, day and time.
update_rfc1123(<< _:7/binary, Keep:10/binary, _/bits >>,
		{{Y, Mo, _}, _}, {Date = {Y, Mo, D}, {H, M, S}}) ->
	Wday = calendar:day_of_the_week(Date),
	<< (weekday(Wday))/binary, ", ", (pad_int(D))/binary, Keep/binary,
		(pad_int(H))/binary, $:, (pad_int(M))/binary,
		$:, (pad_int(S))/binary, " GMT" >>;
%% Same year only: keep just the " YYYY " part (6 bytes after the
%% 11-byte "Www, DD Mmm" prefix), re-render everything else.
update_rfc1123(<< _:11/binary, Keep:6/binary, _/bits >>,
		{{Y, _, _}, _}, {Date = {Y, Mo, D}, {H, M, S}}) ->
	Wday = calendar:day_of_the_week(Date),
	<< (weekday(Wday))/binary, ", ", (pad_int(D))/binary, " ",
		(month(Mo))/binary, Keep/binary,
		(pad_int(H))/binary, $:, (pad_int(M))/binary,
		$:, (pad_int(S))/binary, " GMT" >>;
%% Everything changed (or no previous value): render from scratch.
update_rfc1123(_, _, {Date = {Y, Mo, D}, {H, M, S}}) ->
	Wday = calendar:day_of_the_week(Date),
	<< (weekday(Wday))/binary, ", ", (pad_int(D))/binary, " ",
		(month(Mo))/binary, " ", (list_to_binary(integer_to_list(Y)))/binary,
		" ", (pad_int(H))/binary, $:, (pad_int(M))/binary,
		$:, (pad_int(S))/binary, " GMT" >>.
+
%% Following suggestion by MononcQc on #erlounge.
%% Render a 0..59 value as exactly two ASCII digits, zero padded.
-spec pad_int(0..59) -> binary().
pad_int(X) ->
	<< ($0 + X div 10), ($0 + X rem 10) >>.
+
%% Three-letter English weekday name, indexed as in
%% calendar:day_of_the_week/1 (1 = Monday .. 7 = Sunday).
-spec weekday(1..7) -> <<_:24>>.
weekday(DayNum) ->
	element(DayNum, {<<"Mon">>, <<"Tue">>, <<"Wed">>, <<"Thu">>,
		<<"Fri">>, <<"Sat">>, <<"Sun">>}).
+
%% Three-letter English month name for a 1..12 month number.
-spec month(1..12) -> <<_:24>>.
month(MonthNum) ->
	element(MonthNum, {<<"Jan">>, <<"Feb">>, <<"Mar">>, <<"Apr">>,
		<<"May">>, <<"Jun">>, <<"Jul">>, <<"Aug">>,
		<<"Sep">>, <<"Oct">>, <<"Nov">>, <<"Dec">>}).
+
%% Tests.

-ifdef(TEST).

%% Each tuple is {ExpectedOutput, PreviousTime, CurrentTime, CachedBin},
%% exercising every incremental clause of update_rfc1123/3: no cache,
%% identical time, second/minute/hour/day/month/year rollovers.
update_rfc1123_test_() ->
	Tests = [
		{<<"Sat, 14 May 2011 14:25:33 GMT">>, undefined,
			{{2011, 5, 14}, {14, 25, 33}}, <<>>},
		{<<"Sat, 14 May 2011 14:25:33 GMT">>, {{2011, 5, 14}, {14, 25, 33}},
			{{2011, 5, 14}, {14, 25, 33}}, <<"Sat, 14 May 2011 14:25:33 GMT">>},
		{<<"Sat, 14 May 2011 14:25:34 GMT">>, {{2011, 5, 14}, {14, 25, 33}},
			{{2011, 5, 14}, {14, 25, 34}}, <<"Sat, 14 May 2011 14:25:33 GMT">>},
		{<<"Sat, 14 May 2011 14:26:00 GMT">>, {{2011, 5, 14}, {14, 25, 59}},
			{{2011, 5, 14}, {14, 26, 0}}, <<"Sat, 14 May 2011 14:25:59 GMT">>},
		{<<"Sat, 14 May 2011 15:00:00 GMT">>, {{2011, 5, 14}, {14, 59, 59}},
			{{2011, 5, 14}, {15, 0, 0}}, <<"Sat, 14 May 2011 14:59:59 GMT">>},
		{<<"Sun, 15 May 2011 00:00:00 GMT">>, {{2011, 5, 14}, {23, 59, 59}},
			{{2011, 5, 15}, { 0, 0, 0}}, <<"Sat, 14 May 2011 23:59:59 GMT">>},
		{<<"Wed, 01 Jun 2011 00:00:00 GMT">>, {{2011, 5, 31}, {23, 59, 59}},
			{{2011, 6, 1}, { 0, 0, 0}}, <<"Tue, 31 May 2011 23:59:59 GMT">>},
		{<<"Sun, 01 Jan 2012 00:00:00 GMT">>, {{2011, 5, 31}, {23, 59, 59}},
			{{2012, 1, 1}, { 0, 0, 0}}, <<"Sat, 31 Dec 2011 23:59:59 GMT">>}
	],
	[{R, fun() -> R = update_rfc1123(B, P, N) end} || {R, P, N, B} <- Tests].

%% Exhaustive check of pad_int/1 over its full 0..59 input domain.
pad_int_test_() ->
	Tests = [
		{ 0, <<"00">>}, { 1, <<"01">>}, { 2, <<"02">>}, { 3, <<"03">>},
		{ 4, <<"04">>}, { 5, <<"05">>}, { 6, <<"06">>}, { 7, <<"07">>},
		{ 8, <<"08">>}, { 9, <<"09">>}, {10, <<"10">>}, {11, <<"11">>},
		{12, <<"12">>}, {13, <<"13">>}, {14, <<"14">>}, {15, <<"15">>},
		{16, <<"16">>}, {17, <<"17">>}, {18, <<"18">>}, {19, <<"19">>},
		{20, <<"20">>}, {21, <<"21">>}, {22, <<"22">>}, {23, <<"23">>},
		{24, <<"24">>}, {25, <<"25">>}, {26, <<"26">>}, {27, <<"27">>},
		{28, <<"28">>}, {29, <<"29">>}, {30, <<"30">>}, {31, <<"31">>},
		{32, <<"32">>}, {33, <<"33">>}, {34, <<"34">>}, {35, <<"35">>},
		{36, <<"36">>}, {37, <<"37">>}, {38, <<"38">>}, {39, <<"39">>},
		{40, <<"40">>}, {41, <<"41">>}, {42, <<"42">>}, {43, <<"43">>},
		{44, <<"44">>}, {45, <<"45">>}, {46, <<"46">>}, {47, <<"47">>},
		{48, <<"48">>}, {49, <<"49">>}, {50, <<"50">>}, {51, <<"51">>},
		{52, <<"52">>}, {53, <<"53">>}, {54, <<"54">>}, {55, <<"55">>},
		{56, <<"56">>}, {57, <<"57">>}, {58, <<"58">>}, {59, <<"59">>}
	],
	[{I, fun() -> O = pad_int(I) end} || {I, O} <- Tests].

-endif.
--- /dev/null
+%% Copyright 2007 Mochi Media, Inc.
+%% Copyright 2011 Thomas Burdick <thomas.burdick@gmail.com>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc HTTP Cookie parsing and generating (RFC 2965).
+
+-module(cowboy_cookies).
+
+-export([parse_cookie/1, cookie/3, cookie/2]). %% API.
+
+%% Types.
+-type kv() :: {Name::binary(), Value::binary()}.
+-type kvlist() :: [kv()].
+-type cookie_option() :: {max_age, integer()}
+ | {local_time, calendar:datetime()}
+ | {domain, binary()} | {path, binary()}
+ | {secure, true | false} | {http_only, true | false}.
+-export_type([kv/0, kvlist/0, cookie_option/0]).
+
+-define(QUOTE, $\").
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% API.
+
%% @doc Parse the contents of a Cookie header field, ignoring cookie
%% attributes, and return a simple property list.
%% Attributes (tokens starting with <em>$</em>) are dropped by the
%% internal parse_cookie/2 accumulator loop.
-spec parse_cookie(binary()) -> kvlist().
parse_cookie(<<>>) ->
	[];
parse_cookie(Cookie) when is_binary(Cookie) ->
	parse_cookie(Cookie, []).
+
%% @equiv cookie(Key, Value, [])
%% Convenience wrapper generating a Set-Cookie tuple with no options.
-spec cookie(binary(), binary()) -> kv().
cookie(Key, Value) when is_binary(Key) andalso is_binary(Value) ->
	cookie(Key, Value, []).
+
%% @doc Generate a Set-Cookie header field tuple.
%% Builds the value incrementally: "Key=Value; Version=1" followed by
%% the optional Expires/Max-Age, Secure, Domain, Path and HttpOnly
%% attributes, each rendered as an empty binary when absent. quote/1
%% raises {cookie_quoting_required, _} if a value contains characters
%% that would need quoting.
-spec cookie(binary(), binary(), [cookie_option()]) -> kv().
cookie(Key, Value, Options) when is_binary(Key)
		andalso is_binary(Value) andalso is_list(Options) ->
	Cookie = <<(any_to_binary(Key))/binary, "=",
		(quote(Value))/binary, "; Version=1">>,
	%% Set-Cookie:
	%% Comment, Domain, Max-Age, Path, Secure, Version
	ExpiresPart =
		case proplists:get_value(max_age, Options) of
			undefined ->
				<<"">>;
			RawAge ->
				%% Expires is derived from max_age relative to
				%% local_time when given, or the current local time.
				When = case proplists:get_value(local_time, Options) of
						undefined ->
							calendar:local_time();
						LocalTime ->
							LocalTime
					end,
				%% Negative ages are clamped to 0 (expire immediately).
				Age = case RawAge < 0 of
						true ->
							0;
						false ->
							RawAge
					end,
				AgeBinary = quote(Age),
				CookieDate = age_to_cookie_date(Age, When),
				<<"; Expires=", CookieDate/binary,
				"; Max-Age=", AgeBinary/binary>>
		end,
	SecurePart =
		case proplists:get_value(secure, Options) of
			true ->
				<<"; Secure">>;
			_ ->
				<<"">>
		end,
	DomainPart =
		case proplists:get_value(domain, Options) of
			undefined ->
				<<"">>;
			Domain ->
				<<"; Domain=", (quote(Domain))/binary>>
		end,
	PathPart =
		case proplists:get_value(path, Options) of
			undefined ->
				<<"">>;
			Path ->
				<<"; Path=", (quote(Path))/binary>>
		end,
	HttpOnlyPart =
		case proplists:get_value(http_only, Options) of
			true ->
				<<"; HttpOnly">>;
			_ ->
				<<"">>
		end,
	CookieParts = <<Cookie/binary, ExpiresPart/binary, SecurePart/binary,
		DomainPart/binary, PathPart/binary, HttpOnlyPart/binary>>,
	{<<"Set-Cookie">>, CookieParts}.
+
+%% Internal.
+
%% @doc Check if a character is a white space character
%% (space, tab, carriage return or line feed).
is_whitespace(C) ->
	C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n.
+
%% @doc Check if a character is a separator.
%% Covers all control characters (< 32) plus the HTTP token separator
%% set, including the double quote via the ?QUOTE macro.
is_separator(C) when C < 32 -> true;
is_separator($\s) -> true;
is_separator($\t) -> true;
is_separator($() -> true;
is_separator($)) -> true;
is_separator($<) -> true;
is_separator($>) -> true;
is_separator($@) -> true;
is_separator($,) -> true;
is_separator($;) -> true;
is_separator($:) -> true;
is_separator($\\) -> true;
is_separator(?QUOTE) -> true;
is_separator($/) -> true;
is_separator($[) -> true;
is_separator($]) -> true;
is_separator($?) -> true;
is_separator($=) -> true;
is_separator(${) -> true;
is_separator($}) -> true;
is_separator(_) -> false.
+
%% @doc Check if a binary has an ASCII separator character.
%% NOTE(review): <em>$/</em> is explicitly skipped even though
%% is_separator/1 treats it as a separator -- presumably so path-like
%% cookie values need no quoting; confirm against quote/1 callers.
has_seperator(<<>>) ->
	false;
has_seperator(<<$/, Rest/binary>>) ->
	has_seperator(Rest);
has_seperator(<<C, Rest/binary>>) ->
	case is_separator(C) of
		true ->
			true;
		false ->
			has_seperator(Rest)
	end.
+
%% @doc Convert to a binary and raise an error if quoting is required.
%% Quoting is broken in different ways for different browsers. It's
%% better to simply avoid doing it at all.
%% Raises {cookie_quoting_required, Value} when the converted value
%% contains a separator character (except $/; see has_seperator/1).
%% @end
-spec quote(term()) -> binary().
quote(V0) ->
	V = any_to_binary(V0),
	case has_seperator(V) of
		true ->
			erlang:error({cookie_quoting_required, V});
		false ->
			V
	end.
+
%% Offset a datetime by a number of seconds (may be negative) by
%% round-tripping through gregorian seconds.
-spec add_seconds(integer(), calendar:datetime()) -> calendar:datetime().
add_seconds(Secs, DateTime) ->
	calendar:gregorian_seconds_to_datetime(
		calendar:datetime_to_gregorian_seconds(DateTime) + Secs).
+
%% Format "now + Age seconds" as an RFC-2109 date for the Expires
%% attribute, delegating formatting to cowboy_clock:rfc2109/1.
-spec age_to_cookie_date(integer(), calendar:datetime()) -> binary().
age_to_cookie_date(Age, LocalTime) ->
	cowboy_clock:rfc2109(add_seconds(Age, LocalTime)).
+
%% Accumulator loop for parse_cookie/1: read name=value pairs one at a
%% time, skipping empty names and attribute names starting with $.
-spec parse_cookie(binary(), kvlist()) -> kvlist().
parse_cookie(<<>>, Acc) ->
	lists:reverse(Acc);
parse_cookie(String, Acc) ->
	{{Token, Value}, Rest} = read_pair(String),
	Acc1 = case Token of
			<<"">> ->
				Acc;
			<<"$", _R/binary>> ->
				Acc;
			_ ->
				[{Token, Value} | Acc]
		end,
	parse_cookie(Rest, Acc1).
+
%% Read one name[=value] pair, then skip up to and past the next
%% ; or , separator, returning the remaining input.
-spec read_pair(binary()) -> {{binary(), binary()}, binary()}.
read_pair(String) ->
	{Token, Rest} = read_token(skip_whitespace(String)),
	{Value, Rest1} = read_value(skip_whitespace(Rest)),
	{{Token, Value}, skip_past_separator(Rest1)}.
+
%% Read a cookie value after a leading "="; quoted values are unescaped
%% by read_quoted/1, bare values read as a token. Without a leading "="
%% the value is empty and the input is returned untouched.
-spec read_value(binary()) -> {binary(), binary()}.
read_value(<<"=", Value/binary>>) ->
	Value1 = skip_whitespace(Value),
	case Value1 of
		<<?QUOTE, _R/binary>> ->
			read_quoted(Value1);
		_ ->
			read_token(Value1)
	end;
read_value(String) ->
	{<<"">>, String}.
+
%% Read a double-quoted value, dropping the opening quote.
-spec read_quoted(binary()) -> {binary(), binary()}.
read_quoted(<<?QUOTE, String/binary>>) ->
	read_quoted(String, <<"">>).

%% Accumulate until the closing quote (or end of input), honoring
%% backslash escapes: \X yields the literal X. Clause order matters:
%% the escape clause must be tried before the generic character clause.
-spec read_quoted(binary(), binary()) -> {binary(), binary()}.
read_quoted(<<"">>, Acc) ->
	{Acc, <<"">>};
read_quoted(<<?QUOTE, Rest/binary>>, Acc) ->
	{Acc, Rest};
read_quoted(<<$\\, Any, Rest/binary>>, Acc) ->
	read_quoted(Rest, <<Acc/binary, Any>>);
read_quoted(<<C, Rest/binary>>, Acc) ->
	read_quoted(Rest, <<Acc/binary, C>>).
+
%% @doc Drop leading characters while the predicate returns true,
%% returning the first suffix whose head fails the predicate.
binary_dropwhile(_Pred, <<"">>) ->
	<<"">>;
binary_dropwhile(Pred, <<C, Rest/binary>> = String) ->
	case Pred(C) of
		true -> binary_dropwhile(Pred, Rest);
		false -> String
	end.
+
%% @doc Remove leading whitespace (as defined by is_whitespace/1).
-spec skip_whitespace(binary()) -> binary().
skip_whitespace(String) ->
	binary_dropwhile(fun is_whitespace/1, String).
+
%% @doc Split a binary at the first character for which the predicate
%% returns true; Head accumulates the prefix already scanned. Returns
%% {Prefix, Suffix} where Suffix starts with the matching character
%% (or is empty when no character matched).
binary_splitwith(_Pred, Head, <<>>) ->
	{Head, <<>>};
binary_splitwith(Pred, Head, <<C, Rest/binary>> = Tail) ->
	case Pred(C) of
		true -> {Head, Tail};
		false -> binary_splitwith(Pred, <<Head/binary, C>>, Rest)
	end.
+
%% @doc Split a binary with a function returning true or false on each char.
%% Entry point seeding binary_splitwith/3 with an empty prefix.
binary_splitwith(F, String) ->
	binary_splitwith(F, <<>>, String).
+
%% @doc Split the binary when the next separator is found.
%% Returns {Token, Rest} where Rest begins at the separator.
-spec read_token(binary()) -> {binary(), binary()}.
read_token(String) ->
	binary_splitwith(fun is_separator/1, String).
+
%% @doc Return the remainder of the string after the first ; or ,
%% character; returns an empty binary when no separator is present.
-spec skip_past_separator(binary()) -> binary().
skip_past_separator(<<C, Rest/binary>>) when C =:= $;; C =:= $, ->
	Rest;
skip_past_separator(<<_C, Rest/binary>>) ->
	skip_past_separator(Rest);
skip_past_separator(<<"">>) ->
	<<"">>.
+
%% Coerce a binary, string, atom or integer to a binary.
%% The guards are mutually exclusive, so clause order is irrelevant.
-spec any_to_binary(binary() | string() | atom() | integer()) -> binary().
any_to_binary(Value) when is_binary(Value) ->
	Value;
any_to_binary(Value) when is_list(Value) ->
	erlang:list_to_binary(Value);
any_to_binary(Value) when is_atom(Value) ->
	erlang:atom_to_binary(Value, latin1);
any_to_binary(Value) when is_integer(Value) ->
	list_to_binary(integer_to_list(Value)).
+
%% Tests.

-ifdef(TEST).

%% quote/1 must reject separator-bearing values and pass atoms through.
quote_test() ->
	%% ?assertError eunit macro is not compatible with coverage module
	_ = try quote(<<":wq">>)
	catch error:{cookie_quoting_required, <<":wq">>} -> ok
	end,
	?assertEqual(<<"foo">>,quote(foo)),
	ok.

%% Cookie header parsing: RFC 2109 example plus edge cases (empty
%% names, escapes inside quoted values, bare separators).
parse_cookie_test() ->
	%% RFC example
	C1 = <<"$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
	Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\";
	Shipping=\"FedEx\"; $Path=\"/acme\"">>,
	?assertEqual(
		[{<<"Customer">>,<<"WILE_E_COYOTE">>},
			{<<"Part_Number">>,<<"Rocket_Launcher_0001">>},
			{<<"Shipping">>,<<"FedEx">>}],
		parse_cookie(C1)),
	%% Potential edge cases
	?assertEqual(
		[{<<"foo">>, <<"x">>}],
		parse_cookie(<<"foo=\"\\x\"">>)),
	?assertEqual(
		[],
		parse_cookie(<<"=">>)),
	?assertEqual(
		[{<<"foo">>, <<"">>}, {<<"bar">>, <<"">>}],
		parse_cookie(<<" foo ; bar ">>)),
	?assertEqual(
		[{<<"foo">>, <<"">>}, {<<"bar">>, <<"">>}],
		parse_cookie(<<"foo=;bar=">>)),
	?assertEqual(
		[{<<"foo">>, <<"\";">>}, {<<"bar">>, <<"">>}],
		parse_cookie(<<"foo = \"\\\";\";bar ">>)),
	?assertEqual(
		[{<<"foo">>, <<"\";bar">>}],
		parse_cookie(<<"foo=\"\\\";bar">>)),
	?assertEqual(
		[],
		parse_cookie(<<"">>)),
	?assertEqual(
		[{<<"foo">>, <<"bar">>}, {<<"baz">>, <<"wibble">>}],
		parse_cookie(<<"foo=bar , baz=wibble ">>)),
	ok.

%% Domain and HttpOnly attributes are appended in the expected order.
domain_test() ->
	?assertEqual(
		{<<"Set-Cookie">>,
			<<"Customer=WILE_E_COYOTE; "
			"Version=1; "
			"Domain=acme.com; "
			"HttpOnly">>},
		cookie(<<"Customer">>, <<"WILE_E_COYOTE">>,
			[{http_only, true}, {domain, <<"acme.com">>}])),
	ok.

%% max_age without an explicit local_time uses the current local time;
%% only the shape of the Expires attribute is asserted here.
local_time_test() ->
	{<<"Set-Cookie">>, B} = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>,
		[{max_age, 111}, {secure, true}]),

	?assertMatch(
		[<<"Customer=WILE_E_COYOTE">>,
			<<" Version=1">>,
			<<" Expires=", _R/binary>>,
			<<" Max-Age=111">>,
			<<" Secure">>],
		binary:split(B, <<";">>, [global])),
	ok.

%% Full cookie/2,3 coverage: paths, unknown options being ignored,
%% empty keys, clamped negative max_age and a fixed local_time.
-spec cookie_test() -> no_return(). %% Not actually true, just a bad option.
cookie_test() ->
	C1 = {<<"Set-Cookie">>,
		<<"Customer=WILE_E_COYOTE; "
		"Version=1; "
		"Path=/acme">>},
	C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>, [{path, <<"/acme">>}]),

	C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>,
		[{path, <<"/acme">>}, {badoption, <<"negatory">>}]),

	{<<"Set-Cookie">>,<<"=NoKey; Version=1">>}
		= cookie(<<"">>, <<"NoKey">>, []),
	{<<"Set-Cookie">>,<<"=NoKey; Version=1">>}
		= cookie(<<"">>, <<"NoKey">>),
	LocalTime = calendar:universal_time_to_local_time(
		{{2007, 5, 15}, {13, 45, 33}}),
	C2 = {<<"Set-Cookie">>,
		<<"Customer=WILE_E_COYOTE; "
		"Version=1; "
		"Expires=Tue, 15 May 2007 13:45:33 GMT; "
		"Max-Age=0">>},
	C2 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>,
		[{max_age, -111}, {local_time, LocalTime}]),
	C3 = {<<"Set-Cookie">>,
		<<"Customer=WILE_E_COYOTE; "
		"Version=1; "
		"Expires=Wed, 16 May 2007 13:45:50 GMT; "
		"Max-Age=86417">>},
	C3 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>,
		[{max_age, 86417}, {local_time, LocalTime}]),
	ok.

-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Dispatch requests according to a hostname and path.
+-module(cowboy_dispatcher).
+
+-export([split_host/1, split_path/2, match/3]). %% API.
+
+-type bindings() :: list({atom(), binary()}).
+-type tokens() :: list(binary()).
+-type match_rule() :: '_' | '*' | list(binary() | '_' | '...' | atom()).
+-type dispatch_path() :: list({match_rule(), module(), any()}).
+-type dispatch_rule() :: {Host::match_rule(), Path::dispatch_path()}.
+-type dispatch_rules() :: list(dispatch_rule()).
+
+-export_type([bindings/0, tokens/0, dispatch_rules/0]).
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% API.
+
%% @doc Split a hostname into a list of tokens.
%% Returns {DotSeparatedTokens, HostWithoutPort, Port}, where Port is
%% 'undefined' when the hostname carries no ":port" suffix. A non-numeric
%% port crashes in list_to_integer/1, as before.
-spec split_host(binary())
	-> {tokens(), binary(), undefined | inet:ip_port()}.
split_host(<<>>) ->
	{[], <<>>, undefined};
split_host(Host) ->
	case binary:split(Host, <<":">>) of
		[Host] ->
			%% No colon found: the whole input is the hostname.
			{binary:split(Host, <<".">>, [global, trim]), Host, undefined};
		[Name, PortBin] ->
			PortNumber = list_to_integer(binary_to_list(PortBin)),
			{binary:split(Name, <<".">>, [global, trim]), Name, PortNumber}
	end.
+
%% @doc Split a path into a list of path segments.
%%
%% Following RFC2396, this function may return path segments containing any
%% character, including <em>/</em> if, and only if, a <em>/</em> was escaped
%% and part of a path segment.
%% Returns {Segments, RawPathWithoutQs, QueryString}; everything after
%% the first <em>?</em> is the query string and is not URL-decoded.
-spec split_path(binary(), fun((binary()) -> binary())) ->
		{tokens(), binary(), binary()}.
split_path(Path, URLDec) ->
	case binary:split(Path, <<"?">>) of
		[Path] -> {do_split_path(Path, <<"/">>, URLDec), Path, <<>>};
		[<<>>, Qs] -> {[], <<>>, Qs};
		[Path2, Qs] -> {do_split_path(Path2, <<"/">>, URLDec), Path2, Qs}
	end.
+
%% Split a raw path on Separator and URL-decode every segment.
%% An absolute path produces a leading empty segment, which is dropped.
-spec do_split_path(binary(), <<_:8>>, fun((binary()) -> binary())) -> tokens().
do_split_path(RawPath, Separator, URLDec) ->
	Segments = binary:split(RawPath, Separator, [global, trim]),
	Tokens = case Segments of
		[<<>>|Rest] -> Rest;
		_ -> Segments
	end,
	[URLDec(Token) || Token <- Tokens].
+
+%% @doc Match hostname tokens and path tokens against dispatch rules.
+%%
+%% It is typically used for matching tokens for the hostname and path of
+%% the request against a global dispatch rule for your listener.
+%%
+%% Dispatch rules are a list of <em>{Hostname, PathRules}</em> tuples, with
+%% <em>PathRules</em> being a list of <em>{Path, HandlerMod, HandlerOpts}</em>.
+%%
+%% <em>Hostname</em> and <em>Path</em> are match rules and can be either the
+%% atom <em>'_'</em>, which matches everything for a single token, the atom
+%% <em>'*'</em>, which matches everything for the rest of the tokens, or a
+%% list of tokens. Each token can be either a binary, the atom <em>'_'</em>,
+%% the atom '...' or a named atom. A binary token must match exactly,
+%% <em>'_'</em> matches everything for a single token, <em>'...'</em> matches
+%% everything for the rest of the tokens and a named atom will bind the
+%% corresponding token value and return it.
+%%
+%% The list of hostname tokens is reversed before matching. For example, if
+%% we were to match "www.dev-extend.eu", we would first match "eu", then
+%% "dev-extend", then "www". This means that in the context of hostnames,
+%% the <em>'...'</em> atom matches properly the lower levels of the domain
+%% as would be expected.
+%%
+%% When a result is found, this function will return the handler module and
+%% options found in the dispatch list, a key-value list of bindings and
+%% the tokens that were matched by the <em>'...'</em> atom for both the
+%% hostname and path.
%% Walk the dispatch rules: match the hostname first, then dispatch the
%% path within the matched host's path rules.
-spec match(Host::tokens(), Path::tokens(), dispatch_rules())
	-> {ok, module(), any(), bindings(),
		HostInfo::undefined | tokens(),
		PathInfo::undefined | tokens()}
	| {error, notfound, host} | {error, notfound, path}.
%% Exhausted all rules without a host match.
match(_Host, _Path, []) ->
	{error, notfound, host};
%% Wildcard host rule: skip host matching entirely.
match(_Host, Path, [{'_', PathMatchs}|_Tail]) ->
	match_path(Path, PathMatchs, [], undefined);
match(Host, Path, [{HostMatch, PathMatchs}|Tail]) ->
	case try_match(host, Host, HostMatch) of
		false ->
			match(Host, Path, Tail);
		{true, HostBinds, undefined} ->
			match_path(Path, PathMatchs, HostBinds, undefined);
		{true, HostBinds, HostInfo} ->
			%% HostInfo was matched against reversed tokens
			%% (see try_match/3); restore the natural order.
			match_path(Path, PathMatchs, HostBinds, lists:reverse(HostInfo))
	end.
+
%% Try each path rule in order for the already-matched host, carrying
%% the host bindings and host info into the final result.
-spec match_path(tokens(), dispatch_path(), bindings(),
		HostInfo::undefined | tokens())
	-> {ok, module(), any(), bindings(),
		HostInfo::undefined | tokens(),
		PathInfo::undefined | tokens()}
	| {error, notfound, path}.
%% Exhausted all path rules for this host.
match_path(_Path, [], _HostBinds, _HostInfo) ->
	{error, notfound, path};
%% Wildcard path rule matches any path.
match_path(_Path, [{'_', Handler, Opts}|_Tail], HostBinds, HostInfo) ->
	{ok, Handler, Opts, HostBinds, HostInfo, undefined};
%% The special '*' path (e.g. OPTIONS *) only matches a '*' rule.
match_path('*', [{'*', Handler, Opts}|_Tail], HostBinds, HostInfo) ->
	{ok, Handler, Opts, HostBinds, HostInfo, undefined};
match_path(Path, [{PathMatch, Handler, Opts}|Tail], HostBinds, HostInfo) ->
	case try_match(path, Path, PathMatch) of
		false ->
			match_path(Path, Tail, HostBinds, HostInfo);
		{true, PathBinds, PathInfo} ->
			{ok, Handler, Opts, HostBinds ++ PathBinds, HostInfo, PathInfo}
	end.
+
+%% Internal.
+
%% Run list_match/3 over the tokens. Hostname tokens (and their rule)
%% are reversed first so that '...' matches the lower subdomain levels.
-spec try_match(host | path, tokens(), match_rule())
	-> {true, bindings(), undefined | tokens()} | false.
try_match(host, List, Match) ->
	list_match(lists:reverse(List), lists:reverse(Match), []);
try_match(path, List, Match) ->
	list_match(List, Match, []).
+
%% Token-by-token matcher; clause order is significant: '...' first,
%% then '_', then literal equality, then named-atom binding, and only
%% then the success/failure terminal clauses.
-spec list_match(tokens(), match_rule(), bindings())
	-> {true, bindings(), undefined | tokens()} | false.
%% Atom '...' matches any trailing path, stop right now.
list_match(List, ['...'], Binds) ->
	{true, Binds, List};
%% Atom '_' matches anything, continue.
list_match([_E|Tail], ['_'|TailMatch], Binds) ->
	list_match(Tail, TailMatch, Binds);
%% Both values match, continue.
list_match([E|Tail], [E|TailMatch], Binds) ->
	list_match(Tail, TailMatch, Binds);
%% Bind E to the variable name V and continue.
list_match([E|Tail], [V|TailMatch], Binds) when is_atom(V) ->
	list_match(Tail, TailMatch, [{V, E}|Binds]);
%% Match complete.
list_match([], [], Binds) ->
	{true, Binds, undefined};
%% Values don't match, stop.
list_match(_List, _Match, _Binds) ->
	false.
+
%% Tests.

-ifdef(TEST).

%% Hostname splitting: empty input, empty labels, ports, long names.
split_host_test_() ->
	%% {Host, Result}
	Tests = [
		{<<"">>, {[], <<"">>, undefined}},
		{<<".........">>, {[], <<".........">>, undefined}},
		{<<"*">>, {[<<"*">>], <<"*">>, undefined}},
		{<<"cowboy.dev-extend.eu">>,
			{[<<"cowboy">>, <<"dev-extend">>, <<"eu">>],
			<<"cowboy.dev-extend.eu">>, undefined}},
		{<<"dev-extend..eu">>,
			{[<<"dev-extend">>, <<>>, <<"eu">>],
			<<"dev-extend..eu">>, undefined}},
		{<<"dev-extend.eu">>,
			{[<<"dev-extend">>, <<"eu">>], <<"dev-extend.eu">>, undefined}},
		{<<"dev-extend.eu:8080">>,
			{[<<"dev-extend">>, <<"eu">>], <<"dev-extend.eu">>, 8080}},
		{<<"a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s.t.u.v.w.x.y.z">>,
			{[<<"a">>, <<"b">>, <<"c">>, <<"d">>, <<"e">>, <<"f">>, <<"g">>,
			<<"h">>, <<"i">>, <<"j">>, <<"k">>, <<"l">>, <<"m">>, <<"n">>,
			<<"o">>, <<"p">>, <<"q">>, <<"r">>, <<"s">>, <<"t">>, <<"u">>,
			<<"v">>, <<"w">>, <<"x">>, <<"y">>, <<"z">>],
			<<"a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s.t.u.v.w.x.y.z">>,
			undefined}}
	],
	[{H, fun() -> R = split_host(H) end} || {H, R} <- Tests].

%% Malformed ports must crash (asserted via catch on the exit).
split_host_fail_test_() ->
	Tests = [
		<<"dev-extend.eu:owns">>,
		<<"dev-extend.eu: owns">>,
		<<"dev-extend.eu:42fun">>,
		<<"dev-extend.eu: 42fun">>,
		<<"dev-extend.eu:42 fun">>,
		<<"dev-extend.eu:fun 42">>,
		<<"dev-extend.eu: 42">>,
		<<":owns">>,
		<<":42 fun">>
	],
	[{H, fun() -> case catch split_host(H) of
		{'EXIT', _Reason} -> ok
	end end} || H <- Tests].

%% Path splitting: query string separation and URL decoding of segments.
split_path_test_() ->
	%% {Path, Result, QueryString}
	Tests = [
		{<<"?">>, [], <<"">>, <<"">>},
		{<<"???">>, [], <<"">>, <<"??">>},
		{<<"/">>, [], <<"/">>, <<"">>},
		{<<"/users">>, [<<"users">>], <<"/users">>, <<"">>},
		{<<"/users?">>, [<<"users">>], <<"/users">>, <<"">>},
		{<<"/users?a">>, [<<"users">>], <<"/users">>, <<"a">>},
		{<<"/users/42/friends?a=b&c=d&e=notsure?whatever">>,
			[<<"users">>, <<"42">>, <<"friends">>],
			<<"/users/42/friends">>, <<"a=b&c=d&e=notsure?whatever">>},
		{<<"/users/a+b/c%21d?e+f=g+h">>,
			[<<"users">>, <<"a b">>, <<"c!d">>],
			<<"/users/a+b/c%21d">>, <<"e+f=g+h">>}
	],
	URLDecode = fun(Bin) -> cowboy_http:urldecode(Bin, crash) end,
	[{P, fun() -> {R, RawP, Qs} = split_path(P, URLDecode) end}
		|| {P, R, RawP, Qs} <- Tests].
+
+%% EUnit generator: match/3 dispatch over a rule table, checking
+%% handler, options and bindings (host/path info are both undefined
+%% because no rule here uses '...').
+match_test_() ->
+ Dispatch = [
+ {[<<"www">>, '_', <<"dev-extend">>, <<"eu">>], [
+ {[<<"users">>, '_', <<"mails">>], match_any_subdomain_users, []}
+ ]},
+ {[<<"dev-extend">>, <<"eu">>], [
+ {[<<"users">>, id, <<"friends">>], match_extend_users_friends, []},
+ {'_', match_extend, []}
+ ]},
+ {[<<"dev-extend">>, var], [
+ {[<<"threads">>, var], match_duplicate_vars,
+ [we, {expect, two}, var, here]}
+ ]},
+ {[<<"erlang">>, ext], [
+ {'_', match_erlang_ext, []}
+ ]},
+ {'_', [
+ {[<<"users">>, id, <<"friends">>], match_users_friends, []},
+ {'_', match_any, []}
+ ]}
+ ],
+ %% {Host, Path, Result}
+ Tests = [
+ {[<<"any">>], [], {ok, match_any, [], []}},
+ {[<<"www">>, <<"any">>, <<"dev-extend">>, <<"eu">>],
+ [<<"users">>, <<"42">>, <<"mails">>],
+ {ok, match_any_subdomain_users, [], []}},
+ {[<<"www">>, <<"dev-extend">>, <<"eu">>],
+ [<<"users">>, <<"42">>, <<"mails">>], {ok, match_any, [], []}},
+ {[<<"www">>, <<"dev-extend">>, <<"eu">>], [], {ok, match_any, [], []}},
+ {[<<"www">>, <<"any">>, <<"dev-extend">>, <<"eu">>],
+ [<<"not_users">>, <<"42">>, <<"mails">>], {error, notfound, path}},
+ {[<<"dev-extend">>, <<"eu">>], [], {ok, match_extend, [], []}},
+ {[<<"dev-extend">>, <<"eu">>], [<<"users">>, <<"42">>, <<"friends">>],
+ {ok, match_extend_users_friends, [], [{id, <<"42">>}]}},
+ {[<<"erlang">>, <<"fr">>], '_',
+ {ok, match_erlang_ext, [], [{ext, <<"fr">>}]}},
+ {[<<"any">>], [<<"users">>, <<"444">>, <<"friends">>],
+ {ok, match_users_friends, [], [{id, <<"444">>}]}},
+ {[<<"dev-extend">>, <<"fr">>], [<<"threads">>, <<"987">>],
+ {ok, match_duplicate_vars, [we, {expect, two}, var, here],
+ [{var, <<"fr">>}, {var, <<"987">>}]}}
+ ],
+ [{lists:flatten(io_lib:format("~p, ~p", [H, P])), fun() ->
+ {ok, Handler, Opts, Binds, undefined, undefined} = match(H, P, Dispatch)
+ end} || {H, P, {ok, Handler, Opts, Binds}} <- Tests].
+
+%% EUnit generator: match/3 with '...' rules, checking the captured
+%% host-info and path-info components of the 6-tuple result.
+match_info_test_() ->
+ Dispatch = [
+ {[<<"www">>, <<"dev-extend">>, <<"eu">>], [
+ {[<<"pathinfo">>, <<"is">>, <<"next">>, '...'], match_path, []}
+ ]},
+ {['...', <<"dev-extend">>, <<"eu">>], [
+ {'_', match_any, []}
+ ]}
+ ],
+ Tests = [
+ {[<<"dev-extend">>, <<"eu">>], [],
+ {ok, match_any, [], [], [], undefined}},
+ {[<<"bugs">>, <<"dev-extend">>, <<"eu">>], [],
+ {ok, match_any, [], [], [<<"bugs">>], undefined}},
+ {[<<"cowboy">>, <<"bugs">>, <<"dev-extend">>, <<"eu">>], [],
+ {ok, match_any, [], [], [<<"cowboy">>, <<"bugs">>], undefined}},
+ {[<<"www">>, <<"dev-extend">>, <<"eu">>],
+ [<<"pathinfo">>, <<"is">>, <<"next">>],
+ {ok, match_path, [], [], undefined, []}},
+ {[<<"www">>, <<"dev-extend">>, <<"eu">>],
+ [<<"pathinfo">>, <<"is">>, <<"next">>, <<"path_info">>],
+ {ok, match_path, [], [], undefined, [<<"path_info">>]}},
+ {[<<"www">>, <<"dev-extend">>, <<"eu">>],
+ [<<"pathinfo">>, <<"is">>, <<"next">>, <<"foo">>, <<"bar">>],
+ {ok, match_path, [], [], undefined, [<<"foo">>, <<"bar">>]}}
+ ],
+ [{lists:flatten(io_lib:format("~p, ~p", [H, P])), fun() ->
+ R = match(H, P, Dispatch)
+ end} || {H, P, R} <- Tests].
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Core HTTP parsing API.
+-module(cowboy_http).
+
+%% Parsing.
+-export([list/2, nonempty_list/2, content_type/1, content_type_params/3,
+ media_range/2, conneg/2, language_range/2, entity_tag_match/1,
+ http_date/1, rfc1123_date/1, rfc850_date/1, asctime_date/1,
+ whitespace/2, digits/1, token/2, token_ci/2, quoted_string/2]).
+
+%% Interpretation.
+-export([connection_to_atom/1, urldecode/1, urldecode/2, urlencode/1,
+ urlencode/2]).
+
+-type method() :: 'OPTIONS' | 'GET' | 'HEAD'
+ | 'POST' | 'PUT' | 'DELETE' | 'TRACE' | binary().
+-type uri() :: '*' | {absoluteURI, http | https, Host::binary(),
+ Port::integer() | undefined, Path::binary()}
+ | {scheme, Scheme::binary(), binary()}
+ | {abs_path, binary()} | binary().
+-type version() :: {Major::non_neg_integer(), Minor::non_neg_integer()}.
+-type header() :: 'Cache-Control' | 'Connection' | 'Date' | 'Pragma'
+ | 'Transfer-Encoding' | 'Upgrade' | 'Via' | 'Accept' | 'Accept-Charset'
+ | 'Accept-Encoding' | 'Accept-Language' | 'Authorization' | 'From' | 'Host'
+ | 'If-Modified-Since' | 'If-Match' | 'If-None-Match' | 'If-Range'
+ | 'If-Unmodified-Since' | 'Max-Forwards' | 'Proxy-Authorization' | 'Range'
+ | 'Referer' | 'User-Agent' | 'Age' | 'Location' | 'Proxy-Authenticate'
+ | 'Public' | 'Retry-After' | 'Server' | 'Vary' | 'Warning'
+ | 'Www-Authenticate' | 'Allow' | 'Content-Base' | 'Content-Encoding'
+ | 'Content-Language' | 'Content-Length' | 'Content-Location'
+ | 'Content-Md5' | 'Content-Range' | 'Content-Type' | 'Etag'
+ | 'Expires' | 'Last-Modified' | 'Accept-Ranges' | 'Set-Cookie'
+ | 'Set-Cookie2' | 'X-Forwarded-For' | 'Cookie' | 'Keep-Alive'
+ | 'Proxy-Connection' | binary().
+-type fake_iodata() :: iolist() | binary().
+-type headers() :: [{header(), fake_iodata()}].
+-type status() :: non_neg_integer() | binary().
+
+-export_type([method/0, uri/0, version/0, header/0, headers/0, status/0]).
+
+-include("include/http.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%% Parsing.
+
+%% @doc Parse a non-empty list of the given type.
+%%
+%% Same as list/2 but an empty result is also rejected
+%% with {error, badarg}.
+-spec nonempty_list(binary(), fun()) -> [any(), ...] | {error, badarg}.
+nonempty_list(Data, Fun) ->
+ case list(Data, Fun, []) of
+  Error = {error, badarg} -> Error;
+  [] -> {error, badarg};
+  [_|_] = Acc -> lists:reverse(Acc)
+ end.
+
+%% @doc Parse a list of the given type.
+%% An empty list is a valid result here; see nonempty_list/2 otherwise.
+-spec list(binary(), fun()) -> list() | {error, badarg}.
+list(Data, Fun) ->
+ case list(Data, Fun, []) of
+ {error, badarg} -> {error, badarg};
+ L -> lists:reverse(L)
+ end.
+
+-spec list(binary(), fun(), [binary()]) -> [any()] | {error, badarg}.
+%% From the RFC:
+%% <blockquote>Wherever this construct is used, null elements are allowed,
+%% but do not contribute to the count of elements present.
+%% That is, "(element), , (element) " is permitted, but counts
+%% as only two elements. Therefore, where at least one element is required,
+%% at least one non-null element MUST be present.</blockquote>
+%%
+%% Each element is parsed by Fun in continuation-passing style; parsed
+%% items are prepended to Acc, so the result is in reverse order and the
+%% public entry points above reverse it.
+list(Data, Fun, Acc) ->
+ whitespace(Data,
+ fun (<<>>) -> Acc;
+ (<< $,, Rest/binary >>) -> list(Rest, Fun, Acc);
+ (Rest) -> Fun(Rest,
+ fun (D, I) -> whitespace(D,
+ fun (<<>>) -> [I|Acc];
+ (<< $,, R/binary >>) -> list(R, Fun, [I|Acc]);
+ (_Any) -> {error, badarg}
+ end)
+ end)
+ end).
+
+%% @doc Parse a content type.
+%% Returns {Type, SubType, Params}; type/subtype are lowercased by
+%% media_type/2 and parameters by content_type_params/3.
+-spec content_type(binary()) -> any().
+content_type(Data) ->
+ media_type(Data,
+ fun (Rest, Type, SubType) ->
+ content_type_params(Rest,
+ fun (Params) -> {Type, SubType, Params} end, [])
+ end).
+
+%% Parse `; attr=value' parameters until the input is exhausted;
+%% any other trailing content is an error.
+-spec content_type_params(binary(), fun(), list({binary(), binary()}))
+ -> any().
+content_type_params(Data, Fun, Acc) ->
+ whitespace(Data,
+ fun (<< $;, Rest/binary >>) -> content_type_param(Rest, Fun, Acc);
+ (<<>>) -> Fun(lists:reverse(Acc));
+ (_Rest) -> {error, badarg}
+ end).
+
+%% Parse one parameter: a case-insensitive attribute name (lowercased
+%% by token_ci/2), `=', then a token or quoted-string value (word/2).
+-spec content_type_param(binary(), fun(), list({binary(), binary()}))
+ -> any().
+content_type_param(Data, Fun, Acc) ->
+ whitespace(Data,
+ fun (Rest) ->
+ token_ci(Rest,
+ fun (_Rest2, <<>>) -> {error, badarg};
+ (<< $=, Rest2/binary >>, Attr) ->
+ word(Rest2,
+ fun (Rest3, Value) ->
+ content_type_params(Rest3, Fun,
+ [{Attr, Value}|Acc])
+ end);
+ (_Rest2, _Attr) -> {error, badarg}
+ end)
+ end).
+
+%% @doc Parse a media range.
+%% Result shape: {{Type, SubType, Params}, Quality, AcceptExt} where
+%% Quality is scaled by 1000 (1000 = q=1.0).
+-spec media_range(binary(), fun()) -> any().
+media_range(Data, Fun) ->
+ media_type(Data,
+ fun (Rest, Type, SubType) ->
+ media_range_params(Rest, Fun, Type, SubType, [])
+ end).
+
+%% Parse media-range parameters; when no `q' parameter appears the
+%% quality defaults to 1000 and the accept-ext list is empty.
+-spec media_range_params(binary(), fun(), binary(), binary(),
+ [{binary(), binary()}]) -> any().
+media_range_params(Data, Fun, Type, SubType, Acc) ->
+ whitespace(Data,
+ fun (<< $;, Rest/binary >>) ->
+ whitespace(Rest,
+ fun (Rest2) ->
+ media_range_param_attr(Rest2, Fun, Type, SubType, Acc)
+ end);
+ (Rest) -> Fun(Rest, {{Type, SubType, lists:reverse(Acc)}, 1000, []})
+ end).
+
+%% Parse one `attr=value' parameter inside a media range.
+%% An empty attribute name, or an attribute not followed by `=', is
+%% rejected with {error, badarg}. The catch-all clause mirrors
+%% content_type_param/3: previously a bare attribute here crashed
+%% with function_clause instead of returning an error tuple.
+-spec media_range_param_attr(binary(), fun(), binary(), binary(),
+ [{binary(), binary()}]) -> any().
+media_range_param_attr(Data, Fun, Type, SubType, Acc) ->
+ token_ci(Data,
+ fun (_Rest, <<>>) -> {error, badarg};
+ (<< $=, Rest/binary >>, Attr) ->
+ media_range_param_value(Rest, Fun, Type, SubType, Acc, Attr);
+ (_Any, _Attr) -> {error, badarg}
+ end).
+
+%% A `q' parameter ends the media-range parameters and switches to
+%% quality-value then accept-extension parsing; any other attribute is
+%% accumulated as a normal media-range parameter.
+-spec media_range_param_value(binary(), fun(), binary(), binary(),
+ [{binary(), binary()}], binary()) -> any().
+media_range_param_value(Data, Fun, Type, SubType, Acc, <<"q">>) ->
+ qvalue(Data,
+ fun (Rest, Quality) ->
+ accept_ext(Rest, Fun, Type, SubType, Acc, Quality, [])
+ end);
+media_range_param_value(Data, Fun, Type, SubType, Acc, Attr) ->
+ word(Data,
+ fun (Rest, Value) ->
+ media_range_params(Rest, Fun,
+ Type, SubType, [{Attr, Value}|Acc])
+ end).
+
+%% @doc Parse a media type.
+%% Both type and subtype are case-insensitive tokens, lowercased by
+%% token_ci/2; empty type or subtype is an error.
+-spec media_type(binary(), fun()) -> any().
+media_type(Data, Fun) ->
+ token_ci(Data,
+ fun (_Rest, <<>>) -> {error, badarg};
+ (<< $/, Rest/binary >>, Type) ->
+ token_ci(Rest,
+ fun (_Rest2, <<>>) -> {error, badarg};
+ (Rest2, SubType) -> Fun(Rest2, Type, SubType)
+ end);
+ (_Rest, _Type) -> {error, badarg}
+ end).
+
+%% Parse the accept-extension list that may follow a qvalue. Extensions
+%% are either bare tokens or attr=value pairs; both forms are collected
+%% in Acc (bare binaries or 2-tuples respectively).
+-spec accept_ext(binary(), fun(), binary(), binary(),
+ [{binary(), binary()}], 0..1000,
+ [{binary(), binary()} | binary()]) -> any().
+accept_ext(Data, Fun, Type, SubType, Params, Quality, Acc) ->
+ whitespace(Data,
+ fun (<< $;, Rest/binary >>) ->
+ whitespace(Rest,
+ fun (Rest2) ->
+ accept_ext_attr(Rest2, Fun,
+ Type, SubType, Params, Quality, Acc)
+ end);
+ (Rest) ->
+ Fun(Rest, {{Type, SubType, lists:reverse(Params)},
+ Quality, lists:reverse(Acc)})
+ end).
+
+%% Parse one accept-extension: a token optionally followed by `=value'.
+%% A bare token (no `=') is stored as-is; empty tokens are an error.
+-spec accept_ext_attr(binary(), fun(), binary(), binary(),
+ [{binary(), binary()}], 0..1000,
+ [{binary(), binary()} | binary()]) -> any().
+accept_ext_attr(Data, Fun, Type, SubType, Params, Quality, Acc) ->
+ token_ci(Data,
+ fun (_Rest, <<>>) -> {error, badarg};
+ (<< $=, Rest/binary >>, Attr) ->
+ accept_ext_value(Rest, Fun, Type, SubType, Params,
+ Quality, Acc, Attr);
+ (Rest, Attr) ->
+ accept_ext(Rest, Fun, Type, SubType, Params,
+ Quality, [Attr|Acc])
+ end).
+
+%% Parse the value of an `attr=value' accept-extension (token or
+%% quoted string) and continue with the remaining extensions.
+-spec accept_ext_value(binary(), fun(), binary(), binary(),
+ [{binary(), binary()}], 0..1000,
+ [{binary(), binary()} | binary()], binary()) -> any().
+accept_ext_value(Data, Fun, Type, SubType, Params, Quality, Acc, Attr) ->
+ word(Data,
+ fun (Rest, Value) ->
+ accept_ext(Rest, Fun,
+ Type, SubType, Params, Quality, [{Attr, Value}|Acc])
+ end).
+
+%% @doc Parse a conneg header (Accept-Charset, Accept-Encoding),
+%% followed by an optional quality value.
+%% Produces {Token, Quality}; Quality defaults to 1000 when no
+%% `;q=' parameter is present (see maybe_qparam/2).
+-spec conneg(binary(), fun()) -> any().
+conneg(Data, Fun) ->
+ token_ci(Data,
+ fun (_Rest, <<>>) -> {error, badarg};
+ (Rest, Conneg) ->
+ maybe_qparam(Rest,
+ fun (Rest2, Quality) ->
+ Fun(Rest2, {Conneg, Quality})
+ end)
+ end).
+
+%% @doc Parse a language range, followed by an optional quality value.
+%% A bare `*' is represented by the atom '*'.
+-spec language_range(binary(), fun()) -> any().
+language_range(<< $*, Rest/binary >>, Fun) ->
+ language_range_ret(Rest, Fun, '*');
+language_range(Data, Fun) ->
+ language_tag(Data,
+ fun (Rest, LanguageTag) ->
+ language_range_ret(Rest, Fun, LanguageTag)
+ end).
+
+%% Attach the optional quality value to a parsed language range.
+-spec language_range_ret(binary(), fun(), '*' | {binary(), [binary()]}) -> any().
+language_range_ret(Data, Fun, LanguageTag) ->
+ maybe_qparam(Data,
+ fun (Rest, Quality) ->
+ Fun(Rest, {LanguageTag, Quality})
+ end).
+
+%% Parse a language tag: 1 to 8 alpha characters (lowercased by
+%% alpha/2), optionally followed by `-' and subtags.
+-spec language_tag(binary(), fun()) -> any().
+language_tag(Data, Fun) ->
+ alpha(Data,
+ fun (_Rest, Tag) when byte_size(Tag) =:= 0; byte_size(Tag) > 8 ->
+ {error, badarg};
+ (<< $-, Rest/binary >>, Tag) ->
+ language_subtag(Rest, Fun, Tag, []);
+ (Rest, Tag) ->
+ Fun(Rest, Tag)
+ end).
+
+%% Parse `-' separated subtags (each 1-8 alpha characters) and
+%% reassemble the complete tag binary once all subtags are validated.
+-spec language_subtag(binary(), fun(), binary(), [binary()]) -> any().
+language_subtag(Data, Fun, Tag, Acc) ->
+ alpha(Data,
+ fun (_Rest, SubTag) when byte_size(SubTag) =:= 0;
+ byte_size(SubTag) > 8 -> {error, badarg};
+ (<< $-, Rest/binary >>, SubTag) ->
+ language_subtag(Rest, Fun, Tag, [SubTag|Acc]);
+ (Rest, SubTag) ->
+ %% Rebuild the full tag now that we know it's correct
+ Sub = << << $-, S/binary >> || S <- lists:reverse([SubTag|Acc]) >>,
+ Fun(Rest, << Tag/binary, Sub/binary >>)
+ end).
+
+%% Parse an optional `;q=...' parameter; without one the quality
+%% defaults to 1000 (q=1.0 scaled by 1000).
+-spec maybe_qparam(binary(), fun()) -> any().
+maybe_qparam(Data, Fun) ->
+ whitespace(Data,
+ fun (<< $;, Rest/binary >>) ->
+ whitespace(Rest,
+ fun (Rest2) ->
+ qparam(Rest2, Fun)
+ end);
+ (Rest) ->
+ Fun(Rest, 1000)
+ end).
+
+%% @doc Parse a quality parameter string (for example q=0.500).
+%% NOTE(review): no catch-all clause — input not starting with `q='
+%% or `Q=' raises function_clause rather than returning
+%% {error, badarg}; confirm this is intended by callers.
+-spec qparam(binary(), fun()) -> any().
+qparam(<< Q, $=, Data/binary >>, Fun) when Q =:= $q; Q =:= $Q ->
+ qvalue(Data, Fun).
+
+%% @doc Parse either a list of entity tags or a "*".
+%% A bare `*' (plus optional whitespace) yields the atom '*'.
+-spec entity_tag_match(binary()) -> any().
+entity_tag_match(<< $*, Rest/binary >>) ->
+ whitespace(Rest,
+ fun (<<>>) -> '*';
+ (_Any) -> {error, badarg}
+ end);
+entity_tag_match(Data) ->
+ nonempty_list(Data, fun entity_tag/2).
+
+%% @doc Parse an entity-tag.
+%% A `W/' prefix marks a weak validator, otherwise strong.
+-spec entity_tag(binary(), fun()) -> any().
+entity_tag(<< "W/", Rest/binary >>, Fun) ->
+ opaque_tag(Rest, Fun, weak);
+entity_tag(Data, Fun) ->
+ opaque_tag(Data, Fun, strong).
+
+%% Parse the quoted opaque-tag part and pair it with its strength;
+%% an empty tag is an error.
+-spec opaque_tag(binary(), fun(), weak | strong) -> any().
+opaque_tag(Data, Fun, Strength) ->
+ quoted_string(Data,
+ fun (_Rest, <<>>) -> {error, badarg};
+ (Rest, OpaqueTag) -> Fun(Rest, {Strength, OpaqueTag})
+ end).
+
+%% @doc Parse an HTTP date (RFC1123, RFC850 or asctime date).
+%% @end
+%%
+%% While this may not be the most efficient date parsing we can do,
+%% it should work fine for our purposes because all HTTP dates should
+%% be sent as RFC1123 dates in HTTP/1.1.
+-spec http_date(binary()) -> any().
+http_date(Data) ->
+ http_date_try(Data, [fun rfc1123_date/1, fun rfc850_date/1,
+  fun asctime_date/1]).
+
+%% Try each date parser in order; return the first successful result,
+%% or {error, badarg} when every format fails.
+-spec http_date_try(binary(), [fun((binary()) -> any())]) -> any().
+http_date_try(_Data, []) ->
+ {error, badarg};
+http_date_try(Data, [Parse|Parsers]) ->
+ case Parse(Data) of
+  {error, badarg} -> http_date_try(Data, Parsers);
+  HTTPDate -> HTTPDate
+ end.
+
+%% @doc Parse an RFC1123 date.
+%% Format: "Wkd, DD Mon YYYY HH:MM:SS GMT". The weekday is validated
+%% by wkday/2 but otherwise discarded; the date is checked against the
+%% calendar in http_date_ret/2.
+-spec rfc1123_date(binary()) -> any().
+rfc1123_date(Data) ->
+ wkday(Data,
+ fun (<< ", ", Rest/binary >>, _WkDay) ->
+ date1(Rest,
+ fun (<< " ", Rest2/binary >>, Date) ->
+ time(Rest2,
+ fun (<< " GMT", Rest3/binary >>, Time) ->
+ http_date_ret(Rest3, {Date, Time});
+ (_Any, _Time) ->
+ {error, badarg}
+ end);
+ (_Any, _Date) ->
+ {error, badarg}
+ end);
+ (_Any, _WkDay) ->
+ {error, badarg}
+ end).
+
+%% @doc Parse an RFC850 date.
+%% Format: "Weekday, DD-Mon-YY HH:MM:SS GMT"; two-digit years are
+%% disambiguated in date2/2.
+-spec rfc850_date(binary()) -> any().
+%% From the RFC:
+%% HTTP/1.1 clients and caches SHOULD assume that an RFC-850 date
+%% which appears to be more than 50 years in the future is in fact
+%% in the past (this helps solve the "year 2000" problem).
+rfc850_date(Data) ->
+ weekday(Data,
+ fun (<< ", ", Rest/binary >>, _WeekDay) ->
+ date2(Rest,
+ fun (<< " ", Rest2/binary >>, Date) ->
+ time(Rest2,
+ fun (<< " GMT", Rest3/binary >>, Time) ->
+ http_date_ret(Rest3, {Date, Time});
+ (_Any, _Time) ->
+ {error, badarg}
+ end);
+ (_Any, _Date) ->
+ {error, badarg}
+ end);
+ (_Any, _WeekDay) ->
+ {error, badarg}
+ end).
+
+%% @doc Parse an asctime date.
+%% Format: "Wkd Mon DD HH:MM:SS YYYY" — the year comes last and is
+%% parsed by asctime_year/3.
+-spec asctime_date(binary()) -> any().
+asctime_date(Data) ->
+ wkday(Data,
+ fun (<< " ", Rest/binary >>, _WkDay) ->
+ date3(Rest,
+ fun (<< " ", Rest2/binary >>, PartialDate) ->
+ time(Rest2,
+ fun (<< " ", Rest3/binary >>, Time) ->
+ asctime_year(Rest3,
+ PartialDate, Time);
+ (_Any, _Time) ->
+ {error, badarg}
+ end);
+ (_Any, _PartialDate) ->
+ {error, badarg}
+ end);
+ (_Any, _WkDay) ->
+ %% Was {error, badarg1} — a typo. All date parsers must fail
+ %% with exactly {error, badarg} so http_date/1 falls through
+ %% to the next format instead of returning a bogus value.
+ {error, badarg}
+ end).
+
+%% Parse the trailing 4-digit year of an asctime date and combine it
+%% with the previously parsed {Month, Day} and Time. Non-digit input
+%% raises function_clause (no catch-all clause).
+-spec asctime_year(binary(), tuple(), tuple()) -> any().
+asctime_year(<< Y1, Y2, Y3, Y4, Rest/binary >>, {Month, Day}, Time)
+ when Y1 >= $0, Y1 =< $9, Y2 >= $0, Y2 =< $9,
+ Y3 >= $0, Y3 =< $9, Y4 >= $0, Y4 =< $9 ->
+ Year = (Y1 - $0) * 1000 + (Y2 - $0) * 100 + (Y3 - $0) * 10 + (Y4 - $0),
+ http_date_ret(Rest, {{Year, Month, Day}, Time}).
+
+%% Final step for every date format: only trailing whitespace may
+%% remain, and the date must be a real calendar date.
+-spec http_date_ret(binary(), tuple()) -> any().
+http_date_ret(Data, DateTime = {Date, _Time}) ->
+ whitespace(Data,
+ fun (<<>>) ->
+ case calendar:valid_date(Date) of
+ true -> DateTime;
+ false -> {error, badarg}
+ end;
+ (_Any) ->
+ {error, badarg}
+ end).
+
+%% We never use it, pretty much just checks the wkday is right.
+%% Matches the abbreviated three-letter day names of RFC1123/asctime.
+-spec wkday(binary(), fun()) -> any().
+wkday(<< WkDay:3/binary, Rest/binary >>, Fun)
+ when WkDay =:= <<"Mon">>; WkDay =:= <<"Tue">>; WkDay =:= <<"Wed">>;
+ WkDay =:= <<"Thu">>; WkDay =:= <<"Fri">>; WkDay =:= <<"Sat">>;
+ WkDay =:= <<"Sun">> ->
+ Fun(Rest, WkDay);
+wkday(_Any, _Fun) ->
+ {error, badarg}.
+
+%% We never use it, pretty much just checks the weekday is right.
+%% Matches the full day names used by RFC850 dates.
+-spec weekday(binary(), fun()) -> any().
+weekday(<< "Monday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Monday">>);
+weekday(<< "Tuesday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Tuesday">>);
+weekday(<< "Wednesday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Wednesday">>);
+weekday(<< "Thursday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Thursday">>);
+weekday(<< "Friday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Friday">>);
+weekday(<< "Saturday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Saturday">>);
+weekday(<< "Sunday", Rest/binary >>, Fun) ->
+ Fun(Rest, <<"Sunday">>);
+weekday(_Any, _Fun) ->
+ {error, badarg}.
+
+%% RFC1123 date part: "DD Mon YYYY". Produces {Year, Month, Day}.
+-spec date1(binary(), fun()) -> any().
+date1(<< D1, D2, " ", M:3/binary, " ", Y1, Y2, Y3, Y4, Rest/binary >>, Fun)
+ when D1 >= $0, D1 =< $9, D2 >= $0, D2 =< $9,
+ Y1 >= $0, Y1 =< $9, Y2 >= $0, Y2 =< $9,
+ Y3 >= $0, Y3 =< $9, Y4 >= $0, Y4 =< $9 ->
+ case month(M) of
+ {error, badarg} ->
+ {error, badarg};
+ Month ->
+ Fun(Rest, {
+ (Y1 - $0) * 1000 + (Y2 - $0) * 100 + (Y3 - $0) * 10 + (Y4 - $0),
+ Month,
+ (D1 - $0) * 10 + (D2 - $0)
+ })
+ end;
+date1(_Data, _Fun) ->
+ {error, badarg}.
+
+%% RFC850 date part: "DD-Mon-YY". Two-digit years above 50 are mapped
+%% to 19xx, the rest to 20xx (the RFC's 50-year window heuristic).
+-spec date2(binary(), fun()) -> any().
+date2(<< D1, D2, "-", M:3/binary, "-", Y1, Y2, Rest/binary >>, Fun)
+ when D1 >= $0, D1 =< $9, D2 >= $0, D2 =< $9,
+ Y1 >= $0, Y1 =< $9, Y2 >= $0, Y2 =< $9 ->
+ case month(M) of
+ {error, badarg} ->
+ {error, badarg};
+ Month ->
+ Year = (Y1 - $0) * 10 + (Y2 - $0),
+ Year2 = case Year > 50 of
+ true -> Year + 1900;
+ false -> Year + 2000
+ end,
+ Fun(Rest, {
+ Year2,
+ Month,
+ (D1 - $0) * 10 + (D2 - $0)
+ })
+ end;
+date2(_Data, _Fun) ->
+ {error, badarg}.
+
+%% asctime date part: "Mon DD" where a single-digit day is padded with
+%% a space. Produces {Month, Day}; the year is parsed separately.
+-spec date3(binary(), fun()) -> any().
+date3(<< M:3/binary, " ", D1, D2, Rest/binary >>, Fun)
+ when (D1 >= $0 andalso D1 =< $3) orelse D1 =:= $\s,
+ D2 >= $0, D2 =< $9 ->
+ case month(M) of
+ {error, badarg} ->
+ {error, badarg};
+ Month ->
+ Day = case D1 of
+ $\s -> D2 - $0;
+ D1 -> (D1 - $0) * 10 + (D2 - $0)
+ end,
+ Fun(Rest, {Month, Day})
+ end;
+date3(_Data, _Fun) ->
+ {error, badarg}.
+
+%% Map a case-sensitive three-letter month abbreviation to 1..12.
+-spec month(<< _:24 >>) -> 1..12 | {error, badarg}.
+month(<<"Jan">>) -> 1;
+month(<<"Feb">>) -> 2;
+month(<<"Mar">>) -> 3;
+month(<<"Apr">>) -> 4;
+month(<<"May">>) -> 5;
+month(<<"Jun">>) -> 6;
+month(<<"Jul">>) -> 7;
+month(<<"Aug">>) -> 8;
+month(<<"Sep">>) -> 9;
+month(<<"Oct">>) -> 10;
+month(<<"Nov">>) -> 11;
+month(<<"Dec">>) -> 12;
+month(_Any) -> {error, badarg}.
+
+%% Parse "HH:MM:SS" into {Hour, Minute, Second}; hours above 23 are
+%% rejected after the digit check.
+-spec time(binary(), fun()) -> any().
+time(<< H1, H2, ":", M1, M2, ":", S1, S2, Rest/binary >>, Fun)
+ when H1 >= $0, H1 =< $2, H2 >= $0, H2 =< $9,
+ M1 >= $0, M1 =< $5, M2 >= $0, M2 =< $9,
+ S1 >= $0, S1 =< $5, S2 >= $0, S2 =< $9 ->
+ Hour = (H1 - $0) * 10 + (H2 - $0),
+ case Hour < 24 of
+ true ->
+ Time = {
+ Hour,
+ (M1 - $0) * 10 + (M2 - $0),
+ (S1 - $0) * 10 + (S2 - $0)
+ },
+ Fun(Rest, Time);
+ false ->
+ {error, badarg}
+ end;
+%% Malformed time: fail with {error, badarg} like date1/2/3 do,
+%% instead of crashing with function_clause — a crash here would
+%% abort http_date/1 before it can try the next date format.
+time(_Data, _Fun) ->
+ {error, badarg}.
+
+%% @doc Skip whitespace.
+%% Consumes spaces and horizontal tabs, then hands the rest to Fun.
+-spec whitespace(binary(), fun()) -> any().
+whitespace(<< C, Rest/binary >>, Fun)
+ when C =:= $\s; C =:= $\t ->
+ whitespace(Rest, Fun);
+whitespace(Data, Fun) ->
+ Fun(Data).
+
+%% @doc Parse a list of digits as a non negative integer.
+%% Trailing whitespace is allowed; anything else after the digits
+%% is an error.
+-spec digits(binary()) -> non_neg_integer() | {error, badarg}.
+digits(Data) ->
+ digits(Data,
+ fun (Rest, I) ->
+ whitespace(Rest,
+ fun (<<>>) ->
+ I;
+ (_Rest2) ->
+ {error, badarg}
+ end)
+ end).
+
+%% At least one leading digit is required.
+-spec digits(binary(), fun()) -> any().
+digits(<< C, Rest/binary >>, Fun)
+ when C >= $0, C =< $9 ->
+ digits(Rest, Fun, C - $0);
+digits(_Data, _Fun) ->
+ {error, badarg}.
+
+%% Accumulate remaining digits into Acc; stop at the first non-digit.
+-spec digits(binary(), fun(), non_neg_integer()) -> any().
+digits(<< C, Rest/binary >>, Fun, Acc)
+ when C >= $0, C =< $9 ->
+ digits(Rest, Fun, Acc * 10 + (C - $0));
+digits(Data, Fun, Acc) ->
+ Fun(Data, Acc).
+
+%% @doc Parse a list of case-insensitive alpha characters.
+%%
+%% Changes all characters to lowercase.
+-spec alpha(binary(), fun()) -> any().
+alpha(Data, Fun) ->
+ alpha(Data, Fun, <<>>).
+
+%% Accumulate [a-zA-Z] lowercased into Acc; the first non-alpha
+%% character (or end of input) ends the run. An empty run is valid:
+%% callers check byte_size of the result themselves.
+-spec alpha(binary(), fun(), binary()) -> any().
+alpha(<<>>, Fun, Acc) ->
+ Fun(<<>>, Acc);
+alpha(<< C, Rest/binary >>, Fun, Acc)
+ when C >= $a andalso C =< $z;
+ C >= $A andalso C =< $Z ->
+ C2 = cowboy_bstr:char_to_lower(C),
+ alpha(Rest, Fun, << Acc/binary, C2 >>);
+alpha(Data, Fun, Acc) ->
+ Fun(Data, Acc).
+
+%% @doc Parse either a token or a quoted string.
+%% A leading double quote selects quoted-string parsing; otherwise a
+%% non-empty token is required.
+-spec word(binary(), fun()) -> any().
+word(Data = << $", _/binary >>, Fun) ->
+ quoted_string(Data, Fun);
+word(Data, Fun) ->
+ token(Data,
+ fun (_Rest, <<>>) -> {error, badarg};
+ (Rest, Token) -> Fun(Rest, Token)
+ end).
+
+%% @doc Parse a case-insensitive token.
+%%
+%% Changes all characters to lowercase.
+-spec token_ci(binary(), fun()) -> any().
+token_ci(Data, Fun) ->
+ token(Data, Fun, ci, <<>>).
+
+%% @doc Parse a token.
+-spec token(binary(), fun()) -> any().
+token(Data, Fun) ->
+ token(Data, Fun, cs, <<>>).
+
+%% Accumulate token characters until a separator, control character or
+%% end of input; the separator set below is the HTTP `separators' list.
+%% An empty token is passed through — callers reject it if needed.
+-spec token(binary(), fun(), ci | cs, binary()) -> any().
+token(<<>>, Fun, _Case, Acc) ->
+ Fun(<<>>, Acc);
+token(Data = << C, _Rest/binary >>, Fun, _Case, Acc)
+ when C =:= $(; C =:= $); C =:= $<; C =:= $>; C =:= $@;
+ C =:= $,; C =:= $;; C =:= $:; C =:= $\\; C =:= $";
+ C =:= $/; C =:= $[; C =:= $]; C =:= $?; C =:= $=;
+ C =:= ${; C =:= $}; C =:= $\s; C =:= $\t;
+ C < 32; C =:= 127 ->
+ Fun(Data, Acc);
+token(<< C, Rest/binary >>, Fun, Case = ci, Acc) ->
+ C2 = cowboy_bstr:char_to_lower(C),
+ token(Rest, Fun, Case, << Acc/binary, C2 >>);
+token(<< C, Rest/binary >>, Fun, Case, Acc) ->
+ token(Rest, Fun, Case, << Acc/binary, C >>).
+
+%% @doc Parse a quoted string.
+%% Input must start with a double quote; otherwise function_clause is
+%% raised (no catch-all clause here).
+-spec quoted_string(binary(), fun()) -> any().
+quoted_string(<< $", Rest/binary >>, Fun) ->
+ quoted_string(Rest, Fun, <<>>).
+
+%% Accumulate until the closing quote; `\C' unescapes to C. Reaching
+%% end of input before the closing quote is an error.
+-spec quoted_string(binary(), fun(), binary()) -> any().
+quoted_string(<<>>, _Fun, _Acc) ->
+ {error, badarg};
+quoted_string(<< $", Rest/binary >>, Fun, Acc) ->
+ Fun(Rest, Acc);
+quoted_string(<< $\\, C, Rest/binary >>, Fun, Acc) ->
+ quoted_string(Rest, Fun, << Acc/binary, C >>);
+quoted_string(<< C, Rest/binary >>, Fun, Acc) ->
+ quoted_string(Rest, Fun, << Acc/binary, C >>).
+
+%% @doc Parse a quality value.
+%% Qualities are returned scaled by 1000: "0.5" -> 500, "1" -> 1000.
+%% Only 0.xxx and the exact spellings of 1 (1, 1.0, 1.00, 1.000) are
+%% accepted.
+-spec qvalue(binary(), fun()) -> any().
+qvalue(<< $0, $., Rest/binary >>, Fun) ->
+ qvalue(Rest, Fun, 0, 100);
+qvalue(<< $0, Rest/binary >>, Fun) ->
+ Fun(Rest, 0);
+qvalue(<< $1, $., $0, $0, $0, Rest/binary >>, Fun) ->
+ Fun(Rest, 1000);
+qvalue(<< $1, $., $0, $0, Rest/binary >>, Fun) ->
+ Fun(Rest, 1000);
+qvalue(<< $1, $., $0, Rest/binary >>, Fun) ->
+ Fun(Rest, 1000);
+qvalue(<< $1, Rest/binary >>, Fun) ->
+ Fun(Rest, 1000);
+qvalue(_Data, _Fun) ->
+ {error, badarg}.
+
+%% Accumulate up to three decimal digits of a qvalue. M is the weight
+%% of the next digit (100, then 10, then 1); after the third digit
+%% M div 10 reaches 0 and the first clause stops parsing. The spec
+%% previously said `1 | 10 | 100', contradicting the clause that
+%% matches 0 — Dialyzer would consider that clause unreachable.
+-spec qvalue(binary(), fun(), integer(), 0 | 1 | 10 | 100) -> any().
+qvalue(Data, Fun, Q, 0) ->
+ Fun(Data, Q);
+qvalue(<< C, Rest/binary >>, Fun, Q, M)
+ when C >= $0, C =< $9 ->
+ qvalue(Rest, Fun, Q + (C - $0) * M, M div 10);
+qvalue(Data, Fun, Q, _M) ->
+ Fun(Data, Q).
+
+
+%% Interpretation.
+
+%% @doc Walk through a tokens list and return whether
+%% the connection is keepalive or closed.
+%%
+%% The connection token is expected to be lower-case.
+%% The first recognized token wins; an exhausted list defaults
+%% to keepalive.
+-spec connection_to_atom([binary()]) -> keepalive | close.
+connection_to_atom([]) ->
+ keepalive;
+connection_to_atom([Token|Tail]) ->
+ case Token of
+  <<"keep-alive">> -> keepalive;
+  <<"close">> -> close;
+  _Other -> connection_to_atom(Tail)
+ end.
+
+%% @doc Decode a URL encoded binary.
+%% @equiv urldecode(Bin, crash)
+-spec urldecode(binary()) -> binary().
+urldecode(Bin) when is_binary(Bin) ->
+ urldecode(Bin, <<>>, crash).
+
+%% @doc Decode a URL encoded binary.
+%% The second argument specifies how to handle percent characters that are not
+%% followed by two valid hex characters. Use `skip' to ignore such errors,
+%% if `crash' is used the function will fail with the reason `badarg'.
+-spec urldecode(binary(), crash | skip) -> binary().
+urldecode(Bin, OnError) when is_binary(Bin) ->
+ urldecode(Bin, <<>>, OnError).
+
+%% Worker: Acc is the decoded output so far. With `skip', an invalid
+%% `%' sequence emits the literal `%' and decoding continues from the
+%% following characters; `+' always decodes to a space.
+-spec urldecode(binary(), binary(), crash | skip) -> binary().
+urldecode(<<$%, H, L, Rest/binary>>, Acc, OnError) ->
+ G = unhex(H),
+ M = unhex(L),
+ if G =:= error; M =:= error ->
+ case OnError of skip -> ok; crash -> erlang:error(badarg) end,
+ urldecode(<<H, L, Rest/binary>>, <<Acc/binary, $%>>, OnError);
+ true ->
+ urldecode(Rest, <<Acc/binary, (G bsl 4 bor M)>>, OnError)
+ end;
+urldecode(<<$%, Rest/binary>>, Acc, OnError) ->
+ case OnError of skip -> ok; crash -> erlang:error(badarg) end,
+ urldecode(Rest, <<Acc/binary, $%>>, OnError);
+urldecode(<<$+, Rest/binary>>, Acc, OnError) ->
+ urldecode(Rest, <<Acc/binary, $ >>, OnError);
+urldecode(<<C, Rest/binary>>, Acc, OnError) ->
+ urldecode(Rest, <<Acc/binary, C>>, OnError);
+urldecode(<<>>, Acc, _OnError) ->
+ Acc.
+
+%% Map one hex character (0-9, A-F, a-f) to its value, or `error'.
+-spec unhex(byte()) -> byte() | error.
+unhex(C) when C >= $0, C =< $9 -> C - $0;
+unhex(C) when C >= $A, C =< $F -> C - $A + 10;
+unhex(C) when C >= $a, C =< $f -> C - $a + 10;
+unhex(_) -> error.
+
+
+%% @doc URL encode a string binary.
+%% @equiv urlencode(Bin, [])
+-spec urlencode(binary()) -> binary().
+urlencode(Bin) ->
+ urlencode(Bin, []).
+
+%% @doc URL encode a string binary.
+%% The `noplus' option disables the default behaviour of quoting space
+%% characters, `\s', as `+'. The `upper' option overrides the default behaviour
+%% of writing hex numbers using lowercase letters to using uppercase letters
+%% instead.
+-spec urlencode(binary(), [noplus|upper]) -> binary().
+urlencode(Bin, Opts) ->
+ Plus = not proplists:get_value(noplus, Opts, false),
+ Upper = proplists:get_value(upper, Opts, false),
+ urlencode(Bin, <<>>, Plus, Upper).
+
+%% Worker: alphanumerics and ".-~_" pass through unchanged; a space
+%% becomes `+' when Plus is true; everything else is percent-encoded
+%% with upper- or lowercase hex depending on Upper.
+urlencode(<<C, Rest/binary>>, Acc, P=Plus, U=Upper) ->
+ if C >= $0, C =< $9 -> urlencode(Rest, <<Acc/binary, C>>, P, U);
+ C >= $A, C =< $Z -> urlencode(Rest, <<Acc/binary, C>>, P, U);
+ C >= $a, C =< $z -> urlencode(Rest, <<Acc/binary, C>>, P, U);
+ C =:= $.; C =:= $-; C =:= $~; C =:= $_ ->
+ urlencode(Rest, <<Acc/binary, C>>, P, U);
+ C =:= $ , Plus ->
+ urlencode(Rest, <<Acc/binary, $+>>, P, U);
+ true ->
+ H = C band 16#F0 bsr 4, L = C band 16#0F,
+ H1 = if Upper -> tohexu(H); true -> tohexl(H) end,
+ L1 = if Upper -> tohexu(L); true -> tohexl(L) end,
+ urlencode(Rest, <<Acc/binary, $%, H1, L1>>, P, U)
+ end;
+urlencode(<<>>, Acc, _Plus, _Upper) ->
+ Acc.
+
+%% Nibble (0-15) to uppercase hex character.
+-spec tohexu(byte()) -> byte().
+tohexu(C) when C < 10 -> $0 + C;
+tohexu(C) when C < 17 -> $A + C - 10.
+
+%% Nibble (0-15) to lowercase hex character.
+-spec tohexl(byte()) -> byte().
+tohexl(C) when C < 10 -> $0 + C;
+tohexl(C) when C < 17 -> $a + C - 10.
+
+
+%% Tests.
+
+-ifdef(TEST).
+
+%% EUnit generator: nonempty_list/2 with the conneg parser; asserts
+%% via badmatch against the expected R.
+nonempty_charset_list_test_() ->
+ %% {Value, Result}
+ Tests = [
+ {<<>>, {error, badarg}},
+ {<<"iso-8859-5, unicode-1-1;q=0.8">>, [
+ {<<"iso-8859-5">>, 1000},
+ {<<"unicode-1-1">>, 800}
+ ]}
+ ],
+ [{V, fun() -> R = nonempty_list(V, fun conneg/2) end} || {V, R} <- Tests].
+
+%% EUnit generator: nonempty_list/2 with the language_range parser;
+%% tags are lowercased, missing q defaults to 1000.
+nonempty_language_range_list_test_() ->
+ %% {Value, Result}
+ Tests = [
+ {<<"da, en-gb;q=0.8, en;q=0.7">>, [
+ {<<"da">>, 1000},
+ {<<"en-gb">>, 800},
+ {<<"en">>, 700}
+ ]},
+ {<<"en, en-US, en-cockney, i-cherokee, x-pig-latin">>, [
+ {<<"en">>, 1000},
+ {<<"en-us">>, 1000},
+ {<<"en-cockney">>, 1000},
+ {<<"i-cherokee">>, 1000},
+ {<<"x-pig-latin">>, 1000}
+ ]}
+ ],
+ [{V, fun() -> R = nonempty_list(V, fun language_range/2) end}
+ || {V, R} <- Tests].
+
+%% EUnit generator: nonempty_list/2 with the token parser, including
+%% the RFC's null-element cases.
+nonempty_token_list_test_() ->
+ %% {Value, Result}
+ Tests = [
+ {<<>>, {error, badarg}},
+ {<<" ">>, {error, badarg}},
+ {<<" , ">>, {error, badarg}},
+ {<<",,,">>, {error, badarg}},
+ {<<"a b">>, {error, badarg}},
+ {<<"a , , , ">>, [<<"a">>]},
+ {<<" , , , a">>, [<<"a">>]},
+ {<<"a, , b">>, [<<"a">>, <<"b">>]},
+ {<<"close">>, [<<"close">>]},
+ {<<"keep-alive, upgrade">>, [<<"keep-alive">>, <<"upgrade">>]}
+ ],
+ [{V, fun() -> R = nonempty_list(V, fun token/2) end} || {V, R} <- Tests].
+
+%% EUnit generator: list/2 with the media_range parser, covering
+%% qualities, parameters and accept extensions.
+media_range_list_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {<<"audio/*; q=0.2, audio/basic">>, [
+ {{<<"audio">>, <<"*">>, []}, 200, []},
+ {{<<"audio">>, <<"basic">>, []}, 1000, []}
+ ]},
+ {<<"text/plain; q=0.5, text/html, "
+ "text/x-dvi; q=0.8, text/x-c">>, [
+ {{<<"text">>, <<"plain">>, []}, 500, []},
+ {{<<"text">>, <<"html">>, []}, 1000, []},
+ {{<<"text">>, <<"x-dvi">>, []}, 800, []},
+ {{<<"text">>, <<"x-c">>, []}, 1000, []}
+ ]},
+ {<<"text/*, text/html, text/html;level=1, */*">>, [
+ {{<<"text">>, <<"*">>, []}, 1000, []},
+ {{<<"text">>, <<"html">>, []}, 1000, []},
+ {{<<"text">>, <<"html">>, [{<<"level">>, <<"1">>}]}, 1000, []},
+ {{<<"*">>, <<"*">>, []}, 1000, []}
+ ]},
+ {<<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, "
+ "text/html;level=2;q=0.4, */*;q=0.5">>, [
+ {{<<"text">>, <<"*">>, []}, 300, []},
+ {{<<"text">>, <<"html">>, []}, 700, []},
+ {{<<"text">>, <<"html">>, [{<<"level">>, <<"1">>}]}, 1000, []},
+ {{<<"text">>, <<"html">>, [{<<"level">>, <<"2">>}]}, 400, []},
+ {{<<"*">>, <<"*">>, []}, 500, []}
+ ]},
+ {<<"text/html;level=1;quoted=\"hi hi hi\";"
+ "q=0.123;standalone;complex=gits, text/plain">>, [
+ {{<<"text">>, <<"html">>,
+ [{<<"level">>, <<"1">>}, {<<"quoted">>, <<"hi hi hi">>}]}, 123,
+ [<<"standalone">>, {<<"complex">>, <<"gits">>}]},
+ {{<<"text">>, <<"plain">>, []}, 1000, []}
+ ]}
+ ],
+ [{V, fun() -> R = list(V, fun media_range/2) end} || {V, R} <- Tests].
+
+%% EUnit generator: entity_tag_match/1 with strong/weak tags and '*'.
+entity_tag_match_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {<<"\"xyzzy\"">>, [{strong, <<"xyzzy">>}]},
+ {<<"\"xyzzy\", W/\"r2d2xxxx\", \"c3piozzzz\"">>,
+ [{strong, <<"xyzzy">>},
+ {weak, <<"r2d2xxxx">>},
+ {strong, <<"c3piozzzz">>}]},
+ {<<"*">>, '*'}
+ ],
+ [{V, fun() -> R = entity_tag_match(V) end} || {V, R} <- Tests].
+
+http_date_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {<<"Sun, 06 Nov 1994 08:49:37 GMT">>, {{1994, 11, 6}, {8, 49, 37}}},
+ {<<"Sunday, 06-Nov-94 08:49:37 GMT">>, {{1994, 11, 6}, {8, 49, 37}}},
+ {<<"Sun Nov 6 08:49:37 1994">>, {{1994, 11, 6}, {8, 49, 37}}}
+ ],
+ [{V, fun() -> R = http_date(V) end} || {V, R} <- Tests].
+
+rfc1123_date_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {<<"Sun, 06 Nov 1994 08:49:37 GMT">>, {{1994, 11, 6}, {8, 49, 37}}}
+ ],
+ [{V, fun() -> R = rfc1123_date(V) end} || {V, R} <- Tests].
+
+rfc850_date_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {<<"Sunday, 06-Nov-94 08:49:37 GMT">>, {{1994, 11, 6}, {8, 49, 37}}}
+ ],
+ [{V, fun() -> R = rfc850_date(V) end} || {V, R} <- Tests].
+
+asctime_date_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {<<"Sun Nov 6 08:49:37 1994">>, {{1994, 11, 6}, {8, 49, 37}}}
+ ],
+ [{V, fun() -> R = asctime_date(V) end} || {V, R} <- Tests].
+
+%% EUnit generator: connection_to_atom/1 maps a parsed Connection
+%% token list to close | keepalive; unrelated tokens such as
+%% "upgrade" must not affect the result.
+connection_to_atom_test_() ->
+ %% {Tokens, Result}
+ Tests = [
+ {[<<"close">>], close},
+ {[<<"keep-alive">>], keepalive},
+ {[<<"keep-alive">>, <<"upgrade">>], keepalive}
+ ],
+ %% Token lists are not valid test descriptions as-is, so format
+ %% them into a flat string for the EUnit test name.
+ [{lists:flatten(io_lib:format("~p", [T])),
+ fun() -> R = connection_to_atom(T) end} || {T, R} <- Tests].
+
+%% EUnit generator: content_type/1 splits type/subtype and parses
+%% parameters; parameter names are lowercased (Boundary -> boundary)
+%% while parameter values, quoted or not, are kept intact.
+content_type_test_() ->
+ %% {ContentType, Result}
+ Tests = [
+ {<<"text/plain; charset=iso-8859-4">>,
+ {<<"text">>, <<"plain">>, [{<<"charset">>, <<"iso-8859-4">>}]}},
+ {<<"multipart/form-data \t;Boundary=\"MultipartIsUgly\"">>,
+ {<<"multipart">>, <<"form-data">>, [
+ {<<"boundary">>, <<"MultipartIsUgly">>}
+ ]}},
+ {<<"foo/bar; one=FirstParam; two=SecondParam">>,
+ {<<"foo">>, <<"bar">>, [
+ {<<"one">>, <<"FirstParam">>},
+ {<<"two">>, <<"SecondParam">>}
+ ]}}
+ ],
+ [{V, fun () -> R = content_type(V) end} || {V, R} <- Tests].
+
+%% EUnit generator: digits/1 parses a non-negative integer from a
+%% binary, tolerating trailing whitespace.
+digits_test_() ->
+ %% {Digits, Result}
+ Tests = [
+ {<<"42 ">>, 42},
+ {<<"69\t">>, 69},
+ {<<"1337">>, 1337}
+ ],
+ [{V, fun() -> R = digits(V) end} || {V, R} <- Tests].
+
+%% EUnit: urldecode/2 decodes %XX escapes (hex case-insensitive, see
+%% the %fF case) and '+' as space. The second argument selects the
+%% failure mode for malformed input: 'skip' leaves the bad sequence
+%% in place, 'crash' raises badarg.
+urldecode_test_() ->
+ U = fun urldecode/2,
+ [?_assertEqual(<<" ">>, U(<<"%20">>, crash)),
+ ?_assertEqual(<<" ">>, U(<<"+">>, crash)),
+ ?_assertEqual(<<0>>, U(<<"%00">>, crash)),
+ ?_assertEqual(<<255>>, U(<<"%fF">>, crash)),
+ ?_assertEqual(<<"123">>, U(<<"123">>, crash)),
+ ?_assertEqual(<<"%i5">>, U(<<"%i5">>, skip)),
+ ?_assertEqual(<<"%5">>, U(<<"%5">>, skip)),
+ ?_assertError(badarg, U(<<"%i5">>, crash)),
+ ?_assertError(badarg, U(<<"%5">>, crash))
+ ].
+
+%% EUnit: urlencode/2 percent-encodes non-unreserved bytes; options
+%% control hex case ('upper') and whether a space becomes '+' or %20
+%% ('noplus'). urlencode/1 defaults to lowercase hex and '+'.
+urlencode_test_() ->
+ U = fun urlencode/2,
+ [?_assertEqual(<<"%ff%00">>, U(<<255,0>>, [])),
+ ?_assertEqual(<<"%FF%00">>, U(<<255,0>>, [upper])),
+ ?_assertEqual(<<"+">>, U(<<" ">>, [])),
+ ?_assertEqual(<<"%20">>, U(<<" ">>, [noplus])),
+ ?_assertEqual(<<"aBc">>, U(<<"aBc">>, [])),
+ ?_assertEqual(<<".-~_">>, U(<<".-~_">>, [])),
+ ?_assertEqual(<<"%ff+">>, urlencode(<<255, " ">>))
+ ].
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Handler for HTTP requests.
+%%
+%% HTTP handlers must implement three callbacks: <em>init/3</em>,
+%% <em>handle/2</em> and <em>terminate/2</em>, called one after another in
+%% that order.
+%%
+%% <em>init/3</em> is meant for initialization. It receives information about
+%% the transport and protocol used, along with the handler options from the
+%% dispatch list, and allows you to upgrade the protocol if needed. You can
+%% define a request-wide state here.
+%%
+%% <em>handle/2</em> is meant for handling the request. It receives the
+%% request and the state previously defined.
+%%
+%% <em>terminate/2</em> is meant for cleaning up. It also receives the
+%% request and the state previously defined.
+%%
+%% You do not have to read the request body or even send a reply if you do
+%% not need to. Cowboy will properly handle these cases and clean up
+%% afterwards. If in doubt, it will simply close the connection.
+%%
+%% Note that when upgrading the connection to WebSocket you do not need to
+%% define the <em>handle/2</em> and <em>terminate/2</em> callbacks.
+-module(cowboy_http_handler).
+
+-export([behaviour_info/1]).
+
+%% @private
+%% @doc Old-style behaviour definition (predates the -callback
+%% attribute): lists the callbacks an HTTP handler module must
+%% export, namely init/3, handle/2 and terminate/2.
+-spec behaviour_info(_)
+ -> undefined | [{handle, 2} | {init, 3} | {terminate, 2}, ...].
+behaviour_info(callbacks) ->
+ [{init, 3}, {handle, 2}, {terminate, 2}];
+behaviour_info(_Other) ->
+ undefined.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc HTTP protocol handler.
+%%
+%% The available options are:
+%% <dl>
+%% <dt>dispatch</dt><dd>The dispatch list for this protocol.</dd>
+%% <dt>max_empty_lines</dt><dd>Max number of empty lines before a request.
+%% Defaults to 5.</dd>
+%% <dt>timeout</dt><dd>Time in milliseconds before an idle
+%% connection is closed. Defaults to 5000 milliseconds.</dd>
+%% <dt>urldecode</dt><dd>Function and options argument to use when decoding
+%% URL encoded strings. Defaults to `{fun cowboy_http:urldecode/2, crash}'.
+%% </dd>
+%% </dl>
+%%
+%% Note that there is no need to monitor these processes when using Cowboy as
+%% an application as it already supervises them under the listener supervisor.
+%%
+%% @see cowboy_dispatcher
+%% @see cowboy_http_handler
+-module(cowboy_http_protocol).
+-behaviour(cowboy_protocol).
+
+-export([start_link/4]). %% API.
+-export([init/4, parse_request/1, handler_loop/3]). %% FSM.
+
+-include("include/http.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-record(state, {
+ listener :: pid(),
+ socket :: inet:socket(),
+ transport :: module(),
+ dispatch :: cowboy_dispatcher:dispatch_rules(),
+ handler :: {module(), any()},
+ urldecode :: {fun((binary(), T) -> binary()), T},
+ req_empty_lines = 0 :: integer(),
+ max_empty_lines :: integer(),
+ req_keepalive = 1 :: integer(),
+ max_keepalive :: integer(),
+ max_line_length :: integer(),
+ timeout :: timeout(),
+ buffer = <<>> :: binary(),
+ hibernate = false,
+ loop_timeout = infinity :: timeout(),
+ loop_timeout_ref
+}).
+
+%% API.
+
+%% @doc Start an HTTP protocol process.
+%% The process runs init/4 and is linked to the caller.
+-spec start_link(pid(), inet:socket(), module(), any()) -> {ok, pid()}.
+start_link(ListenerPid, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [ListenerPid, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+%% FSM.
+
+%% @private
+%% @doc Read the protocol options (falling back to the documented
+%% defaults), acknowledge the accept to the listener, then start
+%% waiting for the first request line.
+-spec init(pid(), inet:socket(), module(), any()) -> ok | none().
+init(ListenerPid, Socket, Transport, Opts) ->
+ Dispatch = proplists:get_value(dispatch, Opts, []),
+ MaxEmptyLines = proplists:get_value(max_empty_lines, Opts, 5),
+ MaxKeepalive = proplists:get_value(max_keepalive, Opts, infinity),
+ MaxLineLength = proplists:get_value(max_line_length, Opts, 4096),
+ Timeout = proplists:get_value(timeout, Opts, 5000),
+ URLDecDefault = {fun cowboy_http:urldecode/2, crash},
+ URLDec = proplists:get_value(urldecode, Opts, URLDecDefault),
+ %% Blocks until the listener hands over the socket.
+ ok = cowboy:accept_ack(ListenerPid),
+ wait_request(#state{listener=ListenerPid, socket=Socket, transport=Transport,
+ dispatch=Dispatch, max_empty_lines=MaxEmptyLines,
+ max_keepalive=MaxKeepalive, max_line_length=MaxLineLength,
+ timeout=Timeout, urldecode=URLDec}).
+
+%% @private
+-spec parse_request(#state{}) -> ok | none().
+%% We limit the length of the Request-line to MaxLength to avoid endlessly
+%% reading from the socket and eventually crashing.
+parse_request(State=#state{buffer=Buffer, max_line_length=MaxLength}) ->
+ %% decode_packet(http_bin, ...) returns {more, _} until a complete
+ %% request line is buffered; an over-long line is rejected with 413.
+ case erlang:decode_packet(http_bin, Buffer, []) of
+ {ok, Request, Rest} -> request(Request, State#state{buffer=Rest});
+ {more, _Length} when byte_size(Buffer) > MaxLength ->
+ error_terminate(413, State);
+ {more, _Length} -> wait_request(State);
+ {error, _Reason} -> error_terminate(400, State)
+ end.
+
+%% Receive more bytes for the request line; leftover bytes from a
+%% previous request (pipelining) are already in the buffer. A recv
+%% error or idle timeout simply closes the connection.
+-spec wait_request(#state{}) -> ok | none().
+wait_request(State=#state{socket=Socket, transport=Transport,
+ timeout=T, buffer=Buffer}) ->
+ case Transport:recv(Socket, 0, T) of
+ {ok, Data} -> parse_request(State#state{
+ buffer= << Buffer/binary, Data/binary >>});
+ {error, _Reason} -> terminate(State)
+ end.
+
+%% Act on the decoded request line: build the initial #http_req{} and
+%% move on to header parsing, or reject the request.
+-spec request({http_request, cowboy_http:method(), cowboy_http:uri(),
+ cowboy_http:version()}, #state{}) -> ok | none().
+%% Only HTTP/1.0 and HTTP/1.1 are supported.
+request({http_request, _Method, _URI, Version}, State)
+ when Version =/= {1, 0}, Version =/= {1, 1} ->
+ error_terminate(505, State);
+%% Normal absolute-path request: split the path and query string,
+%% URL-decoding with the configured decode fun.
+request({http_request, Method, {abs_path, AbsPath}, Version},
+ State=#state{socket=Socket, transport=Transport,
+ urldecode={URLDecFun, URLDecArg}=URLDec}) ->
+ URLDecode = fun(Bin) -> URLDecFun(Bin, URLDecArg) end,
+ {Path, RawPath, Qs} = cowboy_dispatcher:split_path(AbsPath, URLDecode),
+ ConnAtom = version_to_connection(Version),
+ parse_header(#http_req{socket=Socket, transport=Transport,
+ connection=ConnAtom, pid=self(), method=Method, version=Version,
+ path=Path, raw_path=RawPath, raw_qs=Qs, urldecode=URLDec}, State);
+%% The '*' request-URI form (e.g. OPTIONS *).
+request({http_request, Method, '*', Version},
+ State=#state{socket=Socket, transport=Transport, urldecode=URLDec}) ->
+ ConnAtom = version_to_connection(Version),
+ parse_header(#http_req{socket=Socket, transport=Transport,
+ connection=ConnAtom, pid=self(), method=Method, version=Version,
+ path='*', raw_path= <<"*">>, raw_qs= <<>>, urldecode=URLDec}, State);
+%% Any other URI form is not implemented.
+request({http_request, _Method, _URI, _Version}, State) ->
+ error_terminate(501, State);
+%% Empty lines before the request are tolerated up to max_empty_lines.
+request({http_error, <<"\r\n">>},
+ State=#state{req_empty_lines=N, max_empty_lines=N}) ->
+ error_terminate(400, State);
+request({http_error, <<"\r\n">>}, State=#state{req_empty_lines=N}) ->
+ parse_request(State#state{req_empty_lines=N + 1});
+request(_Any, State) ->
+ error_terminate(400, State).
+
+%% Decode one header line from the buffer; the same MaxLength bound
+%% as for the request line protects against over-long header lines.
+-spec parse_header(#http_req{}, #state{}) -> ok | none().
+parse_header(Req, State=#state{buffer=Buffer, max_line_length=MaxLength}) ->
+ case erlang:decode_packet(httph_bin, Buffer, []) of
+ {ok, Header, Rest} -> header(Header, Req, State#state{buffer=Rest}),
+ {more, _Length} when byte_size(Buffer) > MaxLength ->
+ error_terminate(413, State);
+ {more, _Length} -> wait_header(Req, State);
+ {error, _Reason} -> error_terminate(400, State)
+ end.
+
+%% Unlike wait_request/1, a timeout mid-headers is reported to the
+%% client as 408 Request Timeout rather than a silent close.
+-spec wait_header(#http_req{}, #state{}) -> ok | none().
+wait_header(Req, State=#state{socket=Socket,
+ transport=Transport, timeout=T, buffer=Buffer}) ->
+ case Transport:recv(Socket, 0, T) of
+ {ok, Data} -> parse_header(Req, State#state{
+ buffer= << Buffer/binary, Data/binary >>});
+ {error, timeout} -> error_terminate(408, State);
+ {error, closed} -> terminate(State)
+ end.
+
+%% Handle one parsed header, or the end of headers (http_eoh).
+%%
+%% The first Host header triggers host matching: it is lowercased,
+%% split into tokens, and the port defaults per transport (80/443)
+%% when none is given. A crash in split_host is reported as 400.
+-spec header({http_header, integer(), cowboy_http:header(), any(), binary()}
+ | http_eoh, #http_req{}, #state{}) -> ok | none().
+header({http_header, _I, 'Host', _R, RawHost}, Req=#http_req{
+ transport=Transport, host=undefined}, State) ->
+ RawHost2 = cowboy_bstr:to_lower(RawHost),
+ case catch cowboy_dispatcher:split_host(RawHost2) of
+ {Host, RawHost3, undefined} ->
+ Port = default_port(Transport:name()),
+ dispatch(fun parse_header/2, Req#http_req{
+ host=Host, raw_host=RawHost3, port=Port,
+ headers=[{'Host', RawHost3}|Req#http_req.headers]}, State);
+ {Host, RawHost3, Port} ->
+ dispatch(fun parse_header/2, Req#http_req{
+ host=Host, raw_host=RawHost3, port=Port,
+ headers=[{'Host', RawHost3}|Req#http_req.headers]}, State);
+ {'EXIT', _Reason} ->
+ error_terminate(400, State)
+ end;
+%% Ignore Host headers if we already have it.
+header({http_header, _I, 'Host', _R, _V}, Req, State) ->
+ parse_header(Req, State);
+%% Connection is parsed eagerly so the keepalive/close decision is
+%% already cached in the request record.
+header({http_header, _I, 'Connection', _R, Connection},
+ Req=#http_req{headers=Headers}, State) ->
+ Req2 = Req#http_req{headers=[{'Connection', Connection}|Headers]},
+ {ConnTokens, Req3}
+ = cowboy_http_req:parse_header('Connection', Req2),
+ ConnAtom = cowboy_http:connection_to_atom(ConnTokens),
+ parse_header(Req3#http_req{connection=ConnAtom}, State);
+%% Any other header is stored with its name normalized the way OTP
+%% would capitalize it (see format_header/1).
+header({http_header, _I, Field, _R, Value}, Req, State) ->
+ Field2 = format_header(Field),
+ parse_header(Req#http_req{headers=[{Field2, Value}|Req#http_req.headers]},
+ State);
+%% The Host header is required in HTTP/1.1.
+header(http_eoh, #http_req{version={1, 1}, host=undefined}, State) ->
+ error_terminate(400, State);
+%% It is however optional in HTTP/1.0.
+header(http_eoh, Req=#http_req{version={1, 0}, transport=Transport,
+ host=undefined}, State=#state{buffer=Buffer}) ->
+ Port = default_port(Transport:name()),
+ dispatch(fun handler_init/2, Req#http_req{host=[], raw_host= <<>>,
+ port=Port, buffer=Buffer}, State#state{buffer= <<>>});
+%% Headers done: hand the remaining buffered bytes to the request
+%% (they are the start of the body) and run the handler.
+header(http_eoh, Req, State=#state{buffer=Buffer}) ->
+ handler_init(Req#http_req{buffer=Buffer}, State#state{buffer= <<>>});
+header(_Any, _Req, State) ->
+ error_terminate(400, State).
+
+%% Match the request host and path against the dispatch rules, then
+%% continue with Next (header parsing or handler init) carrying the
+%% selected handler and bindings in the state.
+-spec dispatch(fun((#http_req{}, #state{}) -> ok),
+ #http_req{}, #state{}) -> ok | none().
+dispatch(Next, Req=#http_req{host=Host, path=Path},
+ State=#state{dispatch=Dispatch}) ->
+ %% @todo We should allow a configurable chain of handlers here to
+ %% allow things like url rewriting, site-wide authentication,
+ %% optional dispatching, and more. It would default to what
+ %% we are doing so far.
+ case cowboy_dispatcher:match(Host, Path, Dispatch) of
+ {ok, Handler, Opts, Binds, HostInfo, PathInfo} ->
+ Next(Req#http_req{host_info=HostInfo, path_info=PathInfo,
+ bindings=Binds}, State#state{handler={Handler, Opts}});
+ {error, notfound, host} ->
+ error_terminate(400, State);
+ {error, notfound, path} ->
+ error_terminate(404, State)
+ end.
+
+%% Run the handler's init/3 callback and branch on its result:
+%% ok -> handle/2, loop variants -> loop mode (optionally hibernating
+%% and/or with an inactivity timeout), shutdown -> terminate only,
+%% upgrade -> hand the request to another protocol module.
+%% A crash in init/3 replies 500 (if nothing was sent yet) and logs.
+%% NOTE(review): erlang:get_stacktrace/0 is pre-OTP 21 style; fine
+%% for the OTP releases this vendored code targets.
+-spec handler_init(#http_req{}, #state{}) -> ok | none().
+handler_init(Req, State=#state{transport=Transport,
+ handler={Handler, Opts}}) ->
+ try Handler:init({Transport:name(), http}, Req, Opts) of
+ {ok, Req2, HandlerState} ->
+ handler_handle(HandlerState, Req2, State);
+ {loop, Req2, HandlerState} ->
+ handler_before_loop(HandlerState, Req2, State);
+ {loop, Req2, HandlerState, hibernate} ->
+ handler_before_loop(HandlerState, Req2,
+ State#state{hibernate=true});
+ {loop, Req2, HandlerState, Timeout} ->
+ handler_before_loop(HandlerState, Req2,
+ State#state{loop_timeout=Timeout});
+ {loop, Req2, HandlerState, Timeout, hibernate} ->
+ handler_before_loop(HandlerState, Req2,
+ State#state{hibernate=true, loop_timeout=Timeout});
+ {shutdown, Req2, HandlerState} ->
+ handler_terminate(HandlerState, Req2, State);
+ %% @todo {upgrade, transport, Module}
+ {upgrade, protocol, Module} ->
+ upgrade_protocol(Req, State, Module)
+ catch Class:Reason ->
+ error_terminate(500, State),
+ error_logger:error_msg(
+ "** Handler ~p terminating in init/3~n"
+ " for the reason ~p:~p~n"
+ "** Options were ~p~n"
+ "** Request was ~p~n** Stacktrace: ~p~n~n",
+ [Handler, Class, Reason, Opts, Req, erlang:get_stacktrace()])
+ end.
+
+%% Delegate the request to Module:upgrade/4 (e.g. websocket); a tuple
+%% result feeds into the keepalive decision, anything else closes.
+-spec upgrade_protocol(#http_req{}, #state{}, atom()) -> ok | none().
+upgrade_protocol(Req, State=#state{listener=ListenerPid,
+ handler={Handler, Opts}}, Module) ->
+ case Module:upgrade(ListenerPid, Handler, Opts, Req) of
+ {UpgradeRes, Req2} -> next_request(Req2, State, UpgradeRes);
+ _Any -> terminate(State)
+ end.
+
+%% Run the handler's handle/2 callback. On success, terminate the
+%% request normally; on a crash, log, run terminate/2 anyway, then
+%% reply 500 (if nothing was sent yet).
+-spec handler_handle(any(), #http_req{}, #state{}) -> ok | none().
+handler_handle(HandlerState, Req, State=#state{handler={Handler, Opts}}) ->
+ try Handler:handle(Req, HandlerState) of
+ {ok, Req2, HandlerState2} ->
+ terminate_request(HandlerState2, Req2, State)
+ catch Class:Reason ->
+ error_logger:error_msg(
+ "** Handler ~p terminating in handle/2~n"
+ " for the reason ~p:~p~n"
+ "** Options were ~p~n** Handler state was ~p~n"
+ "** Request was ~p~n** Stacktrace: ~p~n~n",
+ [Handler, Class, Reason, Opts,
+ HandlerState, Req, erlang:get_stacktrace()]),
+ handler_terminate(HandlerState, Req, State),
+ error_terminate(500, State)
+ end.
+
+%% We don't listen for Transport closes because that would force us
+%% to receive data and buffer it indefinitely.
+-spec handler_before_loop(any(), #http_req{}, #state{}) -> ok | none().
+%% Entering loop mode: arm the inactivity timer, optionally dropping
+%% into hibernation (the hibernate flag is one-shot and reset here).
+handler_before_loop(HandlerState, Req, State=#state{hibernate=true}) ->
+ State2 = handler_loop_timeout(State),
+ erlang:hibernate(?MODULE, handler_loop,
+ [HandlerState, Req, State2#state{hibernate=false}]);
+handler_before_loop(HandlerState, Req, State) ->
+ State2 = handler_loop_timeout(State),
+ handler_loop(HandlerState, Req, State2).
+
+%% Almost the same code can be found in cowboy_http_websocket.
+%% Arm (or re-arm) the inactivity timer for loop handlers.
+%%
+%% Fix: the previous code stored only the make_ref() used to tag the
+%% timeout message and later passed that plain reference to
+%% erlang:cancel_timer/1, which cannot cancel anything — the timer
+%% reference returned by erlang:send_after/3 was discarded. We now
+%% keep {Tag, TimerRef} in loop_timeout_ref so re-arming genuinely
+%% cancels the previous timer; timeouts already delivered before the
+%% cancel are still drained in handler_loop/3.
+-spec handler_loop_timeout(#state{}) -> #state{}.
+handler_loop_timeout(State=#state{loop_timeout=infinity}) ->
+ State#state{loop_timeout_ref=undefined};
+handler_loop_timeout(State=#state{loop_timeout=Timeout,
+ loop_timeout_ref=PrevRef}) ->
+ _ = case PrevRef of
+ undefined -> ignore;
+ {_PrevTag, PrevTimerRef} -> erlang:cancel_timer(PrevTimerRef)
+ end,
+ Tag = make_ref(),
+ TimerRef = erlang:send_after(Timeout, self(), {?MODULE, timeout, Tag}),
+ State#state{loop_timeout_ref={Tag, TimerRef}}.
+
+%% Wait for messages for the loop handler: the current timeout tag
+%% ends the request, stale timeout tags are ignored, and any other
+%% message is forwarded to the handler's info/3 callback.
+-spec handler_loop(any(), #http_req{}, #state{}) -> ok | none().
+handler_loop(HandlerState, Req, State=#state{loop_timeout_ref=LoopRef}) ->
+ Tag = case LoopRef of
+ undefined -> undefined;
+ {LoopTag, _TimerRef} -> LoopTag
+ end,
+ receive
+ {?MODULE, timeout, Tag} when is_reference(Tag) ->
+ terminate_request(HandlerState, Req, State);
+ {?MODULE, timeout, OlderTag} when is_reference(OlderTag) ->
+ handler_loop(HandlerState, Req, State);
+ Message ->
+ handler_call(HandlerState, Req, State, Message)
+ end.
+
+%% Deliver a message to the loop handler's info/3 callback and branch
+%% on the result (finish, keep looping, or loop hibernated). A crash
+%% logs, runs terminate/2, then replies 500 (if nothing was sent).
+-spec handler_call(any(), #http_req{}, #state{}, any()) -> ok | none().
+handler_call(HandlerState, Req, State=#state{handler={Handler, Opts}},
+ Message) ->
+ try Handler:info(Message, Req, HandlerState) of
+ {ok, Req2, HandlerState2} ->
+ terminate_request(HandlerState2, Req2, State);
+ {loop, Req2, HandlerState2} ->
+ handler_before_loop(HandlerState2, Req2, State);
+ {loop, Req2, HandlerState2, hibernate} ->
+ handler_before_loop(HandlerState2, Req2,
+ State#state{hibernate=true})
+ catch Class:Reason ->
+ error_logger:error_msg(
+ "** Handler ~p terminating in info/3~n"
+ " for the reason ~p:~p~n"
+ "** Options were ~p~n** Handler state was ~p~n"
+ "** Request was ~p~n** Stacktrace: ~p~n~n",
+ [Handler, Class, Reason, Opts,
+ HandlerState, Req, erlang:get_stacktrace()]),
+ handler_terminate(HandlerState, Req, State),
+ error_terminate(500, State)
+ end.
+
+%% Run the handler's terminate/2 callback with the response locked so
+%% it cannot reply anymore. The return value (ok on success, or the
+%% error_logger result after a crash) feeds next_request/3's
+%% keepalive decision via terminate_request/3.
+-spec handler_terminate(any(), #http_req{}, #state{}) -> ok.
+handler_terminate(HandlerState, Req, #state{handler={Handler, Opts}}) ->
+ try
+ Handler:terminate(Req#http_req{resp_state=locked}, HandlerState)
+ catch Class:Reason ->
+ error_logger:error_msg(
+ "** Handler ~p terminating in terminate/2~n"
+ " for the reason ~p:~p~n"
+ "** Options were ~p~n** Handler state was ~p~n"
+ "** Request was ~p~n** Stacktrace: ~p~n~n",
+ [Handler, Class, Reason, Opts,
+ HandlerState, Req, erlang:get_stacktrace()])
+ end.
+
+%% Normal end of a request: run terminate/2 then decide whether the
+%% connection can serve another request.
+-spec terminate_request(any(), #http_req{}, #state{}) -> ok | none().
+terminate_request(HandlerState, Req, State) ->
+ HandlerRes = handler_terminate(HandlerState, Req, State),
+ next_request(Req, State, HandlerRes).
+
+%% Keep the connection alive only when every step succeeded (handler
+%% terminate, body fully read, response completed), the request asked
+%% for keepalive, and the max_keepalive cap is not reached; otherwise
+%% close. Leftover buffered bytes carry over for pipelined requests.
+-spec next_request(#http_req{}, #state{}, any()) -> ok | none().
+next_request(Req=#http_req{connection=Conn},
+ State=#state{req_keepalive=Keepalive, max_keepalive=MaxKeepalive},
+ HandlerRes) ->
+ RespRes = ensure_response(Req),
+ {BodyRes, Buffer} = ensure_body_processed(Req),
+ %% Flush the resp_sent message before moving on.
+ receive {cowboy_http_req, resp_sent} -> ok after 0 -> ok end,
+ case {HandlerRes, BodyRes, RespRes, Conn} of
+ {ok, ok, ok, keepalive} when Keepalive < MaxKeepalive ->
+ ?MODULE:parse_request(State#state{
+ buffer=Buffer, req_empty_lines=0,
+ req_keepalive=Keepalive + 1});
+ _Closed ->
+ terminate(State)
+ end.
+
+%% Make sure the request body was fully read so the next pipelined
+%% request starts at the right offset; returns the remaining buffer.
+%% 'close' forces the connection shut when the body cannot be
+%% skipped safely.
+-spec ensure_body_processed(#http_req{}) -> {ok | close, binary()}.
+ensure_body_processed(#http_req{body_state=done, buffer=Buffer}) ->
+ {ok, Buffer};
+ensure_body_processed(Req=#http_req{body_state=waiting}) ->
+ case cowboy_http_req:body(Req) of
+ {error, badarg} -> {ok, Req#http_req.buffer}; %% No body.
+ {error, _Reason} -> {close, <<>>};
+ {ok, _, Req2} -> {ok, Req2#http_req.buffer}
+ end;
+ensure_body_processed(Req=#http_req{body_state={multipart, _, _}}) ->
+ {ok, Req2} = cowboy_http_req:multipart_skip(Req),
+ ensure_body_processed(Req2).
+
+-spec ensure_response(#http_req{}) -> ok.
+%% The handler has already fully replied to the client.
+ensure_response(#http_req{resp_state=done}) ->
+ ok;
+%% No response has been sent but everything apparently went fine.
+%% Reply with 204 No Content to indicate this.
+ensure_response(Req=#http_req{resp_state=waiting}) ->
+ _ = cowboy_http_req:reply(204, [], [], Req),
+ ok;
+%% Close the chunked reply.
+%% A HEAD chunked reply sent nothing, so there is no final chunk to
+%% write; in both chunked cases 'close' forces the connection shut.
+ensure_response(#http_req{method='HEAD', resp_state=chunks}) ->
+ close;
+ensure_response(#http_req{socket=Socket, transport=Transport,
+ resp_state=chunks}) ->
+ Transport:send(Socket, <<"0\r\n\r\n">>),
+ close.
+
+%% Only send an error reply if there is no resp_sent message.
+%% A fresh #http_req{} is used for the reply so whatever state the
+%% real request is in cannot interfere; the connection then closes.
+-spec error_terminate(cowboy_http:status(), #state{}) -> ok.
+error_terminate(Code, State=#state{socket=Socket, transport=Transport}) ->
+ receive
+ {cowboy_http_req, resp_sent} -> ok
+ after 0 ->
+ _ = cowboy_http_req:reply(Code, #http_req{
+ socket=Socket, transport=Transport,
+ connection=close, pid=self(), resp_state=waiting}),
+ ok
+ end,
+ terminate(State).
+
+%% Close the socket and end the protocol process normally.
+-spec terminate(#state{}) -> ok.
+terminate(#state{socket=Socket, transport=Transport}) ->
+ Transport:close(Socket),
+ ok.
+
+%% Internal.
+
+%% Default connection behavior per HTTP version: 1.1 is persistent
+%% unless the request says otherwise, 1.0 closes by default.
+-spec version_to_connection(cowboy_http:version()) -> keepalive | close.
+version_to_connection({1, 1}) -> keepalive;
+version_to_connection(_Any) -> close.
+
+%% Default port when the Host header carries none, per transport.
+-spec default_port(atom()) -> 80 | 443.
+default_port(ssl) -> 443;
+default_port(_) -> 80.
+
+%% @todo While 32 should be enough for everybody, we should probably make
+%% this configurable or something.
+%% Normalize a binary header name to OTP-style capitalization
+%% (Word-Word-...). Atom headers were already normalized by
+%% erlang:decode_packet/3; short (=< 20 bytes) and very long (> 32
+%% bytes) binary names are left as-is, mirroring OTP's own limits.
+-spec format_header(atom()) -> atom(); (binary()) -> binary().
+format_header(Field) when is_atom(Field) ->
+ Field;
+format_header(Field) when byte_size(Field) =< 20; byte_size(Field) > 32 ->
+ Field;
+format_header(Field) ->
+ format_header(Field, true, <<>>).
+
+%% Helper: the boolean tracks whether the next character starts a
+%% word and must be uppercased.
+format_header(<<>>, _Any, Acc) ->
+ Acc;
+%% Replicate a bug in OTP for compatibility reasons when there's a - right
+%% after another. Proper use should always be 'true' instead of 'not Bool'.
+format_header(<< $-, Rest/bits >>, Bool, Acc) ->
+ format_header(Rest, not Bool, << Acc/binary, $- >>);
+format_header(<< C, Rest/bits >>, true, Acc) ->
+ format_header(Rest, false, << Acc/binary, (cowboy_bstr:char_to_upper(C)) >>);
+format_header(<< C, Rest/bits >>, false, Acc) ->
+ format_header(Rest, false, << Acc/binary, (cowboy_bstr:char_to_lower(C)) >>).
+
+%% Tests.
+
+-ifdef(TEST).
+
+%% EUnit generator: format_header/1 must capitalize each dash-
+%% separated word, including replicating OTP's quirk around
+%% consecutive dashes (see the last two cases).
+format_header_test_() ->
+ %% {Header, Result}
+ Tests = [
+ {<<"Sec-Websocket-Version">>, <<"Sec-Websocket-Version">>},
+ {<<"Sec-WebSocket-Version">>, <<"Sec-Websocket-Version">>},
+ {<<"sec-websocket-version">>, <<"Sec-Websocket-Version">>},
+ {<<"SEC-WEBSOCKET-VERSION">>, <<"Sec-Websocket-Version">>},
+ %% These last tests ensures we're formatting headers exactly like OTP.
+ %% Even though it's dumb, it's better for compatibility reasons.
+ {<<"Sec-WebSocket--Version">>, <<"Sec-Websocket--version">>},
+ {<<"Sec-WebSocket---Version">>, <<"Sec-Websocket---Version">>}
+ ],
+ [{H, fun() -> R = format_header(H) end} || {H, R} <- Tests].
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc HTTP request manipulation API.
+%%
+%% Almost all functions in this module return a new <em>Req</em> variable.
+%% It should always be used instead of the one used in your function call
+%% because it keeps the state of the request. It also allows Cowboy to do
+%% some lazy evaluation and cache results where possible.
+-module(cowboy_http_req).
+
+-export([
+ method/1, version/1, peer/1, peer_addr/1,
+ host/1, host_info/1, raw_host/1, port/1,
+ path/1, path_info/1, raw_path/1,
+ qs_val/2, qs_val/3, qs_vals/1, raw_qs/1,
+ binding/2, binding/3, bindings/1,
+ header/2, header/3, headers/1,
+ parse_header/2, parse_header/3,
+ cookie/2, cookie/3, cookies/1,
+ meta/2, meta/3
+]). %% Request API.
+
+-export([
+ body/1, body/2, body_qs/1,
+ multipart_data/1, multipart_skip/1
+]). %% Request Body API.
+
+-export([
+ set_resp_cookie/4, set_resp_header/3, set_resp_body/2,
+ set_resp_body_fun/3, has_resp_header/2, has_resp_body/1,
+ reply/2, reply/3, reply/4,
+ chunked_reply/2, chunked_reply/3, chunk/2,
+ upgrade_reply/3
+]). %% Response API.
+
+-export([
+ compact/1, transport/1
+]). %% Misc API.
+
+-include("include/http.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%% Request API.
+
+%% @doc Return the HTTP method of the request.
+-spec method(#http_req{}) -> {cowboy_http:method(), #http_req{}}.
+method(Req) ->
+ {Req#http_req.method, Req}.
+
+%% @doc Return the HTTP version used for the request.
+-spec version(#http_req{}) -> {cowboy_http:version(), #http_req{}}.
+version(Req) ->
+ {Req#http_req.version, Req}.
+
+%% @doc Return the peer address and port number of the remote host.
+%% The socket is queried once; the result is cached in the returned
+%% request record, so keep using the returned Req.
+-spec peer(#http_req{}) -> {{inet:ip_address(), inet:ip_port()}, #http_req{}}.
+peer(Req=#http_req{socket=Socket, transport=Transport, peer=undefined}) ->
+ {ok, Peer} = Transport:peername(Socket),
+ {Peer, Req#http_req{peer=Peer}};
+peer(Req) ->
+ {Req#http_req.peer, Req}.
+
+%% @doc Returns the peer address calculated from headers.
+%% Precedence: X-Real-Ip, then the first entry of X-Forwarded-For,
+%% then the socket peer address. NOTE(review): these headers are
+%% client-supplied and spoofable; only trust them behind a proxy
+%% known to set them.
+-spec peer_addr(#http_req{}) -> {inet:ip_address(), #http_req{}}.
+peer_addr(Req = #http_req{}) ->
+ {RealIp, Req1} = header(<<"X-Real-Ip">>, Req),
+ {ForwardedForRaw, Req2} = header(<<"X-Forwarded-For">>, Req1),
+ {{PeerIp, _PeerPort}, Req3} = peer(Req2),
+ ForwardedFor = case ForwardedForRaw of
+ undefined ->
+ undefined;
+ ForwardedForRaw ->
+ %% Keep only the first (client-most) address of the list.
+ case re:run(ForwardedForRaw, "^(?<first_ip>[^\\,]+)",
+ [{capture, [first_ip], binary}]) of
+ {match, [FirstIp]} -> FirstIp;
+ _Any -> undefined
+ end
+ end,
+ {ok, PeerAddr} = if
+ is_binary(RealIp) -> inet_parse:address(binary_to_list(RealIp));
+ is_binary(ForwardedFor) -> inet_parse:address(binary_to_list(ForwardedFor));
+ true -> {ok, PeerIp}
+ end,
+ {PeerAddr, Req3}.
+
+%% @doc Return the tokens for the hostname requested.
+-spec host(#http_req{}) -> {cowboy_dispatcher:tokens(), #http_req{}}.
+host(Req) ->
+ {Req#http_req.host, Req}.
+
+%% @doc Return the extra host information obtained from partially matching
+%% the hostname using <em>'...'</em>.
+-spec host_info(#http_req{})
+ -> {cowboy_dispatcher:tokens() | undefined, #http_req{}}.
+host_info(Req) ->
+ {Req#http_req.host_info, Req}.
+
+%% @doc Return the raw host directly taken from the request.
+-spec raw_host(#http_req{}) -> {binary(), #http_req{}}.
+raw_host(Req) ->
+ {Req#http_req.raw_host, Req}.
+
+%% @doc Return the port used for this request.
+-spec port(#http_req{}) -> {inet:ip_port(), #http_req{}}.
+port(Req) ->
+ {Req#http_req.port, Req}.
+
+%% @doc Return the path segments for the path requested.
+%%
+%% Following RFC2396, this function may return path segments containing any
+%% character, including <em>/</em> if, and only if, a <em>/</em> was escaped
+%% and part of a path segment in the path requested.
+-spec path(#http_req{}) -> {cowboy_dispatcher:tokens(), #http_req{}}.
+path(Req) ->
+ {Req#http_req.path, Req}.
+
+%% @doc Return the extra path information obtained from partially matching
+%% the path using <em>'...'</em>.
+-spec path_info(#http_req{})
+ -> {cowboy_dispatcher:tokens() | undefined, #http_req{}}.
+path_info(Req) ->
+ {Req#http_req.path_info, Req}.
+
+%% @doc Return the raw path directly taken from the request.
+-spec raw_path(#http_req{}) -> {binary(), #http_req{}}.
+raw_path(Req) ->
+ {Req#http_req.raw_path, Req}.
+
+%% @equiv qs_val(Name, Req, undefined)
+-spec qs_val(binary(), #http_req{})
+ -> {binary() | true | undefined, #http_req{}}.
+qs_val(Name, Req) when is_binary(Name) ->
+ qs_val(Name, Req, undefined).
+
+%% @doc Return the query string value for the given key, or a default if
+%% missing.
+%% The query string is parsed lazily on first access and the result is
+%% cached in the returned request record.
+qs_val(Name, Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
+ urldecode={URLDecFun, URLDecArg}}, Default) when is_binary(Name) ->
+ QsVals = parse_qs(RawQs, fun(Bin) -> URLDecFun(Bin, URLDecArg) end),
+ qs_val(Name, Req#http_req{qs_vals=QsVals}, Default);
+qs_val(Name, Req, Default) ->
+ case lists:keyfind(Name, 1, Req#http_req.qs_vals) of
+ {Name, Value} -> {Value, Req};
+ false -> {Default, Req}
+ end.
+
+%% @doc Return the full list of query string values.
+%% Same lazy parse-and-cache behavior as qs_val/3.
+-spec qs_vals(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
+qs_vals(Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
+ urldecode={URLDecFun, URLDecArg}}) ->
+ QsVals = parse_qs(RawQs, fun(Bin) -> URLDecFun(Bin, URLDecArg) end),
+ qs_vals(Req#http_req{qs_vals=QsVals});
+qs_vals(Req=#http_req{qs_vals=QsVals}) ->
+ {QsVals, Req}.
+
+%% @doc Return the raw query string directly taken from the request.
+-spec raw_qs(#http_req{}) -> {binary(), #http_req{}}.
+raw_qs(Req) ->
+ {Req#http_req.raw_qs, Req}.
+
+%% @equiv binding(Name, Req, undefined)
+-spec binding(atom(), #http_req{}) -> {binary() | undefined, #http_req{}}.
+binding(Name, Req) when is_atom(Name) ->
+ binding(Name, Req, undefined).
+
+%% @doc Return the binding value for the given key obtained when matching
+%% the host and path against the dispatch list, or a default if missing.
+binding(Name, Req, Default) when is_atom(Name) ->
+ case lists:keyfind(Name, 1, Req#http_req.bindings) of
+ {Name, Value} -> {Value, Req};
+ false -> {Default, Req}
+ end.
+
+%% @doc Return the full list of binding values.
+-spec bindings(#http_req{}) -> {list({atom(), binary()}), #http_req{}}.
+bindings(Req) ->
+ {Req#http_req.bindings, Req}.
+
+%% @equiv header(Name, Req, undefined)
+%% Header names are atoms for the headers erlang:decode_packet/3
+%% recognizes, and binaries otherwise.
+-spec header(atom() | binary(), #http_req{})
+ -> {binary() | undefined, #http_req{}}.
+header(Name, Req) when is_atom(Name) orelse is_binary(Name) ->
+ header(Name, Req, undefined).
+
+%% @doc Return the header value for the given key, or a default if missing.
+header(Name, Req, Default) when is_atom(Name) orelse is_binary(Name) ->
+ case lists:keyfind(Name, 1, Req#http_req.headers) of
+ {Name, Value} -> {Value, Req};
+ false -> {Default, Req}
+ end.
+
+%% @doc Return the full list of headers.
+-spec headers(#http_req{}) -> {cowboy_http:headers(), #http_req{}}.
+headers(Req) ->
+ {Req#http_req.headers, Req}.
+
+%% @doc Semantically parse headers.
+%%
+%% When the value isn't found, a proper default value for the type
+%% returned is used as a return value.
+%% @see parse_header/3
+-spec parse_header(cowboy_http:header(), #http_req{})
+    -> {any(), #http_req{}} | {error, badarg}.
+parse_header(Name, Req=#http_req{p_headers=PHeaders}) ->
+    %% Previously parsed values are cached in p_headers.
+    case lists:keyfind(Name, 1, PHeaders) of
+        {Name, Parsed} -> {Parsed, Req};
+        false -> parse_header(Name, Req, parse_header_default(Name))
+    end.
+
+%% @doc Default values for semantic header parsing.
+-spec parse_header_default(cowboy_http:header()) -> any().
+parse_header_default('Connection') -> [];
+parse_header_default(_) -> undefined.
+
+%% @doc Semantically parse headers.
+%%
+%% When the header is unknown, the value is returned directly without parsing.
+-spec parse_header(cowboy_http:header(), #http_req{}, any())
+ -> {any(), #http_req{}} | {error, badarg}.
+%% One clause per known header; each delegates to parse_header/4 with the
+%% cowboy_http parser matching that header's grammar.
+parse_header(Name, Req, Default) when Name =:= 'Accept' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:list(Value, fun cowboy_http:media_range/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Accept-Charset' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:conneg/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Accept-Encoding' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:list(Value, fun cowboy_http:conneg/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Accept-Language' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:language_range/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Connection' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Content-Length' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:digits(Value)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Content-Type' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:content_type(Value)
+ end);
+parse_header(Name, Req, Default)
+ when Name =:= 'If-Match'; Name =:= 'If-None-Match' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:entity_tag_match(Value)
+ end);
+parse_header(Name, Req, Default)
+ when Name =:= 'If-Modified-Since'; Name =:= 'If-Unmodified-Since' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:http_date(Value)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Upgrade' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= <<"sec-websocket-protocol">> ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:token/2)
+ end);
+%% NOTE(review): this catch-all returns a 3-tuple {undefined, Value, Req2},
+%% which contradicts both the -spec above and the @doc statement that the
+%% value "is returned directly" -- confirm whether any caller matches the
+%% 3-tuple before changing it to {Value, Req2}.
+parse_header(Name, Req, Default) ->
+ {Value, Req2} = header(Name, Req, Default),
+ {undefined, Value, Req2}.
+
+%% Fetch the raw header, parse it with Fun, and cache the result (or the
+%% Default when the header is absent) in p_headers for later lookups.
+%% NOTE(review): a Default cached here is returned verbatim by subsequent
+%% parse_header/2,3 calls whatever default they pass -- confirm intended.
+%% Parse errors are returned but deliberately not cached.
+parse_header(Name, Req=#http_req{p_headers=PHeaders}, Default, Fun) ->
+ case header(Name, Req) of
+ {undefined, Req2} ->
+ {Default, Req2#http_req{p_headers=[{Name, Default}|PHeaders]}};
+ {Value, Req2} ->
+ case Fun(Value) of
+ {error, badarg} ->
+ {error, badarg};
+ P ->
+ {P, Req2#http_req{p_headers=[{Name, P}|PHeaders]}}
+ end
+ end.
+
+%% @equiv cookie(Name, Req, undefined)
+-spec cookie(binary(), #http_req{})
+    -> {binary() | true | undefined, #http_req{}}.
+cookie(Name, Req) when is_binary(Name) ->
+    cookie(Name, Req, undefined).
+
+%% @doc Return the cookie value for the given key, or a default if
+%% missing. The Cookie header is parsed and cached on first access.
+cookie(Name, Req=#http_req{cookies=undefined}, Default) when is_binary(Name) ->
+    case header('Cookie', Req) of
+        {undefined, Req2} ->
+            {Default, Req2#http_req{cookies=[]}};
+        {RawCookie, Req2} ->
+            Parsed = cowboy_cookies:parse_cookie(RawCookie),
+            cookie(Name, Req2#http_req{cookies=Parsed}, Default)
+    end;
+cookie(Name, Req=#http_req{cookies=Cookies}, Default) ->
+    case lists:keyfind(Name, 1, Cookies) of
+        {Name, Value} -> {Value, Req};
+        false -> {Default, Req}
+    end.
+
+%% @doc Return the full list of cookie values.
+%% The Cookie header is parsed and cached on first access.
+-spec cookies(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
+cookies(Req=#http_req{cookies=undefined}) ->
+    case header('Cookie', Req) of
+        {undefined, Req2} ->
+            {[], Req2#http_req{cookies=[]}};
+        {RawCookie, Req2} ->
+            Parsed = cowboy_cookies:parse_cookie(RawCookie),
+            cookies(Req2#http_req{cookies=Parsed})
+    end;
+cookies(Req=#http_req{cookies=Cookies}) ->
+    {Cookies, Req}.
+
+%% @equiv meta(Name, Req, undefined)
+-spec meta(atom(), #http_req{}) -> {any() | undefined, #http_req{}}.
+meta(Name, Req) ->
+    meta(Name, Req, undefined).
+
+%% @doc Return metadata information about the request.
+%%
+%% Metadata information varies from one protocol to another. Websockets
+%% would define the protocol version here, while REST would use it to
+%% indicate which media type, language and charset were retained.
+-spec meta(atom(), #http_req{}, any()) -> {any(), #http_req{}}.
+meta(Name, Req=#http_req{meta=Meta}, Default) ->
+    case lists:keyfind(Name, 1, Meta) of
+        {Name, Value} -> {Value, Req};
+        false -> {Default, Req}
+    end.
+
+%% Request Body API.
+
+%% @doc Return the full body sent with the request, or <em>{error, badarg}</em>
+%% if no <em>Content-Length</em> is available or it fails to parse.
+%% @todo We probably want to allow a max length.
+%% @todo Add multipart support to this function.
+-spec body(#http_req{}) -> {ok, binary(), #http_req{}} | {error, atom()}.
+body(Req) ->
+    %% Bug fix: the previous code matched {Length, Req2} = parse_header(...),
+    %% so a parse failure {error, badarg} bound Length to 'error' and fell
+    %% through to body(error, badarg), crashing instead of returning the
+    %% error. Also dropped the redundant fully-qualified self-call.
+    case parse_header('Content-Length', Req) of
+        {error, badarg} ->
+            {error, badarg};
+        {undefined, _Req2} ->
+            {error, badarg};
+        {Length, Req2} when is_integer(Length) ->
+            body(Length, Req2)
+    end.
+
+%% @doc Return <em>Length</em> bytes of the request body.
+%%
+%% You probably shouldn't be calling this function directly, as it expects the
+%% <em>Length</em> argument to be the full size of the body, and will consider
+%% the body to be fully read from the socket.
+%% @todo We probably want to configure the timeout.
+-spec body(non_neg_integer(), #http_req{})
+ -> {ok, binary(), #http_req{}} | {error, atom()}.
+%% Fast path: the whole body is already in the buffer; slice it off
+%% without touching the socket.
+body(Length, Req=#http_req{body_state=waiting, buffer=Buffer})
+ when is_integer(Length) andalso Length =< byte_size(Buffer) ->
+ << Body:Length/binary, Rest/bits >> = Buffer,
+ {ok, Body, Req#http_req{body_state=done, buffer=Rest}};
+%% Otherwise read the remaining bytes from the socket with a hard-coded
+%% 5000ms timeout (see @todo above) and prepend the buffered part.
+body(Length, Req=#http_req{socket=Socket, transport=Transport,
+ body_state=waiting, buffer=Buffer}) ->
+ case Transport:recv(Socket, Length - byte_size(Buffer), 5000) of
+ {ok, Body} -> {ok, << Buffer/binary, Body/binary >>,
+ Req#http_req{body_state=done, buffer= <<>>}};
+ {error, Reason} -> {error, Reason}
+ end.
+
+%% @doc Return the full body sent with the request, parsed as an
+%% application/x-www-form-urlencoded string. Essentially a POST query string.
+%% Crashes (badmatch) when the body cannot be read.
+-spec body_qs(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
+body_qs(Req=#http_req{urldecode={URLDecFun, URLDecArg}}) ->
+    {ok, Body, Req2} = body(Req),
+    Decode = fun(Bin) -> URLDecFun(Bin, URLDecArg) end,
+    {parse_qs(Body, Decode), Req2}.
+
+%% Multipart Request API.
+
+%% @doc Return data from the multipart parser.
+%%
+%% Use this function for multipart streaming. For each part in the request,
+%% this function returns <em>{headers, Headers}</em> followed by a sequence of
+%% <em>{data, Data}</em> tuples and finally <em>end_of_part</em>. When there
+%% is no part to parse anymore, <em>eof</em> is returned.
+%%
+%% If the request Content-Type is not a multipart one, <em>{error, badarg}</em>
+%% is returned.
+-spec multipart_data(#http_req{})
+ -> {{headers, cowboy_http:headers()}
+ | {data, binary()} | end_of_part | eof,
+ #http_req{}}.
+%% First call: extract the boundary from Content-Type, the body length
+%% from Content-Length, and start a fresh multipart parser over whatever
+%% is already buffered. A non-multipart Content-Type fails the first
+%% match (badmatch rather than a literal {error, badarg} tuple).
+multipart_data(Req=#http_req{body_state=waiting}) ->
+ {{<<"multipart">>, _SubType, Params}, Req2} =
+ parse_header('Content-Type', Req),
+ {_, Boundary} = lists:keyfind(<<"boundary">>, 1, Params),
+ {Length, Req3=#http_req{buffer=Buffer}} =
+ parse_header('Content-Length', Req2),
+ multipart_data(Req3, Length, cowboy_multipart:parser(Boundary), Buffer);
+%% Subsequent calls: resume the parser continuation saved in body_state.
+multipart_data(Req=#http_req{body_state={multipart, Length, Cont}}) ->
+ multipart_data(Req, Length, Cont());
+multipart_data(Req=#http_req{body_state=done}) ->
+ {eof, Req}.
+
+%% Feed the buffered bytes to the parser, never more than Length (the
+%% number of body bytes still expected on the wire).
+multipart_data(Req, Length, Parser, Buffer) when byte_size(Buffer) >= Length ->
+ << Data:Length/binary, Rest/binary >> = Buffer,
+ multipart_data(Req#http_req{buffer=Rest}, 0, Parser(Data));
+multipart_data(Req, Length, Parser, Buffer) ->
+ NewLength = Length - byte_size(Buffer),
+ multipart_data(Req#http_req{buffer= <<>>}, NewLength, Parser(Buffer)).
+
+%% Translate parser output into the {Tag, Req} tuples of the public API,
+%% saving the continuation and remaining length in body_state.
+multipart_data(Req, Length, {headers, Headers, Cont}) ->
+ {{headers, Headers}, Req#http_req{body_state={multipart, Length, Cont}}};
+%% NOTE(review): this returns the tag 'body' while the multipart_data/1
+%% spec and @doc advertise {data, Data} -- confirm which tag callers
+%% match before changing either side.
+multipart_data(Req, Length, {body, Data, Cont}) ->
+ {{body, Data}, Req#http_req{body_state={multipart, Length, Cont}}};
+multipart_data(Req, Length, {end_of_part, Cont}) ->
+ {end_of_part, Req#http_req{body_state={multipart, Length, Cont}}};
+multipart_data(Req, 0, eof) ->
+ {eof, Req#http_req{body_state=done}};
+%% The parser is done but Length body bytes remain on the socket:
+%% drain and discard them so the connection can be reused.
+multipart_data(Req=#http_req{socket=Socket, transport=Transport},
+ Length, eof) ->
+ {ok, _Data} = Transport:recv(Socket, Length, 5000),
+ {eof, Req#http_req{body_state=done}};
+%% The parser needs more input: read from the socket (5000ms timeout);
+%% anything received past Length is kept in the request buffer.
+multipart_data(Req=#http_req{socket=Socket, transport=Transport},
+ Length, {more, Parser}) when Length > 0 ->
+ case Transport:recv(Socket, 0, 5000) of
+ {ok, << Data:Length/binary, Buffer/binary >>} ->
+ multipart_data(Req#http_req{buffer=Buffer}, 0, Parser(Data));
+ {ok, Data} ->
+ multipart_data(Req, Length - byte_size(Data), Parser(Data))
+ end.
+
+%% @doc Skip a part returned by the multipart parser.
+%%
+%% Repeatedly calls <em>multipart_data/1</em> until <em>end_of_part</em>
+%% or <em>eof</em> is parsed, discarding everything in between.
+multipart_skip(Req) ->
+    case multipart_data(Req) of
+        {end_of_part, Req2} -> {ok, Req2};
+        {eof, Req2} -> {ok, Req2};
+        {_Discarded, Req2} -> multipart_skip(Req2)
+    end.
+
+%% Response API.
+
+%% @doc Add a cookie header to the response.
+-spec set_resp_cookie(binary(), binary(), [cowboy_cookies:cookie_option()],
+    #http_req{}) -> {ok, #http_req{}}.
+set_resp_cookie(Name, Value, Options, Req) ->
+    {CookieName, CookieValue} = cowboy_cookies:cookie(Name, Value, Options),
+    set_resp_header(CookieName, CookieValue, Req).
+
+%% @doc Add a header to the response. The name is normalized to binary.
+set_resp_header(Name, Value, Req=#http_req{resp_headers=RespHeaders}) ->
+    BinName = header_to_binary(Name),
+    {ok, Req#http_req{resp_headers=[{BinName, Value}|RespHeaders]}}.
+
+%% @doc Add a body to the response.
+%%
+%% The body set here is ignored if the response is later sent using
+%% anything other than reply/2 or reply/3. The response body is expected
+%% to be a binary or an iolist.
+set_resp_body(Body, Req) ->
+    {ok, Req#http_req{resp_body=Body}}.
+
+
+%% @doc Add a body function to the response.
+%%
+%% The response body may also be set to a content-length - stream-function
+%% pair. If the response body is of this type normal response headers will
+%% be sent. After the response headers have been sent the body function is
+%% applied. The body function is expected to write the response body
+%% directly to the socket using the transport module.
+%%
+%% If the body function crashes while writing the response body or writes
+%% fewer bytes than declared the behaviour is undefined. The body set here
+%% is ignored if the response is later sent using anything other than
+%% `reply/2' or `reply/3'.
+%%
+%% @see cowboy_http_req:transport/1.
+-spec set_resp_body_fun(non_neg_integer(), fun(() -> {sent, non_neg_integer()}),
+    #http_req{}) -> {ok, #http_req{}}.
+set_resp_body_fun(ContentLength, BodyFun, Req) ->
+    {ok, Req#http_req{resp_body={ContentLength, BodyFun}}}.
+
+
+%% @doc Return whether the given header has been set for the response.
+has_resp_header(Name, #http_req{resp_headers=RespHeaders}) ->
+    BinName = header_to_binary(Name),
+    lists:keymember(BinName, 1, RespHeaders).
+
+%% @doc Return whether a body has been set for the response.
+%% A {Length, StreamFun} pair counts as a body when Length is positive.
+has_resp_body(#http_req{resp_body={Length, _StreamFun}}) ->
+    Length > 0;
+has_resp_body(#http_req{resp_body=Body}) ->
+    iolist_size(Body) > 0.
+
+%% @equiv reply(Status, [], [], Req)
+-spec reply(cowboy_http:status(), #http_req{}) -> {ok, #http_req{}}.
+reply(Status, Req) ->
+    %% reply/3 extracts the stored resp_body itself.
+    reply(Status, [], Req).
+
+%% @equiv reply(Status, Headers, [], Req)
+-spec reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
+    -> {ok, #http_req{}}.
+reply(Status, Headers, Req=#http_req{resp_body=RespBody}) ->
+    reply(Status, Headers, RespBody, Req).
+
+%% @doc Send a reply to the client.
+%% Merges caller headers, previously set response headers and defaults,
+%% writes everything to the socket and marks the response as done.
+%% Only callable while resp_state=waiting (enforced by the head match).
+reply(Status, Headers, Body, Req=#http_req{socket=Socket,
+ transport=Transport, connection=Connection, pid=ReqPid,
+ method=Method, resp_state=waiting, resp_headers=RespHeaders}) ->
+ RespConn = response_connection(Headers, Connection),
+ %% Body may be a {ContentLength, StreamFun} pair from set_resp_body_fun/3.
+ ContentLen = case Body of {CL, _} -> CL; _ -> iolist_size(Body) end,
+ Head = response_head(Status, Headers, RespHeaders, [
+ {<<"Connection">>, atom_to_connection(Connection)},
+ {<<"Content-Length">>, integer_to_list(ContentLen)},
+ {<<"Date">>, cowboy_clock:rfc1123()},
+ {<<"Server">>, <<"Cowboy">>}
+ ]),
+ %% HEAD gets headers only; a stream body is written by calling StreamFun.
+ case {Method, Body} of
+ {'HEAD', _} -> Transport:send(Socket, Head);
+ {_, {_, StreamFun}} -> Transport:send(Socket, Head), StreamFun();
+ {_, _} -> Transport:send(Socket, [Head, Body])
+ end,
+ ReqPid ! {?MODULE, resp_sent},
+ {ok, Req#http_req{connection=RespConn, resp_state=done,
+ resp_headers=[], resp_body= <<>>}}.
+
+%% @equiv chunked_reply(Status, [], Req)
+-spec chunked_reply(cowboy_http:status(), #http_req{}) -> {ok, #http_req{}}.
+chunked_reply(Status, Req) ->
+    chunked_reply(Status, [], Req).
+
+%% @doc Initiate the sending of a chunked reply to the client.
+%% @see cowboy_http_req:chunk/2
+-spec chunked_reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
+ -> {ok, #http_req{}}.
+%% Sends the status line and headers (with Transfer-Encoding: chunked)
+%% and switches resp_state to chunks so chunk/2 becomes usable.
+%% Only callable while resp_state=waiting (enforced by the head match).
+chunked_reply(Status, Headers, Req=#http_req{socket=Socket,
+ transport=Transport, connection=Connection, pid=ReqPid,
+ resp_state=waiting, resp_headers=RespHeaders}) ->
+ RespConn = response_connection(Headers, Connection),
+ Head = response_head(Status, Headers, RespHeaders, [
+ {<<"Connection">>, atom_to_connection(Connection)},
+ {<<"Transfer-Encoding">>, <<"chunked">>},
+ {<<"Date">>, cowboy_clock:rfc1123()},
+ {<<"Server">>, <<"Cowboy">>}
+ ]),
+ Transport:send(Socket, Head),
+ ReqPid ! {?MODULE, resp_sent},
+ {ok, Req#http_req{connection=RespConn, resp_state=chunks,
+ resp_headers=[], resp_body= <<>>}}.
+
+%% @doc Send a chunk of data.
+%%
+%% A chunked reply must have been initiated before calling this function.
+%% HEAD requests are a silent no-op; otherwise the chunk is framed with
+%% its hexadecimal size followed by CRLF, per chunked transfer coding.
+chunk(_Data, #http_req{socket=_Socket, transport=_Transport, method='HEAD'}) ->
+ ok;
+chunk(Data, #http_req{socket=Socket, transport=Transport, resp_state=chunks}) ->
+ Transport:send(Socket, [erlang:integer_to_list(iolist_size(Data), 16),
+ <<"\r\n">>, Data, <<"\r\n">>]).
+
+%% @doc Send an upgrade reply.
+%% @private
+-spec upgrade_reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
+ -> {ok, #http_req{}}.
+%% Sends the status line and headers plus Connection: Upgrade, then
+%% marks the response as done. Only callable while resp_state=waiting.
+upgrade_reply(Status, Headers, Req=#http_req{socket=Socket, transport=Transport,
+ pid=ReqPid, resp_state=waiting, resp_headers=RespHeaders}) ->
+ Head = response_head(Status, Headers, RespHeaders, [
+ {<<"Connection">>, <<"Upgrade">>}
+ ]),
+ Transport:send(Socket, Head),
+ ReqPid ! {?MODULE, resp_sent},
+ {ok, Req#http_req{resp_state=done, resp_headers=[], resp_body= <<>>}}.
+
+%% Misc API.
+
+%% @doc Compact the request data by removing all non-system information.
+%%
+%% This essentially removes the host, path, query string, bindings and
+%% headers. Use it when you really need to save up memory, for example
+%% when having many concurrent long-running connections.
+-spec compact(#http_req{}) -> #http_req{}.
+compact(Req) ->
+    Req#http_req{
+        host=undefined, host_info=undefined,
+        path=undefined, path_info=undefined,
+        qs_vals=undefined, bindings=undefined,
+        headers=[], p_headers=[], cookies=[]}.
+
+%% @doc Return the transport module and socket associated with a request.
+%%
+%% This exposes the same socket interface used internally by the HTTP
+%% protocol implementation to developers that need low level access to
+%% the socket.
+%%
+%% It is preferred to use this in conjunction with the stream function
+%% support in `set_resp_body_fun/3' if this is used to write a response
+%% body directly to the socket. This ensures that the response headers
+%% are set correctly.
+-spec transport(#http_req{}) -> {ok, module(), inet:socket()}.
+transport(#http_req{transport=Transport, socket=Socket}) ->
+    {ok, Transport, Socket}.
+
+%% Internal.
+
+%% Split a query string on '&' and '=', URL-decoding keys and values.
+%% A token without '=' maps to the value 'true'.
+-spec parse_qs(binary(), fun((binary()) -> binary())) ->
+    list({binary(), binary() | true}).
+parse_qs(<<>>, _URLDecode) ->
+    [];
+parse_qs(Qs, URLDecode) ->
+    Pairs = binary:split(Qs, <<"&">>, [global, trim]),
+    lists:map(fun(Pair) ->
+        case binary:split(Pair, <<"=">>) of
+            [Key] -> {URLDecode(Key), true};
+            [Key, Value] -> {URLDecode(Key), URLDecode(Value)}
+        end
+    end, Pairs).
+
+%% Decide whether the connection stays alive after the reply by scanning
+%% the caller-supplied headers for a Connection header; fall back to the
+%% current Connection state when none is present.
+-spec response_connection(cowboy_http:headers(), keepalive | close)
+ -> keepalive | close.
+response_connection([], Connection) ->
+ Connection;
+response_connection([{Name, Value}|Tail], Connection) ->
+ case Name of
+ 'Connection' -> response_connection_parse(Value);
+ Name when is_atom(Name) -> response_connection(Tail, Connection);
+ %% Binary header names are lowercased for a case-insensitive compare.
+ Name ->
+ Name2 = cowboy_bstr:to_lower(Name),
+ case Name2 of
+ <<"connection">> -> response_connection_parse(Value);
+ _Any -> response_connection(Tail, Connection)
+ end
+ end.
+
+%% Tokenize a Connection header value and map it to keepalive | close.
+-spec response_connection_parse(binary()) -> keepalive | close.
+response_connection_parse(ReplyConn) ->
+    ConnTokens = cowboy_http:nonempty_list(ReplyConn, fun cowboy_http:token/2),
+    cowboy_http:connection_to_atom(ConnTokens).
+
+%% Build the full response head iolist: status line, merged headers
+%% (caller headers win over resp_headers, which win over defaults),
+%% and the terminating blank line.
+-spec response_head(cowboy_http:status(), cowboy_http:headers(),
+    cowboy_http:headers(), cowboy_http:headers()) -> iolist().
+response_head(Status, Headers, RespHeaders, DefaultHeaders) ->
+    UserHeaders = [{header_to_binary(Key), Value} || {Key, Value} <- Headers],
+    AllHeaders = merge_headers(
+        merge_headers(UserHeaders, RespHeaders), DefaultHeaders),
+    HeaderLines = [[Key, <<": ">>, Value, <<"\r\n">>]
+        || {Key, Value} <- AllHeaders],
+    [<<"HTTP/1.1 ", (status(Status))/binary, "\r\n">>,
+        HeaderLines, <<"\r\n">>].
+
+%% Append each extra header to Headers unless a header with the same
+%% name is already present (existing entries always win).
+-spec merge_headers(cowboy_http:headers(), cowboy_http:headers())
+    -> cowboy_http:headers().
+merge_headers(Headers, []) ->
+    Headers;
+merge_headers(Headers, [{Name, Value}|Rest]) ->
+    case lists:keymember(Name, 1, Headers) of
+        true -> merge_headers(Headers, Rest);
+        false -> merge_headers(Headers ++ [{Name, Value}], Rest)
+    end.
+
+%% Render the internal connection state as a Connection header value.
+-spec atom_to_connection(keepalive) -> <<_:80>>;
+    (close) -> <<_:40>>.
+atom_to_connection(keepalive) -> <<"keep-alive">>;
+atom_to_connection(close) -> <<"close">>.
+
+%% Map a numeric HTTP status code to its full status line; a binary is
+%% passed through unchanged so callers can supply custom status lines.
+%% Integers without a clause raise function_clause.
+-spec status(cowboy_http:status()) -> binary().
+status(100) -> <<"100 Continue">>;
+status(101) -> <<"101 Switching Protocols">>;
+status(102) -> <<"102 Processing">>;
+status(200) -> <<"200 OK">>;
+status(201) -> <<"201 Created">>;
+status(202) -> <<"202 Accepted">>;
+status(203) -> <<"203 Non-Authoritative Information">>;
+status(204) -> <<"204 No Content">>;
+status(205) -> <<"205 Reset Content">>;
+status(206) -> <<"206 Partial Content">>;
+status(207) -> <<"207 Multi-Status">>;
+status(226) -> <<"226 IM Used">>;
+status(300) -> <<"300 Multiple Choices">>;
+status(301) -> <<"301 Moved Permanently">>;
+status(302) -> <<"302 Found">>;
+status(303) -> <<"303 See Other">>;
+status(304) -> <<"304 Not Modified">>;
+status(305) -> <<"305 Use Proxy">>;
+status(306) -> <<"306 Switch Proxy">>;
+status(307) -> <<"307 Temporary Redirect">>;
+status(400) -> <<"400 Bad Request">>;
+status(401) -> <<"401 Unauthorized">>;
+status(402) -> <<"402 Payment Required">>;
+status(403) -> <<"403 Forbidden">>;
+status(404) -> <<"404 Not Found">>;
+status(405) -> <<"405 Method Not Allowed">>;
+status(406) -> <<"406 Not Acceptable">>;
+status(407) -> <<"407 Proxy Authentication Required">>;
+status(408) -> <<"408 Request Timeout">>;
+status(409) -> <<"409 Conflict">>;
+status(410) -> <<"410 Gone">>;
+status(411) -> <<"411 Length Required">>;
+status(412) -> <<"412 Precondition Failed">>;
+status(413) -> <<"413 Request Entity Too Large">>;
+status(414) -> <<"414 Request-URI Too Long">>;
+status(415) -> <<"415 Unsupported Media Type">>;
+status(416) -> <<"416 Requested Range Not Satisfiable">>;
+status(417) -> <<"417 Expectation Failed">>;
+status(418) -> <<"418 I'm a teapot">>;
+status(422) -> <<"422 Unprocessable Entity">>;
+status(423) -> <<"423 Locked">>;
+status(424) -> <<"424 Failed Dependency">>;
+status(425) -> <<"425 Unordered Collection">>;
+status(426) -> <<"426 Upgrade Required">>;
+status(500) -> <<"500 Internal Server Error">>;
+status(501) -> <<"501 Not Implemented">>;
+status(502) -> <<"502 Bad Gateway">>;
+status(503) -> <<"503 Service Unavailable">>;
+status(504) -> <<"504 Gateway Timeout">>;
+status(505) -> <<"505 HTTP Version Not Supported">>;
+status(506) -> <<"506 Variant Also Negotiates">>;
+status(507) -> <<"507 Insufficient Storage">>;
+status(510) -> <<"510 Not Extended">>;
+status(B) when is_binary(B) -> B.
+
+%% Map the known header-name atoms to their canonical binary form;
+%% binaries pass through unchanged. Atoms without a clause raise
+%% function_clause.
+-spec header_to_binary(cowboy_http:header()) -> binary().
+header_to_binary('Cache-Control') -> <<"Cache-Control">>;
+header_to_binary('Connection') -> <<"Connection">>;
+header_to_binary('Date') -> <<"Date">>;
+header_to_binary('Pragma') -> <<"Pragma">>;
+header_to_binary('Transfer-Encoding') -> <<"Transfer-Encoding">>;
+header_to_binary('Upgrade') -> <<"Upgrade">>;
+header_to_binary('Via') -> <<"Via">>;
+header_to_binary('Accept') -> <<"Accept">>;
+header_to_binary('Accept-Charset') -> <<"Accept-Charset">>;
+header_to_binary('Accept-Encoding') -> <<"Accept-Encoding">>;
+header_to_binary('Accept-Language') -> <<"Accept-Language">>;
+header_to_binary('Authorization') -> <<"Authorization">>;
+header_to_binary('From') -> <<"From">>;
+header_to_binary('Host') -> <<"Host">>;
+header_to_binary('If-Modified-Since') -> <<"If-Modified-Since">>;
+header_to_binary('If-Match') -> <<"If-Match">>;
+header_to_binary('If-None-Match') -> <<"If-None-Match">>;
+header_to_binary('If-Range') -> <<"If-Range">>;
+header_to_binary('If-Unmodified-Since') -> <<"If-Unmodified-Since">>;
+header_to_binary('Max-Forwards') -> <<"Max-Forwards">>;
+header_to_binary('Proxy-Authorization') -> <<"Proxy-Authorization">>;
+header_to_binary('Range') -> <<"Range">>;
+header_to_binary('Referer') -> <<"Referer">>;
+header_to_binary('User-Agent') -> <<"User-Agent">>;
+header_to_binary('Age') -> <<"Age">>;
+header_to_binary('Location') -> <<"Location">>;
+header_to_binary('Proxy-Authenticate') -> <<"Proxy-Authenticate">>;
+header_to_binary('Public') -> <<"Public">>;
+header_to_binary('Retry-After') -> <<"Retry-After">>;
+header_to_binary('Server') -> <<"Server">>;
+header_to_binary('Vary') -> <<"Vary">>;
+header_to_binary('Warning') -> <<"Warning">>;
+header_to_binary('Www-Authenticate') -> <<"Www-Authenticate">>;
+header_to_binary('Allow') -> <<"Allow">>;
+header_to_binary('Content-Base') -> <<"Content-Base">>;
+header_to_binary('Content-Encoding') -> <<"Content-Encoding">>;
+header_to_binary('Content-Language') -> <<"Content-Language">>;
+header_to_binary('Content-Length') -> <<"Content-Length">>;
+header_to_binary('Content-Location') -> <<"Content-Location">>;
+header_to_binary('Content-Md5') -> <<"Content-Md5">>;
+header_to_binary('Content-Range') -> <<"Content-Range">>;
+header_to_binary('Content-Type') -> <<"Content-Type">>;
+header_to_binary('Etag') -> <<"Etag">>;
+header_to_binary('Expires') -> <<"Expires">>;
+header_to_binary('Last-Modified') -> <<"Last-Modified">>;
+header_to_binary('Accept-Ranges') -> <<"Accept-Ranges">>;
+header_to_binary('Set-Cookie') -> <<"Set-Cookie">>;
+header_to_binary('Set-Cookie2') -> <<"Set-Cookie2">>;
+header_to_binary('X-Forwarded-For') -> <<"X-Forwarded-For">>;
+header_to_binary('Cookie') -> <<"Cookie">>;
+header_to_binary('Keep-Alive') -> <<"Keep-Alive">>;
+header_to_binary('Proxy-Connection') -> <<"Proxy-Connection">>;
+header_to_binary(B) when is_binary(B) -> B.
+
+%% Tests.
+
+-ifdef(TEST).
+
+%% EUnit generator: each {QueryString, Expected} case becomes a named
+%% test asserting parse_qs/2 output via pattern match.
+parse_qs_test_() ->
+    Cases = [
+        {<<"">>, []},
+        {<<"a=b">>, [{<<"a">>, <<"b">>}]},
+        {<<"aaa=bbb">>, [{<<"aaa">>, <<"bbb">>}]},
+        {<<"a&b">>, [{<<"a">>, true}, {<<"b">>, true}]},
+        {<<"a=b&c&d=e">>, [{<<"a">>, <<"b">>},
+            {<<"c">>, true}, {<<"d">>, <<"e">>}]},
+        {<<"a=b=c=d=e&f=g">>, [{<<"a">>, <<"b=c=d=e">>}, {<<"f">>, <<"g">>}]},
+        {<<"a+b=c+d">>, [{<<"a b">>, <<"c d">>}]}
+    ],
+    Dec = fun cowboy_http:urldecode/1,
+    [{Input, fun() -> Expected = parse_qs(Input, Dec) end}
+        || {Input, Expected} <- Cases].
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc HTTP request manipulation API.
+%%
+%% Almost all functions in this module return a new <em>Req</em> variable.
+%% It should always be used instead of the one used in your function call
+%% because it keeps the state of the request. It also allows Cowboy to do
+%% some lazy evaluation and cache results where possible.
+-module(cowboy_http_req).
+
+-export([
+ method/1, version/1, peer/1, peer_addr/1,
+ host/1, host_info/1, raw_host/1, port/1,
+ path/1, path_info/1, raw_path/1,
+ qs_val/2, qs_val/3, qs_vals/1, raw_qs/1,
+ binding/2, binding/3, bindings/1,
+ header/2, header/3, headers/1,
+ parse_header/2, parse_header/3,
+ cookie/2, cookie/3, cookies/1,
+ meta/2, meta/3
+]). %% Request API.
+
+-export([
+ body/1, body/2, body_qs/1,
+ multipart_data/1, multipart_skip/1
+]). %% Request Body API.
+
+-export([
+ set_resp_cookie/4, set_resp_header/3, set_resp_body/2,
+ set_resp_body_fun/3, has_resp_header/2, has_resp_body/1,
+ reply/2, reply/3, reply/4,
+ chunked_reply/2, chunked_reply/3, chunk/2,
+ upgrade_reply/3
+]). %% Response API.
+
+-export([
+ compact/1, transport/1
+]). %% Misc API.
+
+-include("include/http.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%% Request API.
+
+%% @doc Return the HTTP method of the request.
+-spec method(#http_req{}) -> {cowboy_http:method(), #http_req{}}.
+method(Req=#http_req{method=Method}) ->
+    {Method, Req}.
+
+%% @doc Return the HTTP version used for the request.
+-spec version(#http_req{}) -> {cowboy_http:version(), #http_req{}}.
+version(Req=#http_req{version=Version}) ->
+    {Version, Req}.
+
+%% @doc Return the peer address and port number of the remote host.
+%% The result is cached in the request on first lookup.
+-spec peer(#http_req{}) -> {{inet:ip_address(), inet:ip_port()}, #http_req{}}.
+peer(Req=#http_req{peer=undefined, socket=Socket, transport=Transport}) ->
+    {ok, Peer} = Transport:peername(Socket),
+    {Peer, Req#http_req{peer=Peer}};
+peer(Req=#http_req{peer=Peer}) ->
+    {Peer, Req}.
+
+%% @doc Returns the peer address calculated from headers.
+%%
+%% Prefers X-Real-Ip, then the first entry of X-Forwarded-For, then the
+%% socket peer address.
+%% NOTE(review): these headers are client-controlled; trust them only
+%% behind a proxy that sets them. A malformed IP makes inet_parse fail
+%% and this function crash on badmatch -- confirm that is acceptable.
+-spec peer_addr(#http_req{}) -> {inet:ip_address(), #http_req{}}.
+peer_addr(Req = #http_req{}) ->
+ {RealIp, Req1} = header(<<"X-Real-Ip">>, Req),
+ {ForwardedForRaw, Req2} = header(<<"X-Forwarded-For">>, Req1),
+ {{PeerIp, _PeerPort}, Req3} = peer(Req2),
+ %% Keep everything up to the first comma (the client-most address).
+ ForwardedFor = case ForwardedForRaw of
+ undefined ->
+ undefined;
+ ForwardedForRaw ->
+ case re:run(ForwardedForRaw, "^(?<first_ip>[^\\,]+)",
+ [{capture, [first_ip], binary}]) of
+ {match, [FirstIp]} -> FirstIp;
+ _Any -> undefined
+ end
+ end,
+ {ok, PeerAddr} = if
+ is_binary(RealIp) -> inet_parse:address(binary_to_list(RealIp));
+ is_binary(ForwardedFor) -> inet_parse:address(binary_to_list(ForwardedFor));
+ true -> {ok, PeerIp}
+ end,
+ {PeerAddr, Req3}.
+
+%% @doc Return the tokens for the hostname requested.
+-spec host(#http_req{}) -> {cowboy_dispatcher:tokens(), #http_req{}}.
+host(Req=#http_req{host=Host}) ->
+    {Host, Req}.
+
+%% @doc Return the extra host information obtained from partially matching
+%% the hostname using <em>'...'</em>.
+-spec host_info(#http_req{})
+    -> {cowboy_dispatcher:tokens() | undefined, #http_req{}}.
+host_info(Req=#http_req{host_info=HostInfo}) ->
+    {HostInfo, Req}.
+
+%% @doc Return the raw host directly taken from the request.
+-spec raw_host(#http_req{}) -> {binary(), #http_req{}}.
+raw_host(Req=#http_req{raw_host=RawHost}) ->
+    {RawHost, Req}.
+
+%% @doc Return the port used for this request.
+-spec port(#http_req{}) -> {inet:ip_port(), #http_req{}}.
+port(Req=#http_req{port=Port}) ->
+    {Port, Req}.
+
+%% @doc Return the path segments for the path requested.
+%%
+%% Following RFC2396, this function may return path segments containing any
+%% character, including <em>/</em> if, and only if, a <em>/</em> was escaped
+%% and part of a path segment in the path requested.
+-spec path(#http_req{}) -> {cowboy_dispatcher:tokens(), #http_req{}}.
+path(Req=#http_req{path=Path}) ->
+    {Path, Req}.
+
+%% @doc Return the extra path information obtained from partially matching
+%% the path using <em>'...'</em>.
+-spec path_info(#http_req{})
+    -> {cowboy_dispatcher:tokens() | undefined, #http_req{}}.
+path_info(Req=#http_req{path_info=PathInfo}) ->
+    {PathInfo, Req}.
+
+%% @doc Return the raw path directly taken from the request.
+-spec raw_path(#http_req{}) -> {binary(), #http_req{}}.
+raw_path(Req=#http_req{raw_path=RawPath}) ->
+    {RawPath, Req}.
+
+%% @equiv qs_val(Name, Req, undefined)
+-spec qs_val(binary(), #http_req{})
+    -> {binary() | true | undefined, #http_req{}}.
+qs_val(Name, Req) when is_binary(Name) ->
+    qs_val(Name, Req, undefined).
+
+%% @doc Return the query string value for the given key, or a default if
+%% missing. The query string is parsed and cached on first access.
+%% Added the -spec that sibling accessors already carry.
+-spec qs_val(binary(), #http_req{}, any())
+    -> {binary() | true | any(), #http_req{}}.
+qs_val(Name, Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
+        urldecode={URLDecFun, URLDecArg}}, Default) when is_binary(Name) ->
+    QsVals = parse_qs(RawQs, fun(Bin) -> URLDecFun(Bin, URLDecArg) end),
+    qs_val(Name, Req#http_req{qs_vals=QsVals}, Default);
+qs_val(Name, Req, Default) ->
+    case lists:keyfind(Name, 1, Req#http_req.qs_vals) of
+        {Name, Value} -> {Value, Req};
+        false -> {Default, Req}
+    end.
+
+%% @doc Return the full list of query string values.
+%% The query string is parsed and cached on first access.
+-spec qs_vals(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
+qs_vals(Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
+        urldecode={URLDecFun, URLDecArg}}) ->
+    Decode = fun(Bin) -> URLDecFun(Bin, URLDecArg) end,
+    qs_vals(Req#http_req{qs_vals=parse_qs(RawQs, Decode)});
+qs_vals(Req=#http_req{qs_vals=QsVals}) ->
+    {QsVals, Req}.
+
+%% @doc Return the query string exactly as it appeared in the request.
+-spec raw_qs(#http_req{}) -> {binary(), #http_req{}}.
+raw_qs(Req=#http_req{raw_qs=RawQs}) ->
+    {RawQs, Req}.
+
+%% @equiv binding(Name, Req, undefined)
+-spec binding(atom(), #http_req{}) -> {binary() | undefined, #http_req{}}.
+binding(Name, Req) when is_atom(Name) ->
+    binding(Name, Req, undefined).
+
+%% @doc Return the binding value for the given key obtained when matching
+%% the host and path against the dispatch list, or a default if missing.
+%% Added the -spec that sibling accessors already carry.
+-spec binding(atom(), #http_req{}, any()) -> {binary() | any(), #http_req{}}.
+binding(Name, Req, Default) when is_atom(Name) ->
+    case lists:keyfind(Name, 1, Req#http_req.bindings) of
+        {Name, Value} -> {Value, Req};
+        false -> {Default, Req}
+    end.
+
+%% @doc Return the full list of binding values.
+-spec bindings(#http_req{}) -> {list({atom(), binary()}), #http_req{}}.
+bindings(Req=#http_req{bindings=Bindings}) ->
+    {Bindings, Req}.
+
+%% @equiv header(Name, Req, undefined)
+-spec header(atom() | binary(), #http_req{})
+    -> {binary() | undefined, #http_req{}}.
+header(Name, Req) when is_atom(Name) orelse is_binary(Name) ->
+    header(Name, Req, undefined).
+
+%% @doc Return the header value for the given key, or a default if missing.
+%% Added the -spec that sibling accessors already carry.
+-spec header(atom() | binary(), #http_req{}, any())
+    -> {binary() | any(), #http_req{}}.
+header(Name, Req, Default) when is_atom(Name) orelse is_binary(Name) ->
+    case lists:keyfind(Name, 1, Req#http_req.headers) of
+        {Name, Value} -> {Value, Req};
+        false -> {Default, Req}
+    end.
+
+%% @doc Return the full list of headers.
+-spec headers(#http_req{}) -> {cowboy_http:headers(), #http_req{}}.
+headers(Req=#http_req{headers=Headers}) ->
+    {Headers, Req}.
+
+%% @doc Semantically parse headers.
+%%
+%% When the value isn't found, a proper default value for the type
+%% returned is used as a return value.
+%% Previously parsed values are served from the p_headers cache.
+%% @see parse_header/3
+-spec parse_header(cowboy_http:header(), #http_req{})
+ -> {any(), #http_req{}} | {error, badarg}.
+parse_header(Name, Req=#http_req{p_headers=PHeaders}) ->
+ case lists:keyfind(Name, 1, PHeaders) of
+ false -> parse_header(Name, Req, parse_header_default(Name));
+ {Name, Value} -> {Value, Req}
+ end.
+
+%% @doc Default values for semantic header parsing.
+%% Connection defaults to an empty token list; everything else to undefined.
+-spec parse_header_default(cowboy_http:header()) -> any().
+parse_header_default('Connection') -> [];
+parse_header_default(_Name) -> undefined.
+
+%% @doc Semantically parse headers.
+%%
+%% When the header is unknown, the value is returned directly without parsing.
+-spec parse_header(cowboy_http:header(), #http_req{}, any())
+ -> {any(), #http_req{}} | {error, badarg}.
+parse_header(Name, Req, Default) when Name =:= 'Accept' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:list(Value, fun cowboy_http:media_range/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Accept-Charset' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:conneg/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Accept-Encoding' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:list(Value, fun cowboy_http:conneg/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Accept-Language' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:language_range/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Connection' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Content-Length' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:digits(Value)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Content-Type' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:content_type(Value)
+ end);
+parse_header(Name, Req, Default)
+ when Name =:= 'If-Match'; Name =:= 'If-None-Match' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:entity_tag_match(Value)
+ end);
+parse_header(Name, Req, Default)
+ when Name =:= 'If-Modified-Since'; Name =:= 'If-Unmodified-Since' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:http_date(Value)
+ end);
+parse_header(Name, Req, Default) when Name =:= 'Upgrade' ->
+ parse_header(Name, Req, Default,
+ fun (Value) ->
+ cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
+ end);
+%% NOTE(review): this catch-all clause returns a 3-tuple
+%% {undefined, Value, Req2}, which contradicts the -spec and the @doc
+%% above ({any(), #http_req{}}). Callers reaching this clause with an
+%% unknown header may depend on the 3-tuple shape — confirm before fixing.
+parse_header(Name, Req, Default) ->
+ {Value, Req2} = header(Name, Req, Default),
+ {undefined, Value, Req2}.
+
+%% Parse the raw header value with Fun and cache the outcome (parsed
+%% value, or Default when the header is absent) in p_headers.
+%% A parse failure yields {error, badarg} and is not cached.
+parse_header(Name, Req=#http_req{p_headers=PHeaders}, Default, Fun) ->
+ case header(Name, Req) of
+ {undefined, Req2} ->
+ {Default, Req2#http_req{p_headers=[{Name, Default}|PHeaders]}};
+ {Value, Req2} ->
+ case Fun(Value) of
+ {error, badarg} ->
+ {error, badarg};
+ P ->
+ {P, Req2#http_req{p_headers=[{Name, P}|PHeaders]}}
+ end
+ end.
+
+%% @equiv cookie(Name, Req, undefined)
+-spec cookie(binary(), #http_req{})
+ -> {binary() | true | undefined, #http_req{}}.
+cookie(Name, Req) when is_binary(Name) ->
+ cookie(Name, Req, undefined).
+
+%% @doc Return the cookie value for the given key, or a default if
+%% missing.
+%% Cookies are parsed from the Cookie header on first access and
+%% cached in the request record (an empty list when no header exists).
+cookie(Name, Req=#http_req{cookies=undefined}, Default) when is_binary(Name) ->
+ case header('Cookie', Req) of
+ {undefined, Req2} ->
+ {Default, Req2#http_req{cookies=[]}};
+ {RawCookie, Req2} ->
+ Cookies = cowboy_cookies:parse_cookie(RawCookie),
+ cookie(Name, Req2#http_req{cookies=Cookies}, Default)
+ end;
+cookie(Name, Req, Default) ->
+ case lists:keyfind(Name, 1, Req#http_req.cookies) of
+ {Name, Value} -> {Value, Req};
+ false -> {Default, Req}
+ end.
+
+%% @doc Return the full list of cookie values.
+-spec cookies(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
+cookies(Req=#http_req{cookies=undefined}) ->
+ case header('Cookie', Req) of
+ {undefined, Req2} ->
+ {[], Req2#http_req{cookies=[]}};
+ {RawCookie, Req2} ->
+ Cookies = cowboy_cookies:parse_cookie(RawCookie),
+ cookies(Req2#http_req{cookies=Cookies})
+ end;
+cookies(Req=#http_req{cookies=Cookies}) ->
+ {Cookies, Req}.
+
+%% @equiv meta(Name, Req, undefined)
+-spec meta(atom(), #http_req{}) -> {any() | undefined, #http_req{}}.
+meta(Name, Req) ->
+ meta(Name, Req, undefined).
+
+%% @doc Return metadata information about the request.
+%%
+%% Metadata information varies from one protocol to another. Websockets
+%% would define the protocol version here, while REST would use it to
+%% indicate which media type, language and charset were retained.
+-spec meta(atom(), #http_req{}, any()) -> {any(), #http_req{}}.
+meta(Name, Req, Default) ->
+ case lists:keyfind(Name, 1, Req#http_req.meta) of
+ {Name, Value} -> {Value, Req};
+ false -> {Default, Req}
+ end.
+
+%% Request Body API.
+
+%% @doc Return the full body sent with the request, or <em>{error, badarg}</em>
+%% if no <em>Content-Length</em> is available.
+%% @todo We probably want to allow a max length.
+%% @todo Add multipart support to this function.
+-spec body(#http_req{}) -> {ok, binary(), #http_req{}} | {error, atom()}.
+body(Req) ->
+ {Length, Req2} = cowboy_http_req:parse_header('Content-Length', Req),
+ case Length of
+ undefined -> {error, badarg};
+ {error, badarg} -> {error, badarg};
+ _Any ->
+ body(Length, Req2)
+ end.
+
+%% @doc Return <em>Length</em> bytes of the request body.
+%%
+%% You probably shouldn't be calling this function directly, as it expects the
+%% <em>Length</em> argument to be the full size of the body, and will consider
+%% the body to be fully read from the socket.
+%% Any bytes already buffered are consumed first; the remainder is read
+%% from the socket with a hard-coded 5000 ms timeout.
+%% @todo We probably want to configure the timeout.
+-spec body(non_neg_integer(), #http_req{})
+ -> {ok, binary(), #http_req{}} | {error, atom()}.
+body(Length, Req=#http_req{body_state=waiting, buffer=Buffer})
+ when is_integer(Length) andalso Length =< byte_size(Buffer) ->
+ << Body:Length/binary, Rest/bits >> = Buffer,
+ {ok, Body, Req#http_req{body_state=done, buffer=Rest}};
+body(Length, Req=#http_req{socket=Socket, transport=Transport,
+ body_state=waiting, buffer=Buffer}) ->
+ case Transport:recv(Socket, Length - byte_size(Buffer), 5000) of
+ {ok, Body} -> {ok, << Buffer/binary, Body/binary >>,
+ Req#http_req{body_state=done, buffer= <<>>}};
+ {error, Reason} -> {error, Reason}
+ end.
+
+%% @doc Return the full body sent with the request, parsed as an
+%% application/x-www-form-urlencoded string. Essentially a POST query string.
+-spec body_qs(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
+body_qs(Req=#http_req{urldecode={URLDecFun, URLDecArg}}) ->
+ {ok, Body, Req2} = body(Req),
+ {parse_qs(Body, fun(Bin) -> URLDecFun(Bin, URLDecArg) end), Req2}.
+
+%% Multipart Request API.
+
+%% @doc Return data from the multipart parser.
+%%
+%% Use this function for multipart streaming. For each part in the request,
+%% this function returns <em>{headers, Headers}</em> followed by a sequence of
+%% <em>{data, Data}</em> tuples and finally <em>end_of_part</em>. When there
+%% is no part to parse anymore, <em>eof</em> is returned.
+%%
+%% If the request Content-Type is not a multipart one, <em>{error, badarg}</em>
+%% is returned.
+-spec multipart_data(#http_req{})
+ -> {{headers, cowboy_http:headers()}
+ | {data, binary()} | end_of_part | eof,
+ #http_req{}}.
+multipart_data(Req=#http_req{body_state=waiting}) ->
+ {{<<"multipart">>, _SubType, Params}, Req2} =
+ parse_header('Content-Type', Req),
+ {_, Boundary} = lists:keyfind(<<"boundary">>, 1, Params),
+ {Length, Req3=#http_req{buffer=Buffer}} =
+ parse_header('Content-Length', Req2),
+ multipart_data(Req3, Length, cowboy_multipart:parser(Boundary), Buffer);
+multipart_data(Req=#http_req{body_state={multipart, Length, Cont}}) ->
+ %% Resume a parse in progress by invoking the stored continuation.
+ multipart_data(Req, Length, Cont());
+multipart_data(Req=#http_req{body_state=done}) ->
+ {eof, Req}.
+
+%% Feed buffered bytes to the parser, never consuming more than the
+%% remaining Content-Length from the buffer.
+multipart_data(Req, Length, Parser, Buffer) when byte_size(Buffer) >= Length ->
+ << Data:Length/binary, Rest/binary >> = Buffer,
+ multipart_data(Req#http_req{buffer=Rest}, 0, Parser(Data));
+multipart_data(Req, Length, Parser, Buffer) ->
+ NewLength = Length - byte_size(Buffer),
+ multipart_data(Req#http_req{buffer= <<>>}, NewLength, Parser(Buffer)).
+
+%% Translate parser results into return tuples, storing the continuation
+%% and remaining length in body_state; reads more from the socket
+%% (5000 ms timeout) when the parser asks for more data.
+multipart_data(Req, Length, {headers, Headers, Cont}) ->
+ {{headers, Headers}, Req#http_req{body_state={multipart, Length, Cont}}};
+%% NOTE(review): this emits {body, Data} while the @doc and -spec above
+%% say {data, binary()}. multipart_skip/1 matches any tuple so the
+%% mismatch is latent here — confirm against external callers.
+multipart_data(Req, Length, {body, Data, Cont}) ->
+ {{body, Data}, Req#http_req{body_state={multipart, Length, Cont}}};
+multipart_data(Req, Length, {end_of_part, Cont}) ->
+ {end_of_part, Req#http_req{body_state={multipart, Length, Cont}}};
+multipart_data(Req, 0, eof) ->
+ {eof, Req#http_req{body_state=done}};
+multipart_data(Req=#http_req{socket=Socket, transport=Transport},
+ Length, eof) ->
+ %% Drain and discard any epilogue bytes left on the socket.
+ {ok, _Data} = Transport:recv(Socket, Length, 5000),
+ {eof, Req#http_req{body_state=done}};
+multipart_data(Req=#http_req{socket=Socket, transport=Transport},
+ Length, {more, Parser}) when Length > 0 ->
+ case Transport:recv(Socket, 0, 5000) of
+ {ok, << Data:Length/binary, Buffer/binary >>} ->
+ multipart_data(Req#http_req{buffer=Buffer}, 0, Parser(Data));
+ {ok, Data} ->
+ multipart_data(Req, Length - byte_size(Data), Parser(Data))
+ end.
+
+%% @doc Skip a part returned by the multipart parser.
+%%
+%% This function repeatedly calls <em>multipart_data/1</em> until
+%% <em>end_of_part</em> or <em>eof</em> is parsed.
+multipart_skip(Req) ->
+ case multipart_data(Req) of
+ {end_of_part, Req2} -> {ok, Req2};
+ {eof, Req2} -> {ok, Req2};
+ {_Other, Req2} -> multipart_skip(Req2)
+ end.
+
+%% Response API.
+
+%% @doc Add a cookie header to the response.
+-spec set_resp_cookie(binary(), binary(), [cowboy_cookies:cookie_option()],
+ #http_req{}) -> {ok, #http_req{}}.
+set_resp_cookie(Name, Value, Options, Req) ->
+ {HeaderName, HeaderValue} = cowboy_cookies:cookie(Name, Value, Options),
+ set_resp_header(HeaderName, HeaderValue, Req).
+
+%% @doc Add a header to the response.
+%% Headers are simply prepended; duplicates are not removed here.
+set_resp_header(Name, Value, Req=#http_req{resp_headers=RespHeaders}) ->
+ NameBin = header_to_binary(Name),
+ {ok, Req#http_req{resp_headers=[{NameBin, Value}|RespHeaders]}}.
+
+%% @doc Add a body to the response.
+%%
+%% The body set here is ignored if the response is later sent using
+%% anything other than reply/2 or reply/3. The response body is expected
+%% to be a binary or an iolist.
+set_resp_body(Body, Req) ->
+ {ok, Req#http_req{resp_body=Body}}.
+
+
+%% @doc Add a body function to the response.
+%%
+%% The response body may also be set to a content-length - stream-function pair.
+%% If the response body is of this type normal response headers will be sent.
+%% After the response headers has been sent the body function is applied.
+%% The body function is expected to write the response body directly to the
+%% socket using the transport module.
+%%
+%% If the body function crashes while writing the response body or writes fewer
+%% bytes than declared the behaviour is undefined. The body set here is ignored
+%% if the response is later sent using anything other than `reply/2' or
+%% `reply/3'.
+%%
+%% @see cowboy_http_req:transport/1.
+-spec set_resp_body_fun(non_neg_integer(), fun(() -> {sent, non_neg_integer()}),
+ #http_req{}) -> {ok, #http_req{}}.
+set_resp_body_fun(StreamLen, StreamFun, Req) ->
+ {ok, Req#http_req{resp_body={StreamLen, StreamFun}}}.
+
+
+%% @doc Return whether the given header has been set for the response.
+has_resp_header(Name, #http_req{resp_headers=RespHeaders}) ->
+ NameBin = header_to_binary(Name),
+ lists:keymember(NameBin, 1, RespHeaders).
+
+%% @doc Return whether a body has been set for the response.
+%% The first clause matches the {Length, StreamFun} pair form set by
+%% set_resp_body_fun/3; the second handles binaries and iolists.
+has_resp_body(#http_req{resp_body={Length, _}}) ->
+ Length > 0;
+has_resp_body(#http_req{resp_body=RespBody}) ->
+ iolist_size(RespBody) > 0.
+
+%% @equiv reply(Status, [], [], Req)
+-spec reply(cowboy_http:status(), #http_req{}) -> {ok, #http_req{}}.
+reply(Status, Req=#http_req{resp_body=Body}) ->
+ reply(Status, [], Body, Req).
+
+%% @equiv reply(Status, Headers, [], Req)
+-spec reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
+ -> {ok, #http_req{}}.
+reply(Status, Headers, Req=#http_req{resp_body=Body}) ->
+ reply(Status, Headers, Body, Req).
+
+%% @doc Send a reply to the client.
+%% The resp_state=waiting match guarantees at most one reply is sent.
+%% HEAD requests get the head only; a {Length, StreamFun} body sends the
+%% head then lets the stream fun write to the socket directly.
+reply(Status, Headers, Body, Req=#http_req{socket=Socket,
+ transport=Transport, connection=Connection, pid=ReqPid,
+ method=Method, resp_state=waiting, resp_headers=RespHeaders}) ->
+ RespConn = response_connection(Headers, Connection),
+ ContentLen = case Body of {CL, _} -> CL; _ -> iolist_size(Body) end,
+ Head = response_head(Status, Headers, RespHeaders, [
+ {<<"Connection">>, atom_to_connection(Connection)},
+ {<<"Content-Length">>, integer_to_list(ContentLen)},
+ {<<"Date">>, cowboy_clock:rfc1123()},
+ {<<"Server">>, <<"Cowboy">>}
+ ]),
+ case {Method, Body} of
+ {'HEAD', _} -> Transport:send(Socket, Head);
+ {_, {_, StreamFun}} -> Transport:send(Socket, Head), StreamFun();
+ {_, _} -> Transport:send(Socket, [Head, Body])
+ end,
+ %% Notify the connection process that the response went out.
+ ReqPid ! {?MODULE, resp_sent},
+ {ok, Req#http_req{connection=RespConn, resp_state=done,
+ resp_headers=[], resp_body= <<>>}}.
+
+%% @equiv chunked_reply(Status, [], Req)
+-spec chunked_reply(cowboy_http:status(), #http_req{}) -> {ok, #http_req{}}.
+chunked_reply(Status, Req) ->
+ chunked_reply(Status, [], Req).
+
+%% @doc Initiate the sending of a chunked reply to the client.
+%% Sends the response head with Transfer-Encoding: chunked and switches
+%% resp_state to chunks so chunk/2 may be used afterwards.
+%% @see cowboy_http_req:chunk/2
+-spec chunked_reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
+ -> {ok, #http_req{}}.
+chunked_reply(Status, Headers, Req=#http_req{socket=Socket,
+ transport=Transport, connection=Connection, pid=ReqPid,
+ resp_state=waiting, resp_headers=RespHeaders}) ->
+ RespConn = response_connection(Headers, Connection),
+ Head = response_head(Status, Headers, RespHeaders, [
+ {<<"Connection">>, atom_to_connection(Connection)},
+ {<<"Transfer-Encoding">>, <<"chunked">>},
+ {<<"Date">>, cowboy_clock:rfc1123()},
+ {<<"Server">>, <<"Cowboy">>}
+ ]),
+ Transport:send(Socket, Head),
+ ReqPid ! {?MODULE, resp_sent},
+ {ok, Req#http_req{connection=RespConn, resp_state=chunks,
+ resp_headers=[], resp_body= <<>>}}.
+
+%% @doc Send a chunk of data.
+%%
+%% A chunked reply must have been initiated before calling this function.
+%% For HEAD requests chunks are silently dropped; otherwise the data is
+%% framed with its size in hexadecimal per the chunked transfer coding.
+chunk(_Data, #http_req{socket=_Socket, transport=_Transport, method='HEAD'}) ->
+ ok;
+chunk(Data, #http_req{socket=Socket, transport=Transport, resp_state=chunks}) ->
+ Transport:send(Socket, [erlang:integer_to_list(iolist_size(Data), 16),
+ <<"\r\n">>, Data, <<"\r\n">>]).
+
+%% @doc Send an upgrade reply.
+%% @private
+-spec upgrade_reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
+ -> {ok, #http_req{}}.
+upgrade_reply(Status, Headers, Req=#http_req{socket=Socket, transport=Transport,
+ pid=ReqPid, resp_state=waiting, resp_headers=RespHeaders}) ->
+ Head = response_head(Status, Headers, RespHeaders, [
+ {<<"Connection">>, <<"Upgrade">>}
+ ]),
+ Transport:send(Socket, Head),
+ ReqPid ! {?MODULE, resp_sent},
+ {ok, Req#http_req{resp_state=done, resp_headers=[], resp_body= <<>>}}.
+
+%% Misc API.
+
+%% @doc Compact the request data by removing all non-system information.
+%%
+%% This essentially removes the host, path, query string, bindings and headers.
+%% Use it when you really need to save up memory, for example when having
+%% many concurrent long-running connections.
+-spec compact(#http_req{}) -> #http_req{}.
+compact(Req) ->
+ Req#http_req{host=undefined, host_info=undefined, path=undefined,
+ path_info=undefined, qs_vals=undefined,
+ bindings=undefined, headers=[],
+ p_headers=[], cookies=[]}.
+
+%% @doc Return the transport module and socket associated with a request.
+%%
+%% This exposes the same socket interface used internally by the HTTP protocol
+%% implementation to developers that need low level access to the socket.
+%%
+%% It is preferred to use this in conjunction with the stream function support
+%% in `set_resp_body_fun/3' if this is used to write a response body directly
+%% to the socket. This ensures that the response headers are set correctly.
+-spec transport(#http_req{}) -> {ok, module(), inet:socket()}.
+transport(#http_req{transport=Transport, socket=Socket}) ->
+ {ok, Transport, Socket}.
+
+%% Internal.
+
+%% Split a raw query string into key/value pairs. A token carrying no
+%% <em>=</em> separator maps to the atom true; keys and values are both
+%% passed through the URLDecode fun.
+-spec parse_qs(binary(), fun((binary()) -> binary())) ->
+ list({binary(), binary() | true}).
+parse_qs(<<>>, _URLDecode) ->
+ [];
+parse_qs(Qs, URLDecode) ->
+ Pairs = binary:split(Qs, <<"&">>, [global, trim]),
+ lists:map(fun (Pair) ->
+ case binary:split(Pair, <<"=">>) of
+ [Pair] -> {URLDecode(Pair), true};
+ [Key, Val] -> {URLDecode(Key), URLDecode(Val)}
+ end
+ end, Pairs).
+
+%% Scan user-supplied response headers for a Connection header and, if
+%% present, let it override the current keepalive/close decision.
+%% Header names may be known-header atoms or raw binaries; binary names
+%% are lowercased before comparison.
+-spec response_connection(cowboy_http:headers(), keepalive | close)
+ -> keepalive | close.
+response_connection([], Connection) ->
+ Connection;
+response_connection([{Name, Value}|Tail], Connection) ->
+ case Name of
+ 'Connection' -> response_connection_parse(Value);
+ Name when is_atom(Name) -> response_connection(Tail, Connection);
+ Name ->
+ Name2 = cowboy_bstr:to_lower(Name),
+ case Name2 of
+ <<"connection">> -> response_connection_parse(Value);
+ _Any -> response_connection(Tail, Connection)
+ end
+ end.
+
+%% Parse a Connection header value into the keepalive/close atom.
+-spec response_connection_parse(binary()) -> keepalive | close.
+response_connection_parse(ReplyConn) ->
+ Tokens = cowboy_http:nonempty_list(ReplyConn, fun cowboy_http:token/2),
+ cowboy_http:connection_to_atom(Tokens).
+
+%% Build the full response head as an iolist.
+%% Precedence when names collide: the per-reply Headers win over
+%% previously set RespHeaders, which win over DefaultHeaders.
+-spec response_head(cowboy_http:status(), cowboy_http:headers(),
+ cowboy_http:headers(), cowboy_http:headers()) -> iolist().
+response_head(Status, Headers, RespHeaders, DefaultHeaders) ->
+ StatusLine = <<"HTTP/1.1 ", (status(Status))/binary, "\r\n">>,
+ Headers2 = [{header_to_binary(Key), Value} || {Key, Value} <- Headers],
+ Headers3 = merge_headers(
+ merge_headers(Headers2, RespHeaders),
+ DefaultHeaders),
+ Headers4 = [[Key, <<": ">>, Value, <<"\r\n">>]
+ || {Key, Value} <- Headers3],
+ [StatusLine, Headers4, <<"\r\n">>].
+
+%% Merge the second header list into the first: an entry is appended
+%% only when no header with the same name already exists, so entries
+%% already present always win and keep their original order.
+-spec merge_headers(cowboy_http:headers(), cowboy_http:headers())
+ -> cowboy_http:headers().
+merge_headers(Headers, Defaults) ->
+ lists:foldl(fun ({Name, Value}, Acc) ->
+ case lists:keymember(Name, 1, Acc) of
+ true -> Acc;
+ false -> Acc ++ [{Name, Value}]
+ end
+ end, Headers, Defaults).
+
+%% Map the internal connection atom to its on-the-wire header value.
+-spec atom_to_connection(keepalive) -> <<_:80>>;
+ (close) -> <<_:40>>.
+atom_to_connection(keepalive) -> <<"keep-alive">>;
+atom_to_connection(close) -> <<"close">>.
+
+%% Map a numeric HTTP status code to its full status line; binary
+%% statuses are passed through untouched, allowing custom status lines.
+-spec status(cowboy_http:status()) -> binary().
+status(100) -> <<"100 Continue">>;
+status(101) -> <<"101 Switching Protocols">>;
+status(102) -> <<"102 Processing">>;
+status(200) -> <<"200 OK">>;
+status(201) -> <<"201 Created">>;
+status(202) -> <<"202 Accepted">>;
+status(203) -> <<"203 Non-Authoritative Information">>;
+status(204) -> <<"204 No Content">>;
+status(205) -> <<"205 Reset Content">>;
+status(206) -> <<"206 Partial Content">>;
+status(207) -> <<"207 Multi-Status">>;
+status(226) -> <<"226 IM Used">>;
+status(300) -> <<"300 Multiple Choices">>;
+status(301) -> <<"301 Moved Permanently">>;
+status(302) -> <<"302 Found">>;
+status(303) -> <<"303 See Other">>;
+status(304) -> <<"304 Not Modified">>;
+status(305) -> <<"305 Use Proxy">>;
+status(306) -> <<"306 Switch Proxy">>;
+status(307) -> <<"307 Temporary Redirect">>;
+status(400) -> <<"400 Bad Request">>;
+status(401) -> <<"401 Unauthorized">>;
+status(402) -> <<"402 Payment Required">>;
+status(403) -> <<"403 Forbidden">>;
+status(404) -> <<"404 Not Found">>;
+status(405) -> <<"405 Method Not Allowed">>;
+status(406) -> <<"406 Not Acceptable">>;
+status(407) -> <<"407 Proxy Authentication Required">>;
+status(408) -> <<"408 Request Timeout">>;
+status(409) -> <<"409 Conflict">>;
+status(410) -> <<"410 Gone">>;
+status(411) -> <<"411 Length Required">>;
+status(412) -> <<"412 Precondition Failed">>;
+status(413) -> <<"413 Request Entity Too Large">>;
+status(414) -> <<"414 Request-URI Too Long">>;
+status(415) -> <<"415 Unsupported Media Type">>;
+status(416) -> <<"416 Requested Range Not Satisfiable">>;
+status(417) -> <<"417 Expectation Failed">>;
+status(418) -> <<"418 I'm a teapot">>;
+status(422) -> <<"422 Unprocessable Entity">>;
+status(423) -> <<"423 Locked">>;
+status(424) -> <<"424 Failed Dependency">>;
+status(425) -> <<"425 Unordered Collection">>;
+status(426) -> <<"426 Upgrade Required">>;
+status(500) -> <<"500 Internal Server Error">>;
+status(501) -> <<"501 Not Implemented">>;
+status(502) -> <<"502 Bad Gateway">>;
+status(503) -> <<"503 Service Unavailable">>;
+status(504) -> <<"504 Gateway Timeout">>;
+status(505) -> <<"505 HTTP Version Not Supported">>;
+status(506) -> <<"506 Variant Also Negotiates">>;
+status(507) -> <<"507 Insufficient Storage">>;
+status(510) -> <<"510 Not Extended">>;
+status(B) when is_binary(B) -> B.
+
+%% Convert a known-header atom to its canonical binary name; binary
+%% names are passed through untouched.
+-spec header_to_binary(cowboy_http:header()) -> binary().
+header_to_binary('Cache-Control') -> <<"Cache-Control">>;
+header_to_binary('Connection') -> <<"Connection">>;
+header_to_binary('Date') -> <<"Date">>;
+header_to_binary('Pragma') -> <<"Pragma">>;
+header_to_binary('Transfer-Encoding') -> <<"Transfer-Encoding">>;
+header_to_binary('Upgrade') -> <<"Upgrade">>;
+header_to_binary('Via') -> <<"Via">>;
+header_to_binary('Accept') -> <<"Accept">>;
+header_to_binary('Accept-Charset') -> <<"Accept-Charset">>;
+header_to_binary('Accept-Encoding') -> <<"Accept-Encoding">>;
+header_to_binary('Accept-Language') -> <<"Accept-Language">>;
+header_to_binary('Authorization') -> <<"Authorization">>;
+header_to_binary('From') -> <<"From">>;
+header_to_binary('Host') -> <<"Host">>;
+header_to_binary('If-Modified-Since') -> <<"If-Modified-Since">>;
+header_to_binary('If-Match') -> <<"If-Match">>;
+header_to_binary('If-None-Match') -> <<"If-None-Match">>;
+header_to_binary('If-Range') -> <<"If-Range">>;
+header_to_binary('If-Unmodified-Since') -> <<"If-Unmodified-Since">>;
+header_to_binary('Max-Forwards') -> <<"Max-Forwards">>;
+header_to_binary('Proxy-Authorization') -> <<"Proxy-Authorization">>;
+header_to_binary('Range') -> <<"Range">>;
+header_to_binary('Referer') -> <<"Referer">>;
+header_to_binary('User-Agent') -> <<"User-Agent">>;
+header_to_binary('Age') -> <<"Age">>;
+header_to_binary('Location') -> <<"Location">>;
+header_to_binary('Proxy-Authenticate') -> <<"Proxy-Authenticate">>;
+header_to_binary('Public') -> <<"Public">>;
+header_to_binary('Retry-After') -> <<"Retry-After">>;
+header_to_binary('Server') -> <<"Server">>;
+header_to_binary('Vary') -> <<"Vary">>;
+header_to_binary('Warning') -> <<"Warning">>;
+header_to_binary('Www-Authenticate') -> <<"Www-Authenticate">>;
+header_to_binary('Allow') -> <<"Allow">>;
+header_to_binary('Content-Base') -> <<"Content-Base">>;
+header_to_binary('Content-Encoding') -> <<"Content-Encoding">>;
+header_to_binary('Content-Language') -> <<"Content-Language">>;
+header_to_binary('Content-Length') -> <<"Content-Length">>;
+header_to_binary('Content-Location') -> <<"Content-Location">>;
+header_to_binary('Content-Md5') -> <<"Content-Md5">>;
+header_to_binary('Content-Range') -> <<"Content-Range">>;
+header_to_binary('Content-Type') -> <<"Content-Type">>;
+header_to_binary('Etag') -> <<"Etag">>;
+header_to_binary('Expires') -> <<"Expires">>;
+header_to_binary('Last-Modified') -> <<"Last-Modified">>;
+header_to_binary('Accept-Ranges') -> <<"Accept-Ranges">>;
+header_to_binary('Set-Cookie') -> <<"Set-Cookie">>;
+header_to_binary('Set-Cookie2') -> <<"Set-Cookie2">>;
+header_to_binary('X-Forwarded-For') -> <<"X-Forwarded-For">>;
+header_to_binary('Cookie') -> <<"Cookie">>;
+header_to_binary('Keep-Alive') -> <<"Keep-Alive">>;
+header_to_binary('Proxy-Connection') -> <<"Proxy-Connection">>;
+header_to_binary(B) when is_binary(B) -> B.
+
+%% Tests.
+
+-ifdef(TEST).
+
+%% EUnit generator: each {Qs, Result} pair becomes a named test.
+%% R is bound by the list-comprehension generator, so the R = parse_qs(...)
+%% match inside the fun is the assertion itself.
+parse_qs_test_() ->
+ %% {Qs, Result}
+ Tests = [
+ {<<"">>, []},
+ {<<"a=b">>, [{<<"a">>, <<"b">>}]},
+ {<<"aaa=bbb">>, [{<<"aaa">>, <<"bbb">>}]},
+ {<<"a&b">>, [{<<"a">>, true}, {<<"b">>, true}]},
+ {<<"a=b&c&d=e">>, [{<<"a">>, <<"b">>},
+ {<<"c">>, true}, {<<"d">>, <<"e">>}]},
+ {<<"a=b=c=d=e&f=g">>, [{<<"a">>, <<"b=c=d=e">>}, {<<"f">>, <<"g">>}]},
+ {<<"a+b=c+d">>, [{<<"a b">>, <<"c d">>}]}
+ ],
+ URLDecode = fun cowboy_http:urldecode/1,
+ [{Qs, fun() -> R = parse_qs(Qs, URLDecode) end} || {Qs, R} <- Tests].
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Experimental REST protocol implementation.
+%%
+%% Based on the Webmachine Diagram from Alan Dean and Justin Sheehy, which
+%% can be found in the Webmachine source tree, and on the Webmachine
+%% documentation available at http://wiki.basho.com/Webmachine.html
+%% at the time of writing.
+-module(cowboy_http_rest).
+-export([upgrade/4]).
+
+%% Per-request REST state machine context.
+-record(state, {
+ %% Handler.
+ handler :: atom(),
+ handler_state :: any(),
+
+ %% Media type.
+ %% content_types_p: types the resource can produce; content_type_a:
+ %% the one accepted after negotiation, with its callback.
+ content_types_p = [] ::
+ [{{binary(), binary(), [{binary(), binary()}]}, atom()}],
+ content_type_a :: undefined
+ | {{binary(), binary(), [{binary(), binary()}]}, atom()},
+
+ %% Language.
+ languages_p = [] :: [binary()],
+ language_a :: undefined | binary(),
+
+ %% Charset.
+ charsets_p = [] :: [binary()],
+ charset_a :: undefined | binary(),
+
+ %% Cached resource calls.
+ %% no_call presumably marks a callback the handler does not export —
+ %% TODO confirm against the call/3 helper defined later in this file.
+ etag :: undefined | no_call | binary(),
+ last_modified :: undefined | no_call | calendar:datetime(),
+ expires :: undefined | no_call | calendar:datetime()
+}).
+
+-include("include/http.hrl").
+
+%% @doc Upgrade a HTTP request to the REST protocol.
+%%
+%% You do not need to call this function manually. To upgrade to the REST
+%% protocol, you simply need to return <em>{upgrade, protocol, {@module}}</em>
+%% in your <em>cowboy_http_handler:init/3</em> handler function.
+-spec upgrade(pid(), module(), any(), #http_req{})
+ -> {ok, #http_req{}} | close.
+upgrade(_ListenerPid, Handler, Opts, Req) ->
+ try
+ case erlang:function_exported(Handler, rest_init, 2) of
+ true ->
+ case Handler:rest_init(Req, Opts) of
+ {ok, Req2, HandlerState} ->
+ service_available(Req2, #state{handler=Handler,
+ handler_state=HandlerState})
+ end;
+ false ->
+ service_available(Req, #state{handler=Handler})
+ end
+ %% NOTE(review): the try wraps the whole REST decision flow, not just
+ %% rest_init, so the "terminating in rest_init/3" log text can be
+ %% misleading. erlang:get_stacktrace/0 is deprecated on modern OTP;
+ %% a Class:Reason:Stacktrace pattern would be needed there.
+ catch Class:Reason ->
+ error_logger:error_msg(
+ "** Handler ~p terminating in rest_init/3~n"
+ " for the reason ~p:~p~n** Options were ~p~n"
+ "** Request was ~p~n** Stacktrace: ~p~n~n",
+ [Handler, Class, Reason, Opts, Req, erlang:get_stacktrace()]),
+ {ok, _Req2} = cowboy_http_req:reply(500, Req),
+ close
+ end.
+
+%% First node of the Webmachine-style decision graph; expect/6 (defined
+%% later in this file) runs the callback and branches on its result.
+service_available(Req, State) ->
+ expect(Req, State, service_available, true, fun known_methods/2, 503).
+
+%% known_methods/2 should return a list of atoms or binary methods.
+%% Without the callback, the standard HTTP/1.1 method set is assumed.
+known_methods(Req=#http_req{method=Method}, State) ->
+ case call(Req, State, known_methods) of
+ no_call when Method =:= 'HEAD'; Method =:= 'GET'; Method =:= 'POST';
+ Method =:= 'PUT'; Method =:= 'DELETE'; Method =:= 'TRACE';
+ Method =:= 'CONNECT'; Method =:= 'OPTIONS' ->
+ next(Req, State, fun uri_too_long/2);
+ no_call ->
+ next(Req, State, 501);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {List, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ case lists:member(Method, List) of
+ true -> next(Req2, State2, fun uri_too_long/2);
+ false -> next(Req2, State2, 501)
+ end
+ end.
+
+uri_too_long(Req, State) ->
+ expect(Req, State, uri_too_long, false, fun allowed_methods/2, 414).
+
+%% allowed_methods/2 should return a list of atoms or binary methods.
+%% Without the callback, only GET and HEAD are allowed.
+allowed_methods(Req=#http_req{method=Method}, State) ->
+ case call(Req, State, allowed_methods) of
+ no_call when Method =:= 'HEAD'; Method =:= 'GET' ->
+ next(Req, State, fun malformed_request/2);
+ no_call ->
+ method_not_allowed(Req, State, ['GET', 'HEAD']);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {List, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ case lists:member(Method, List) of
+ true -> next(Req2, State2, fun malformed_request/2);
+ false -> method_not_allowed(Req2, State2, List)
+ end
+ end.
+
+%% Reply 405 with an Allow header listing the permitted methods.
+method_not_allowed(Req, State, Methods) ->
+ {ok, Req2} = cowboy_http_req:set_resp_header(
+ <<"Allow">>, method_not_allowed_build(Methods, []), Req),
+ respond(Req2, State, 405).
+
+%% Build the Allow header value: method names joined with ", ".
+%% Atom methods are converted to binaries; the result is an iolist,
+%% or <<>> when no method is allowed at all.
+method_not_allowed_build([], []) ->
+ <<>>;
+method_not_allowed_build([], [_Sep|Acc]) ->
+ %% Drop the separator prepended before the last method, then restore order.
+ lists:reverse(Acc);
+method_not_allowed_build([Method|Rest], Acc) ->
+ Bin = if
+ is_atom(Method) -> list_to_binary(atom_to_list(Method));
+ true -> Method
+ end,
+ method_not_allowed_build(Rest, [<<", ">>, Bin|Acc]).
+
+malformed_request(Req, State) ->
+ expect(Req, State, malformed_request, false, fun is_authorized/2, 400).
+
+%% is_authorized/2 should return true or {false, WwwAuthenticateHeader}.
+%% On {false, _} a 401 is sent with the given Www-Authenticate value.
+is_authorized(Req, State) ->
+ case call(Req, State, is_authorized) of
+ no_call ->
+ forbidden(Req, State);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {true, Req2, HandlerState} ->
+ forbidden(Req2, State#state{handler_state=HandlerState});
+ {{false, AuthHead}, Req2, HandlerState} ->
+ {ok, Req3} = cowboy_http_req:set_resp_header(
+ <<"Www-Authenticate">>, AuthHead, Req2),
+ respond(Req3, State#state{handler_state=HandlerState}, 401)
+ end.
+
+forbidden(Req, State) ->
+ expect(Req, State, forbidden, false, fun valid_content_headers/2, 403).
+
+valid_content_headers(Req, State) ->
+ expect(Req, State, valid_content_headers, true,
+ fun known_content_type/2, 501).
+
+known_content_type(Req, State) ->
+ expect(Req, State, known_content_type, true,
+ fun valid_entity_length/2, 413).
+
+valid_entity_length(Req, State) ->
+ expect(Req, State, valid_entity_length, true, fun options/2, 413).
+
+%% If you need to add additional headers to the response at this point,
+%% you should do it directly in the options/2 call using set_resp_headers.
+%% OPTIONS requests end here with a 200; everything else proceeds to
+%% content negotiation.
+options(Req=#http_req{method='OPTIONS'}, State) ->
+ case call(Req, State, options) of
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {ok, Req2, HandlerState} ->
+ respond(Req2, State#state{handler_state=HandlerState}, 200)
+ end;
+options(Req, State) ->
+ content_types_provided(Req, State).
+
+%% content_types_provided/2 should return a list of content types and their
+%% associated callback function as a tuple: {{Type, SubType, Params}, Fun}.
+%% Type and SubType are the media type as binary. Params is a list of
+%% Key/Value tuple, with Key and Value a binary. Fun is the name of the
+%% callback that will be used to return the content of the response. It is
+%% given as an atom.
+%%
+%% An example of such return value would be:
+%% {{<<"text">>, <<"html">>, []}, to_html}
+%%
+%% Note that it is also possible to return a binary content type that will
+%% then be parsed by Cowboy. However note that while this may make your
+%% resources a little more readable, this is a lot less efficient. An example
+%% of such a return value would be:
+%% {<<"text/html">>, to_html}
+%% Entry point of content negotiation. With no Accept header the first
+%% provided type wins and is stored in the request metadata; otherwise
+%% the parsed Accept values are sorted by priority and matched against
+%% the provided list.
+content_types_provided(Req=#http_req{meta=Meta}, State) ->
+ case call(Req, State, content_types_provided) of
+ no_call ->
+ %% No callback: nothing can be provided, reply 406.
+ not_acceptable(Req, State);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {[], Req2, HandlerState} ->
+ not_acceptable(Req2, State#state{handler_state=HandlerState});
+ {CTP, Req2, HandlerState} ->
+ %% Binary content types are parsed into tuple form first.
+ CTP2 = [normalize_content_types_provided(P) || P <- CTP],
+ State2 = State#state{
+ handler_state=HandlerState, content_types_p=CTP2},
+ {Accept, Req3} = cowboy_http_req:parse_header('Accept', Req2),
+ case Accept of
+ undefined ->
+ {PMT, _Fun} = HeadCTP = hd(CTP2),
+ languages_provided(
+ Req3#http_req{meta=[{media_type, PMT}|Meta]},
+ State2#state{content_type_a=HeadCTP});
+ Accept ->
+ Accept2 = prioritize_accept(Accept),
+ choose_media_type(Req3, State2, Accept2)
+ end
+ end.
+
+%% Accept both the {{Type, SubType, Params}, Fun} tuple form and a raw
+%% binary such as <<"text/html">>, parsing the latter into tuple form.
+normalize_content_types_provided({ContentType, Handler})
+ when is_binary(ContentType) ->
+ {cowboy_http:content_type(ContentType), Handler};
+normalize_content_types_provided(Provided) ->
+ Provided.
+
+%% Sort the parsed Accept values by descending quality; within one
+%% quality level, media type precedence decides the order.
+prioritize_accept(Accept) ->
+ Compare =
+ fun ({MediaTypeA, Quality, _AcceptParamsA},
+ {MediaTypeB, Quality, _AcceptParamsB}) ->
+ %% Equal quality: fall back to media type specificity.
+ prioritize_mediatype(MediaTypeA, MediaTypeB);
+ ({_MediaTypeA, QualityA, _AcceptParamsA},
+ {_MediaTypeB, QualityB, _AcceptParamsB}) ->
+ QualityA > QualityB
+ end,
+ lists:sort(Compare, Accept).
+
+%% Media ranges can be overridden by more specific media ranges or
+%% specific media types. If more than one media range applies to a given
+%% type, the most specific reference has precedence.
+%%
+%% We always choose B over A when we can't decide between the two.
+%% Returns true when media type A takes precedence over B. Expressed as
+%% function clauses: identical type/subtype compares parameter counts
+%% (more parameters is more specific); a wildcard on B's side yields to
+%% the more specific A; otherwise B wins, matching the rule that we
+%% choose B over A when undecided.
+prioritize_mediatype({Type, SubType, ParamsA}, {Type, SubType, ParamsB}) ->
+ length(ParamsA) > length(ParamsB);
+prioritize_mediatype({Type, _SubTypeA, _ParamsA}, {Type, <<"*">>, _ParamsB}) ->
+ true;
+prioritize_mediatype({Type, _SubTypeA, _ParamsA}, {Type, _SubTypeB, _ParamsB}) ->
+ false;
+prioritize_mediatype(_MediaTypeA, {<<"*">>, _SubTypeB, _ParamsB}) ->
+ true;
+prioritize_mediatype(_MediaTypeA, _MediaTypeB) ->
+ false.
+
+%% Ignoring the rare AcceptParams. Not sure what should be done about them.
+%% Walk the prioritized Accept list; when it is exhausted without a
+%% match, reply 406 Not Acceptable.
+choose_media_type(Req, State, []) ->
+ not_acceptable(Req, State);
+choose_media_type(Req, State=#state{content_types_p=CTP},
+ [MediaType|Tail]) ->
+ match_media_type(Req, State, Tail, CTP, MediaType).
+
+%% Match one accepted media type against the provided list. */* matches
+%% anything; otherwise the types must be equal and the subtypes equal or
+%% the accepted subtype a wildcard; parameters are compared separately.
+match_media_type(Req, State, Accept, [], _MediaType) ->
+ choose_media_type(Req, State, Accept);
+match_media_type(Req, State, Accept, CTP,
+ MediaType = {{<<"*">>, <<"*">>, _Params_A}, _QA, _APA}) ->
+ match_media_type_params(Req, State, Accept, CTP, MediaType);
+match_media_type(Req, State, Accept,
+ CTP = [{{Type, SubType_P, _PP}, _Fun}|_Tail],
+ MediaType = {{Type, SubType_A, _PA}, _QA, _APA})
+ when SubType_P =:= SubType_A; SubType_A =:= <<"*">> ->
+ match_media_type_params(Req, State, Accept, CTP, MediaType);
+match_media_type(Req, State, Accept, [_Any|Tail], MediaType) ->
+ match_media_type(Req, State, Accept, Tail, MediaType).
+
+%% The media type parameters must match exactly (order-insensitive, via
+%% sorted comparison). On success, record the chosen type in the request
+%% metadata and move on to language negotiation.
+match_media_type_params(Req=#http_req{meta=Meta}, State, Accept,
+ [Provided = {PMT = {_TP, _STP, Params_P}, _Fun}|Tail],
+ MediaType = {{_TA, _STA, Params_A}, _QA, _APA}) ->
+ case lists:sort(Params_P) =:= lists:sort(Params_A) of
+ true ->
+ languages_provided(Req#http_req{meta=[{media_type, PMT}|Meta]},
+ State#state{content_type_a=Provided});
+ false ->
+ match_media_type(Req, State, Accept, Tail, MediaType)
+ end.
+
+%% languages_provided should return a list of binary values indicating
+%% which languages are accepted by the resource.
+%%
+%% @todo I suppose we should also ask the resource if it wants to
+%% set a language itself or if it wants it to be automatically chosen.
+%% Language negotiation, mirroring content_types_provided/2: no callback
+%% skips the step, an empty list replies 406, and the Accept-Language
+%% header (when present) is prioritized and matched.
+languages_provided(Req, State) ->
+ case call(Req, State, languages_provided) of
+ no_call ->
+ charsets_provided(Req, State);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {[], Req2, HandlerState} ->
+ not_acceptable(Req2, State#state{handler_state=HandlerState});
+ {LP, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState, languages_p=LP},
+ {AcceptLanguage, Req3} =
+ cowboy_http_req:parse_header('Accept-Language', Req2),
+ case AcceptLanguage of
+ undefined ->
+ %% No preference given: first provided language wins.
+ set_language(Req3, State2#state{language_a=hd(LP)});
+ AcceptLanguage ->
+ AcceptLanguage2 = prioritize_languages(AcceptLanguage),
+ choose_language(Req3, State2, AcceptLanguage2)
+ end
+ end.
+
+%% A language-range matches a language-tag if it exactly equals the tag,
+%% or if it exactly equals a prefix of the tag such that the first tag
+%% character following the prefix is "-". The special range "*", if
+%% present in the Accept-Language field, matches every tag not matched
+%% by any other range present in the Accept-Language field.
+%%
+%% @todo The last sentence probably means we should always put '*'
+%% at the end of the list.
+%% Order the parsed Accept-Language values by descending quality.
+prioritize_languages(AcceptLanguages) ->
+ ByQuality = fun ({_TagA, QualityA}, {_TagB, QualityB}) ->
+ QualityA > QualityB
+ end,
+ lists:sort(ByQuality, AcceptLanguages).
+
+%% Walk the prioritized Accept-Language list; exhaustion replies 406.
+choose_language(Req, State, []) ->
+ not_acceptable(Req, State);
+choose_language(Req, State=#state{languages_p=LP}, [Language|Tail]) ->
+ match_language(Req, State, Tail, LP, Language).
+
+%% Match one language range against the provided tags: '*' takes the
+%% first provided tag, an exact match wins, and a range also matches any
+%% tag it prefixes when the next character in the tag is a dash
+%% (e.g. "en" matches "en-US").
+match_language(Req, State, Accept, [], _Language) ->
+ choose_language(Req, State, Accept);
+match_language(Req, State, _Accept, [Provided|_Tail], {'*', _Quality}) ->
+ set_language(Req, State#state{language_a=Provided});
+match_language(Req, State, _Accept, [Provided|_Tail], {Provided, _Quality}) ->
+ set_language(Req, State#state{language_a=Provided});
+match_language(Req, State, Accept, [Provided|Tail],
+ Language = {Tag, _Quality}) ->
+ Length = byte_size(Tag),
+ case Provided of
+ << Tag:Length/binary, $-, _Any/bits >> ->
+ set_language(Req, State#state{language_a=Provided});
+ _Any ->
+ match_language(Req, State, Accept, Tail, Language)
+ end.
+
+%% Record the chosen language in the Content-Language response header
+%% and the request metadata, then continue with charset negotiation.
+set_language(Req=#http_req{meta=Meta}, State=#state{language_a=Language}) ->
+ {ok, Req2} = cowboy_http_req:set_resp_header(
+ <<"Content-Language">>, Language, Req),
+ charsets_provided(Req2#http_req{meta=[{language, Language}|Meta]}, State).
+
+%% charsets_provided should return a list of binary values indicating
+%% which charsets are accepted by the resource.
+%% Charset negotiation, same shape as the language step: no callback
+%% skips it, an empty list replies 406, otherwise Accept-Charset (when
+%% present) is prioritized and matched against the provided charsets.
+charsets_provided(Req, State) ->
+ case call(Req, State, charsets_provided) of
+ no_call ->
+ set_content_type(Req, State);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {[], Req2, HandlerState} ->
+ not_acceptable(Req2, State#state{handler_state=HandlerState});
+ {CP, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState, charsets_p=CP},
+ {AcceptCharset, Req3} =
+ cowboy_http_req:parse_header('Accept-Charset', Req2),
+ case AcceptCharset of
+ undefined ->
+ %% No preference given: first provided charset wins.
+ set_content_type(Req3, State2#state{charset_a=hd(CP)});
+ AcceptCharset ->
+ AcceptCharset2 = prioritize_charsets(AcceptCharset),
+ choose_charset(Req3, State2, AcceptCharset2)
+ end
+ end.
+
+%% The special value "*", if present in the Accept-Charset field,
+%% matches every character set (including ISO-8859-1) which is not
+%% mentioned elsewhere in the Accept-Charset field. If no "*" is present
+%% in an Accept-Charset field, then all character sets not explicitly
+%% mentioned get a quality value of 0, except for ISO-8859-1, which gets
+%% a quality value of 1 if not explicitly mentioned.
+%% Sort the parsed Accept-Charset values by descending quality, then
+%% give iso-8859-1 an implicit quality of 1 unless a "*" entry already
+%% covers every unlisted charset.
+prioritize_charsets(AcceptCharsets) ->
+ Sorted = lists:sort(
+ fun ({_CharsetA, QualityA}, {_CharsetB, QualityB}) ->
+ QualityA > QualityB
+ end, AcceptCharsets),
+ case lists:keymember(<<"*">>, 1, Sorted) of
+ false -> [{<<"iso-8859-1">>, 1000}|Sorted];
+ true -> Sorted
+ end.
+
+%% Walk the prioritized Accept-Charset list; exhaustion replies 406.
+choose_charset(Req, State, []) ->
+ not_acceptable(Req, State);
+choose_charset(Req, State=#state{charsets_p=CP}, [Charset|Tail]) ->
+ match_charset(Req, State, Tail, CP, Charset).
+
+%% Only exact charset matches are accepted here; note that "*" from the
+%% header is not special-cased against the provided list.
+match_charset(Req, State, Accept, [], _Charset) ->
+ choose_charset(Req, State, Accept);
+match_charset(Req, State, _Accept, [Provided|_Tail],
+ {Provided, _Quality}) ->
+ set_content_type(Req, State#state{charset_a=Provided});
+match_charset(Req, State, Accept, [_Provided|Tail], Charset) ->
+ match_charset(Req, State, Accept, Tail, Charset).
+
+%% Assemble the Content-Type response header as an iolist from the
+%% negotiated type, parameters and (optional) charset, store the charset
+%% in the request metadata, then continue with encodings.
+set_content_type(Req=#http_req{meta=Meta}, State=#state{
+ content_type_a={{Type, SubType, Params}, _Fun},
+ charset_a=Charset}) ->
+ ParamsBin = set_content_type_build_params(Params, []),
+ ContentType = [Type, <<"/">>, SubType, ParamsBin],
+ ContentType2 = case Charset of
+ undefined -> ContentType;
+ Charset -> [ContentType, <<"; charset=">>, Charset]
+ end,
+ {ok, Req2} = cowboy_http_req:set_resp_header(
+ <<"Content-Type">>, ContentType2, Req),
+ encodings_provided(Req2#http_req{meta=[{charset, Charset}|Meta]}, State).
+
+%% Render media type parameters as an iolist of ";attr=value" pairs,
+%% accumulating in reverse and flipping the list once at the end.
+%% No parameters at all yields the empty binary.
+set_content_type_build_params([], []) ->
+ <<>>;
+set_content_type_build_params([], Acc) ->
+ lists:reverse(Acc);
+set_content_type_build_params([{Attr, Value}|Rest], Acc) ->
+ Pair = [Attr, <<"=">>, Value],
+ set_content_type_build_params(Rest, [Pair, <<";">>|Acc]).
+
+%% @todo Match for identity as we provide nothing else for now.
+%% @todo Don't forget to set the Content-Encoding header when we reply a body
+%% and the found encoding is something other than identity.
+%% Encoding negotiation is not implemented yet (see the @todo notes
+%% above); the identity encoding is implicitly used and we move straight
+%% on to variance handling.
+encodings_provided(Req, State) ->
+ variances(Req, State).
+
+%% Reply 406 Not Acceptable and terminate.
+not_acceptable(Req, State) ->
+ respond(Req, State, 406).
+
+%% variances/2 should return a list of headers that will be added
+%% to the Vary response header. The Accept, Accept-Language,
+%% Accept-Charset and Accept-Encoding headers do not need to be
+%% specified.
+%%
+%% @todo Do Accept-Encoding too when we handle it.
+%% @todo Does the order matter?
+%% Build the Vary response header from the negotiation axes that offered
+%% an actual choice (more than one content type, language or charset),
+%% plus anything the resource's variances/2 callback adds.
+variances(Req, State=#state{content_types_p=CTP,
+ languages_p=LP, charsets_p=CP}) ->
+ Variances = case CTP of
+ [] -> [];
+ [_] -> [];
+ [_|_] -> [<<"Accept">>]
+ end,
+ Variances2 = case LP of
+ [] -> Variances;
+ [_] -> Variances;
+ [_|_] -> [<<"Accept-Language">>|Variances]
+ end,
+ Variances3 = case CP of
+ [] -> Variances2;
+ [_] -> Variances2;
+ [_|_] -> [<<"Accept-Charset">>|Variances2]
+ end,
+ {Variances4, Req3, State2} = case call(Req, State, variances) of
+ no_call ->
+ {Variances3, Req, State};
+ {HandlerVariances, Req2, HandlerState} ->
+ {Variances3 ++ HandlerVariances, Req2,
+ State#state{handler_state=HandlerState}}
+ end,
+ %% Prefix every name with ", ", then strip the leading separator from
+ %% the first element when building the header value.
+ case [[<<", ">>, V] || V <- Variances4] of
+ [] ->
+ resource_exists(Req3, State2);
+ [[<<", ">>, H]|Variances5] ->
+ %% The header defined by RFC 2616 §14.44 is "Vary"; the
+ %% previous "Variances" name is not a real HTTP header.
+ {ok, Req4} = cowboy_http_req:set_resp_header(
+ <<"Vary">>, [H|Variances5], Req3),
+ resource_exists(Req4, State2)
+ end.
+
+%% resource_exists/2: true leads into the conditional-request checks;
+%% false branches into the missing-resource flow.
+resource_exists(Req, State) ->
+ expect(Req, State, resource_exists, true,
+ fun if_match_exists/2, fun if_match_musnt_exist/2).
+
+%% If-Match handling for an existing resource: absent or "*" passes
+%% through; an entity-tag list is checked against the resource's ETag.
+if_match_exists(Req, State) ->
+ case cowboy_http_req:parse_header('If-Match', Req) of
+ {undefined, Req2} ->
+ if_unmodified_since_exists(Req2, State);
+ {'*', Req2} ->
+ if_unmodified_since_exists(Req2, State);
+ {ETagsList, Req2} ->
+ if_match(Req2, State, ETagsList)
+ end.
+
+%% Compare the resource's ETag against the If-Match list. A resource
+%% without a generate_etag callback (no_call) cannot satisfy If-Match,
+%% so that replies 412 Precondition Failed.
+if_match(Req, State, EtagsList) ->
+ {Etag, Req2, State2} = generate_etag(Req, State),
+ case Etag of
+ no_call ->
+ precondition_failed(Req2, State2);
+ Etag ->
+ case lists:member(Etag, EtagsList) of
+ true -> if_unmodified_since_exists(Req2, State2);
+ false -> precondition_failed(Req2, State2)
+ end
+ end.
+
+%% The resource does not exist, so any If-Match header cannot be
+%% satisfied: reply 412. (Function name keeps its historical "musnt"
+%% spelling since resource_exists/2 references it.)
+if_match_musnt_exist(Req, State) ->
+ case cowboy_http_req:header('If-Match', Req) of
+ {undefined, Req2} -> is_put_to_missing_resource(Req2, State);
+ {_Any, Req2} -> precondition_failed(Req2, State)
+ end.
+
+%% An absent or unparseable If-Unmodified-Since date is ignored, per the
+%% usual lenient treatment of invalid HTTP dates.
+if_unmodified_since_exists(Req, State) ->
+ case cowboy_http_req:parse_header('If-Unmodified-Since', Req) of
+ {undefined, Req2} ->
+ if_none_match_exists(Req2, State);
+ {{error, badarg}, Req2} ->
+ if_none_match_exists(Req2, State);
+ {IfUnmodifiedSince, Req2} ->
+ if_unmodified_since(Req2, State, IfUnmodifiedSince)
+ end.
+
+%% If LastModified is the atom 'no_call', we continue.
+%% A modification after the If-Unmodified-Since date replies 412. When
+%% LastModified is the atom 'no_call', the term ordering (atom < tuple)
+%% makes the comparison false, so we continue as the comment above says.
+if_unmodified_since(Req, State, IfUnmodifiedSince) ->
+ {LastModified, Req2, State2} = last_modified(Req, State),
+ case LastModified > IfUnmodifiedSince of
+ true -> precondition_failed(Req2, State2);
+ false -> if_none_match_exists(Req2, State2)
+ end.
+
+%% If-None-Match: "*" matches the existing resource outright; a tag list
+%% is compared against the resource's ETag.
+if_none_match_exists(Req, State) ->
+ case cowboy_http_req:parse_header('If-None-Match', Req) of
+ {undefined, Req2} ->
+ if_modified_since_exists(Req2, State);
+ {'*', Req2} ->
+ precondition_is_head_get(Req2, State);
+ {EtagsList, Req2} ->
+ if_none_match(Req2, State, EtagsList)
+ end.
+
+%% A matching ETag leads to 304 (GET/HEAD) or 412 (other methods) via
+%% precondition_is_head_get/2; no match continues the flow.
+if_none_match(Req, State, EtagsList) ->
+ {Etag, Req2, State2} = generate_etag(Req, State),
+ case Etag of
+ no_call ->
+ precondition_failed(Req2, State2);
+ Etag ->
+ case lists:member(Etag, EtagsList) of
+ true -> precondition_is_head_get(Req2, State2);
+ false -> if_modified_since_exists(Req2, State2)
+ end
+ end.
+
+%% A failed If-None-Match precondition replies 304 Not Modified for
+%% GET/HEAD and 412 Precondition Failed for every other method.
+precondition_is_head_get(Req=#http_req{method=Method}, State)
+ when Method =:= 'HEAD'; Method =:= 'GET' ->
+ not_modified(Req, State);
+precondition_is_head_get(Req, State) ->
+ precondition_failed(Req, State).
+
+%% An absent or unparseable If-Modified-Since date is ignored and the
+%% request proceeds to method dispatch.
+if_modified_since_exists(Req, State) ->
+ case cowboy_http_req:parse_header('If-Modified-Since', Req) of
+ {undefined, Req2} ->
+ method(Req2, State);
+ {{error, badarg}, Req2} ->
+ method(Req2, State);
+ {IfModifiedSince, Req2} ->
+ if_modified_since_now(Req2, State, IfModifiedSince)
+ end.
+
+%% An If-Modified-Since date in the future is invalid and is ignored.
+if_modified_since_now(Req, State, IfModifiedSince) ->
+ case IfModifiedSince > erlang:universaltime() of
+ true -> method(Req, State);
+ false -> if_modified_since(Req, State, IfModifiedSince)
+ end.
+
+%% Without a last_modified callback (no_call) the check is skipped; an
+%% unmodified resource replies 304 Not Modified.
+if_modified_since(Req, State, IfModifiedSince) ->
+ {LastModified, Req2, State2} = last_modified(Req, State),
+ case LastModified of
+ no_call ->
+ method(Req2, State2);
+ LastModified ->
+ case LastModified > IfModifiedSince of
+ true -> method(Req2, State2);
+ false -> not_modified(Req2, State2)
+ end
+ end.
+
+%% Reply 304 Not Modified. The Content-Type header set during
+%% negotiation is dropped (a 304 carries no body), while ETag and
+%% Expires are (re)attached for cache validation.
+not_modified(Req=#http_req{resp_headers=RespHeaders}, State) ->
+ RespHeaders2 = lists:keydelete(<<"Content-Type">>, 1, RespHeaders),
+ Req2 = Req#http_req{resp_headers=RespHeaders2},
+ {Req3, State2} = set_resp_etag(Req2, State),
+ {Req4, State3} = set_resp_expires(Req3, State2),
+ respond(Req4, State3, 304).
+
+%% Reply 412 Precondition Failed and terminate.
+precondition_failed(Req, State) ->
+ respond(Req, State, 412).
+
+%% A PUT to a missing resource may still create it (after checking for
+%% permanent moves and conflicts); other methods check whether the
+%% resource existed previously.
+is_put_to_missing_resource(Req=#http_req{method='PUT'}, State) ->
+ moved_permanently(Req, State, fun is_conflict/2);
+is_put_to_missing_resource(Req, State) ->
+ previously_existed(Req, State).
+
+%% moved_permanently/2 should return either false or {true, Location}
+%% with Location the full new URI of the resource.
+%% {true, Location} replies 301 with a Location header; false (or no
+%% callback) continues with the supplied continuation.
+moved_permanently(Req, State, OnFalse) ->
+ case call(Req, State, moved_permanently) of
+ {{true, Location}, Req2, HandlerState} ->
+ {ok, Req3} = cowboy_http_req:set_resp_header(
+ <<"Location">>, Location, Req2),
+ respond(Req3, State#state{handler_state=HandlerState}, 301);
+ {false, Req2, HandlerState} ->
+ OnFalse(Req2, State#state{handler_state=HandlerState});
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ no_call ->
+ OnFalse(Req, State)
+ end.
+
+%% previously_existed/2: false heads toward 404 Not Found; true checks
+%% for permanent then temporary moves before falling back to 410 Gone.
+previously_existed(Req, State) ->
+ expect(Req, State, previously_existed, false,
+ fun (R, S) -> is_post_to_missing_resource(R, S, 404) end,
+ fun (R, S) -> moved_permanently(R, S, fun moved_temporarily/2) end).
+
+%% moved_temporarily/2 should return either false or {true, Location}
+%% with Location the full new URI of the resource.
+%% {true, Location} replies 307 with a Location header; false (or no
+%% callback) continues toward 410 Gone via the POST-to-missing check.
+moved_temporarily(Req, State) ->
+ case call(Req, State, moved_temporarily) of
+ {{true, Location}, Req2, HandlerState} ->
+ {ok, Req3} = cowboy_http_req:set_resp_header(
+ <<"Location">>, Location, Req2),
+ respond(Req3, State#state{handler_state=HandlerState}, 307);
+ {false, Req2, HandlerState} ->
+ is_post_to_missing_resource(Req2, State#state{handler_state=HandlerState}, 410);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ no_call ->
+ is_post_to_missing_resource(Req, State, 410)
+ end.
+
+%% Only POST may act on a missing resource (when allow_missing_post
+%% permits it); any other method replies with the given status code.
+is_post_to_missing_resource(Req=#http_req{method='POST'}, State, OnFalse) ->
+ allow_missing_post(Req, State, OnFalse);
+is_post_to_missing_resource(Req, State, OnFalse) ->
+ respond(Req, State, OnFalse).
+
+%% allow_missing_post/2: true lets the POST proceed to post_is_create;
+%% false replies with the status chosen by the caller (404 or 410).
+allow_missing_post(Req, State, OnFalse) ->
+ expect(Req, State, allow_missing_post, true, fun post_is_create/2, OnFalse).
+
+%% Dispatch on the request method once all preconditions have passed.
+%% GET/HEAD (and anything else) fall through to set_resp_body/2.
+method(Req=#http_req{method='DELETE'}, State) ->
+ delete_resource(Req, State);
+method(Req=#http_req{method='POST'}, State) ->
+ post_is_create(Req, State);
+method(Req=#http_req{method='PUT'}, State) ->
+ is_conflict(Req, State);
+method(Req, State) ->
+ set_resp_body(Req, State).
+
+%% delete_resource/2 should start deleting the resource and return.
+%% delete_resource/2: a false result means deletion failed, reply 500.
+delete_resource(Req, State) ->
+ expect(Req, State, delete_resource, true, fun delete_completed/2, 500).
+
+%% delete_completed/2 indicates whether the resource has been deleted yet.
+%% delete_completed/2: false means the delete is still in progress,
+%% which replies 202 Accepted.
+delete_completed(Req, State) ->
+ expect(Req, State, delete_completed, true, fun has_resp_body/2, 202).
+
+%% post_is_create/2 indicates whether the POST method can create new resources.
+%% post_is_create/2: false processes the POST in place; true creates a
+%% new resource at a path chosen by create_path/2.
+post_is_create(Req, State) ->
+ expect(Req, State, post_is_create, false, fun process_post/2, fun create_path/2).
+
+%% When the POST method can create new resources, create_path/2 will be called
+%% and is expected to return the full path to the new resource
+%% (including the leading /).
+%% Ask the resource for the new resource's path, expose it as a full URI
+%% in the Location header and as put_path metadata, then process the
+%% body, replying 303 See Other on success.
+create_path(Req=#http_req{meta=Meta}, State) ->
+ case call(Req, State, create_path) of
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {Path, Req2, HandlerState} ->
+ Location = create_path_location(Req2, Path),
+ State2 = State#state{handler_state=HandlerState},
+ {ok, Req3} = cowboy_http_req:set_resp_header(
+ <<"Location">>, Location, Req2),
+ put_resource(Req3#http_req{meta=[{put_path, Path}|Meta]},
+ State2, 303)
+ end.
+
+%% Build the absolute URI for the Location header from the transport
+%% (scheme), raw host, port (omitted when it is the scheme default) and
+%% the path returned by create_path/2.
+create_path_location(#http_req{transport=Transport, raw_host=Host,
+ port=Port}, Path) ->
+ TransportName = Transport:name(),
+ << (create_path_location_protocol(TransportName))/binary, "://",
+ Host/binary, (create_path_location_port(TransportName, Port))/binary,
+ Path/binary >>.
+
+create_path_location_protocol(ssl) -> <<"https">>;
+create_path_location_protocol(_) -> <<"http">>.
+
+%% Render the ":Port" URI suffix, omitting it entirely for the default
+%% port of each scheme (443 for ssl, 80 for tcp).
+create_path_location_port(ssl, 443) ->
+ <<>>;
+create_path_location_port(tcp, 80) ->
+ <<>>;
+create_path_location_port(_Transport, Port) ->
+ PortBin = list_to_binary(integer_to_list(Port)),
+ <<":", PortBin/binary>>.
+
+%% process_post should return true when the POST body could be processed
+%% and false when it hasn't, in which case a 500 error is sent.
+%% Let the resource process the POST body: true continues toward a 201
+%% reply, false replies 500 as documented above.
+process_post(Req, State) ->
+ case call(Req, State, process_post) of
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {true, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ next(Req2, State2, 201);
+ {false, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ respond(Req2, State2, 500)
+ end.
+
+%% is_conflict/2: true replies 409 Conflict; false proceeds with the PUT.
+is_conflict(Req, State) ->
+ expect(Req, State, is_conflict, false, fun put_resource/2, 409).
+
+%% PUT to an existing path: record the request path as put_path metadata
+%% and process the body, deciding afterwards whether a resource was
+%% newly created.
+put_resource(Req=#http_req{raw_path=RawPath, meta=Meta}, State) ->
+ Req2 = Req#http_req{meta=[{put_path, RawPath}|Meta]},
+ put_resource(Req2, State, fun is_new_resource/2).
+
+%% content_types_accepted should return a list of media types and their
+%% associated callback functions in the same format as content_types_provided.
+%%
+%% The callback will then be called and is expected to process the content
+%% pushed to the resource in the request body. The path to the new resource
+%% may be different from the request path, and is stored as request metadata.
+%% It is always defined past this point. It can be retrieved as demonstrated:
+%% {PutPath, Req2} = cowboy_http_req:meta(put_path, Req)
+%% Find the callback accepting the request's Content-Type and run it.
+%% No content_types_accepted callback means nothing can be accepted,
+%% hence 415 Unsupported Media Type.
+put_resource(Req, State, OnTrue) ->
+ case call(Req, State, content_types_accepted) of
+ no_call ->
+ respond(Req, State, 415);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {CTA, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ {ContentType, Req3}
+ = cowboy_http_req:parse_header('Content-Type', Req2),
+ choose_content_type(Req3, State2, OnTrue, ContentType, CTA)
+ end.
+
+%% Scan the accepted list for an exact Content-Type match and invoke the
+%% paired callback: true continues to OnTrue, false replies 500, and no
+%% match at all replies 415 Unsupported Media Type.
+choose_content_type(Req, State, _OnTrue, _ContentType, []) ->
+ respond(Req, State, 415);
+choose_content_type(Req, State, OnTrue, ContentType,
+ [{Accepted, Fun}|_Tail]) when ContentType =:= Accepted ->
+ case call(Req, State, Fun) of
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {true, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ next(Req2, State2, OnTrue);
+ {false, Req2, HandlerState} ->
+ State2 = State#state{handler_state=HandlerState},
+ respond(Req2, State2, 500)
+ end;
+choose_content_type(Req, State, OnTrue, ContentType, [_Any|Tail]) ->
+ choose_content_type(Req, State, OnTrue, ContentType, Tail).
+
+%% Whether we created a new resource, either through PUT or POST.
+%% This is easily testable because we would have set the Location
+%% header by this point if we did so.
+%% A Location response header at this point means a new resource was
+%% created: reply 201 Created. Otherwise check for a response body.
+is_new_resource(Req, State) ->
+ case cowboy_http_req:has_resp_header(<<"Location">>, Req) of
+ true -> respond(Req, State, 201);
+ false -> has_resp_body(Req, State)
+ end.
+
+%% With a response body set, continue to multiple_choices/2; without
+%% one, reply 204 No Content.
+has_resp_body(Req, State) ->
+ case cowboy_http_req:has_resp_body(Req) of
+ true -> multiple_choices(Req, State);
+ false -> respond(Req, State, 204)
+ end.
+
+%% Set the response headers and call the callback found using
+%% content_types_provided/2 to obtain the request body and add
+%% it to the response.
+%% For GET/HEAD: attach ETag, Last-Modified and Expires headers, then
+%% invoke the body-producing callback negotiated earlier and set the
+%% result (a streaming fun or plain contents) as the response body.
+%% Other methods skip straight to multiple_choices/2.
+set_resp_body(Req=#http_req{method=Method},
+ State=#state{content_type_a={_Type, Fun}})
+ when Method =:= 'GET'; Method =:= 'HEAD' ->
+ {Req2, State2} = set_resp_etag(Req, State),
+ {LastModified, Req3, State3} = last_modified(Req2, State2),
+ case LastModified of
+ LastModified when is_atom(LastModified) ->
+ %% undefined or no_call: no Last-Modified header is set.
+ Req4 = Req3;
+ LastModified ->
+ LastModifiedStr = httpd_util:rfc1123_date(LastModified),
+ {ok, Req4} = cowboy_http_req:set_resp_header(
+ <<"Last-Modified">>, LastModifiedStr, Req3)
+ end,
+ {Req5, State4} = set_resp_expires(Req4, State3),
+ case call(Req5, State4, Fun) of
+ {halt, Req6, HandlerState} ->
+ terminate(Req6, State4#state{handler_state=HandlerState});
+ {Body, Req6, HandlerState} ->
+ State5 = State4#state{handler_state=HandlerState},
+ {ok, Req7} = case Body of
+ {stream, Len, Fun1} ->
+ cowboy_http_req:set_resp_body_fun(Len, Fun1, Req6);
+ _Contents ->
+ cowboy_http_req:set_resp_body(Body, Req6)
+ end,
+ multiple_choices(Req7, State5)
+ end;
+set_resp_body(Req, State) ->
+ multiple_choices(Req, State).
+
+%% multiple_choices/2: false replies 200 OK; true replies 300 Multiple
+%% Choices.
+multiple_choices(Req, State) ->
+ expect(Req, State, multiple_choices, false, 200, 300).
+
+%% Response utility functions.
+
+%% Set the Etag response header when the resource provides one; the
+%% undefined case (no value or no callback) leaves the response as-is.
+set_resp_etag(Req, State) ->
+ {Etag, Req2, State2} = generate_etag(Req, State),
+ case Etag of
+ undefined ->
+ {Req2, State2};
+ Etag ->
+ {ok, Req3} = cowboy_http_req:set_resp_header(
+ <<"Etag">>, Etag, Req2),
+ {Req3, State2}
+ end.
+
+%% Set the Expires response header as an RFC 1123 date when the resource
+%% provides one; atoms (undefined/no_call) mean no header is set.
+set_resp_expires(Req, State) ->
+ {Expires, Req2, State2} = expires(Req, State),
+ case Expires of
+ Expires when is_atom(Expires) ->
+ {Req2, State2};
+ Expires ->
+ ExpiresStr = httpd_util:rfc1123_date(Expires),
+ {ok, Req3} = cowboy_http_req:set_resp_header(
+ <<"Expires">>, ExpiresStr, Req2),
+ {Req3, State2}
+ end.
+
+%% Info retrieval. No logic.
+
+%% Memoized ETag lookup: the first call invokes the resource callback
+%% and caches the result (or the absence of a callback, as no_call) in
+%% the state so later stages reuse it without calling again.
+generate_etag(Req, State=#state{etag=no_call}) ->
+ {undefined, Req, State};
+generate_etag(Req, State=#state{etag=undefined}) ->
+ case call(Req, State, generate_etag) of
+ no_call ->
+ {undefined, Req, State#state{etag=no_call}};
+ {Etag, Req2, HandlerState} ->
+ {Etag, Req2, State#state{handler_state=HandlerState, etag=Etag}}
+ end;
+generate_etag(Req, State=#state{etag=Etag}) ->
+ {Etag, Req, State}.
+
+%% Memoized last-modified lookup, same caching scheme as generate_etag/2.
+last_modified(Req, State=#state{last_modified=no_call}) ->
+ {undefined, Req, State};
+last_modified(Req, State=#state{last_modified=undefined}) ->
+ case call(Req, State, last_modified) of
+ no_call ->
+ {undefined, Req, State#state{last_modified=no_call}};
+ {LastModified, Req2, HandlerState} ->
+ {LastModified, Req2, State#state{handler_state=HandlerState,
+ last_modified=LastModified}}
+ end;
+last_modified(Req, State=#state{last_modified=LastModified}) ->
+ {LastModified, Req, State}.
+
+%% Memoized expiry-date lookup, same caching scheme as generate_etag/2.
+expires(Req, State=#state{expires=no_call}) ->
+ {undefined, Req, State};
+expires(Req, State=#state{expires=undefined}) ->
+ case call(Req, State, expires) of
+ no_call ->
+ {undefined, Req, State#state{expires=no_call}};
+ {Expires, Req2, HandlerState} ->
+ {Expires, Req2, State#state{handler_state=HandlerState,
+ expires=Expires}}
+ end;
+expires(Req, State=#state{expires=Expires}) ->
+ {Expires, Req, State}.
+
+%% REST primitives.
+
+%% Core decision primitive: call the resource callback and continue with
+%% OnTrue when its result matches Expected (or when the callback is not
+%% exported), with OnFalse otherwise. OnTrue/OnFalse are either a
+%% continuation fun or a status code (see next/3).
+expect(Req, State, Callback, Expected, OnTrue, OnFalse) ->
+ case call(Req, State, Callback) of
+ no_call ->
+ next(Req, State, OnTrue);
+ {halt, Req2, HandlerState} ->
+ terminate(Req2, State#state{handler_state=HandlerState});
+ {Expected, Req2, HandlerState} ->
+ next(Req2, State#state{handler_state=HandlerState}, OnTrue);
+ {_Unexpected, Req2, HandlerState} ->
+ next(Req2, State#state{handler_state=HandlerState}, OnFalse)
+ end.
+
+%% Invoke an optional resource callback, returning no_call when the
+%% handler module does not export it.
+call(Req, #state{handler=Handler, handler_state=HandlerState}, Fun) ->
+ case erlang:function_exported(Handler, Fun, 2) of
+ true -> Handler:Fun(Req, HandlerState);
+ false -> no_call
+ end.
+
+%% Continue the state machine: a fun is the next step, an integer is a
+%% status code to reply with.
+next(Req, State, Next) when is_function(Next) ->
+ Next(Req, State);
+next(Req, State, StatusCode) when is_integer(StatusCode) ->
+ respond(Req, State, StatusCode).
+
+%% @todo Allow some sort of callback for custom error pages.
+respond(Req, State, StatusCode) ->
+ {ok, Req2} = cowboy_http_req:reply(StatusCode, Req),
+ terminate(Req2, State).
+
+%% Give the resource a chance to clean up via the optional
+%% rest_terminate/2 callback. The response state is locked so the
+%% callback cannot send anything further.
+terminate(Req, #state{handler=Handler, handler_state=HandlerState}) ->
+ case erlang:function_exported(Handler, rest_terminate, 2) of
+ true -> ok = Handler:rest_terminate(
+ Req#http_req{resp_state=locked}, HandlerState);
+ false -> ok
+ end,
+ {ok, Req}.
--- /dev/null
+%% Copyright (c) 2011, Magnus Klaar <magnus.klaar@gmail.com>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Static resource handler.
+%%
+%% This built in HTTP handler provides a simple file serving capability for
+%% cowboy applications. It should be considered an experimental feature because
+%% of its dependency on the experimental REST handler. It's recommended to be
+%% used for small or temporary environments where it is not preferable to set
+%% up a second server just to serve files.
+%%
+%% If this handler is used the Erlang node running the cowboy application must
+%% be configured to use an async thread pool. This is configured by adding the
+%% `+A $POOL_SIZE' argument to the `erl' command used to start the node. See
+%% <a href="http://erlang.org/pipermail/erlang-bugs/2012-January/002720.html">
+%% this reply</a> from the OTP team to erlang-bugs
+%%
+%% == Base configuration ==
+%%
+%% The handler must be configured with a request path prefix to serve files
+%% under and the path to a directory to read files from. The request path prefix
+%% is defined in the path pattern of the cowboy dispatch rule for the handler.
+%% The request path pattern must end with a ``'...''' token.
+%% The directory path can be set to either an absolute or relative path in the
+%% form of a list or binary string representation of a file system path. A list
+%% of binary path segments, as is used throughout cowboy, is also a valid
+%% directory path.
+%%
+%% The directory path can also be set to a relative path within the `priv/'
+%% directory of an application. This is configured by setting the value of the
+%% directory option to a tuple of the form `{priv_dir, Application, Relpath}'.
+%%
+%% ==== Examples ====
+%% ```
+%% %% Serve files from /var/www/ under http://example.com/static/
+%% {[<<"static">>, '...'], cowboy_http_static,
+%% [{directory, "/var/www"}]}
+%%
+%% %% Serve files from the current working directory under http://example.com/static/
+%% {[<<"static">>, '...'], cowboy_http_static,
+%% [{directory, <<"./">>}]}
+%%
+%% %% Serve files from cowboy/priv/www under http://example.com/
+%% {['...'], cowboy_http_static,
+%% [{directory, {priv_dir, cowboy, [<<"www">>]}}]}
+%% '''
+%%
+%% == Content type configuration ==
+%%
+%% By default the content type of all static resources will be set to
+%% `application/octet-stream'. This can be overridden by supplying a list
+%% of filename extension to mimetypes pairs in the `mimetypes' option.
+%% The filename extension should be a binary string including the leading dot.
+%% The mimetypes must be of a type that the `cowboy_http_rest' protocol can
+%% handle.
+%%
+%% The <a href="https://github.com/spawngrid/mimetypes">spawngrid/mimetypes</a>
+%% application, or an arbitrary function accepting the path to the file being
+%% served, can also be used to generate the list of content types for a static
+%% file resource. The function used must accept an additional argument after
+%% the file path argument.
+%%
+%% ==== Example ====
+%% ```
+%% %% Use a static list of content types.
+%% {[<<"static">>, '...'], cowboy_http_static,
+%% [{directory, {priv_dir, cowboy, []}},
+%% {mimetypes, [
+%% {<<".css">>, [<<"text/css">>]},
+%% {<<".js">>, [<<"application/javascript">>]}]}]}
+%%
+%% %% Use the default database in the mimetypes application.
+%% {[<<"static">>, '...', cowboy_http_static,
+%% [{directory, {priv_dir, cowboy, []}},
+%% {mimetypes, {fun mimetypes:path_to_mimes/2, default}}]]}
+%% '''
+%%
+%% == ETag Header Function ==
+%%
+%% The default behaviour of the static file handler is to not generate ETag
+%% headers. This is because generating ETag headers based on file metadata
+%% causes different servers in a cluster to generate different ETag values for
+%% the same file unless the metadata is also synced. Generating strong ETags
+%% based on the contents of a file is currently out of scope for this module.
+%%
+%% The default behaviour can be overridden to generate an ETag header based on
+%% a combination of the file path, file size, inode and mtime values. If the
+%% option value is a list of attribute names tagged with `attributes' a hex
+%% encoded CRC32 checksum of the attribute values are used as the ETag header
+%% value.
+%%
+%% If a strong ETag is required a user defined function for generating the
+%% header value can be supplied. The function must accept a proplist of the
+%% file attributes as the first argument and a second argument containing any
+%% additional data that the function requires. The function must return a
+%% `binary()' or `undefined'.
+%%
+%% ==== Examples ====
+%% ```
+%% %% A value of default is equal to not specifying the option.
+%% {[<<"static">>, '...', cowboy_http_static,
+%% [{directory, {priv_dir, cowboy, []}},
+%% {etag, default}]]}
+%%
+%% %% Use all avaliable ETag function arguments to generate a header value.
+%% {[<<"static">>, '...', cowboy_http_static,
+%% [{directory, {priv_dir, cowboy, []}},
+%% {etag, {attributes, [filepath, filesize, inode, mtime]}}]]}
+%%
+%% %% Use a user defined function to generate a strong ETag header value.
+%% {[<<"static">>, '...', cowboy_http_static,
+%% [{directory, {priv_dir, cowboy, []}},
+%% {etag, {fun generate_strong_etag/2, strong_etag_extra}}]]}
+%%
+%% generate_strong_etag(Arguments, strong_etag_extra) ->
+%% {_, Filepath} = lists:keyfind(filepath, 1, Arguments),
+%% {_, _Filesize} = lists:keyfind(filesize, 1, Arguments),
+%% {_, _INode} = lists:keyfind(inode, 1, Arguments),
+%% {_, _Modified} = lists:keyfind(mtime, 1, Arguments),
+%% ChecksumCommand = lists:flatten(io_lib:format("sha1sum ~s", [Filepath])),
+%% [Checksum|_] = string:tokens(os:cmd(ChecksumCommand), " "),
+%% iolist_to_binary(Checksum).
+%% '''
+-module(cowboy_http_static).
+
+%% include files
+-include("http.hrl").
+-include_lib("kernel/include/file.hrl").
+
+%% cowboy_http_protocol callbacks
+-export([init/3]).
+
+%% cowboy_http_rest callbacks
+-export([rest_init/2, allowed_methods/2, malformed_request/2,
+ resource_exists/2, forbidden/2, last_modified/2, generate_etag/2,
+ content_types_provided/2, file_contents/2]).
+
+%% internal
+-export([path_to_mimetypes/2]).
+
+%% types
+-type dirpath() :: string() | binary() | [binary()].
+-type dirspec() :: dirpath() | {priv, atom(), dirpath()}.
+-type mimedef() :: {binary(), binary(), [{binary(), binary()}]}.
+-type etagarg() :: {filepath, binary()} | {mtime, calendar:datetime()}
+ | {inode, non_neg_integer()} | {filesize, non_neg_integer()}.
+
+%% handler state
+-record(state, {
+ filepath :: binary() | error,
+ fileinfo :: {ok, #file_info{}} | {error, _} | error,
+ mimetypes :: {fun((binary(), T) -> [mimedef()]), T} | undefined,
+ etag_fun :: {fun(([etagarg()], T) -> undefined | binary()), T}}).
+
+
+%% @private Hand the request over to the REST protocol layer.
+%% This module implements the cowboy_http_rest callbacks found below.
+init({_Any, http}, _Req, _Opts) ->
+	{upgrade, protocol, cowboy_http_rest}.
+
+
+%% @private Set up initial state of REST handler.
+%% Resolves the configured document root, the mimetypes lookup fun and the
+%% ETag generation fun from the handler options, then stats the requested
+%% file — unless the request path contains unsafe segments, in which case
+%% the state is marked as error for malformed_request/2.
+-spec rest_init(#http_req{}, list()) -> {ok, #http_req{}, #state{}}.
+rest_init(Req, Opts) ->
+	Directory = proplists:get_value(directory, Opts),
+	%% {priv_dir, App, Path} specs are expanded to a filesystem path.
+	Directory1 = directory_path(Directory),
+	Mimetypes = proplists:get_value(mimetypes, Opts, []),
+	%% Normalize the mimetypes option to a {Fun, Data} pair.
+	Mimetypes1 = case Mimetypes of
+		{_, _} -> Mimetypes;
+		[] -> {fun path_to_mimetypes/2, []};
+		[_|_] -> {fun path_to_mimetypes/2, Mimetypes}
+	end,
+	%% Normalize the etag option to a {Fun, Data} pair; the default is to
+	%% send no ETag header at all.
+	ETagFunction = case proplists:get_value(etag, Opts) of
+		default -> {fun no_etag_function/2, undefined};
+		undefined -> {fun no_etag_function/2, undefined};
+		{attributes, Attrs} -> {fun attr_etag_function/2, Attrs};
+		{_, _}=EtagFunction1 -> EtagFunction1
+	end,
+	{Filepath, Req1} = cowboy_http_req:path_info(Req),
+	State = case check_path(Filepath) of
+		error ->
+			#state{filepath=error, fileinfo=error, mimetypes=undefined,
+				etag_fun=ETagFunction};
+		ok ->
+			Filepath1 = join_paths(Directory1, Filepath),
+			Fileinfo = file:read_file_info(Filepath1),
+			#state{filepath=Filepath1, fileinfo=Fileinfo, mimetypes=Mimetypes1,
+				etag_fun=ETagFunction}
+	end,
+	{ok, Req1, State}.
+
+
+%% @private Static files can only be fetched; restrict to GET and HEAD.
+-spec allowed_methods(#http_req{}, #state{}) ->
+	{[atom()], #http_req{}, #state{}}.
+allowed_methods(Req, State) ->
+	Methods = ['GET', 'HEAD'],
+	{Methods, Req, State}.
+
+%% @private A request is malformed when its path contained unsafe
+%% segments, which rest_init/2 records as filepath=error.
+malformed_request(Req, #state{filepath=Filepath}=State) ->
+	{Filepath =:= error, Req, State}.
+
+
+%% @private Check if the resource exists under the document root.
+%% A stat error (enoent and friends) means the file does not exist;
+%% otherwise the resource only exists when the path is a regular file.
+resource_exists(Req, #state{fileinfo={error, _}}=State) ->
+	{false, Req, State};
+resource_exists(Req, #state{fileinfo={ok, Fileinfo}}=State) ->
+	{Fileinfo#file_info.type =:= regular, Req, State}.
+
+
+%% @private
+%% Access to a file resource is forbidden if it exists and the local node does
+%% not have permission to read it. Directory listings are always forbidden.
+%% Clause order matters: directories are rejected first, then eacces is
+%% treated as forbidden while any other stat error is not (so it can be
+%% reported as not-found instead).
+forbidden(Req, #state{fileinfo={_, #file_info{type=directory}}}=State) ->
+	{true, Req, State};
+forbidden(Req, #state{fileinfo={error, eacces}}=State) ->
+	{true, Req, State};
+forbidden(Req, #state{fileinfo={error, _}}=State) ->
+	{false, Req, State};
+%% A readable file (read or read_write access) is allowed through.
+forbidden(Req, #state{fileinfo={ok, #file_info{access=Access}}}=State) ->
+	{not (Access =:= read orelse Access =:= read_write), Req, State}.
+
+
+%% @private Read the time a file system object was last modified.
+%% The mtime from the stat data gathered in rest_init/2 is used as-is.
+-spec last_modified(#http_req{}, #state{}) ->
+	{calendar:datetime(), #http_req{}, #state{}}.
+last_modified(Req, #state{fileinfo={ok, #file_info{mtime=Modified}}}=State) ->
+	{Modified, Req, State}.
+
+
+%% @private Generate the ETag header value for this file.
+%% The ETag header value is only generated if the resource is a file that
+%% exists in document root. The configured fun receives the file attributes
+%% as a proplist and returns either a binary ETag value or undefined to
+%% omit the header entirely.
+-spec generate_etag(#http_req{}, #state{}) ->
+	{undefined | binary(), #http_req{}, #state{}}.
+generate_etag(Req, #state{fileinfo={_, #file_info{type=regular, inode=INode,
+		mtime=Modified, size=Filesize}}, filepath=Filepath,
+		etag_fun={ETagFun, ETagData}}=State) ->
+	ETagArgs = [
+		{filepath, Filepath}, {filesize, Filesize},
+		{inode, INode}, {mtime, Modified}],
+	{ETagFun(ETagArgs, ETagData), Req, State};
+generate_etag(Req, State) ->
+	{undefined, Req, State}.
+
+
+%% @private Return the content type of a file.
+%% Every mimetype reported for the path is served by file_contents/2.
+-spec content_types_provided(#http_req{}, #state{}) -> tuple().
+content_types_provided(Req, #state{filepath=Filepath,
+		mimetypes={MimetypesFun, MimetypesData}}=State) ->
+	Mimetypes = lists:map(
+		fun(Type) -> {Type, file_contents} end,
+		MimetypesFun(Filepath, MimetypesData)),
+	{Mimetypes, Req, State}.
+
+
+%% @private Return a function that writes a file directly to the socket.
+%% The file size is returned alongside as the stream length; the actual
+%% writing is deferred to the fun built by content_function/3.
+-spec file_contents(#http_req{}, #state{}) -> tuple().
+file_contents(Req, #state{filepath=Filepath,
+		fileinfo={ok, #file_info{size=Filesize}}}=State) ->
+	{ok, Transport, Socket} = cowboy_http_req:transport(Req),
+	Writefile = content_function(Transport, Socket, Filepath),
+	{{stream, Filesize, Writefile}, Req, State}.
+
+
+%% @private Return a function writing the contents of a file to a socket.
+%% The function returns the number of bytes written to the socket to enable
+%% the calling function to determine if the expected number of bytes were
+%% written to the socket.
+-spec content_function(module(), inet:socket(), binary()) ->
+	fun(() -> {sent, non_neg_integer()}).
+content_function(Transport, Socket, Filepath) ->
+	%% `file:sendfile/2' will only work with the `cowboy_tcp_transport'
+	%% transport module: SSL or future SPDY transports require the content
+	%% to be encrypted or framed as it is sent, so they must go through
+	%% the chunked fallback instead of writing straight to the socket.
+	case erlang:function_exported(file, sendfile, 2) of
+		false ->
+			fun() -> sfallback(Transport, Socket, Filepath) end;
+		_ when Transport =/= cowboy_tcp_transport ->
+			fun() -> sfallback(Transport, Socket, Filepath) end;
+		true ->
+			fun() -> sendfile(Socket, Filepath) end
+	end.
+
+
+%% @private Sendfile fallback function.
+%% Streams the file to the socket in fixed-size chunks and returns
+%% {sent, Bytes} so the caller can verify how much went out.
+-spec sfallback(module(), inet:socket(), binary()) -> {sent, non_neg_integer()}.
+sfallback(Transport, Socket, Filepath) ->
+	{ok, IoDevice} = file:open(Filepath, [read,binary,raw]),
+	sfallback(Transport, Socket, IoDevice, 0).
+
+-spec sfallback(module(), inet:socket(), file:io_device(),
+	non_neg_integer()) -> {sent, non_neg_integer()}.
+sfallback(Transport, Socket, IoDevice, BytesSent) ->
+	case file:read(IoDevice, 16#1FFF) of
+		{ok, Chunk} ->
+			ok = Transport:send(Socket, Chunk),
+			sfallback(Transport, Socket, IoDevice,
+				BytesSent + byte_size(Chunk));
+		eof ->
+			ok = file:close(IoDevice),
+			{sent, BytesSent}
+	end.
+
+
+%% @private Wrapper for the native sendfile function.
+%% Note the argument order of file:sendfile/2: filename first, then socket.
+-spec sendfile(inet:socket(), binary()) -> {sent, non_neg_integer()}.
+sendfile(Socket, Filepath) ->
+	{ok, Sent} = file:sendfile(Filepath, Socket),
+	{sent, Sent}.
+
+%% @private Resolve a directory specification to a filesystem path.
+%% {priv_dir, App, Path} is resolved against the application's priv/
+%% directory; Path may be a string, a binary or a list of binary segments.
+%% Any other value is assumed to already be a path and is passed through.
+-spec directory_path(dirspec()) -> dirpath().
+directory_path({priv_dir, App, []}) ->
+	priv_dir_path(App);
+%% Non-empty string path (list of characters).
+directory_path({priv_dir, App, [H|_]=Path}) when is_integer(H) ->
+	filename:join(priv_dir_path(App), Path);
+%% List of binary segments.
+directory_path({priv_dir, App, [H|_]=Path}) when is_binary(H) ->
+	filename:join(filename:split(priv_dir_path(App)) ++ Path);
+%% Single binary path.
+directory_path({priv_dir, App, Path}) when is_binary(Path) ->
+	filename:join(priv_dir_path(App), Path);
+directory_path(Path) ->
+	Path.
+
+
+%% @private Validate a request path for unsafe characters.
+%% There is no way to escape special characters in a filesystem path, so a
+%% path is rejected as soon as one of its segments is unsafe.
+-spec check_path(Path::[binary()]) -> ok | error.
+check_path(Segments) ->
+	case lists:all(fun is_safe_segment/1, Segments) of
+		true -> ok;
+		false -> error
+	end.
+
+%% @private A segment is unsafe when it is empty, a dot-link ("." or "..")
+%% or contains a path separator.
+is_safe_segment(<<>>) -> false;
+is_safe_segment(<<".">>) -> false;
+is_safe_segment(<<"..">>) -> false;
+is_safe_segment(Segment) ->
+	binary:match(Segment, <<"/">>) =:= nomatch.
+
+
+%% @private Join the directory and request paths.
+%% The directory may be a string, a binary or a list of binary segments;
+%% the result is always a binary path.
+-spec join_paths(dirpath(), [binary()]) -> binary().
+join_paths([], Filepath) ->
+	filename:join(Filepath);
+join_paths([C|_]=Dirpath, Filepath) when is_integer(C) ->
+	filename:join(filename:split(Dirpath) ++ Filepath);
+join_paths([Segment|_]=Dirpath, Filepath) when is_binary(Segment) ->
+	filename:join(Dirpath ++ Filepath);
+join_paths(Dirpath, Filepath) when is_binary(Dirpath) ->
+	filename:join([Dirpath|Filepath]).
+
+
+%% @private Return the path to the priv/ directory of an application.
+%% Falls back to guessing from the module's beam location when the
+%% application is not known to the code server.
+-spec priv_dir_path(atom()) -> string().
+priv_dir_path(App) ->
+	case code:priv_dir(App) of
+		{error, bad_name} -> priv_dir_mod(App);
+		Dir -> Dir
+	end.
+
+%% @private Guess the priv/ directory from a module's beam location.
+%% Falls back to a relative "../priv" when the module cannot be located.
+-spec priv_dir_mod(atom()) -> string().
+priv_dir_mod(Mod) ->
+	case code:which(Mod) of
+		Beam when is_list(Beam) ->
+			filename:join([filename:dirname(Beam),"../priv"]);
+		_NotFound ->
+			"../priv"
+	end.
+
+
+%% @private Use application/octet-stream as the default mimetype.
+%% If a list of extension - mimetype pairs is provided, the mimetype is
+%% looked up by the file extension; when the path has no extension or no
+%% pair matches, the default mimetype is returned.
+-spec path_to_mimetypes(binary(), [{binary(), [mimedef()]}]) ->
+	[mimedef()].
+path_to_mimetypes(Filepath, Extensions) when is_binary(Filepath) ->
+	case filename:extension(Filepath) of
+		<<>> -> default_mimetype();
+		Extension -> path_to_mimetypes_(Extension, Extensions)
+	end.
+
+%% @private Look the extension up in the configured pairs.
+-spec path_to_mimetypes_(binary(), [{binary(), [mimedef()]}]) -> [mimedef()].
+path_to_mimetypes_(Extension, Extensions) ->
+	case lists:keyfind(Extension, 1, Extensions) of
+		{_, Mimetypes} -> Mimetypes;
+		false -> default_mimetype()
+	end.
+
+%% @private The mimetype served when nothing better is known.
+-spec default_mimetype() -> [mimedef()].
+default_mimetype() ->
+	[{<<"application">>, <<"octet-stream">>, []}].
+
+
+%% @private Do not send ETag headers in the default configuration.
+%% Returning undefined makes generate_etag/2 omit the header entirely.
+-spec no_etag_function([etagarg()], undefined) -> undefined.
+no_etag_function(_ETagArgs, undefined) ->
+	undefined.
+
+%% @private A simple alternative is to send an ETag based on file attributes.
+%% The selected attribute values are serialized with term_to_binary/1 and a
+%% hex-encoded CRC32 checksum of them becomes the ETag value.
+-type fileattr() :: filepath | filesize | mtime | inode.
+-spec attr_etag_function([etagarg()], [fileattr()]) -> binary().
+attr_etag_function(Args, Attrs) ->
+	%% foldl with a prepend keeps the original (reversed) checksum order.
+	Encoded = lists:foldl(
+		fun(Attr, Acc) ->
+			{_, Value} = lists:keyfind(Attr, 1, Args),
+			[term_to_binary(Value)|Acc]
+		end, [], Attrs),
+	list_to_binary(erlang:integer_to_list(erlang:crc32(Encoded), 16)).
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-define(_eq(E, I), ?_assertEqual(E, I)).
+
+%% Empty, dot-link and slash-containing segments must all be rejected.
+check_path_test_() ->
+	C = fun check_path/1,
+	[?_eq(error, C([<<>>])),
+	 ?_eq(ok, C([<<"abc">>])),
+	 ?_eq(error, C([<<".">>])),
+	 ?_eq(error, C([<<"..">>])),
+	 ?_eq(error, C([<<"/">>]))
+	].
+
+%% All accepted directory forms (empty, binary, string, binary segments)
+%% must join to the same binary path.
+join_paths_test_() ->
+	P = fun join_paths/2,
+	[?_eq(<<"a">>, P([], [<<"a">>])),
+	 ?_eq(<<"a/b/c">>, P(<<"a/b">>, [<<"c">>])),
+	 ?_eq(<<"a/b/c">>, P("a/b", [<<"c">>])),
+	 ?_eq(<<"a/b/c">>, P([<<"a">>, <<"b">>], [<<"c">>]))
+	].
+
+%% priv_dir resolution is checked by counting path segments relative to
+%% the bare priv/ directory, since its absolute location varies.
+directory_path_test_() ->
+	P = fun directory_path/1,
+	PL = fun(I) -> length(filename:split(P(I))) end,
+	Base = PL({priv_dir, cowboy, []}),
+	[?_eq(Base + 1, PL({priv_dir, cowboy, "a"})),
+	 ?_eq(Base + 1, PL({priv_dir, cowboy, <<"a">>})),
+	 ?_eq(Base + 1, PL({priv_dir, cowboy, [<<"a">>]})),
+	 ?_eq(Base + 2, PL({priv_dir, cowboy, "a/b"})),
+	 ?_eq(Base + 2, PL({priv_dir, cowboy, <<"a/b">>})),
+	 ?_eq(Base + 2, PL({priv_dir, cowboy, [<<"a">>, <<"b">>]})),
+	 ?_eq("a/b", P("a/b"))
+	].
+
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc WebSocket protocol implementation.
+%%
+%% Supports the protocol version 0 (hixie-76), version 7 (hybi-7)
+%% and version 8 (hybi-8, hybi-9 and hybi-10).
+%%
+%% Version 0 is supported by the following browsers:
+%% <ul>
+%% <li>Firefox 4-5 (disabled by default)</li>
+%% <li>Chrome 6-13</li>
+%% <li>Safari 5.0.1+</li>
+%% <li>Opera 11.00+ (disabled by default)</li>
+%% </ul>
+%%
+%% Version 7 is supported by the following browser:
+%% <ul>
+%% <li>Firefox 6</li>
+%% </ul>
+%%
+%% Version 8+ is supported by the following browsers:
+%% <ul>
+%% <li>Firefox 7+</li>
+%% <li>Chrome 14+</li>
+%% </ul>
+-module(cowboy_http_websocket).
+
+-export([upgrade/4]). %% API.
+-export([handler_loop/4]). %% Internal.
+
+-include("include/http.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-type opcode() :: 0 | 1 | 2 | 8 | 9 | 10.
+-type mask_key() :: 0..16#ffffffff.
+
+-record(state, {
+	%% Negotiated protocol draft: 0 = hixie-76, 7/8/13 = hybi drafts.
+	version :: 0 | 7 | 8 | 13,
+	%% User handler module and the options passed to it.
+	handler :: module(),
+	opts :: any(),
+	%% hixie-76: the two challenge keys; hybi: the computed accept value.
+	challenge = undefined :: undefined | binary() | {binary(), binary()},
+	%% Inactivity timeout and the reference of the currently armed timer.
+	timeout = infinity :: timeout(),
+	timeout_ref = undefined,
+	%% {OK, Closed, Error} message tags from Transport:messages().
+	messages = undefined :: undefined | {atom(), atom(), atom()},
+	hibernate = false,
+	eop :: undefined | tuple(), %% hixie-76 specific: end-of-frame pattern.
+	origin = undefined :: undefined | binary() %% hixie-76 specific.
+}).
+
+%% @doc Upgrade a HTTP request to the WebSocket protocol.
+%%
+%% You do not need to call this function manually. To upgrade to the WebSocket
+%% protocol, you simply need to return <em>{upgrade, protocol, {@module}}</em>
+%% in your <em>cowboy_http_handler:init/3</em> handler function.
+-spec upgrade(pid(), module(), any(), #http_req{}) -> closed | none().
+upgrade(ListenerPid, Handler, Opts, Req) ->
+	%% Reclassify this process as a websocket connection in the listener.
+	cowboy_listener:move_connection(ListenerPid, websocket, self()),
+	%% Any crash while validating the upgrade headers (bad or missing
+	%% values) is turned into a 400 reply instead of a silent exit.
+	case catch websocket_upgrade(#state{handler=Handler, opts=Opts}, Req) of
+		{ok, State, Req2} -> handler_init(State, Req2);
+		{'EXIT', _Reason} -> upgrade_error(Req)
+	end.
+
+%% @private Validate the upgrade request headers.
+%% Crashes (caught by upgrade/4) when the Connection or Upgrade headers do
+%% not describe a websocket upgrade; the version header then selects the
+%% protocol draft to speak.
+-spec websocket_upgrade(#state{}, #http_req{}) -> {ok, #state{}, #http_req{}}.
+websocket_upgrade(State, Req) ->
+	{ConnTokens, Req2}
+		= cowboy_http_req:parse_header('Connection', Req),
+	true = lists:member(<<"upgrade">>, ConnTokens),
+	%% @todo Should probably send a 426 if the Upgrade header is missing.
+	{[<<"websocket">>], Req3} = cowboy_http_req:parse_header('Upgrade', Req2),
+	{Version, Req4} = cowboy_http_req:header(<<"Sec-Websocket-Version">>, Req3),
+	websocket_upgrade(Version, State, Req4).
+
+%% @todo Handle the Sec-Websocket-Protocol header.
+%% @todo Reply a proper error, don't die, if a required header is undefined.
+%%
+%% The version header value is an arbitrary-length binary such as <<"13">>,
+%% so the argument type must be binary(), not the 8-bit-only <<_:8>> the
+%% spec previously declared.
+-spec websocket_upgrade(undefined | binary(), #state{}, #http_req{})
+	-> {ok, #state{}, #http_req{}}.
+%% No version given. Assuming hixie-76 draft.
+%%
+%% We need to wait to send a reply back before trying to read the
+%% third part of the challenge key, because proxies will wait for
+%% a reply before sending it. Therefore we calculate the challenge
+%% key only in websocket_handshake/3.
+websocket_upgrade(undefined, State, Req=#http_req{meta=Meta}) ->
+	{Origin, Req2} = cowboy_http_req:header(<<"Origin">>, Req),
+	{Key1, Req3} = cowboy_http_req:header(<<"Sec-Websocket-Key1">>, Req2),
+	{Key2, Req4} = cowboy_http_req:header(<<"Sec-Websocket-Key2">>, Req3),
+	%% All three headers are required for the hixie-76 handshake.
+	false = lists:member(undefined, [Origin, Key1, Key2]),
+	%% Precompiled pattern for the 0xFF frame terminator byte.
+	EOP = binary:compile_pattern(<< 255 >>),
+	{ok, State#state{version=0, origin=Origin, challenge={Key1, Key2},
+		eop=EOP}, Req4#http_req{meta=[{websocket_version, 0}|Meta]}};
+%% Versions 7 and 8. Implementation follows the hybi 7 through 17 drafts.
+websocket_upgrade(Version, State, Req=#http_req{meta=Meta})
+		when Version =:= <<"7">>; Version =:= <<"8">>;
+			Version =:= <<"13">> ->
+	{Key, Req2} = cowboy_http_req:header(<<"Sec-Websocket-Key">>, Req),
+	false = Key =:= undefined,
+	%% The accept value can be computed right away for hybi drafts.
+	Challenge = hybi_challenge(Key),
+	IntVersion = list_to_integer(binary_to_list(Version)),
+	{ok, State#state{version=IntVersion, challenge=Challenge},
+		Req2#http_req{meta=[{websocket_version, IntVersion}|Meta]}}.
+
+%% @private Call the handler's websocket_init/3 callback.
+%% The handler may tweak the timeout and hibernation behaviour through its
+%% return value, or abort the upgrade with shutdown. A crash in the
+%% callback is logged and answered with a 400 reply.
+-spec handler_init(#state{}, #http_req{}) -> closed | none().
+handler_init(State=#state{handler=Handler, opts=Opts},
+		Req=#http_req{transport=Transport}) ->
+	try Handler:websocket_init(Transport:name(), Req, Opts) of
+		{ok, Req2, HandlerState} ->
+			websocket_handshake(State, Req2, HandlerState);
+		{ok, Req2, HandlerState, hibernate} ->
+			websocket_handshake(State#state{hibernate=true},
+				Req2, HandlerState);
+		{ok, Req2, HandlerState, Timeout} ->
+			websocket_handshake(State#state{timeout=Timeout},
+				Req2, HandlerState);
+		{ok, Req2, HandlerState, Timeout, hibernate} ->
+			websocket_handshake(State#state{timeout=Timeout,
+				hibernate=true}, Req2, HandlerState);
+		{shutdown, Req2} ->
+			upgrade_denied(Req2)
+	catch Class:Reason ->
+		upgrade_error(Req),
+		error_logger:error_msg(
+			"** Handler ~p terminating in websocket_init/3~n"
+			"   for the reason ~p:~p~n** Options were ~p~n"
+			"** Request was ~p~n** Stacktrace: ~p~n~n",
+			[Handler, Class, Reason, Opts, Req, erlang:get_stacktrace()])
+	end.
+
+%% @private Reply with a 400 and report the connection as closed.
+%% resp_state is forced back to waiting so the reply is accepted even if
+%% the request was already partially processed.
+-spec upgrade_error(#http_req{}) -> closed.
+upgrade_error(Req) ->
+	{ok, _Req2} = cowboy_http_req:reply(400, [], [],
+		Req#http_req{resp_state=waiting}),
+	closed.
+
+%% @see cowboy_http_protocol:ensure_response/1
+%% Make sure something sensible is on the wire before giving up: nothing
+%% sent yet gets a 400; an in-progress chunked reply gets its final chunk
+%% (except for HEAD requests); a finished reply needs nothing more.
+-spec upgrade_denied(#http_req{}) -> closed.
+upgrade_denied(#http_req{resp_state=done}) ->
+	closed;
+upgrade_denied(Req=#http_req{resp_state=waiting}) ->
+	{ok, _Req2} = cowboy_http_req:reply(400, [], [], Req),
+	closed;
+upgrade_denied(#http_req{method='HEAD', resp_state=chunks}) ->
+	closed;
+upgrade_denied(#http_req{socket=Socket, transport=Transport,
+		resp_state=chunks}) ->
+	Transport:send(Socket, <<"0\r\n\r\n">>),
+	closed.
+
+-spec websocket_handshake(#state{}, #http_req{}, any()) -> closed | none().
+%% hixie-76 handshake: reply 101 with the Location/Origin echo headers,
+%% then read the 8-byte key3 from the request body and write the MD5
+%% challenge response directly on the socket before entering the loop.
+websocket_handshake(State=#state{version=0, origin=Origin,
+		challenge={Key1, Key2}}, Req=#http_req{socket=Socket,
+		transport=Transport, raw_host=Host, port=Port,
+		raw_path=Path, raw_qs=QS}, HandlerState) ->
+	Location = hixie76_location(Transport:name(), Host, Port, Path, QS),
+	{ok, Req2} = cowboy_http_req:upgrade_reply(
+		<<"101 WebSocket Protocol Handshake">>,
+		[{<<"Upgrade">>, <<"WebSocket">>},
+		 {<<"Sec-Websocket-Location">>, Location},
+		 {<<"Sec-Websocket-Origin">>, Origin}],
+		Req#http_req{resp_state=waiting}),
+	%% Flush the resp_sent message before moving on.
+	receive {cowboy_http_req, resp_sent} -> ok after 0 -> ok end,
+	%% We replied with a proper response. Proxies should be happy enough,
+	%% we can now read the 8 last bytes of the challenge keys and send
+	%% the challenge response directly to the socket.
+	case cowboy_http_req:body(8, Req2) of
+		{ok, Key3, Req3} ->
+			Challenge = hixie76_challenge(Key1, Key2, Key3),
+			Transport:send(Socket, Challenge),
+			handler_before_loop(State#state{messages=Transport:messages()},
+				Req3, HandlerState, <<>>);
+		_Any ->
+			closed %% If an error happened reading the body, stop there.
+	end;
+%% hybi handshake: a plain 101 with the computed Sec-Websocket-Accept.
+websocket_handshake(State=#state{challenge=Challenge},
+		Req=#http_req{transport=Transport}, HandlerState) ->
+	{ok, Req2} = cowboy_http_req:upgrade_reply(
+		101,
+		[{<<"Upgrade">>, <<"websocket">>},
+		 {<<"Sec-Websocket-Accept">>, Challenge}],
+		Req#http_req{resp_state=waiting}),
+	%% Flush the resp_sent message before moving on.
+	receive {cowboy_http_req, resp_sent} -> ok after 0 -> ok end,
+	handler_before_loop(State#state{messages=Transport:messages()},
+		Req2, HandlerState, <<>>).
+
+-spec handler_before_loop(#state{}, #http_req{}, any(), binary()) -> closed | none().
+%% Rearm the socket in {active, once} mode and refresh the inactivity
+%% timer before waiting. When hibernation was requested, the wait happens
+%% inside erlang:hibernate/3 (the flag is cleared for the next iteration).
+handler_before_loop(State=#state{hibernate=true},
+		Req=#http_req{socket=Socket, transport=Transport},
+		HandlerState, SoFar) ->
+	Transport:setopts(Socket, [{active, once}]),
+	State2 = handler_loop_timeout(State),
+	erlang:hibernate(?MODULE, handler_loop, [State2#state{hibernate=false},
+		Req, HandlerState, SoFar]);
+handler_before_loop(State, Req=#http_req{socket=Socket, transport=Transport},
+		HandlerState, SoFar) ->
+	Transport:setopts(Socket, [{active, once}]),
+	State2 = handler_loop_timeout(State),
+	handler_loop(State2, Req, HandlerState, SoFar).
+
+%% @private Cancel any previous inactivity timer and arm a new one.
+%% The timeout message carries a fresh reference so that stale timeouts
+%% from a superseded timer can be told apart in handler_loop/4.
+-spec handler_loop_timeout(#state{}) -> #state{}.
+handler_loop_timeout(State=#state{timeout=infinity}) ->
+	State#state{timeout_ref=undefined};
+handler_loop_timeout(State=#state{timeout=Timeout, timeout_ref=PrevRef}) ->
+	_ = case PrevRef of undefined -> ignore; PrevRef ->
+		erlang:cancel_timer(PrevRef) end,
+	TRef = make_ref(),
+	erlang:send_after(Timeout, self(), {?MODULE, timeout, TRef}),
+	State#state{timeout_ref=TRef}.
+
+%% @private Wait for socket data, socket closure, the inactivity timeout
+%% or any other Erlang message (forwarded to websocket_info/3).
+-spec handler_loop(#state{}, #http_req{}, any(), binary()) -> closed | none().
+handler_loop(State=#state{messages={OK, Closed, Error}, timeout_ref=TRef},
+		Req=#http_req{socket=Socket}, HandlerState, SoFar) ->
+	receive
+		{OK, Socket, Data} ->
+			%% Append to the unparsed buffer and try to decode frames.
+			websocket_data(State, Req, HandlerState,
+				<< SoFar/binary, Data/binary >>);
+		{Closed, Socket} ->
+			handler_terminate(State, Req, HandlerState, {error, closed});
+		{Error, Socket, Reason} ->
+			handler_terminate(State, Req, HandlerState, {error, Reason});
+		{?MODULE, timeout, TRef} ->
+			websocket_close(State, Req, HandlerState, {normal, timeout});
+		%% Timeout from an older, superseded timer: ignore it.
+		{?MODULE, timeout, OlderTRef} when is_reference(OlderTRef) ->
+			handler_loop(State, Req, HandlerState, SoFar);
+		Message ->
+			handler_call(State, Req, HandlerState,
+				SoFar, websocket_info, Message, fun handler_before_loop/4)
+	end.
+
+-spec websocket_data(#state{}, #http_req{}, any(), binary()) -> closed | none().
+%% No more data.
+websocket_data(State, Req, HandlerState, <<>>) ->
+	handler_before_loop(State, Req, HandlerState, <<>>);
+%% hixie-76 close frame.
+websocket_data(State=#state{version=0}, Req, HandlerState,
+		<< 255, 0, _Rest/binary >>) ->
+	websocket_close(State, Req, HandlerState, {normal, closed});
+%% hixie-76 data frame. We only support the frame type 0, same as the specs.
+%% Frames are 0x00-prefixed and 0xFF-terminated; eop holds the precompiled
+%% pattern for that terminator byte.
+websocket_data(State=#state{version=0, eop=EOP}, Req, HandlerState,
+		Data = << 0, _/binary >>) ->
+	case binary:match(Data, EOP) of
+		{Pos, 1} ->
+			Pos2 = Pos - 1,
+			<< 0, Payload:Pos2/binary, 255, Rest/bits >> = Data,
+			handler_call(State, Req, HandlerState,
+				Rest, websocket_handle, {text, Payload}, fun websocket_data/4);
+		nomatch ->
+			%% @todo We probably should allow limiting frame length.
+			handler_before_loop(State, Req, HandlerState, Data)
+	end;
+%% incomplete hybi data frame: at least two bytes are needed for the header.
+websocket_data(State=#state{version=Version}, Req, HandlerState, Data)
+		when Version =/= 0, byte_size(Data) =:= 1 ->
+	handler_before_loop(State, Req, HandlerState, Data);
+%% hybi data frame.
+%% @todo Handle Fin.
+%% The 7-bit length field is extended by 16 or 64 bits when it holds the
+%% markers 126 or 127; control frames (opcode >= 8) must not use extended
+%% lengths. An extended length split across packets is passed down as
+%% undefined so more data gets buffered first.
+websocket_data(State=#state{version=Version}, Req, HandlerState, Data)
+		when Version =/= 0 ->
+	<< 1:1, 0:3, Opcode:4, Mask:1, PayloadLen:7, Rest/bits >> = Data,
+	case {PayloadLen, Rest} of
+		{126, _} when Opcode >= 8 -> websocket_close(
+			State, Req, HandlerState, {error, protocol});
+		{127, _} when Opcode >= 8 -> websocket_close(
+			State, Req, HandlerState, {error, protocol});
+		{126, << L:16, R/bits >>}  -> websocket_before_unmask(
+			State, Req, HandlerState, Data, R, Opcode, Mask, L);
+		{126, Rest} -> websocket_before_unmask(
+			State, Req, HandlerState, Data, Rest, Opcode, Mask, undefined);
+		{127, << 0:1, L:63, R/bits >>} -> websocket_before_unmask(
+			State, Req, HandlerState, Data, R, Opcode, Mask, L);
+		{127, Rest} -> websocket_before_unmask(
+			State, Req, HandlerState, Data, Rest, Opcode, Mask, undefined);
+		{PayloadLen, Rest} -> websocket_before_unmask(
+			State, Req, HandlerState, Data, Rest, Opcode, Mask, PayloadLen)
+	end;
+%% Something was wrong with the frame. Close the connection.
+websocket_data(State, Req, HandlerState, _Bad) ->
+	websocket_close(State, Req, HandlerState, {error, badframe}).
+
+%% hybi routing depending on whether unmasking is needed.
+%% A masked frame whose payload is fully buffered goes through unmasking;
+%% an unmasked empty frame is dispatched as-is; a masked frame that is not
+%% yet complete (or whose extended length could not be decoded yet) is
+%% buffered until more data arrives.
+-spec websocket_before_unmask(#state{}, #http_req{}, any(), binary(),
+	binary(), opcode(), 0 | 1, non_neg_integer() | undefined)
+	-> closed | none().
+websocket_before_unmask(State, Req, HandlerState, Data,
+		Rest, Opcode, Mask, PayloadLen) ->
+	case {Mask, PayloadLen} of
+		{0, 0} ->
+			websocket_dispatch(State, Req, HandlerState, Rest, Opcode, <<>>);
+		{1, N} when N + 4 > byte_size(Rest); N =:= undefined ->
+			%% @todo We probably should allow limiting frame length.
+			handler_before_loop(State, Req, HandlerState, Data);
+		{1, _N} ->
+			%% The 4-byte mask key precedes the payload.
+			<< MaskKey:32, Payload:PayloadLen/binary, Rest2/bits >> = Rest,
+			websocket_unmask(State, Req, HandlerState, Rest2,
+				Opcode, Payload, MaskKey)
+	end.
+
+%% hybi unmasking.
+-spec websocket_unmask(#state{}, #http_req{}, any(), binary(),
+	opcode(), binary(), mask_key()) -> closed | none().
+websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, Payload, MaskKey) ->
+	websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, Payload, MaskKey, <<>>).
+
+%% XOR the payload against the 32-bit mask key four bytes at a time; the
+%% final 1-3 bytes, if any, are XORed against the matching top bytes of
+%% the key before dispatching the unmasked payload.
+-spec websocket_unmask(#state{}, #http_req{}, any(), binary(),
+	opcode(), binary(), mask_key(), binary()) -> closed | none().
+websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, << O:32, Rest/bits >>, MaskKey, Acc) ->
+	T = O bxor MaskKey,
+	websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, Rest, MaskKey, << Acc/binary, T:32 >>);
+websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, << O:24 >>, MaskKey, Acc) ->
+	<< MaskKey2:24, _:8 >> = << MaskKey:32 >>,
+	T = O bxor MaskKey2,
+	websocket_dispatch(State, Req, HandlerState, RemainingData,
+		Opcode, << Acc/binary, T:24 >>);
+websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, << O:16 >>, MaskKey, Acc) ->
+	<< MaskKey2:16, _:16 >> = << MaskKey:32 >>,
+	T = O bxor MaskKey2,
+	websocket_dispatch(State, Req, HandlerState, RemainingData,
+		Opcode, << Acc/binary, T:16 >>);
+websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, << O:8 >>, MaskKey, Acc) ->
+	<< MaskKey2:8, _:24 >> = << MaskKey:32 >>,
+	T = O bxor MaskKey2,
+	websocket_dispatch(State, Req, HandlerState, RemainingData,
+		Opcode, << Acc/binary, T:8 >>);
+websocket_unmask(State, Req, HandlerState, RemainingData,
+		Opcode, <<>>, _MaskKey, Acc) ->
+	websocket_dispatch(State, Req, HandlerState, RemainingData,
+		Opcode, Acc).
+
+%% hybi dispatching.
+%% Routes a complete, unmasked frame by opcode: text/binary go to the
+%% handler's websocket_handle/3, close ends the connection, ping is
+%% answered with a pong and also forwarded, pong is just forwarded.
+-spec websocket_dispatch(#state{}, #http_req{}, any(), binary(),
+	opcode(), binary()) -> closed | none().
+%% @todo Fragmentation.
+%~ websocket_dispatch(State, Req, HandlerState, RemainingData, 0, Payload) ->
+%% Text frame.
+websocket_dispatch(State, Req, HandlerState, RemainingData, 1, Payload) ->
+	handler_call(State, Req, HandlerState, RemainingData,
+		websocket_handle, {text, Payload}, fun websocket_data/4);
+%% Binary frame.
+websocket_dispatch(State, Req, HandlerState, RemainingData, 2, Payload) ->
+	handler_call(State, Req, HandlerState, RemainingData,
+		websocket_handle, {binary, Payload}, fun websocket_data/4);
+%% Close control frame.
+%% @todo Handle the optional Payload.
+websocket_dispatch(State, Req, HandlerState, _RemainingData, 8, _Payload) ->
+	websocket_close(State, Req, HandlerState, {normal, closed});
+%% Ping control frame. Send a pong back and forward the ping to the handler.
+websocket_dispatch(State, Req=#http_req{socket=Socket, transport=Transport},
+		HandlerState, RemainingData, 9, Payload) ->
+	Len = hybi_payload_length(byte_size(Payload)),
+	Transport:send(Socket, << 1:1, 0:3, 10:4, 0:1, Len/bits, Payload/binary >>),
+	handler_call(State, Req, HandlerState, RemainingData,
+		websocket_handle, {ping, Payload}, fun websocket_data/4);
+%% Pong control frame.
+websocket_dispatch(State, Req, HandlerState, RemainingData, 10, Payload) ->
+	handler_call(State, Req, HandlerState, RemainingData,
+		websocket_handle, {pong, Payload}, fun websocket_data/4).
+
+%% @private Invoke a handler callback and act on its result.
+%% NextState is the continuation (websocket_data/4 or handler_before_loop/4)
+%% fed with whatever data is still buffered. A crash in the callback is
+%% logged and the connection closed with reason {error, handler}.
+-spec handler_call(#state{}, #http_req{}, any(), binary(),
+	atom(), any(), fun()) -> closed | none().
+handler_call(State=#state{handler=Handler, opts=Opts}, Req, HandlerState,
+		RemainingData, Callback, Message, NextState) ->
+	try Handler:Callback(Message, Req, HandlerState) of
+		{ok, Req2, HandlerState2} ->
+			NextState(State, Req2, HandlerState2, RemainingData);
+		{ok, Req2, HandlerState2, hibernate} ->
+			NextState(State#state{hibernate=true},
+				Req2, HandlerState2, RemainingData);
+		{reply, Payload, Req2, HandlerState2} ->
+			websocket_send(Payload, State, Req2),
+			NextState(State, Req2, HandlerState2, RemainingData);
+		{reply, Payload, Req2, HandlerState2, hibernate} ->
+			websocket_send(Payload, State, Req2),
+			NextState(State#state{hibernate=true},
+				Req2, HandlerState2, RemainingData);
+		{shutdown, Req2, HandlerState2} ->
+			websocket_close(State, Req2, HandlerState2, {normal, shutdown})
+	catch Class:Reason ->
+		error_logger:error_msg(
+			"** Handler ~p terminating in ~p/3~n"
+			"   for the reason ~p:~p~n** Message was ~p~n"
+			"** Options were ~p~n** Handler state was ~p~n"
+			"** Request was ~p~n** Stacktrace: ~p~n~n",
+			[Handler, Callback, Class, Reason, Message, Opts,
+				HandlerState, Req, erlang:get_stacktrace()]),
+		websocket_close(State, Req, HandlerState, {error, handler})
+	end.
+
+-spec websocket_send(binary(), #state{}, #http_req{}) -> closed | ignore.
+%% hixie-76 text frame: 0x00-prefixed and 0xFF-terminated.
+websocket_send({text, Payload}, #state{version=0},
+		#http_req{socket=Socket, transport=Transport}) ->
+	Transport:send(Socket, [0, Payload, 255]);
+%% Ignore all unknown frame types for compatibility with hixie 76.
+websocket_send(_Any, #state{version=0}, _Req) ->
+	ignore;
+%% hybi frame: Fin bit set, no extension bits, server-to-client frames
+%% are sent unmasked (Mask bit 0).
+websocket_send({Type, Payload}, _State,
+		#http_req{socket=Socket, transport=Transport}) ->
+	Opcode = case Type of
+		text -> 1;
+		binary -> 2;
+		ping -> 9;
+		pong -> 10
+	end,
+	Len = hybi_payload_length(iolist_size(Payload)),
+	Transport:send(Socket, [<< 1:1, 0:3, Opcode:4, 0:1, Len/bits >>,
+		Payload]).
+
+-spec websocket_close(#state{}, #http_req{}, any(), {atom(), atom()}) -> closed.
+%% hixie-76 closing handshake: the 0xFF 0x00 byte pair.
+websocket_close(State=#state{version=0}, Req=#http_req{socket=Socket,
+		transport=Transport}, HandlerState, Reason) ->
+	Transport:send(Socket, << 255, 0 >>),
+	handler_terminate(State, Req, HandlerState, Reason);
+%% @todo Send a Payload? Using Reason is usually good but we're quite careless.
+%% hybi close: a Fin frame with opcode 8 and an empty payload.
+websocket_close(State, Req=#http_req{socket=Socket,
+		transport=Transport}, HandlerState, Reason) ->
+	Transport:send(Socket, << 1:1, 0:3, 8:4, 0:8 >>),
+	handler_terminate(State, Req, HandlerState, Reason).
+
+%% @private Call the handler's websocket_terminate/3 callback and return
+%% closed. A crash in the callback is logged but cannot change the outcome.
+-spec handler_terminate(#state{}, #http_req{},
+	any(), atom() | {atom(), atom()}) -> closed.
+handler_terminate(#state{handler=Handler, opts=Opts},
+		Req, HandlerState, TerminateReason) ->
+	try
+		Handler:websocket_terminate(TerminateReason, Req, HandlerState)
+	catch Class:Reason ->
+		error_logger:error_msg(
+			"** Handler ~p terminating in websocket_terminate/3~n"
+			"   for the reason ~p:~p~n** Initial reason was ~p~n"
+			"** Options were ~p~n** Handler state was ~p~n"
+			"** Request was ~p~n** Stacktrace: ~p~n~n",
+			[Handler, Class, Reason, TerminateReason, Opts,
+				HandlerState, Req, erlang:get_stacktrace()])
+	end,
+	closed.
+
+%% hixie-76 specific.
+
+%% @private Compute the hixie-76 handshake response: the MD5 of the two
+%% decoded 32-bit key numbers followed by the 8 bytes read from the body.
+-spec hixie76_challenge(binary(), binary(), binary()) -> binary().
+hixie76_challenge(Key1, Key2, Key3) ->
+	IntKey1 = hixie76_key_to_integer(Key1),
+	IntKey2 = hixie76_key_to_integer(Key2),
+	erlang:md5(<< IntKey1:32, IntKey2:32, Key3/binary >>).
+
+%% @private Decode a hixie-76 Sec-Websocket-Key header value.
+%% The digits embedded in the key are concatenated into an integer which
+%% is then divided by the number of space characters in the key.
+-spec hixie76_key_to_integer(binary()) -> integer().
+hixie76_key_to_integer(Key) ->
+	Digits = [D || << D >> <= Key, D >= $0, D =< $9],
+	SpaceCount = length([S || << S >> <= Key, S =:= $\s]),
+	list_to_integer(Digits) div SpaceCount.
+
%% @doc Build the WebSocket-Location value for a hixie-76 handshake.
%% The port is omitted when it is the scheme's default, and the query
%% string (with its "?") only appears when non-empty.
-spec hixie76_location(atom(), binary(), inet:ip_port(), binary(), binary())
	-> binary().
hixie76_location(Protocol, Host, Port, Path, QS) ->
	Scheme = hixie76_location_protocol(Protocol),
	PortBin = hixie76_location_port(Protocol, Port),
	QSBin = case QS of
		<<>> -> <<>>;
		_ -> << "?", QS/binary >>
	end,
	<< Scheme/binary, "://", Host/binary, PortBin/binary,
		Path/binary, QSBin/binary >>.
+
-spec hixie76_location_protocol(atom()) -> binary().
%% The location scheme is "wss" over SSL and "ws" over anything else.
hixie76_location_protocol(Transport) ->
	case Transport of
		ssl -> <<"wss">>;
		_ -> <<"ws">>
	end.
+
%% @todo We should add a secure/0 function to transports
%% instead of relying on their name.
-spec hixie76_location_port(atom(), inet:ip_port()) -> binary().
%% The port is omitted from the location when it is the scheme's
%% default: 443 for wss (ssl) and 80 for ws (tcp).
hixie76_location_port(ssl, 443) ->
	<<>>;
hixie76_location_port(tcp, 80) ->
	<<>>;
hixie76_location_port(_, Port) ->
	list_to_binary([$: | integer_to_list(Port)]).
+
+%% hybi specific.
+
%% @doc Compute the Sec-WebSocket-Accept value for a hybi handshake:
%% the client key concatenated with the fixed protocol GUID, SHA-1
%% hashed and Base64 encoded.
-spec hybi_challenge(binary()) -> binary().
hybi_challenge(Key) ->
	Bin = << Key/binary, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" >>,
	%% crypto:sha/1 was deprecated and removed in OTP 24;
	%% crypto:hash(sha, _) produces the identical digest.
	base64:encode(crypto:hash(sha, Bin)).
+
%% @doc Encode a hybi frame payload length: 7 bits up to 125, the
%% escape value 126 plus 16 bits up to 16#ffff, and the escape value
%% 127 plus 64 bits beyond that.
-spec hybi_payload_length(0..16#7fffffffffffffff)
	-> << _:7 >> | << _:23 >> | << _:71 >>.
hybi_payload_length(N) when N =< 125 ->
	<< N:7 >>;
hybi_payload_length(N) when N =< 16#ffff ->
	<< 126:7, N:16 >>;
hybi_payload_length(N) when N =< 16#7fffffffffffffff ->
	<< 127:7, N:64 >>.
+
+%% Tests.
+
+-ifdef(TEST).
+
hixie76_location_test() ->
	%% {Transport, Port, QS, ExpectedLocation} -- host and path fixed.
	Tests = [
		{tcp, 80, <<>>, <<"ws://localhost/path">>},
		{tcp, 443, <<>>, <<"ws://localhost:443/path">>},
		{tcp, 8080, <<>>, <<"ws://localhost:8080/path">>},
		{tcp, 8080, <<"dummy=2785">>, <<"ws://localhost:8080/path?dummy=2785">>},
		{ssl, 443, <<>>, <<"wss://localhost/path">>},
		{ssl, 8443, <<>>, <<"wss://localhost:8443/path">>},
		{ssl, 8443, <<"dummy=2785">>, <<"wss://localhost:8443/path?dummy=2785">>}
	],
	[?assertEqual(Expected,
			hixie76_location(Transport, <<"localhost">>, Port, <<"/path">>, QS))
		|| {Transport, Port, QS, Expected} <- Tests],
	ok.
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Handler for HTTP WebSocket requests.
+%%
+%% WebSocket handlers must implement four callbacks: <em>websocket_init/3</em>,
+%% <em>websocket_handle/3</em>, <em>websocket_info/3</em> and
+%% <em>websocket_terminate/3</em>. These callbacks will only be called if the
+%% connection is upgraded to WebSocket in the HTTP handler's <em>init/3</em>
+%% callback. They are then called in that order, although
+%% <em>websocket_handle/3</em> will be called for each packet received,
+%% and <em>websocket_info</em> for each message received.
+%%
+%% <em>websocket_init/3</em> is meant for initialization. It receives
+%% information about the transport and protocol used, along with the handler
+%% options from the dispatch list. You can define a request-wide state here.
+%% If you are going to want to compact the request, you should probably do it
+%% here.
+%%
+%% <em>websocket_handle/3</em> receives the data from the socket. It can reply
+%% something, do nothing or close the connection.
+%%
+%% <em>websocket_info/3</em> receives messages sent to the process. It has
+%% the same reply format as <em>websocket_handle/3</em> described above. Note
+%% that unlike in a <em>gen_server</em>, when <em>websocket_info/3</em>
+%% replies something, it is always to the socket, not to the process that
+%% originated the message.
+%%
+%% <em>websocket_terminate/3</em> is meant for cleaning up. It also receives
+%% the request and the state previously defined, along with a reason for
+%% termination.
+%%
+%% All of <em>websocket_init/3</em>, <em>websocket_handle/3</em> and
+%% <em>websocket_info/3</em> can decide to hibernate the process by adding
+%% an extra element to the returned tuple, containing the atom
+%% <em>hibernate</em>. Doing so helps save memory and improve CPU usage.
+-module(cowboy_http_websocket_handler).
+
+-export([behaviour_info/1]).
+
%% @private
%% @doc Return the callbacks a WebSocket handler module must export.
-spec behaviour_info(_)
	-> undefined | [{websocket_handle, 3} | {websocket_info, 3}
		| {websocket_init, 3} | {websocket_terminate, 3}, ...].
behaviour_info(Item) ->
	case Item of
		callbacks ->
			%% Listed in the order they are first called.
			[{websocket_init, 3}, {websocket_handle, 3},
				{websocket_info, 3}, {websocket_terminate, 3}];
		_ ->
			undefined
	end.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Public API for managing listeners.
+-module(cowboy_listener).
+-behaviour(gen_server).
+
+-export([start_link/0, stop/1,
+ add_connection/3, move_connection/3, remove_connection/2, wait/3]). %% API.
+-export([init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2, code_change/3]). %% gen_server.
+
-record(state, {
	%% Current number of connections per pool, as a keylist.
	req_pools = [] :: [{atom(), non_neg_integer()}],
	%% Private ets set mapping ConnPid -> {MonitorRef, Pool}.
	reqs_table,
	%% Callers blocked in wait/3, newest first (see handle_call/remove_pid).
	queue = []
}).
+
+%% API.
+
%% @private
%%
%% We set the process priority to high because cowboy_listener is the central
%% gen_server in Cowboy and is used to manage all the incoming connections.
%% Setting the process priority to high ensures the connection-related code
%% will always be executed when a connection needs it, allowing Cowboy to
%% scale far beyond what it would with a normal priority.
-spec start_link() -> {ok, pid()}.
start_link() ->
	StartOpts = [{spawn_opt, [{priority, high}]}],
	gen_server:start_link(?MODULE, [], StartOpts).
+
%% @private
%% @doc Synchronously stop the listener gen_server.
-spec stop(pid()) -> stopped.
stop(ServerPid) ->
	gen_server:call(ServerPid, stop).
+
%% @doc Add a connection to the given pool in the listener.
%%
%% Pools of connections are used to restrict the maximum number of connections
%% depending on their type. By default, Cowboy adds all connections to the
%% pool <em>default</em>. It also checks for the maximum number of connections
%% in that pool before accepting again.
%%
%% When a process managing a connection dies, the process is removed from the
%% pool. If the socket has been sent to another process, it is up to the
%% protocol code to inform the listener of the new <em>ConnPid</em> by removing
%% the previous and adding the new one.
%%
%% Returns the new number of connections in the pool.
-spec add_connection(pid(), atom(), pid()) -> {ok, non_neg_integer()}.
add_connection(ServerPid, Pool, ConnPid) ->
	gen_server:call(ServerPid, {add_connection, Pool, ConnPid}).
+
%% @doc Move a connection from one pool to another.
%%
%% Asynchronous: the pool counters are updated by the listener process.
-spec move_connection(pid(), atom(), pid()) -> ok.
move_connection(ServerPid, DestPool, ConnPid) ->
	gen_server:cast(ServerPid, {move_connection, DestPool, ConnPid}).
+
%% @doc Remove the given connection from its pool.
%%
%% Asynchronous: the monitor and table entry are cleaned up by the listener.
-spec remove_connection(pid(), pid()) -> ok.
remove_connection(ServerPid, ConnPid) ->
	gen_server:cast(ServerPid, {remove_connection, ConnPid}).
+
%% @doc Wait until the number of connections in the given pool gets below
%% the given threshold.
%%
%% This function will not return until the number of connections in the pool
%% gets below <em>MaxConns</em>. It makes use of <em>gen_server:reply/2</em>
%% to make the process wait for a reply indefinitely.
%% NOTE(review): waiters are kept in a single LIFO list shared by all pools
%% and one waiter is woken per connection removal regardless of which pool
%% it waited on -- confirm this is intended before relying on per-pool
%% fairness (see handle_call and remove_pid).
-spec wait(pid(), atom(), non_neg_integer()) -> ok.
wait(ServerPid, Pool, MaxConns) ->
	gen_server:call(ServerPid, {wait, Pool, MaxConns}, infinity).
+
+%% gen_server.
+
%% @private
-spec init([]) -> {ok, #state{}}.
init([]) ->
	%% ets:new/2 returns a table identifier (not a pid); the table is
	%% private to this process and maps ConnPid -> {MonitorRef, Pool}.
	ReqsTable = ets:new(requests_table, [set, private]),
	{ok, #state{reqs_table=ReqsTable}}.
+
%% @private
-spec handle_call(_, _, State)
	-> {reply, ignored, State} | {stop, normal, stopped, State}.
handle_call({add_connection, Pool, ConnPid}, _From, State=#state{
		req_pools=Pools, reqs_table=ReqsTable}) ->
	%% Monitor the connection so it is removed from its pool
	%% automatically if the process dies without deregistering.
	MonitorRef = erlang:monitor(process, ConnPid),
	{NbConns, Pools2} = case lists:keyfind(Pool, 1, Pools) of
		false ->
			%% First connection in this pool.
			{1, [{Pool, 1}|Pools]};
		{Pool, Current} ->
			Updated = Current + 1,
			{Updated, [{Pool, Updated}|lists:keydelete(Pool, 1, Pools)]}
	end,
	ets:insert(ReqsTable, {ConnPid, {MonitorRef, Pool}}),
	{reply, {ok, NbConns}, State#state{req_pools=Pools2}};
handle_call({wait, Pool, MaxConns}, From, State=#state{
		req_pools=Pools, queue=Queue}) ->
	%% Delay the reply until the pool drains below MaxConns; the caller
	%% is woken from remove_pid/2 through gen_server:reply/2.
	case lists:keyfind(Pool, 1, Pools) of
		{Pool, NbConns} when NbConns > MaxConns ->
			{noreply, State#state{queue=[From|Queue]}};
		_Any ->
			{reply, ok, State}
	end;
handle_call(stop, _From, State) ->
	{stop, normal, stopped, State};
handle_call(_Request, _From, State) ->
	{reply, ignored, State}.
+
%% @private
-spec handle_cast(_, State) -> {noreply, State}.
%% Move a connection to <em>DestPool</em>, keeping its monitor intact.
%% Moving a connection to the pool it is already in is now a no-op:
%% previously this built two keylist entries for the same pool with
%% inconsistent counters ([{Pool, N-1}, {Pool, N+1}|...]).
handle_cast({move_connection, DestPool, ConnPid}, State=#state{
		req_pools=Pools, reqs_table=ReqsTable}) ->
	{MonitorRef, SrcPool} = ets:lookup_element(ReqsTable, ConnPid, 2),
	case SrcPool of
		DestPool ->
			%% Already in the destination pool; nothing to update.
			{noreply, State};
		_ ->
			ets:insert(ReqsTable, {ConnPid, {MonitorRef, DestPool}}),
			{SrcPool, SrcNbConns} = lists:keyfind(SrcPool, 1, Pools),
			DestNbConns = case lists:keyfind(DestPool, 1, Pools) of
				false -> 1;
				{DestPool, NbConns} -> NbConns + 1
			end,
			Pools2 = lists:keydelete(SrcPool, 1,
				lists:keydelete(DestPool, 1, Pools)),
			Pools3 = [{SrcPool, SrcNbConns - 1},
				{DestPool, DestNbConns}|Pools2],
			{noreply, State#state{req_pools=Pools3}}
	end;
handle_cast({remove_connection, ConnPid}, State) ->
	{noreply, remove_pid(ConnPid, State)};
handle_cast(_Msg, State) ->
	{noreply, State}.
+
%% @private
-spec handle_info(_, State) -> {noreply, State}.
%% A monitored connection process died: clean it up as if it had been
%% removed explicitly. Any other message is silently ignored.
handle_info({'DOWN', _Ref, process, Pid, _Info}, State) ->
	{noreply, remove_pid(Pid, State)};
handle_info(_Info, State) ->
	{noreply, State}.
+
%% @private
%% No cleanup needed: the only resource is the private ets table,
%% which is reclaimed automatically when this process exits.
-spec terminate(_, _) -> ok.
terminate(_Reason, _State) ->
	ok.
+
%% @private
%% The state record layout has not changed across versions.
-spec code_change(_, State, _) -> {ok, State}.
code_change(_OldVsn, State, _Extra) ->
	{ok, State}.
+
+%% Internal.
+
+%% @private
+-spec remove_pid(pid(), State) -> State.
+remove_pid(Pid, State=#state{
+ req_pools=Pools, reqs_table=ReqsTable, queue=Queue}) ->
+ {MonitorRef, Pool} = ets:lookup_element(ReqsTable, Pid, 2),
+ erlang:demonitor(MonitorRef, [flush]),
+ {Pool, NbConns} = lists:keyfind(Pool, 1, Pools),
+ Pools2 = [{Pool, NbConns - 1}|lists:keydelete(Pool, 1, Pools)],
+ ets:delete(ReqsTable, Pid),
+ case Queue of
+ [] ->
+ State#state{req_pools=Pools2};
+ [Client|Queue2] ->
+ gen_server:reply(Client, ok),
+ State#state{req_pools=Pools2, queue=Queue2}
+ end.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @private
+-module(cowboy_listener_sup).
+-behaviour(supervisor).
+
+-export([start_link/5]). %% API.
+-export([init/1]). %% supervisor.
+
+%% API.
+
%% @doc Start this listener's supervision tree.
%%
%% Children are started manually, in dependency order: the listener
%% gen_server and the requests supervisor must exist before the
%% acceptors, which receive both pids as arguments.
-spec start_link(non_neg_integer(), module(), any(), module(), any())
	-> {ok, pid()}.
start_link(NbAcceptors, Transport, TransOpts, Protocol, ProtoOpts) ->
	{ok, SupPid} = supervisor:start_link(?MODULE, []),
	ListenerSpec = {cowboy_listener, {cowboy_listener, start_link, []},
		permanent, 5000, worker, [cowboy_listener]},
	{ok, ListenerPid} = supervisor:start_child(SupPid, ListenerSpec),
	RequestsSpec = {cowboy_requests_sup,
		{cowboy_requests_sup, start_link, []},
		permanent, 5000, supervisor, [cowboy_requests_sup]},
	{ok, ReqsPid} = supervisor:start_child(SupPid, RequestsSpec),
	AcceptorsSpec = {cowboy_acceptors_sup,
		{cowboy_acceptors_sup, start_link, [
			NbAcceptors, Transport, TransOpts,
			Protocol, ProtoOpts, ListenerPid, ReqsPid]},
		permanent, 5000, supervisor, [cowboy_acceptors_sup]},
	{ok, _PoolPid} = supervisor:start_child(SupPid, AcceptorsSpec),
	{ok, SupPid}.
+
+%% supervisor.
+
-spec init([]) -> {ok, {{one_for_all, 10, 10}, []}}.
init([]) ->
	%% Children are added dynamically from start_link/5; one_for_all
	%% restarts the whole tree if any of them dies.
	Strategy = {one_for_all, 10, 10},
	{ok, {Strategy, []}}.
--- /dev/null
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
%% @doc Multipart parser.
-module(cowboy_multipart).

%% The parser is written in continuation-passing style: each step either
%% returns a final result or {more, Fun}, where Fun must be called with
%% the buffered remainder concatenated with newly received data.
-type part_parser() :: any().
-type parser(T) :: fun((binary()) -> T).
-type more(T) :: T | {more, parser(T)}.
-type part_result() :: any().
-type headers() :: any().
-type http_headers() :: [{atom() | binary(), binary()}].
-type body_cont() :: any().
-type cont(T) :: fun(() -> T).
-type body_result() :: any().
-type end_of_part() :: {end_of_part, cont(more(part_result()))}.
%% A parsed Content-Disposition value: {Disposition, Params}.
-type disposition() :: {binary(), [{binary(), binary()}]}.

-export([parser/1, content_disposition/1]).

-include_lib("eunit/include/eunit.hrl").
+
+%% API.
+
%% @doc Return a multipart parser for the given boundary.
%%
%% The returned fun takes the first chunk of data and starts the
%% continuation-based parsing (see parse/2).
-spec parser(binary()) -> part_parser().
parser(Boundary) when is_binary(Boundary) ->
	fun (Bin) when is_binary(Bin) -> parse(Bin, Boundary) end.
+
%% @doc Parse a content disposition.
%% Returns {Disposition, Params} or {error, badarg} on an empty token.
%% @todo Parse the MIME header instead of the HTTP one.
-spec content_disposition(binary()) -> disposition().
content_disposition(Data) ->
	OnToken = fun
		(_Rest, <<>>) ->
			{error, badarg};
		(Rest, Disposition) ->
			cowboy_http:content_type_params(Rest,
				fun (Params) -> {Disposition, Params} end, [])
	end,
	cowboy_http:token_ci(Data, OnToken).
+
+%% Internal.
+
%% @doc Entry point of the multipart parser, skips over the preamble if any.
-spec parse(binary(), binary()) -> more(part_result()).
parse(Bin, Boundary) when byte_size(Bin) >= byte_size(Boundary) + 2 ->
	BoundarySize = byte_size(Boundary),
	Pattern = pattern(Boundary),
	case Bin of
		<<"--", Boundary:BoundarySize/binary, Tail/binary>> ->
			%% The data begins with the initial boundary: no preamble.
			parse_boundary_tail(Tail, Pattern);
		_ ->
			%% Everything up to the first boundary is preamble.
			skip(Bin, Pattern)
	end;
parse(Bin, Boundary) ->
	%% Too short to tell whether the data begins with a boundary.
	more(Bin, fun (NewBin) -> parse(NewBin, Boundary) end).
+
-type pattern() :: {binary:cp(), non_neg_integer()}.

%% @doc Return a compiled binary pattern with its size in bytes.
%% The pattern is the boundary prepended with "\r\n--".
-spec pattern(binary()) -> pattern().
pattern(Boundary) ->
	Delimiter = <<"\r\n--", Boundary/binary>>,
	{binary:compile_pattern(Delimiter), byte_size(Delimiter)}.
+
%% @doc Parse remaining characters of a line beginning with the boundary.
%% If followed by "--", <em>eof</em> is returned and parsing is finished.
-spec parse_boundary_tail(binary(), pattern()) -> more(part_result()).
parse_boundary_tail(<<"--", _/binary>>, _Pattern) ->
	%% The closing delimiter is the boundary followed by two dashes.
	eof;
parse_boundary_tail(Bin, Pattern) when byte_size(Bin) >= 2 ->
	%% Not the closing delimiter: unknown chars and LWSP may precede
	%% the CRLF ending the boundary line.
	parse_boundary_eol(Bin, Pattern);
parse_boundary_tail(Bin, Pattern) ->
	%% Fewer than two bytes: cannot decide whether "--" follows yet.
	more(Bin, fun (NewBin) -> parse_boundary_tail(NewBin, Pattern) end).
+
%% @doc Skip whitespace and unknown chars until CRLF.
-spec parse_boundary_eol(binary(), pattern()) -> more(part_result()).
parse_boundary_eol(Bin, Pattern) ->
	case binary:match(Bin, <<"\r\n">>) of
		{CrlfStart, _Length} ->
			%% Drop everything before the CRLF, then strip optional
			%% linear whitespace before handling the CRLF itself.
			<<_:CrlfStart/binary, Rest/binary>> = Bin,
			cowboy_http:whitespace(Rest,
				fun (Rest2) -> parse_boundary_crlf(Rest2, Pattern) end);
		nomatch ->
			%% No CRLF yet: keep only the last byte, which may be the
			%% '\r' of a CRLF split across two chunks.
			RestStart = max(byte_size(Bin) - 1, 0),
			<<_:RestStart/binary, Rest/binary>> = Bin,
			more(Rest, fun (NewBin) -> parse_boundary_eol(NewBin, Pattern) end)
	end.
+
-spec parse_boundary_crlf(binary(), pattern()) -> more(part_result()).
parse_boundary_crlf(<<"\r\n", Rest/binary>>, Pattern) ->
	% The binary is at least 2 bytes long as this function is only called by
	% parse_boundary_eol/3 when CRLF has been found so a more tuple will never
	% be returned from here.
	parse_headers(Rest, Pattern);
parse_boundary_crlf(Bin, Pattern) ->
	% Unspecified behaviour here: RFC 2046 doesn't say what to do when LWSP is
	% not followed directly by a new line. In this implementation it is
	% considered part of the boundary so EOL needs to be searched again.
	parse_boundary_eol(Bin, Pattern).
+
%% Start parsing the headers of a new part with an empty accumulator.
-spec parse_headers(binary(), pattern()) -> more(part_result()).
parse_headers(Bin, Pattern) ->
	parse_headers(Bin, Pattern, []).
+
-spec parse_headers(binary(), pattern(), http_headers()) -> more(part_result()).
%% Accumulate the part's headers one at a time; on the empty line ending
%% the header block, hand over (lazily) to the body parser.
parse_headers(Bin, Pattern, Acc) ->
	%% httph_bin mode parses one header per call; note that well-known
	%% header names are returned as atoms (e.g. 'Server'), others as binaries.
	case erlang:decode_packet(httph_bin, Bin, []) of
		{ok, {http_header, _, Name, _, Value}, Rest} ->
			parse_headers(Rest, Pattern, [{Name, Value} | Acc]);
		{ok, http_eoh, Rest} ->
			Headers = lists:reverse(Acc),
			{headers, Headers, fun () -> parse_body(Rest, Pattern) end};
		{ok, {http_error, _}, _} ->
			% Skip malformed parts.
			skip(Bin, Pattern);
		{more, _} ->
			more(Bin, fun (NewBin) -> parse_headers(NewBin, Pattern, Acc) end)
	end.
+
-spec parse_body(binary(), pattern()) -> more(body_result()).
%% Stream the part body up to the next boundary delimiter.
parse_body(Bin, Pattern = {P, PSize}) when byte_size(Bin) >= PSize ->
	case binary:match(Bin, P) of
		{0, _Length} ->
			%% Boundary right at the start: the part body is empty here.
			<<_:PSize/binary, Rest/binary>> = Bin,
			end_of_part(Rest, Pattern);
		{BoundaryStart, _Length} ->
			% Boundary found, this is the latest partial body that will be
			% returned for this part.
			<<PBody:BoundaryStart/binary, _:PSize/binary, Rest/binary>> = Bin,
			FResult = end_of_part(Rest, Pattern),
			{body, PBody, fun () -> FResult end};
		nomatch ->
			%% No boundary: return all bytes except the last PSize-1,
			%% which could be the start of a boundary split across chunks.
			PartialLength = byte_size(Bin) - PSize + 1,
			<<PBody:PartialLength/binary, Rest/binary>> = Bin,
			{body, PBody, fun () -> parse_body(Rest, Pattern) end}
	end;
parse_body(Bin, Pattern) ->
	%% Shorter than the boundary pattern itself: need more data.
	more(Bin, fun (NewBin) -> parse_body(NewBin, Pattern) end).
+
%% Signal the end of a part; parsing resumes lazily at the boundary tail.
-spec end_of_part(binary(), pattern()) -> end_of_part().
end_of_part(Bin, Pattern) ->
	{end_of_part, fun () -> parse_boundary_tail(Bin, Pattern) end}.
+
%% Discard bytes (preamble or malformed part) until the next boundary.
-spec skip(binary(), pattern()) -> more(part_result()).
skip(Bin, Pattern = {P, PSize}) ->
	case binary:match(Bin, P) of
		{BoundaryStart, _Length} ->
			%% Resume parsing right after the boundary delimiter.
			RestStart = BoundaryStart + PSize,
			<<_:RestStart/binary, Rest/binary>> = Bin,
			parse_boundary_tail(Rest, Pattern);
		nomatch ->
			%% Keep the last PSize-1 bytes: they could be the start of
			%% a boundary split across two chunks.
			RestStart = max(byte_size(Bin) - PSize + 1, 0),
			<<_:RestStart/binary, Rest/binary>> = Bin,
			more(Rest, fun (NewBin) -> skip(NewBin, Pattern) end)
	end.
+
-spec more(binary(), parser(T)) -> {more, parser(T)}.
%% Request more data. The unparsed remainder <em>Bin</em> is captured so
%% that the continuation sees it prepended to whatever arrives next.
more(<<>>, F) ->
	{more, F};
more(Bin, InnerF) ->
	{more, fun (NewData) when is_binary(NewData) ->
		InnerF(<<Bin/binary, NewData/binary>>)
	end}.
+
+%% Tests.
+
+-ifdef(TEST).
+
multipart_test_() ->
	%% {Body, ExpectedParts} pairs; each part is {Headers, Body}.
	Tests = [
		{<<"--boundary--">>, []},
		{<<"preamble\r\n--boundary--">>, []},
		{<<"--boundary--\r\nepilogue">>, []},
		{<<"\r\n--boundary\r\nA:b\r\nC:d\r\n\r\n\r\n--boundary--">>,
			[{[{<<"A">>, <<"b">>}, {<<"C">>, <<"d">>}], <<>>}]},
		{
			<<
				"--boundary\r\nX-Name:answer\r\n\r\n42"
				"\r\n--boundary\r\nServer:Cowboy\r\n\r\nIt rocks!\r\n"
				"\r\n--boundary--"
			>>,
			[
				{[{<<"X-Name">>, <<"answer">>}], <<"42">>},
				{[{'Server', <<"Cowboy">>}], <<"It rocks!\r\n">>}
			]
		}
	],
	[{title(Body), fun () -> Expected = acc_multipart(Body) end}
		|| {Body, Expected} <- Tests].
+
%% Drive the parser to completion over a single binary, accumulating a
%% {Headers, Body} pair for each part, in document order.
acc_multipart(V) ->
	acc_multipart((parser(<<"boundary">>))(V), []).

acc_multipart({headers, Headers, Cont}, Acc) ->
	%% New part: start it with an empty list of body chunks.
	acc_multipart(Cont(), [{Headers, []}|Acc]);
acc_multipart({body, Body, Cont}, [{Headers, BodyAcc}|Acc]) ->
	acc_multipart(Cont(), [{Headers, [Body|BodyAcc]}|Acc]);
acc_multipart({end_of_part, Cont}, [{Headers, BodyAcc}|Acc]) ->
	%% Part complete: flatten its body chunks back into order.
	Body = list_to_binary(lists:reverse(BodyAcc)),
	acc_multipart(Cont(), [{Headers, Body}|Acc]);
acc_multipart(eof, Acc) ->
	lists:reverse(Acc).
+
content_disposition_test_() ->
	%% {Disposition, Result} pairs; Result is {Token, Params}.
	Tests = [
		{<<"form-data; name=id">>, {<<"form-data">>, [{<<"name">>, <<"id">>}]}},
		{<<"inline">>, {<<"inline">>, []}},
		{<<"attachment; \tfilename=brackets-slides.pdf">>,
			{<<"attachment">>, [{<<"filename">>, <<"brackets-slides.pdf">>}]}}
	],
	[{title(V), fun () -> R = content_disposition(V) end} || {V, R} <- Tests].
+
%% Escape tabs, carriage returns and newlines so a raw test body can be
%% used as a readable eunit test title.
title(Bin) ->
	Replacements = [{"\t", "\\\\t"}, {"\r", "\\\\r"}, {"\n", "\\\\n"}],
	Escaped = lists:foldl(
		fun ({Pattern, Replacement}, Acc) ->
			re:replace(Acc, Pattern, Replacement, [global])
		end, Bin, Replacements),
	iolist_to_binary(Escaped).
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Michiel Hakvoort <michiel@hakvoort.it>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc Cowboy protocol.
+%%
+%% A Cowboy protocol must implement one callback: <em>start_link/4</em>.
+%%
+%% <em>start_link/4</em> is meant for the initialization of the
+%% protocol process.
+%% It receives the pid to the listener's gen_server, the client socket,
+%% the module name of the chosen transport and the options defined when
+%% starting the listener. The <em>start_link/4</em> function must follow
+%% the supervisor start function specification.
+%%
+%% After initializing your protocol, it is recommended to call the
+%% function cowboy:accept_ack/1 with the ListenerPid as argument,
+%% as it will ensure Cowboy has been able to fully initialize the socket.
+%% Anything you do past this point is up to you!
+%%
+%% If you need to change some socket options, like enabling raw mode
+%% for example, you can call the <em>Transport:setopts/2</em> function.
%% It is the protocol's responsibility to manage the socket usage;
%% there should be no need for a user to specify that kind of options
+%% while starting a listener.
+%%
+%% You should definitely look at the cowboy_http_protocol module for
+%% a great example of fast request handling if you need to.
+%% Otherwise it's probably safe to use <code>{active, once}</code> mode
+%% and handle everything as it comes.
+%%
+%% Note that while you technically can run a protocol handler directly
+%% as a gen_server or a gen_fsm, it's probably not a good idea,
+%% as the only call you'll ever receive from Cowboy is the
+%% <em>start_link/4</em> call. On the other hand, feel free to write
+%% a very basic protocol handler which then forwards requests to a
+%% gen_server or gen_fsm. By doing so however you must take care to
+%% supervise their processes as Cowboy only knows about the protocol
+%% handler itself.
+-module(cowboy_protocol).
+
+-export([behaviour_info/1]).
+
%% @private
%% @doc Return the single callback a Cowboy protocol must export.
-spec behaviour_info(_)
	-> undefined | [{start_link, 4}, ...].
behaviour_info(Item) ->
	case Item of
		callbacks -> [{start_link, 4}];
		_ -> undefined
	end.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @private
+-module(cowboy_requests_sup).
+-behaviour(supervisor).
+
+-export([start_link/0, start_request/5]). %% API.
+-export([init/1]). %% supervisor.
+
+%% API.
+
%% Start the simple_one_for_one supervisor for request processes.
-spec start_link() -> {ok, pid()}.
start_link() ->
	supervisor:start_link(?MODULE, []).
+
%% Child start function: delegates to the configured protocol module's
%% start_link/4, which must follow the supervisor start spec.
-spec start_request(pid(), inet:socket(), module(), module(), any())
	-> {ok, pid()}.
start_request(ListenerPid, Socket, Transport, Protocol, Opts) ->
	Protocol:start_link(ListenerPid, Socket, Transport, Opts).
+
+%% supervisor.
+
-spec init([]) -> {ok, {{simple_one_for_one, 0, 1}, [{_, _, _, _, _, _}, ...]}}.
init([]) ->
	%% One dynamic child template: request processes are started through
	%% start_request/5 and are never restarted (temporary).
	ChildSpec = {?MODULE, {?MODULE, start_request, []},
		temporary, brutal_kill, worker, [?MODULE]},
	{ok, {{simple_one_for_one, 0, 1}, [ChildSpec]}}.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc SSL transport API.
+%%
+%% Wrapper around <em>ssl</em> implementing the Cowboy transport API.
+%%
+%% This transport requires the <em>crypto</em>, <em>public_key</em>
+%% and <em>ssl</em> applications to be started. If they aren't started,
+%% it will try to start them itself before opening a port to listen.
+%% Applications aren't stopped when the listening socket is closed, though.
+%%
+%% @see ssl
+-module(cowboy_ssl_transport).
+-export([name/0, messages/0, listen/1, accept/2, recv/3, send/2, setopts/2,
+ controlling_process/2, peername/1, close/1]).
+
%% @doc Name of this transport API, <em>ssl</em>.
%% Used by protocol code to identify the transport (e.g. scheme choice).
-spec name() -> ssl.
name() -> ssl.
+
%% @doc Atoms used in the process messages sent by this API.
%%
%% They identify incoming data, closed connection and errors when receiving
%% data in active mode, in that order.
-spec messages() -> {ssl, ssl_closed, ssl_error}.
messages() -> {ssl, ssl_closed, ssl_error}.
+
%% @doc Setup a socket to listen on the given port on the local host.
%%
%% The available options are:
%% <dl>
%%  <dt>port</dt><dd>Mandatory. TCP port number to open.</dd>
%%  <dt>backlog</dt><dd>Maximum length of the pending connections queue.
%%   Defaults to 1024.</dd>
%%  <dt>ip</dt><dd>Interface to listen on. Listen on all interfaces
%%   by default.</dd>
%%  <dt>certfile</dt><dd>Mandatory. Path to a file containing the user's
%%   certificate.</dd>
%%  <dt>keyfile</dt><dd>Mandatory. Path to the file containing the user's
%%   private PEM encoded key.</dd>
%%  <dt>cacertfile</dt><dd>Optional. Path to file containing PEM encoded
%%   CA certificates (trusted certificates used for verifying a peer
%%   certificate).</dd>
%%  <dt>password</dt><dd>Optional. String containing the user's password.
%%   Only needed when the private key is password protected.</dd>
%% </dl>
%%
%% @see ssl:listen/2
-spec listen([{port, inet:ip_port()} | {certfile, string()}
	| {keyfile, string()} | {password, string()}
	| {cacertfile, string()} | {ip, inet:ip_address()}])
	-> {ok, ssl:sslsocket()} | {error, atom()}.
listen(Opts) ->
	require([crypto, public_key, ssl]),
	{port, Port} = lists:keyfind(port, 1, Opts),
	Backlog = proplists:get_value(backlog, Opts, 1024),
	{certfile, CertFile} = lists:keyfind(certfile, 1, Opts),
	{keyfile, KeyFile} = lists:keyfind(keyfile, 1, Opts),
	BaseOpts = [binary, {active, false},
		{backlog, Backlog}, {packet, raw}, {reuseaddr, true},
		{certfile, CertFile}, {keyfile, KeyFile}],
	%% The password used to be mandatory (a missing option crashed with
	%% badmatch) although it is only required for password-protected
	%% keys; it is now passed through, like ip and cacertfile, only
	%% when present.
	ListenOpts = optional_opts([password, ip, cacertfile], Opts, BaseOpts),
	ssl:listen(Port, ListenOpts).

%% @private Prepend the {Key, Value} pair found in Opts for each of the
%% given optional keys; keys absent from Opts are skipped.
optional_opts(Keys, Opts, Acc) ->
	lists:foldl(fun (Key, Acc2) ->
		case lists:keyfind(Key, 1, Opts) of
			false -> Acc2;
			Tuple -> [Tuple|Acc2]
		end
	end, Acc, Keys).
+
%% @doc Accept an incoming connection on a listen socket.
%%
%% Note that this function does both the transport accept and
%% the SSL handshake.
%%
%% @see ssl:transport_accept/2
%% @see ssl:ssl_accept/2
-spec accept(ssl:sslsocket(), timeout())
	-> {ok, ssl:sslsocket()} | {error, closed | timeout | atom()}.
accept(LSocket, Timeout) ->
	case ssl:transport_accept(LSocket, Timeout) of
		{ok, CSocket} -> ssl_accept(CSocket, Timeout);
		{error, _Reason} = Error -> Error
	end.
+
%% @doc Receive a packet from a socket in passive mode.
%% Thin delegation to ssl:recv/3; Length of 0 returns all available data.
%% @see ssl:recv/3
-spec recv(ssl:sslsocket(), non_neg_integer(), timeout())
	-> {ok, any()} | {error, closed | atom()}.
recv(Socket, Length, Timeout) ->
	ssl:recv(Socket, Length, Timeout).
+
%% @doc Send a packet on a socket.
%% Thin delegation to ssl:send/2; Packet may be any iolist.
%% @see ssl:send/2
-spec send(ssl:sslsocket(), iolist()) -> ok | {error, atom()}.
send(Socket, Packet) ->
	ssl:send(Socket, Packet).
+
%% @doc Set one or more options for a socket.
%% Thin delegation to ssl:setopts/2.
%% @see ssl:setopts/2
-spec setopts(ssl:sslsocket(), list()) -> ok | {error, atom()}.
setopts(Socket, Opts) ->
	ssl:setopts(Socket, Opts).
+
%% @doc Assign a new controlling process <em>Pid</em> to <em>Socket</em>.
%% The controlling process receives the socket's active-mode messages.
%% @see ssl:controlling_process/2
-spec controlling_process(ssl:sslsocket(), pid())
	-> ok | {error, closed | not_owner | atom()}.
controlling_process(Socket, Pid) ->
	ssl:controlling_process(Socket, Pid).
+
%% @doc Return the address and port of the remote endpoint.
%% @see ssl:peername/1
-spec peername(ssl:sslsocket())
    -> {ok, {inet:ip_address(), inet:ip_port()}} | {error, atom()}.
peername(Sock) ->
    ssl:peername(Sock).
+
%% @doc Close an SSL socket.
%% @see ssl:close/1
-spec close(ssl:sslsocket()) -> ok.
close(Sock) ->
    ssl:close(Sock).
+
+%% Internal.
+
%% Make sure all the given OTP applications are running. Starting an
%% application that is already running is not an error; any other
%% start failure crashes the caller (assertive, let-it-crash style).
-spec require(list(module())) -> ok.
require(Apps) ->
    lists:foreach(fun(App) ->
        case application:start(App) of
            ok -> ok;
            {error, {already_started, App}} -> ok
        end
    end, Apps).
+
%% Run the SSL handshake on an accepted socket, normalizing the plain
%% ok return into {ok, Socket} so accept/2 callers get the socket back.
%% NOTE(review): ssl:ssl_accept/2 is deprecated in modern OTP in favor
%% of ssl:handshake/2 — confirm the minimum OTP version before changing.
-spec ssl_accept(ssl:sslsocket(), timeout())
    -> {ok, ssl:sslsocket()} | {error, closed | timeout | atom()}.
ssl_accept(Socket, Timeout) ->
    case ssl:ssl_accept(Socket, Timeout) of
        ok -> {ok, Socket};
        {error, _} = Error -> Error
    end.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @private
+-module(cowboy_sup).
+-behaviour(supervisor).
+
+-export([start_link/0]). %% API.
+-export([init/1]). %% supervisor.
+
+-define(SUPERVISOR, ?MODULE).
+
+%% API.
+
%% Start the top-level cowboy supervisor, registered locally under the
%% module's name.
-spec start_link() -> {ok, pid()}.
start_link() ->
    supervisor:start_link({local, ?SUPERVISOR}, ?MODULE, []).
+
+%% supervisor.
+
%% Supervisor callback: one_for_one strategy (max 10 restarts in 10s)
%% with a single permanent worker, cowboy_clock.
-spec init([]) -> {ok, {{one_for_one, 10, 10}, [{_, _, _, _, _, _}, ...]}}.
init([]) ->
    ClockSpec = {cowboy_clock, {cowboy_clock, start_link, []},
        permanent, 5000, worker, [cowboy_clock]},
    {ok, {{one_for_one, 10, 10}, [ClockSpec]}}.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% @doc TCP transport API.
+%%
+%% Wrapper around <em>gen_tcp</em> implementing the Cowboy transport API.
+%%
+%% @see gen_tcp
+-module(cowboy_tcp_transport).
+
+-export([name/0, messages/0, listen/1, accept/2, recv/3, send/2, setopts/2,
+ controlling_process/2, peername/1, close/1]).
+
%% @doc Name of this transport API, <em>tcp</em>.
%% Returns the constant atom <em>tcp</em>.
-spec name() -> tcp.
name() -> tcp.
+
%% @doc Atoms used in the process messages sent by this API.
%%
%% They identify incoming data, closed connection and errors when receiving
%% data in active mode. The tuple is {tcp, tcp_closed, tcp_error},
%% matching the tags gen_tcp delivers in active mode.
-spec messages() -> {tcp, tcp_closed, tcp_error}.
messages() -> {tcp, tcp_closed, tcp_error}.
+
%% @doc Setup a socket to listen on the given port on the local host.
%%
%% The available options are:
%% <dl>
%%  <dt>port</dt><dd>Mandatory. TCP port number to open.</dd>
%%  <dt>backlog</dt><dd>Maximum length of the pending connections queue.
%%   Defaults to 1024.</dd>
%%  <dt>ip</dt><dd>Interface to listen on. Listen on all interfaces
%%   by default.</dd>
%% </dl>
%%
%% @see gen_tcp:listen/2
-spec listen([{port, inet:ip_port()} | {ip, inet:ip_address()}])
    -> {ok, inet:socket()} | {error, atom()}.
listen(Opts) ->
    {port, Port} = lists:keyfind(port, 1, Opts),
    Backlog = proplists:get_value(backlog, Opts, 1024),
    BaseOpts = [binary, {active, false},
        {backlog, Backlog}, {packet, raw}, {reuseaddr, true}],
    %% Bind to a specific interface only when the ip option is present.
    ListenOpts = case lists:keyfind(ip, 1, Opts) of
        false -> BaseOpts;
        IpOpt -> [IpOpt|BaseOpts]
    end,
    gen_tcp:listen(Port, ListenOpts).
+
%% @doc Accept an incoming connection on a listen socket.
%% @see gen_tcp:accept/2
-spec accept(inet:socket(), timeout())
    -> {ok, inet:socket()} | {error, closed | timeout | atom()}.
accept(LSock, Timeout) ->
    gen_tcp:accept(LSock, Timeout).
+
%% @doc Receive a packet from a socket in passive mode.
%% @see gen_tcp:recv/3
-spec recv(inet:socket(), non_neg_integer(), timeout())
    -> {ok, any()} | {error, closed | atom()}.
recv(Sock, Len, Timeout) ->
    gen_tcp:recv(Sock, Len, Timeout).
+
%% @doc Send a packet on a socket.
%% @see gen_tcp:send/2
-spec send(inet:socket(), iolist()) -> ok | {error, atom()}.
send(Sock, Data) ->
    gen_tcp:send(Sock, Data).
+
%% @doc Set one or more options for a socket.
%% @see inet:setopts/2
-spec setopts(inet:socket(), list()) -> ok | {error, atom()}.
setopts(Sock, Options) ->
    inet:setopts(Sock, Options).
+
%% @doc Assign a new controlling process <em>Owner</em> to <em>Sock</em>.
%% @see gen_tcp:controlling_process/2
-spec controlling_process(inet:socket(), pid())
    -> ok | {error, closed | not_owner | atom()}.
controlling_process(Sock, Owner) ->
    gen_tcp:controlling_process(Sock, Owner).
+
%% @doc Return the address and port of the remote endpoint.
%% @see inet:peername/1
-spec peername(inet:socket())
    -> {ok, {inet:ip_address(), inet:ip_port()}} | {error, atom()}.
peername(Sock) ->
    inet:peername(Sock).
+
%% @doc Close a TCP socket.
%% @see gen_tcp:close/1
-spec close(inet:socket()) -> ok.
close(Sock) ->
    gen_tcp:close(Sock).
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+-module(chunked_handler).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
%% Accept any transport as long as the protocol is http; no state is kept.
init({_Transport, http}, Req, _Opts) ->
    {ok, Req, undefined}.
+
handle(Req, State) ->
    %% Send a chunked 200 reply in two chunks. The return values of the
    %% chunk/2 calls are ignored here (test handler; errors not asserted).
    {ok, Req2} = cowboy_http_req:chunked_reply(200, Req),
    cowboy_http_req:chunk("chunked_handler\r\n", Req2),
    cowboy_http_req:chunk("works fine!", Req2),
    {ok, Req2, State}.
+
%% Nothing to clean up.
terminate(_Req, _State) ->
    ok.
--- /dev/null
+%% Copyright (c) 2011, Magnus Klaar <magnus.klaar@gmail.com>
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(dispatcher_prop).
+-include_lib("proper/include/proper.hrl").
+
+%% Generators.
+
%% First character of a hostname label: a letter or a digit.
hostname_head_char() ->
    oneof([choose($a, $z), choose($A, $Z), choose($0, $9)]).
+
%% Subsequent label characters may also include the hyphen.
hostname_char() ->
    oneof([choose($a, $z), choose($A, $Z), choose($0, $9), $-]).
+
%% A label starts with a head character and is shorter than 64 characters.
hostname_label() ->
    ?SUCHTHAT(Label, [hostname_head_char()|list(hostname_char())],
        length(Label) < 64).
+
%% A hostname is a non-empty, dot-joined list of labels, at most 255 chars.
hostname() ->
    ?SUCHTHAT(Hostname,
        ?LET(Labels, list(hostname_label()), string:join(Labels, ".")),
        length(Hostname) > 0 andalso length(Hostname) =< 255).
+
%% Any valid TCP port number, 1..65535.
port_number() ->
    choose(1, 16#ffff).
+
%% Either an empty string or a ":Port" suffix.
port_str() ->
    oneof(["", ?LET(Port, port_number(), ":" ++ integer_to_list(Port))]).
+
%% A Host header value as a binary: hostname plus optional port suffix.
server() ->
    ?LET({Hostname, PortStr}, {hostname(), port_str()},
        list_to_binary(Hostname ++ PortStr)).
+
+%% Properties.
+
%% split_host/1 must be lossless: re-joining the returned tokens with
%% dots (and re-appending ":Port" when a port was parsed) must
%% reconstruct the original input exactly.
prop_split_host_symmetric() ->
    ?FORALL(Server, server(),
        begin case cowboy_dispatcher:split_host(Server) of
            {Tokens, RawHost, undefined} ->
                (Server == RawHost) and (Server == binary_join(Tokens, "."));
            {Tokens, RawHost, Port} ->
                PortBin = (list_to_binary(":" ++ integer_to_list(Port))),
                (Server == << RawHost/binary, PortBin/binary >>)
                and (Server == << (binary_join(Tokens, "."))/binary,
                    PortBin/binary >>)
        end end).
+
+%% Internal.
+
%% Contributed by MononcQc on #erlounge.
%% Join a list of binaries into one binary, inserting Separator
%% between consecutive elements. An empty list yields <<>>.
binary_join([], _Separator) ->
    <<>>;
binary_join([First|Rest], Separator) ->
    iolist_to_binary([First | [[Separator, Part] || Part <- Rest]]).
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(http_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([all/0, groups/0, init_per_suite/1, end_per_suite/1,
+ init_per_group/2, end_per_group/2]). %% ct.
+-export([chunked_response/1, headers_dupe/1, headers_huge/1,
+ keepalive_nl/1, max_keepalive/1, nc_rand/1, nc_zero/1,
+ pipeline/1, raw/1, set_resp_header/1, set_resp_overwrite/1,
+ set_resp_body/1, stream_body_set_resp/1, response_as_req/1,
+ static_mimetypes_function/1, static_attribute_etag/1,
+ static_function_etag/1, multipart/1]). %% http.
+-export([http_200/1, http_404/1, handler_errors/1,
+ file_200/1, file_403/1, dir_403/1, file_404/1,
+ file_400/1]). %% http and https.
+-export([http_10_hostless/1]). %% misc.
+-export([rest_simple/1, rest_keepalive/1, rest_keepalive_post/1]). %% rest.
+
+%% ct.
+
%% The four test groups run by this suite.
all() ->
    [{group, http}, {group, https}, {group, misc}, {group, rest}].
+
%% Group definitions. BaseTests run over both plain HTTP and HTTPS;
%% the http group adds the socket-level and static-file tests.
groups() ->
    BaseTests = [http_200, http_404, handler_errors,
        file_200, file_403, dir_403, file_404, file_400],
    [{http, [], [chunked_response, headers_dupe, headers_huge,
        keepalive_nl, max_keepalive, nc_rand, nc_zero, pipeline, raw,
        set_resp_header, set_resp_overwrite,
        set_resp_body, response_as_req, stream_body_set_resp,
        static_mimetypes_function, static_attribute_etag,
        static_function_etag, multipart] ++ BaseTests},
    {https, [], BaseTests},
    {misc, [], [http_10_hostless]},
    {rest, [], [rest_simple, rest_keepalive, rest_keepalive_post]}].
+
%% inets provides the httpc client used by many tests; cowboy is the
%% application under test.
init_per_suite(Config) ->
    application:start(inets),
    application:start(cowboy),
    Config.
+
%% Stop applications in reverse start order.
end_per_suite(_Config) ->
    application:stop(cowboy),
    application:stop(inets),
    ok.
+
%% Start one listener per test group on a dedicated port. The listener
%% name must equal the group name so the catch-all end_per_group/2
%% clause can stop it by that name.
init_per_group(http, Config) ->
    Port = 33080,
    Config1 = init_static_dir(Config),
    cowboy:start_listener(http, 100,
        cowboy_tcp_transport, [{port, Port}],
        cowboy_http_protocol, [{max_keepalive, 50},
            {dispatch, init_http_dispatch(Config1)}]
    ),
    [{scheme, "http"}, {port, Port}|Config1];
init_per_group(https, Config) ->
    Port = 33081,
    Config1 = init_static_dir(Config),
    application:start(crypto),
    application:start(public_key),
    application:start(ssl),
    DataDir = ?config(data_dir, Config),
    cowboy:start_listener(https, 100,
        cowboy_ssl_transport, [
            {port, Port}, {certfile, DataDir ++ "cert.pem"},
            {keyfile, DataDir ++ "key.pem"}, {password, "cowboy"}],
        cowboy_http_protocol, [{dispatch, init_https_dispatch(Config1)}]
    ),
    [{scheme, "https"}, {port, Port}|Config1];
init_per_group(misc, Config) ->
    Port = 33082,
    cowboy:start_listener(misc, 100,
        cowboy_tcp_transport, [{port, Port}],
        cowboy_http_protocol, [{dispatch, [{'_', [
            {[], http_handler, []}
        ]}]}]),
    [{port, Port}|Config];
init_per_group(rest, Config) ->
    Port = 33083,
    %% Fix: this listener was registered as 'reset' (typo), so
    %% end_per_group(rest, _) could never stop it. Use the group name.
    cowboy:start_listener(rest, 100,
        cowboy_tcp_transport, [{port, Port}],
        cowboy_http_protocol, [{dispatch, [{'_', [
            {[<<"simple">>], rest_simple_resource, []},
            {[<<"forbidden_post">>], rest_forbidden_resource, [true]},
            {[<<"simple_post">>], rest_forbidden_resource, [false]}
        ]}]}]),
    [{port, Port}|Config].
+
%% Tear down per group: https additionally stops the TLS applications,
%% http and https remove the static dir, and the catch-all clause stops
%% the listener registered under the group's name.
end_per_group(https, Config) ->
    cowboy:stop_listener(https),
    application:stop(ssl),
    application:stop(public_key),
    application:stop(crypto),
    end_static_dir(Config),
    ok;
end_per_group(http, Config) ->
    cowboy:stop_listener(http),
    end_static_dir(Config);
end_per_group(Listener, _Config) ->
    cowboy:stop_listener(Listener),
    ok.
+
+%% Dispatch configuration.
+
%% Dispatch rules for the http group: one handler per tested feature,
%% all bound to the localhost virtual host. Static routes serve from
%% the per-run directory created by init_static_dir/1.
init_http_dispatch(Config) ->
    [
        {[<<"localhost">>], [
            {[<<"chunked_response">>], chunked_handler, []},
            {[<<"init_shutdown">>], http_handler_init_shutdown, []},
            {[<<"long_polling">>], http_handler_long_polling, []},
            {[<<"headers">>, <<"dupe">>], http_handler,
                [{headers, [{<<"Connection">>, <<"close">>}]}]},
            {[<<"set_resp">>, <<"header">>], http_handler_set_resp,
                [{headers, [{<<"Vary">>, <<"Accept">>}]}]},
            {[<<"set_resp">>, <<"overwrite">>], http_handler_set_resp,
                [{headers, [{<<"Server">>, <<"DesireDrive/1.0">>}]}]},
            {[<<"set_resp">>, <<"body">>], http_handler_set_resp,
                [{body, <<"A flameless dance does not equal a cycle">>}]},
            {[<<"stream_body">>, <<"set_resp">>], http_handler_stream_body,
                [{reply, set_resp}, {body, <<"stream_body_set_resp">>}]},
            {[<<"static">>, '...'], cowboy_http_static,
                [{directory, ?config(static_dir, Config)},
                 {mimetypes, [{<<".css">>, [<<"text/css">>]}]}]},
            {[<<"static_mimetypes_function">>, '...'], cowboy_http_static,
                [{directory, ?config(static_dir, Config)},
                 {mimetypes, {fun(Path, data) when is_binary(Path) ->
                    [<<"text/html">>] end, data}}]},
            {[<<"handler_errors">>], http_handler_errors, []},
            {[<<"static_attribute_etag">>, '...'], cowboy_http_static,
                [{directory, ?config(static_dir, Config)},
                 {etag, {attributes, [filepath, filesize, inode, mtime]}}]},
            {[<<"static_function_etag">>, '...'], cowboy_http_static,
                [{directory, ?config(static_dir, Config)},
                 {etag, {fun static_function_etag/2, etag_data}}]},
            {[<<"multipart">>], http_handler_multipart, []},
            {[], http_handler, []}
        ]}
    ].
+
%% The https group reuses the http dispatch rules unchanged.
init_https_dispatch(Config) ->
    init_http_dispatch(Config).
+
+
%% Create the static file tree used by the /static* routes: two regular
%% files, an unreadable file (mode 0333, write/execute only), an HTML
%% file and an empty directory. The directory path is stored under the
%% static_dir key of Config.
init_static_dir(Config) ->
    Dir = filename:join(?config(priv_dir, Config), "static"),
    Level1 = fun(Name) -> filename:join(Dir, Name) end,
    ok = file:make_dir(Dir),
    ok = file:write_file(Level1("test_file"), "test_file\n"),
    ok = file:write_file(Level1("test_file.css"), "test_file.css\n"),
    ok = file:write_file(Level1("test_noread"), "test_noread\n"),
    ok = file:change_mode(Level1("test_noread"), 8#0333),
    ok = file:write_file(Level1("test.html"), "test.html\n"),
    ok = file:make_dir(Level1("test_dir")),
    [{static_dir, Dir}|Config].
+
%% Remove every file created by init_static_dir/1, then the directory
%% itself (del_dir only succeeds on an empty directory).
end_static_dir(Config) ->
    Dir = ?config(static_dir, Config),
    Level1 = fun(Name) -> filename:join(Dir, Name) end,
    ok = file:delete(Level1("test_file")),
    ok = file:delete(Level1("test_file.css")),
    ok = file:delete(Level1("test_noread")),
    ok = file:delete(Level1("test.html")),
    ok = file:del_dir(Level1("test_dir")),
    ok = file:del_dir(Dir),
    Config.
+
+%% http.
+
%% httpc reassembles the two chunks sent by chunked_handler into one body.
chunked_response(Config) ->
    {ok, {{"HTTP/1.1", 200, "OK"}, _Headers, "chunked_handler\r\nworks fine!"}} =
        httpc:request(build_url("/chunked_response", Config)).
+
%% The handler forces Connection: close; the client's keep-alive request
%% header must not leak into the response, and the socket must be closed.
headers_dupe(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = gen_tcp:send(Socket, "GET /headers/dupe HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: keep-alive\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {_Start, _Length} = binary:match(Data, <<"Connection: close">>),
    nomatch = binary:match(Data, <<"Connection: keep-alive">>),
    {error, closed} = gen_tcp:recv(Socket, 0, 1000).
+
%% A very large (repeated 40x) header value must still yield a 200.
headers_huge(Config) ->
    Cookie = lists:flatten(["whatever_man_biiiiiiiiiiiig_cookie_me_want_77="
        "Wed Apr 06 2011 10:38:52 GMT-0500 (CDT)" || _N <- lists:seq(1, 40)]),
    {_Packet, 200} = raw_req(["GET / HTTP/1.0\r\nHost: localhost\r\n"
        "Set-Cookie: ", Cookie, "\r\n\r\n"], Config).
+
%% Ten keep-alive requests on one socket, each followed by a stray newline.
keepalive_nl(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = keepalive_nl_loop(Socket, 10),
    ok = gen_tcp:close(Socket).
+
%% Each iteration expects a 200 without Connection: close, then sends an
%% extra newline the server must tolerate between requests.
keepalive_nl_loop(_Socket, 0) ->
    ok;
keepalive_nl_loop(Socket, N) ->
    ok = gen_tcp:send(Socket, "GET / HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: keep-alive\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {0, 12} = binary:match(Data, <<"HTTP/1.1 200">>),
    nomatch = binary:match(Data, <<"Connection: close">>),
    ok = gen_tcp:send(Socket, "\r\n"), %% extra nl
    keepalive_nl_loop(Socket, N - 1).
+
%% The listener is configured with max_keepalive 50: after 50 requests
%% on one connection the server must close it.
max_keepalive(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = max_keepalive_loop(Socket, 50),
    {error, closed} = gen_tcp:recv(Socket, 0, 1000).
+
%% Issue keep-alive requests until the server's max_keepalive limit is
%% reached. Every response must be a 200; only the last one (N =:= 1)
%% may and must carry Connection: close.
max_keepalive_loop(_Socket, 0) ->
    ok;
max_keepalive_loop(Socket, N) ->
    ok = gen_tcp:send(Socket, "GET / HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: keep-alive\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {0, 12} = binary:match(Data, <<"HTTP/1.1 200">>),
    case N of
        1 -> {_, _} = binary:match(Data, <<"Connection: close">>);
        _ -> nomatch = binary:match(Data, <<"Connection: close">>)
    end,
    %% Fix: recurse into max_keepalive_loop. The previous tail call went
    %% to keepalive_nl_loop, which sends an extra newline and asserts
    %% nomatch on every response, so the N =:= 1 close check never ran.
    max_keepalive_loop(Socket, N - 1).
+
%% POST a two-part multipart body; the handler echoes the parsed parts
%% back as a term_to_binary-encoded list of {Headers, Body} pairs.
multipart(Config) ->
    Url = build_url("/multipart", Config),
    Body = <<
        "This is a preamble."
        "\r\n--OHai\r\nX-Name:answer\r\n\r\n42"
        "\r\n--OHai\r\nServer:Cowboy\r\n\r\nIt rocks!\r\n"
        "\r\n--OHai--"
        "This is an epiloque."
    >>,
    Request = {Url, [], "multipart/x-makes-no-sense; boundary=OHai", Body},
    {ok, {{"HTTP/1.1", 200, "OK"}, _Headers, Response}} =
        httpc:request(post, Request, [], [{body_format, binary}]),
    Parts = binary_to_term(Response),
    Parts = [
        {[{<<"X-Name">>, <<"answer">>}], <<"42">>},
        {[{'Server', <<"Cowboy">>}], <<"It rocks!\r\n">>}
    ].
+
%% Fuzz the server with random bytes piped through nc.
nc_rand(Config) ->
    nc_reqs(Config, "/dev/urandom").
+
%% Fuzz the server with a stream of zero bytes piped through nc.
nc_zero(Config) ->
    nc_reqs(Config, "/dev/zero").
+
%% Throw 100 rounds of garbage from Input at the server via nc, then
%% verify it still answers a normal request with a 200. Skips when cat
%% or nc is not installed on the host.
nc_reqs(Config, Input) ->
    Cat = os:find_executable("cat"),
    Nc = os:find_executable("nc"),
    case {Cat, Nc} of
        {false, _} ->
            {skip, {notfound, cat}};
        {_, false} ->
            {skip, {notfound, nc}};
        _Good ->
            %% Throw garbage at the server then check if it's still up.
            {port, Port} = lists:keyfind(port, 1, Config),
            [nc_run_req(Port, Input) || _N <- lists:seq(1, 100)],
            Packet = "GET / HTTP/1.0\r\nHost: localhost\r\n\r\n",
            {Packet, 200} = raw_req(Packet, Config)
    end.
+
%% One fuzzing round. NOTE(review): os:cmd builds a shell command by
%% string concatenation; acceptable here only because Input and Port are
%% fixed test values, never external input.
nc_run_req(Port, Input) ->
    os:cmd("cat " ++ Input ++ " | nc localhost " ++ integer_to_list(Port)).
+
%% Send five pipelined requests in a single packet (last one closes) and
%% verify five 200 responses come back.
pipeline(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = gen_tcp:send(Socket,
        "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n"
        "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n"
        "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n"
        "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n"
        "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n"),
    Data = pipeline_recv(Socket, <<>>),
    Reqs = binary:split(Data, << "\r\n\r\nhttp_handler" >>, [global, trim]),
    5 = length(Reqs),
    pipeline_check(Reqs).
+
%% Assert that every pipelined response in the list starts with a 200
%% status line; crashes on the first response that does not.
pipeline_check([]) ->
    ok;
pipeline_check([Response|Rest]) ->
    << "HTTP/1.1 200", _Tail/bits >> = Response,
    pipeline_check(Rest).
+
%% Accumulate everything the server sends until it closes the socket,
%% then return the collected bytes.
pipeline_recv(Socket, SoFar) ->
    case gen_tcp:recv(Socket, 0, 6000) of
        {ok, Data} ->
            pipeline_recv(Socket, << SoFar/binary, Data/binary >>);
        {error, closed} ->
            ok = gen_tcp:close(Socket),
            SoFar
    end.
+
%% Send a raw packet over a fresh TCP connection and return
%% {Packet, StatusCode} where StatusCode is the integer parsed from the
%% first status line, or the recv error reason (e.g. closed / timeout).
raw_req(Packet, Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = gen_tcp:send(Socket, Packet),
    Res = case gen_tcp:recv(Socket, 0, 6000) of
        {ok, << "HTTP/1.1 ", Str:24/bits, _Rest/bits >>} ->
            list_to_integer(binary_to_list(Str));
        {error, Reason} ->
            Reason
    end,
    gen_tcp:close(Socket),
    {Packet, Res}.
+
%% Send a raw request. Return the response code and the full response.
%% Unlike raw_req/2 this reads until the connection closes and picks the
%% transport (gen_tcp or ssl) from the scheme stored in Config.
raw_resp(Request, Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    Transport = case ?config(scheme, Config) of
        "http" -> gen_tcp;
        "https" -> ssl
    end,
    {ok, Socket} = Transport:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = Transport:send(Socket, Request),
    {StatusCode, Response} = case recv_loop(Transport, Socket, <<>>) of
        {ok, << "HTTP/1.1 ", Str:24/bits, _Rest/bits >> = Bin} ->
            {list_to_integer(binary_to_list(Str)), Bin};
        {ok, Bin} ->
            {badresp, Bin};
        {error, Reason} ->
            {Reason, <<>>}
    end,
    Transport:close(Socket),
    {Response, StatusCode}.
+
%% Accumulate data through the given transport until the peer closes
%% the connection ({ok, Acc}) or another error occurs ({error, Reason}).
recv_loop(Transport, Socket, Acc) ->
    case Transport:recv(Socket, 0, 6000) of
        {ok, Data} ->
            recv_loop(Transport, Socket, <<Acc/binary, Data/binary>>);
        {error, closed} ->
            ok = Transport:close(Socket),
            {ok, Acc};
        {error, Reason} ->
            {error, Reason}
    end.
+
+
+
%% Table-driven protocol-conformance checks: each raw packet is paired
%% with the status code the server must answer (or 'closed' when it
%% must simply drop the connection without replying).
raw(Config) ->
    Huge = [$0 || _N <- lists:seq(1, 5000)],
    Tests = [
        {"\r\n\r\n\r\n\r\n\r\nGET / HTTP/1.1\r\nHost: localhost\r\n\r\n", 200},
        {"\n", 400},
        {"Garbage\r\n\r\n", 400},
        {"\r\n\r\n\r\n\r\n\r\n\r\n", 400},
        {"GET / HTTP/1.1\r\nHost: dev-extend.eu\r\n\r\n", 400},
        {"", closed},
        {"\r\n", closed},
        {"\r\n\r\n", closed},
        {"GET / HTTP/1.1", closed},
        {"GET / HTTP/1.1\r\n", 408},
        {"GET / HTTP/1.1\r\nHost: localhost", 408},
        {"GET / HTTP/1.1\r\nHost: localhost\r\n", 408},
        {"GET / HTTP/1.1\r\nHost: localhost\r\n\r", 408},
        {"GET http://localhost/ HTTP/1.1\r\n\r\n", 501},
        {"GET / HTTP/1.2\r\nHost: localhost\r\n\r\n", 505},
        {"GET /init_shutdown HTTP/1.1\r\nHost: localhost\r\n\r\n", 666},
        {"GET /long_polling HTTP/1.1\r\nHost: localhost\r\n\r\n", 102},
        {Huge, 413},
        {"GET / HTTP/1.1\r\n" ++ Huge, 413}
    ],
    [{Packet, StatusCode} = raw_req(Packet, Config)
        || {Packet, StatusCode} <- Tests].
+
%% The handler-set Vary header and the protocol-set Set-Cookie header
%% must both appear in the response.
set_resp_header(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = gen_tcp:send(Socket, "GET /set_resp/header HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: close\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {_, _} = binary:match(Data, <<"Vary: Accept">>),
    {_, _} = binary:match(Data, <<"Set-Cookie: ">>).
+
%% A handler-set Server header must overwrite the default one.
set_resp_overwrite(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = gen_tcp:send(Socket, "GET /set_resp/overwrite HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: close\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {_Start, _Length} = binary:match(Data, <<"Server: DesireDrive/1.0">>).
+
%% The body preset by the handler must be delivered after the headers.
set_resp_body(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = gen_tcp:send(Socket, "GET /set_resp/body HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: close\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {_Start, _Length} = binary:match(Data, <<"\r\n\r\n"
        "A flameless dance does not equal a cycle">>).
+
%% Sending an HTTP response where a request is expected must be
%% rejected with a 400. The packet below is a verbatim captured
%% response; its exact bytes (bare newlines included) are intentional.
response_as_req(Config) ->
    Packet =
"HTTP/1.0 302 Found
Location: http://www.google.co.il/
Cache-Control: private
Content-Type: text/html; charset=UTF-8
Set-Cookie: PREF=ID=568f67013d4a7afa:FF=0:TM=1323014101:LM=1323014101:S=XqctDWC65MzKT0zC; expires=Tue, 03-Dec-2013 15:55:01 GMT; path=/; domain=.google.com
Date: Sun, 04 Dec 2011 15:55:01 GMT
Server: gws
Content-Length: 221
X-XSS-Protection: 1; mode=block
X-Frame-Options: SAMEORIGIN

<HTML><HEAD><meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\">
<TITLE>302 Moved</TITLE></HEAD><BODY>
<H1>302 Moved</H1>
The document has moved
<A HREF=\"http://www.google.co.il/\">here</A>.
</BODY></HTML>",
    {Packet, 400} = raw_req(Packet, Config).
+
%% A body set via set_resp with a streaming fun must reach the client.
stream_body_set_resp(Config) ->
    {Packet, 200} = raw_resp(
        "GET /stream_body/set_resp HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: close\r\n\r\n", Config),
    {_Start, _Length} = binary:match(Packet, <<"stream_body_set_resp">>).
+
%% A mimetypes fun configured on the static handler must set the
%% Content-Type of the served file.
static_mimetypes_function(Config) ->
    TestURL = build_url("/static_mimetypes_function/test.html", Config),
    {ok, {{"HTTP/1.1", 200, "OK"}, Headers1, "test.html\n"}} =
        httpc:request(TestURL),
    "text/html" = ?config("content-type", Headers1).
+
%% Handler crashes at various stages: before any reply the server must
%% send a 500; after a reply was already sent the 500 must NOT be
%% appended; a crash on a pipelined/keep-alive connection must not
%% corrupt the previous response.
handler_errors(Config) ->
    Request = fun(Case) ->
        raw_resp(["GET /handler_errors?case=", Case, " HTTP/1.1\r\n",
         "Host: localhost\r\n\r\n"], Config) end,

    {_Packet1, 500} = Request("init_before_reply"),

    {Packet2, 200} = Request("init_after_reply"),
    nomatch = binary:match(Packet2, <<"HTTP/1.1 500">>),

    {Packet3, 200} = Request("init_reply_handle_error"),
    nomatch = binary:match(Packet3, <<"HTTP/1.1 500">>),

    {_Packet4, 500} = Request("handle_before_reply"),

    {Packet5, 200} = Request("handle_after_reply"),
    nomatch = binary:match(Packet5, <<"HTTP/1.1 500">>),

    {Packet6, 200} = raw_resp([
        "GET / HTTP/1.1\r\n",
        "Host: localhost\r\n",
        "Connection: keep-alive\r\n\r\n",
        "GET /handler_errors?case=handle_after_reply\r\n",
        "Host: localhost\r\n\r\n"], Config),
    nomatch = binary:match(Packet6, <<"HTTP/1.1 500">>),

    {Packet7, 200} = raw_resp([
        "GET / HTTP/1.1\r\n",
        "Host: localhost\r\n",
        "Connection: keep-alive\r\n\r\n",
        "GET /handler_errors?case=handle_before_reply HTTP/1.1\r\n",
        "Host: localhost\r\n\r\n"], Config),
    {{_, _}, _} = {binary:match(Packet7, <<"HTTP/1.1 500">>), Packet7},

    done.
+
%% Attribute-based ETags must be present and stable across requests.
static_attribute_etag(Config) ->
    TestURL = build_url("/static_attribute_etag/test.html", Config),
    {ok, {{"HTTP/1.1", 200, "OK"}, Headers1, "test.html\n"}} =
        httpc:request(TestURL),
    false = ?config("etag", Headers1) =:= undefined,
    {ok, {{"HTTP/1.1", 200, "OK"}, Headers2, "test.html\n"}} =
        httpc:request(TestURL),
    true = ?config("etag", Headers1) =:= ?config("etag", Headers2).
+
%% Function-generated ETags (see static_function_etag/2) must be present
%% and stable across requests.
static_function_etag(Config) ->
    TestURL = build_url("/static_function_etag/test.html", Config),
    {ok, {{"HTTP/1.1", 200, "OK"}, Headers1, "test.html\n"}} =
        httpc:request(TestURL),
    false = ?config("etag", Headers1) =:= undefined,
    {ok, {{"HTTP/1.1", 200, "OK"}, Headers2, "test.html\n"}} =
        httpc:request(TestURL),
    true = ?config("etag", Headers1) =:= ?config("etag", Headers2).
+
%% ETag callback handed to cowboy_http_static: shells out to sha1sum on
%% the served file and returns the hex digest as a binary.
%% NOTE(review): os:cmd interpolates Filepath into a shell command —
%% safe only because the paths are test fixtures; also requires sha1sum
%% on PATH. crypto:hash(sha, FileContents) would avoid both issues.
static_function_etag(Arguments, etag_data) ->
    {_, Filepath} = lists:keyfind(filepath, 1, Arguments),
    {_, _Filesize} = lists:keyfind(filesize, 1, Arguments),
    {_, _INode} = lists:keyfind(inode, 1, Arguments),
    {_, _Modified} = lists:keyfind(mtime, 1, Arguments),
    ChecksumCommand = lists:flatten(io_lib:format("sha1sum ~s", [Filepath])),
    [Checksum|_] = string:tokens(os:cmd(ChecksumCommand), " "),
    iolist_to_binary(Checksum).
+
+%% http and https.
+
%% Build an absolute localhost URL from the scheme and port stored in
%% the Common Test configuration, e.g. "http://localhost:33080/".
build_url(Path, Config) ->
    {scheme, Scheme} = lists:keyfind(scheme, 1, Config),
    {port, Port} = lists:keyfind(port, 1, Config),
    lists:flatten([Scheme, "://localhost:", integer_to_list(Port), Path]).
+
%% The root route answers 200 with the default http_handler body.
http_200(Config) ->
    {ok, {{"HTTP/1.1", 200, "OK"}, _Headers, "http_handler"}} =
        httpc:request(build_url("/", Config)).
+
%% An unrouted path answers 404.
http_404(Config) ->
    {ok, {{"HTTP/1.1", 404, "Not Found"}, _Headers, _Body}} =
        httpc:request(build_url("/not/found", Config)).
+
%% Static files are served with the configured mimetype, falling back
%% to application/octet-stream when no mapping matches.
file_200(Config) ->
    {ok, {{"HTTP/1.1", 200, "OK"}, Headers, "test_file\n"}} =
        httpc:request(build_url("/static/test_file", Config)),
    "application/octet-stream" = ?config("content-type", Headers),

    {ok, {{"HTTP/1.1", 200, "OK"}, Headers1, "test_file.css\n"}} =
        httpc:request(build_url("/static/test_file.css", Config)),
    "text/css" = ?config("content-type", Headers1).
+
%% An unreadable file (mode 0333, see init_static_dir/1) answers 403.
file_403(Config) ->
    {ok, {{"HTTP/1.1", 403, "Forbidden"}, _Headers, _Body}} =
        httpc:request(build_url("/static/test_noread", Config)).
+
%% Requesting a directory (with or without trailing slash) answers 403.
dir_403(Config) ->
    {ok, {{"HTTP/1.1", 403, "Forbidden"}, _Headers, _Body}} =
        httpc:request(build_url("/static/test_dir", Config)),
    {ok, {{"HTTP/1.1", 403, "Forbidden"}, _Headers, _Body}} =
        httpc:request(build_url("/static/test_dir/", Config)).
+
%% A missing static file answers 404.
file_404(Config) ->
    {ok, {{"HTTP/1.1", 404, "Not Found"}, _Headers, _Body}} =
        httpc:request(build_url("/static/not_found", Config)).
+
%% Percent-encoded path-traversal attempts (/, ., ..) answer 400.
file_400(Config) ->
    {ok, {{"HTTP/1.1", 400, "Bad Request"}, _Headers, _Body}} =
        httpc:request(build_url("/static/%2f", Config)),
    {ok, {{"HTTP/1.1", 400, "Bad Request"}, _Headers1, _Body1}} =
        httpc:request(build_url("/static/%2e", Config)),
    {ok, {{"HTTP/1.1", 400, "Bad Request"}, _Headers2, _Body2}} =
        httpc:request(build_url("/static/%2e%2e", Config)).
+%% misc.
+
%% HTTP/1.0 requests without a Host header must still be answered 200.
http_10_hostless(Config) ->
    Packet = "GET / HTTP/1.0\r\n\r\n",
    {Packet, 200} = raw_req(Packet, Config).
+
+%% rest.
+
%% The simple REST resource answers 200 to a plain GET.
rest_simple(Config) ->
    Packet = "GET /simple HTTP/1.1\r\nHost: localhost\r\n\r\n",
    {Packet, 200} = raw_req(Packet, Config).
+
%% 100 keep-alive GETs against the REST resource on one socket.
rest_keepalive(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = rest_keepalive_loop(Socket, 100),
    ok = gen_tcp:close(Socket).
+
%% Each iteration expects a 200 and that the connection stays open.
rest_keepalive_loop(_Socket, 0) ->
    ok;
rest_keepalive_loop(Socket, N) ->
    ok = gen_tcp:send(Socket, "GET /simple HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: keep-alive\r\n\r\n"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {0, 12} = binary:match(Data, <<"HTTP/1.1 200">>),
    nomatch = binary:match(Data, <<"Connection: close">>),
    rest_keepalive_loop(Socket, N - 1).
+
%% Alternate forbidden (403) and accepted (303) POSTs over one
%% keep-alive connection, 10 requests in total.
rest_keepalive_post(Config) ->
    {port, Port} = lists:keyfind(port, 1, Config),
    {ok, Socket} = gen_tcp:connect("localhost", Port,
        [binary, {active, false}, {packet, raw}]),
    ok = rest_keepalive_post_loop(Socket, 10, forbidden_post),
    ok = gen_tcp:close(Socket).
+
%% Alternates between the two POST resources: /simple_post must answer
%% 303 and /forbidden_post must answer 403, with the connection kept
%% alive in both cases.
rest_keepalive_post_loop(_Socket, 0, _) ->
    ok;
rest_keepalive_post_loop(Socket, N, simple_post) ->
    ok = gen_tcp:send(Socket, "POST /simple_post HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: keep-alive\r\n"
        "Content-Length: 5\r\nContent-Type: text/plain\r\n\r\n12345"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {0, 12} = binary:match(Data, <<"HTTP/1.1 303">>),
    nomatch = binary:match(Data, <<"Connection: close">>),
    rest_keepalive_post_loop(Socket, N - 1, forbidden_post);
rest_keepalive_post_loop(Socket, N, forbidden_post) ->
    ok = gen_tcp:send(Socket, "POST /forbidden_post HTTP/1.1\r\n"
        "Host: localhost\r\nConnection: keep-alive\r\n"
        "Content-Length: 5\r\nContent-Type: text/plain\r\n\r\n12345"),
    {ok, Data} = gen_tcp:recv(Socket, 0, 6000),
    {0, 12} = binary:match(Data, <<"HTTP/1.1 403">>),
    nomatch = binary:match(Data, <<"Connection: close">>),
    rest_keepalive_post_loop(Socket, N - 1, simple_post).
--- /dev/null
+-----BEGIN CERTIFICATE-----
+MIICKTCCAZICCQCl9gdHk5NqUjANBgkqhkiG9w0BAQUFADBZMQswCQYDVQQGEwJB
+VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
+cyBQdHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTEwNDA4MTMxNTE3WhcN
+MTEwNTA4MTMxNTE3WjBZMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0
+ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRIwEAYDVQQDDAls
+b2NhbGhvc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOjgFPS0dP4d8F1e
+bNJPB+kAjM2FyTZGmkFCLUYONTPrdGOUIHL/UOGtU22BQzlskE+a6/j2Kg72tm8x
+4X7yf+6s7CdRe086idNx9+GymZ64ZTnly33rD3AJffbBeWHwT2e9fuBeFk9WGC8v
+kqECFZyqf7+znS0o48oBNcx3ePB5AgMBAAEwDQYJKoZIhvcNAQEFBQADgYEASTkv
+oHuZyO8DgT8bIE6W3yM2fvlNshkhh7Thgpf32qQoVOxRU9EF0KpuJCCAHQHQNQlI
+nf9Zc4UzOrLhxZBGocNhkkn4WLw2ysto/7+/+9xHah0M0l4auHLQagVLCoOsHUn2
+JX+A2NrbvuX5wnUrZGOdgY70tvMBeU/xLtp3af8=
+-----END CERTIFICATE-----
--- /dev/null
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,F11262DB77BB804C
+
+jOJ+ft/dihIxz7CTuuK47fCTGdX7xMLANmA7mRg8y9OYhNZQiCz5GjcWLqe0NNl5
+qXPW0uvT/9B5O9o21Y2i/CKU1BqRLuXHXDsjHg7RGaSH6wIavWt+lR+I1sjieFbX
+VByK1KHXjEU704DEILKJIA9gVzoYAgMzo+FTw2e/2jusXntxk8HXyF5zKTzjHBtI
+NQGweJqTmfZjX3SgPP4Co/ShrA6fUG0uTp1HwbByJnwtAeT3xWJrAD4QSn7+qrlv
+3qmEIqVXsvLrfZRY1WZ4uIsbLK8wkvxboSIoIK55VV9R2zRbwQULon6QJwKYujAr
+J2WUYkHHQOMpaAzUmalaT+8GUt8/A1oSK4BdiSZywsMMm46/hDadXBzFg+dPL5g2
+Td+7/L0S6tUVWq4+YBp5EalZH6VQ4cqPYDJZUZ9xt6+yY7V5748lSdA7cHCROnbG
+bKbSW9WbF7MPDHCjvCAfq+s1dafHJgyIOlMg2bm7V8eHWAA0xKQ/o7i5EyEyaKYR
+UXGeAf+KfXcclEZ77v2RCXZvd6ceWkifm59qWv/3TCYaHiS2Aa3lVToMKTwYzzXQ
+p5X5os6wv3IAi2nGyAIOoSDisdHmFteZNXNQsw0n3XCAYfsNMk+r5/r5YqDffURH
+c8SMOCP4BIPoZ/abi/gnEntGqsx1YALg0aosHwHGDJ/l+QJC6u6PZk310YzRw4GL
+K9+wscFgEub2OO+R83Vkfesj4tYzgOjab7+92a/soHdW0zhGejlvehODOgNZ6NUG
+MPQlT+qpF9Jh5IThYXupXXFzJzQe3O/qVXy89m69JGa+AWRvbu+M/A==
+-----END RSA PRIVATE KEY-----
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Trivial HTTP handler used by the test suites: replies 200 with the
+%% headers and body taken from the handler options.
+-module(http_handler).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
+%% Response headers and body, captured from Opts at init time.
+-record(state, {headers, body}).
+
+init({_Transport, http}, Req, Opts) ->
+ State = #state{
+ headers = proplists:get_value(headers, Opts, []),
+ body = proplists:get_value(body, Opts, "http_handler")},
+ {ok, Req, State}.
+
+handle(Req, #state{headers=RespHeaders, body=RespBody} = State) ->
+ {ok, Req2} = cowboy_http_req:reply(200, RespHeaders, RespBody, Req),
+ {ok, Req2, State}.
+
+terminate(_Req, _State) ->
+ ok.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Test handler that crashes on purpose at various points of the request
+%% life cycle, selected by the "case" query string value. Used to check
+%% that cowboy reports handler errors correctly whether or not a reply
+%% has already been sent.
+-module(http_handler_errors).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
+init({_Transport, http}, Req, _Opts) ->
+ {Case, Req1} = cowboy_http_req:qs_val(<<"case">>, Req),
+ case_init(Case, Req1).
+
+%% Crash in init/3 before any reply has been sent.
+case_init(<<"init_before_reply">> = Case, _Req) ->
+ erlang:error(Case);
+
+%% Crash in init/3 after a 200 reply has already gone out.
+case_init(<<"init_after_reply">> = Case, Req) ->
+ {ok, _Req1} = cowboy_http_req:reply(200, [], "http_handler_crashes", Req),
+ erlang:error(Case);
+
+%% Reply in init/3, then crash later in handle/2 (see handle below).
+case_init(<<"init_reply_handle_error">> = Case, Req) ->
+ {ok, Req1} = cowboy_http_req:reply(200, [], "http_handler_crashes", Req),
+ {ok, Req1, Case};
+
+%% Crash in handle/2 before replying.
+case_init(<<"handle_before_reply">> = Case, Req) ->
+ {ok, Req, Case};
+
+%% Crash in handle/2 after replying.
+case_init(<<"handle_after_reply">> = Case, Req) ->
+ {ok, Req, Case}.
+
+
+handle(_Req, <<"init_reply_handle_error">> = Case) ->
+ erlang:error(Case);
+
+handle(_Req, <<"handle_before_reply">> = Case) ->
+ erlang:error(Case);
+
+handle(Req, <<"handle_after_reply">> = Case) ->
+ {ok, _Req1} = cowboy_http_req:reply(200, [], "http_handler_crashes", Req),
+ erlang:error(Case).
+
+terminate(_Req, _State) ->
+ ok.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Test handler that aborts the request from init/3: it sends a reply
+%% with a fully custom status line and then returns shutdown, so
+%% handle/2 is never invoked.
+-module(http_handler_init_shutdown).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
+init({_Transport, http}, Req, _Opts) ->
+ %% The status is given as a binary to exercise custom status lines.
+ {ok, Req2} = cowboy_http_req:reply(<<"666 Init Shutdown Testing">>,
+ [{'Connection', <<"close">>}], Req),
+ {shutdown, Req2, undefined}.
+
+%% Not reached when init/3 returns shutdown; kept for the behaviour.
+handle(Req, State) ->
+ {ok, Req2} = cowboy_http_req:reply(200, [], "Hello world!", Req),
+ {ok, Req2, State}.
+
+terminate(_Req, _State) ->
+ ok.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Test handler exercising cowboy's loop (long-polling) mode: init/3
+%% returns {loop, ...} and the request is driven by info/3 messages.
+-module(http_handler_long_polling).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, info/3, terminate/2]).
+
+init({_Transport, http}, Req, _Opts) ->
+ erlang:send_after(500, self(), timeout),
+ %% Loop with a countdown of 9 as state, a 5000ms loop timeout, and
+ %% hibernation between messages.
+ {loop, Req, 9, 5000, hibernate}.
+
+%% handle/2 must not be called in loop mode; crash loudly if it is.
+handle(_Req, _State) ->
+ exit(badarg).
+
+%% After the countdown reaches zero, reply with status 102 and stop.
+info(timeout, Req, 0) ->
+ {ok, Req2} = cowboy_http_req:reply(102, Req),
+ {ok, Req2, 0};
+%% Otherwise re-arm the 500ms timer and keep looping.
+info(timeout, Req, State) ->
+ erlang:send_after(500, self(), timeout),
+ {loop, Req, State - 1, hibernate}.
+
+terminate(_Req, _State) ->
+ ok.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Test handler that reads a multipart request body part by part and
+%% replies with the accumulated parts encoded via term_to_binary.
+-module(http_handler_multipart).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
+init({_Transport, http}, Req, []) ->
+ {ok, Req, {}}.
+
+handle(Req, State) ->
+ {Parts, Req2} = acc_multipart(Req, []),
+ {ok, Req3} = cowboy_http_req:reply(200, [], term_to_binary(Parts), Req2),
+ {ok, Req3, State}.
+
+terminate(_Req, _State) ->
+ ok.
+
+%% Pull the next multipart event and fold it into the accumulator.
+acc_multipart(Req, Parts) ->
+ {Event, Req2} = cowboy_http_req:multipart_data(Req),
+ acc_multipart(Req2, Parts, Event).
+
+%% A new part begins: push {Headers, []} to collect its body chunks.
+acc_multipart(Req, Parts, {headers, Headers}) ->
+ acc_multipart(Req, [{Headers, []}|Parts]);
+%% Body chunk belonging to the current (head) part.
+acc_multipart(Req, [{Headers, Chunks}|Parts], {body, Data}) ->
+ acc_multipart(Req, [{Headers, [Data|Chunks]}|Parts]);
+%% Part finished: collapse its reversed chunks into a single binary.
+acc_multipart(Req, [{Headers, Chunks}|Parts], end_of_part) ->
+ acc_multipart(Req, [{Headers, list_to_binary(lists:reverse(Chunks))}|Parts]);
+%% End of the multipart body: return the parts in arrival order.
+acc_multipart(Req, Parts, eof) ->
+ {lists:reverse(Parts), Req}.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Test handler for the set_resp_* API: response headers, body and a
+%% cookie are all staged in init/3, then sent by a bare reply/2 in
+%% handle/2.
+-module(http_handler_set_resp).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
+init({_Transport, http}, Req, Opts) ->
+ Headers = proplists:get_value(headers, Opts, []),
+ Body = proplists:get_value(body, Opts, <<"http_handler_set_resp">>),
+ %% Stage every configured header on the request object.
+ {ok, Req2} = lists:foldl(fun({Name, Value}, {ok, R}) ->
+ cowboy_http_req:set_resp_header(Name, Value, R)
+ end, {ok, Req}, Headers),
+ {ok, Req3} = cowboy_http_req:set_resp_body(Body, Req2),
+ {ok, Req4} = cowboy_http_req:set_resp_header(
+ <<"X-Cowboy-Test">>, <<"ok">>, Req3),
+ {ok, Req5} = cowboy_http_req:set_resp_cookie(
+ <<"cake">>, <<"lie">>, [], Req4),
+ {ok, Req5, undefined}.
+
+%% Reply only when the staged header and body are both visible through
+%% the has_resp_* API; otherwise return without replying so the calling
+%% test can detect the failure.
+handle(Req, State) ->
+ case cowboy_http_req:has_resp_header(<<"X-Cowboy-Test">>, Req) of
+ false -> {ok, Req, State};
+ true ->
+ case cowboy_http_req:has_resp_body(Req) of
+ false -> {ok, Req, State};
+ true ->
+ {ok, Req2} = cowboy_http_req:reply(200, Req),
+ {ok, Req2, State}
+ end
+ end.
+
+terminate(_Req, _State) ->
+ ok.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Test handler that streams the response body via set_resp_body_fun:
+%% the body is written directly to the socket by a fun instead of being
+%% passed to reply.
+-module(http_handler_stream_body).
+-behaviour(cowboy_http_handler).
+-export([init/3, handle/2, terminate/2]).
+
+%% reply selects which code path handle/2 exercises (set_resp here).
+-record(state, {headers, body, reply}).
+
+init({_Transport, http}, Req, Opts) ->
+ Headers = proplists:get_value(headers, Opts, []),
+ Body = proplists:get_value(body, Opts, "http_handler_stream_body"),
+ Reply = proplists:get_value(reply, Opts),
+ {ok, Req, #state{headers=Headers, body=Body, reply=Reply}}.
+
+handle(Req, State=#state{headers=_Headers, body=Body, reply=set_resp}) ->
+ {ok, Transport, Socket} = cowboy_http_req:transport(Req),
+ %% The streaming fun writes the body straight to the socket and
+ %% returns sent; the length is announced up front via SLen.
+ SFun = fun() -> Transport:send(Socket, Body), sent end,
+ SLen = iolist_size(Body),
+ {ok, Req2} = cowboy_http_req:set_resp_body_fun(SLen, SFun, Req),
+ {ok, Req3} = cowboy_http_req:reply(200, Req2),
+ {ok, Req3, State}.
+
+terminate(_Req, _State) ->
+ ok.
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% PropEr-based property tests, currently covering cowboy_dispatcher.
+-module(proper_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([all/0, groups/0]). %% ct.
+-export([dispatcher_split_host/1]). %% cowboy_dispatcher.
+
+%% ct.
+
+all() ->
+ [{group, dispatcher}].
+
+groups() ->
+ [{dispatcher, [], [dispatcher_split_host]}].
+
+%% cowboy_dispatcher.
+
+%% Run the split_host symmetry property, mirroring PropEr's output to
+%% both the console and the ct logs.
+dispatcher_split_host(_Config) ->
+ true = proper:quickcheck(dispatcher_prop:prop_split_host_symmetric(),
+ [{on_output, fun(Format, Data) ->
+ io:format(user, Format, Data), %% Console.
+ io:format(Format, Data) %% Logs.
+ end}]).
--- /dev/null
+%% REST resource used by the test suites. Whether requests are forbidden
+%% is chosen at rest_init/2 time from the dispatch options, so the same
+%% module serves both the allowed and the forbidden test endpoints.
+-module(rest_forbidden_resource).
+-export([init/3, rest_init/2, allowed_methods/2, forbidden/2,
+ content_types_provided/2, content_types_accepted/2,
+ post_is_create/2, create_path/2, to_text/2, from_text/2]).
+
+init(_Transport, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_http_rest}.
+
+%% The single option is a boolean: should every request be forbidden?
+rest_init(Req, [Forbidden]) ->
+ {ok, Req, Forbidden}.
+
+allowed_methods(Req, State) ->
+ {['GET', 'HEAD', 'POST'], Req, State}.
+
+%% The state *is* the forbidden flag, so return it directly instead of
+%% duplicating one clause per boolean value. The guard preserves the
+%% original behaviour of crashing (function_clause) on a non-boolean.
+forbidden(Req, State) when is_boolean(State) ->
+ {State, Req, State}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"plain">>, []}, to_text}], Req, State}.
+
+content_types_accepted(Req, State) ->
+ {[{{<<"text">>, <<"plain">>, []}, from_text}], Req, State}.
+
+post_is_create(Req, State) ->
+ {true, Req, State}.
+
+%% Create at the request's own path, echoing the URI back as Location.
+create_path(Req, State) ->
+ {Path, Req2} = cowboy_http_req:raw_path(Req),
+ {Path, Req2, State}.
+
+to_text(Req, State) ->
+ {<<"This is REST!">>, Req, State}.
+
+from_text(Req, State) ->
+ {true, Req, State}.
+
+
--- /dev/null
+%% Minimal REST resource: upgrades to the REST protocol and serves a
+%% constant text/plain body for GET/HEAD.
+-module(rest_simple_resource).
+-export([init/3, content_types_provided/2, get_text_plain/2]).
+
+init(_Transport, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_http_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"plain">>, []}, get_text_plain}], Req, State}.
+
+get_text_plain(Req, State) ->
+ {<<"This is REST!">>, Req, State}.
+
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Echo websocket handler used by ws_SUITE: echoes text and binary
+%% frames back, and pushes a timer-driven text message every second.
+-module(websocket_handler).
+-behaviour(cowboy_http_handler).
+-behaviour(cowboy_http_websocket_handler).
+-export([init/3, handle/2, terminate/2]).
+-export([websocket_init/3, websocket_handle/3,
+ websocket_info/3, websocket_terminate/3]).
+
+init(_Any, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_http_websocket}.
+
+%% Plain HTTP callbacks must never run once upgraded; crash if they do.
+handle(_Req, _State) ->
+ exit(badarg).
+
+terminate(_Req, _State) ->
+ exit(badarg).
+
+websocket_init(_TransportName, Req, _Opts) ->
+ %% Arm the first push timer; each firing arrives as websocket_info.
+ erlang:start_timer(1000, self(), <<"websocket_init">>),
+ Req2 = cowboy_http_req:compact(Req),
+ {ok, Req2, undefined}.
+
+%% Echo text and binary frames; any other frame gets no reply from
+%% this handler.
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+%% On each timer tick, send its payload as a text frame and re-arm.
+websocket_info({timeout, _Ref, Msg}, Req, State) ->
+ erlang:start_timer(1000, self(), <<"websocket_handle">>),
+ {reply, {text, Msg}, Req, State};
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Websocket handler that refuses the upgrade: websocket_init/3 replies
+%% 403 and returns shutdown, so no other callback may ever run (they
+%% all crash with badarg to enforce that).
+-module(websocket_handler_init_shutdown).
+-behaviour(cowboy_http_handler).
+-behaviour(cowboy_http_websocket_handler).
+-export([init/3, handle/2, terminate/2]).
+-export([websocket_init/3, websocket_handle/3,
+ websocket_info/3, websocket_terminate/3]).
+
+init(_Any, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_http_websocket}.
+
+handle(_Req, _State) ->
+ exit(badarg).
+
+terminate(_Req, _State) ->
+ exit(badarg).
+
+%% Reject the handshake with a 403 before completing the upgrade.
+websocket_init(_TransportName, Req, _Opts) ->
+ {ok, Req2} = cowboy_http_req:reply(403, Req),
+ {shutdown, Req2}.
+
+websocket_handle(_Frame, _Req, _State) ->
+ exit(badarg).
+
+websocket_info(_Info, _Req, _State) ->
+ exit(badarg).
+
+websocket_terminate(_Reason, _Req, _State) ->
+ exit(badarg).
--- /dev/null
+%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(ws_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([all/0, groups/0, init_per_suite/1, end_per_suite/1,
+ init_per_group/2, end_per_group/2]). %% ct.
+-export([ws0/1, ws8/1, ws8_single_bytes/1, ws8_init_shutdown/1,
+ ws13/1, ws_timeout_hibernate/1]). %% ws.
+
+%% ct.
+
+all() ->
+ [{group, ws}].
+
+groups() ->
+ %% All tests run against the same plain-TCP listener group.
+ BaseTests = [ws0, ws8, ws8_single_bytes, ws8_init_shutdown, ws13,
+ ws_timeout_hibernate],
+ [{ws, [], BaseTests}].
+
+init_per_suite(Config) ->
+ application:start(inets),
+ application:start(cowboy),
+ Config.
+
+end_per_suite(_Config) ->
+ application:stop(cowboy),
+ application:stop(inets),
+ ok.
+
+init_per_group(ws, Config) ->
+ %% Fixed port, stored in Config for the test cases to connect to.
+ Port = 33080,
+ cowboy:start_listener(ws, 100,
+ cowboy_tcp_transport, [{port, Port}],
+ cowboy_http_protocol, [{dispatch, init_dispatch()}]
+ ),
+ [{port, Port}|Config].
+
+%% The group name doubles as the listener name to stop.
+end_per_group(Listener, _Config) ->
+ cowboy:stop_listener(Listener),
+ ok.
+
+%% Dispatch configuration.
+
+%% Map the test paths to their websocket handler modules, on the
+%% localhost virtual host only.
+init_dispatch() ->
+ [
+ {[<<"localhost">>], [
+ {[<<"websocket">>], websocket_handler, []},
+ {[<<"ws_timeout_hibernate">>], ws_timeout_hibernate_handler, []},
+ {[<<"ws_init_shutdown">>], websocket_handler_init_shutdown, []}
+ ]}
+ ].
+
+%% ws and wss.
+
+%% This test makes sure the code works even if we wait for a reply
+%% before sending the third challenge key in the GET body.
+%%
+%% This ensures that Cowboy will work fine with proxies on hixie.
+ws0(Config) ->
+ {port, Port} = lists:keyfind(port, 1, Config),
+ {ok, Socket} = gen_tcp:connect("localhost", Port,
+ [binary, {active, false}, {packet, raw}]),
+ %% hixie-style handshake: two Sec-Websocket-Key headers, with the
+ %% 8-byte challenge body deliberately sent only after the reply.
+ ok = gen_tcp:send(Socket,
+ "GET /websocket HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Connection: Upgrade\r\n"
+ "Upgrade: WebSocket\r\n"
+ "Origin: http://localhost\r\n"
+ "Sec-Websocket-Key1: Y\" 4 1Lj!957b8@0H756!i\r\n"
+ "Sec-Websocket-Key2: 1711 M;4\\74 80<6\r\n"
+ "\r\n"),
+ {ok, Handshake} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, {http_response, {1, 1}, 101, "WebSocket Protocol Handshake"}, Rest}
+ = erlang:decode_packet(http, Handshake, []),
+ [Headers, <<>>] = websocket_headers(
+ erlang:decode_packet(httph, Rest, []), []),
+ {'Connection', "Upgrade"} = lists:keyfind('Connection', 1, Headers),
+ {'Upgrade', "WebSocket"} = lists:keyfind('Upgrade', 1, Headers),
+ {"sec-websocket-location", "ws://localhost/websocket"}
+ = lists:keyfind("sec-websocket-location", 1, Headers),
+ {"sec-websocket-origin", "http://localhost"}
+ = lists:keyfind("sec-websocket-origin", 1, Headers),
+ %% Late 8-byte challenge; the 16-byte answer below is the fixed
+ %% expected response for these two keys and this challenge.
+ ok = gen_tcp:send(Socket, <<15,245,8,18,2,204,133,33>>),
+ {ok, Body} = gen_tcp:recv(Socket, 0, 6000),
+ <<169,244,191,103,146,33,149,59,74,104,67,5,99,118,171,236>> = Body,
+ %% hixie text frames are 0x00 Payload 0xFF; expect the echo plus
+ %% the handler's timer-driven pushes.
+ ok = gen_tcp:send(Socket, << 0, "client_msg", 255 >>),
+ {ok, << 0, "client_msg", 255 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 0, "websocket_init", 255 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 0, "websocket_handle", 255 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 0, "websocket_handle", 255 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 0, "websocket_handle", 255 >>} = gen_tcp:recv(Socket, 0, 6000),
+ %% We try to send another HTTP request to make sure
+ %% the server closed the request.
+ ok = gen_tcp:send(Socket, [
+ << 255, 0 >>, %% Close websocket command.
+ "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n" %% Server should ignore it.
+ ]),
+ {ok, << 255, 0 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {error, closed} = gen_tcp:recv(Socket, 0, 6000),
+ ok.
+
+%% Full protocol round trip over a version 8 handshake: echo, timer
+%% pushes, ping/pong and a clean close.
+ws8(Config) ->
+ {port, Port} = lists:keyfind(port, 1, Config),
+ {ok, Socket} = gen_tcp:connect("localhost", Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = gen_tcp:send(Socket, [
+ "GET /websocket HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Connection: Upgrade\r\n"
+ "Upgrade: websocket\r\n"
+ "Sec-WebSocket-Origin: http://localhost\r\n"
+ "Sec-WebSocket-Version: 8\r\n"
+ "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
+ "\r\n"]),
+ {ok, Handshake} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, {http_response, {1, 1}, 101, "Switching Protocols"}, Rest}
+ = erlang:decode_packet(http, Handshake, []),
+ [Headers, <<>>] = websocket_headers(
+ erlang:decode_packet(httph, Rest, []), []),
+ {'Connection', "Upgrade"} = lists:keyfind('Connection', 1, Headers),
+ {'Upgrade', "websocket"} = lists:keyfind('Upgrade', 1, Headers),
+ %% Expected accept token for the sample nonce key above.
+ {"sec-websocket-accept", "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="}
+ = lists:keyfind("sec-websocket-accept", 1, Headers),
+ %% Masked text frame containing "Hello"; the echo comes back
+ %% unmasked, followed by the handler's timer-driven pushes.
+ ok = gen_tcp:send(Socket, << 16#81, 16#85, 16#37, 16#fa, 16#21, 16#3d,
+ 16#7f, 16#9f, 16#4d, 16#51, 16#58 >>),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 5:7, "Hello" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 14:7, "websocket_init" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 9:4, 0:8 >>), %% ping
+ {ok, << 1:1, 0:3, 10:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000), %% pong
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 8:4, 0:8 >>), %% close
+ {ok, << 1:1, 0:3, 8:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {error, closed} = gen_tcp:recv(Socket, 0, 6000),
+ ok.
+
+%% Same masked "Hello" frame as ws8, but delivered one byte at a time
+%% with pauses, to exercise the server's incremental frame parsing.
+ws8_single_bytes(Config) ->
+ {port, Port} = lists:keyfind(port, 1, Config),
+ {ok, Socket} = gen_tcp:connect("localhost", Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = gen_tcp:send(Socket, [
+ "GET /websocket HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Connection: Upgrade\r\n"
+ "Upgrade: websocket\r\n"
+ "Sec-WebSocket-Origin: http://localhost\r\n"
+ "Sec-WebSocket-Version: 8\r\n"
+ "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
+ "\r\n"]),
+ {ok, Handshake} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, {http_response, {1, 1}, 101, "Switching Protocols"}, Rest}
+ = erlang:decode_packet(http, Handshake, []),
+ [Headers, <<>>] = websocket_headers(
+ erlang:decode_packet(httph, Rest, []), []),
+ {'Connection', "Upgrade"} = lists:keyfind('Connection', 1, Headers),
+ {'Upgrade', "websocket"} = lists:keyfind('Upgrade', 1, Headers),
+ {"sec-websocket-accept", "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="}
+ = lists:keyfind("sec-websocket-accept", 1, Headers),
+ ok = gen_tcp:send(Socket, << 16#81 >>), %% send one byte
+ ok = timer:sleep(100), %% sleep for a period
+ ok = gen_tcp:send(Socket, << 16#85 >>), %% send another and so on
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#37 >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#fa >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#21 >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#3d >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#7f >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#9f >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#4d >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#51 >>),
+ ok = timer:sleep(100),
+ ok = gen_tcp:send(Socket, << 16#58 >>),
+ %% The 1s init push can arrive before the echo because the frame
+ %% took over a second to transmit.
+ {ok, << 1:1, 0:3, 1:4, 0:1, 14:7, "websocket_init" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 5:7, "Hello" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 9:4, 0:8 >>), %% ping
+ {ok, << 1:1, 0:3, 10:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000), %% pong
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 8:4, 0:8 >>), %% close
+ {ok, << 1:1, 0:3, 8:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {error, closed} = gen_tcp:recv(Socket, 0, 6000),
+ ok.
+
+%% The handler hibernates with a 1000ms inactivity timeout; with no
+%% traffic sent, the server must emit a close frame and close the
+%% socket on its own.
+ws_timeout_hibernate(Config) ->
+ {port, Port} = lists:keyfind(port, 1, Config),
+ {ok, Socket} = gen_tcp:connect("localhost", Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = gen_tcp:send(Socket, [
+ "GET /ws_timeout_hibernate HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Connection: Upgrade\r\n"
+ "Upgrade: websocket\r\n"
+ "Sec-WebSocket-Origin: http://localhost\r\n"
+ "Sec-WebSocket-Version: 8\r\n"
+ "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
+ "\r\n"]),
+ {ok, Handshake} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, {http_response, {1, 1}, 101, "Switching Protocols"}, Rest}
+ = erlang:decode_packet(http, Handshake, []),
+ [Headers, <<>>] = websocket_headers(
+ erlang:decode_packet(httph, Rest, []), []),
+ {'Connection', "Upgrade"} = lists:keyfind('Connection', 1, Headers),
+ {'Upgrade', "websocket"} = lists:keyfind('Upgrade', 1, Headers),
+ {"sec-websocket-accept", "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="}
+ = lists:keyfind("sec-websocket-accept", 1, Headers),
+ %% Server-initiated close frame, then the connection goes away.
+ {ok, << 1:1, 0:3, 8:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {error, closed} = gen_tcp:recv(Socket, 0, 6000),
+ ok.
+
+%% The handler's websocket_init returns shutdown after replying, so the
+%% handshake must be refused with a 403 and the connection closed.
+ws8_init_shutdown(Config) ->
+ {port, Port} = lists:keyfind(port, 1, Config),
+ {ok, Socket} = gen_tcp:connect("localhost", Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = gen_tcp:send(Socket, [
+ "GET /ws_init_shutdown HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Connection: Upgrade\r\n"
+ "Upgrade: websocket\r\n"
+ "Sec-WebSocket-Origin: http://localhost\r\n"
+ "Sec-WebSocket-Version: 8\r\n"
+ "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
+ "\r\n"]),
+ {ok, Handshake} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, {http_response, {1, 1}, 403, "Forbidden"}, _Rest}
+ = erlang:decode_packet(http, Handshake, []),
+ {error, closed} = gen_tcp:recv(Socket, 0, 6000),
+ ok.
+
+%% Same round trip as ws8 but with a version 13 handshake, plus empty
+%% and non-empty binary frames.
+ws13(Config) ->
+ {port, Port} = lists:keyfind(port, 1, Config),
+ {ok, Socket} = gen_tcp:connect("localhost", Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = gen_tcp:send(Socket, [
+ "GET /websocket HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Connection: Upgrade\r\n"
+ "Origin: http://localhost\r\n"
+ "Sec-WebSocket-Version: 13\r\n"
+ "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
+ "Upgrade: websocket\r\n"
+ "\r\n"]),
+ {ok, Handshake} = gen_tcp:recv(Socket, 0, 6000),
+ {ok, {http_response, {1, 1}, 101, "Switching Protocols"}, Rest}
+ = erlang:decode_packet(http, Handshake, []),
+ [Headers, <<>>] = websocket_headers(
+ erlang:decode_packet(httph, Rest, []), []),
+ {'Connection', "Upgrade"} = lists:keyfind('Connection', 1, Headers),
+ {'Upgrade', "websocket"} = lists:keyfind('Upgrade', 1, Headers),
+ {"sec-websocket-accept", "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="}
+ = lists:keyfind("sec-websocket-accept", 1, Headers),
+ %% text
+ ok = gen_tcp:send(Socket, << 16#81, 16#85, 16#37, 16#fa, 16#21, 16#3d,
+ 16#7f, 16#9f, 16#4d, 16#51, 16#58 >>),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 5:7, "Hello" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ %% binary (empty)
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 2:4, 0:8 >>),
+ {ok, << 1:1, 0:3, 2:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000),
+ %% binary
+ ok = gen_tcp:send(Socket, << 16#82, 16#85, 16#37, 16#fa, 16#21, 16#3d,
+ 16#7f, 16#9f, 16#4d, 16#51, 16#58 >>),
+ {ok, << 1:1, 0:3, 2:4, 0:1, 5:7, "Hello" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ %% Receives.
+ {ok, << 1:1, 0:3, 1:4, 0:1, 14:7, "websocket_init" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ {ok, << 1:1, 0:3, 1:4, 0:1, 16:7, "websocket_handle" >>}
+ = gen_tcp:recv(Socket, 0, 6000),
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 9:4, 0:8 >>), %% ping
+ {ok, << 1:1, 0:3, 10:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000), %% pong
+ ok = gen_tcp:send(Socket, << 1:1, 0:3, 8:4, 0:8 >>), %% close
+ {ok, << 1:1, 0:3, 8:4, 0:8 >>} = gen_tcp:recv(Socket, 0, 6000),
+ {error, closed} = gen_tcp:recv(Socket, 0, 6000),
+ ok.
+
+%% Collect decoded HTTP headers until end-of-headers, lower-casing any
+%% string header names; returns [Headers, LeftoverBytes].
+websocket_headers({ok, http_eoh, Rest}, Headers) ->
+ [Headers, Rest];
+websocket_headers({ok, {http_header, _I, Key, _R, Value}, Rest}, Headers) ->
+ Name = case is_atom(Key) of
+ true -> Key;
+ false -> string:to_lower(Key)
+ end,
+ websocket_headers(erlang:decode_packet(httph, Rest, []),
+ [{Name, Value}|Headers]).
--- /dev/null
+%% Feel free to use, reuse and abuse the code in this file.
+
+%% Websocket handler that does nothing but hibernate with a 1000ms
+%% inactivity timeout, so that the server closes idle connections.
+-module(ws_timeout_hibernate_handler).
+-behaviour(cowboy_http_handler).
+-behaviour(cowboy_http_websocket_handler).
+-export([init/3, handle/2, terminate/2]).
+-export([websocket_init/3, websocket_handle/3,
+ websocket_info/3, websocket_terminate/3]).
+
+init(_Any, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_http_websocket}.
+
+%% Plain HTTP callbacks must never run once upgraded; crash if they do.
+handle(_Req, _State) ->
+ exit(badarg).
+
+terminate(_Req, _State) ->
+ exit(badarg).
+
+%% 1000ms timeout; hibernate between messages.
+websocket_init(_TransportName, Req, _Opts) ->
+ {ok, Req, undefined, 1000, hibernate}.
+
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State, hibernate}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State, hibernate}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
--- /dev/null
+UPSTREAM_SHORT_HASH:=4b93c2d
--- /dev/null
+APP_NAME:=cowboy
+
+UPSTREAM_GIT:=https://github.com/rabbitmq/cowboy.git
+UPSTREAM_REVISION:=4b93c2d19a10e5d9cee
+RETAIN_ORIGINAL_VERSION:=true
+WRAPPER_PATCHES:=\
+ 0001-R12-fake-iodata-type.patch \
+ 0002-R12-drop-all-references-to-boolean-type.patch \
+ 0003-R12-drop-all-references-to-reference-type.patch \
+ 0004-R12-drop-references-to-iodata-type.patch \
+ 0005-R12-drop-references-to-Default-any-type.patch \
+ 0006-Use-erlang-integer_to_list-and-lists-max-instead-of-.patch \
+ 0007-R12-type-definitions-must-be-ordered.patch \
+ 0008-sec-websocket-protocol.patch
+
+# Path include/http.hrl is needed during compilation
+INCLUDE_DIRS+=$(CLONE_DIR)
+
+ORIGINAL_APP_FILE:=$(CLONE_DIR)/src/$(APP_NAME).app.src
+DO_NOT_GENERATE_APP_FILE=true
+
+define construct_app_commands
+ cp $(CLONE_DIR)/LICENSE $(APP_DIR)/LICENSE-ISC-Cowboy
+endef
--- /dev/null
+# This file produces the makefile fragment associated with a package.
+# It includes the package's package.mk, interprets all of the
+# variables that package.mk might have set, and then visits any
+# dependencies of the package that have not already been visited.
+#
+# PACKAGE_DIR should be set to the canonical path of the package.
+
+# Mark that this package has been visited, so we can avoid doing it again
+DONE_$(PACKAGE_DIR):=true
+
+# Declare the standard per-package targets
+.PHONY: $(PACKAGE_DIR)+dist $(PACKAGE_DIR)+clean $(PACKAGE_DIR)+clean-recursive
+
+$(PACKAGE_DIR)+dist:: $(PACKAGE_DIR)/dist/.done
+
+$(PACKAGE_DIR)+srcdist:: $(PACKAGE_DIR)/srcdist/.done
+
+$(PACKAGE_DIR)+clean::
+
+$(PACKAGE_DIR)+clean-with-deps:: $(PACKAGE_DIR)+clean
+
+# Hook into the "all package" targets used by the main public-umbrella
+# makefile
+all-packages:: $(PACKAGE_DIR)/dist/.done
+clean-all-packages:: $(PACKAGE_DIR)+clean
+
+ifndef NON_INTEGRATED_$(PACKAGE_DIR)
+
+PACKAGE_NAME=$(notdir $(abspath $(PACKAGE_DIR)))
+
+# Set all the per-package vars to their default values
+
+# The packages upon which this package depends
+DEPS:=
+
+# The name of the erlang application produced by the package
+APP_NAME=$(call package_to_app_name,$(PACKAGE_NAME))
+
+# The location of the .app file which is used as the basis for the
+# .app file which goes into the .ez
+ORIGINAL_APP_FILE=$(EBIN_DIR)/$(APP_NAME).app
+
+# The location of the source for that file (before the modules list is
+# generated). Ignored if DO_NOT_GENERATE_APP_FILE is set.
+ORIGINAL_APP_SOURCE=$(PACKAGE_DIR)/src/$(APP_NAME).app.src
+
+# Set to prevent generation of the app file.
+DO_NOT_GENERATE_APP_FILE:=
+
+# Should the .ez files for this package, its dependencies, and its
+# source distribution be included in RabbitMQ releases, and should we test
+# this plugin when invoking "make test" in the umbrella?
+RELEASABLE:=
+
+# The options to pass to erlc when compiling .erl files in this
+# package
+PACKAGE_ERLC_OPTS=$(ERLC_OPTS)
+
+# The directories containing Erlang source files
+SOURCE_DIRS:=$(PACKAGE_DIR)/src
+
+# The Erlang source files to compile and include in the package .ez file
+SOURCE_ERLS=$(strip $(foreach D,$(SOURCE_DIRS),$(wildcard $(D)/*.erl)))
+
+# The directories containing Erlang *.hrl files to include in the
+# package .ez file.
+INCLUDE_DIRS:=$(PACKAGE_DIR)/include
+
+# The Erlang .hrl files to include in the package .ez file.
+INCLUDE_HRLS=$(strip $(foreach D,$(INCLUDE_DIRS),$(wildcard $(D)/*.hrl)))
+
+# The location of the directory containing the .app file. This is
+# also where the .beam files produced by compiling SOURCE_ERLS will
+# go.
+EBIN_DIR:=$(PACKAGE_DIR)/ebin
+
+# The .beam files for the application.
+EBIN_BEAMS=$(patsubst %,$(EBIN_DIR)/%.beam,$(notdir $(basename $(SOURCE_ERLS))))
+
+# Erlang expressions which will be invoked during testing (not in the
+# broker).
+STANDALONE_TEST_COMMANDS:=
+
+# Erlang expressions which will be invoked within the broker during
+# testing.
+WITH_BROKER_TEST_COMMANDS:=
+
+# Config file to give to the test broker.
+WITH_BROKER_TEST_CONFIG:=
+
+# Test scripts which should be invoked during testing
+STANDALONE_TEST_SCRIPTS:=
+
+# Test scripts which should be invoked alongside a running broker
+# during testing
+WITH_BROKER_TEST_SCRIPTS:=
+
+# Test scripts which should be invoked to configure the broker before testing
+WITH_BROKER_SETUP_SCRIPTS:=
+
+# When cleaning, should we also remove the cloned directory for
+# wrappers?
+PRESERVE_CLONE_DIR?=
+
+# The directory within the package that contains tests
+TEST_DIR=$(PACKAGE_DIR)/test
+
+# The directories containing .erl files for tests
+TEST_SOURCE_DIRS=$(TEST_DIR)/src
+
+# The .erl files for tests
+TEST_SOURCE_ERLS=$(strip $(foreach D,$(TEST_SOURCE_DIRS),$(wildcard $(D)/*.erl)))
+
+# Where to put .beam files produced by compiling TEST_SOURCE_ERLS
+TEST_EBIN_DIR=$(TEST_DIR)/ebin
+
+# The .beam files produced by compiling TEST_SOURCE_ERLS
+TEST_EBIN_BEAMS=$(patsubst %,$(TEST_EBIN_DIR)/%.beam,$(notdir $(basename $(TEST_SOURCE_ERLS))))
+
+# Wrapper package variables
+
+# The git URL to clone from. Setting this variable marks the package
+# as a wrapper package.
+UPSTREAM_GIT:=
+
+# The Mercurial URL to clone from. Setting this variable marks the
+# package as a wrapper package.
+UPSTREAM_HG:=
+
+UPSTREAM_TYPE=$(if $(UPSTREAM_GIT),git)$(if $(UPSTREAM_HG),hg)
+
+# The upstream revision to clone. Leave empty for default or master
+UPSTREAM_REVISION:=
+
+# Where to clone the upstream repository to
+CLONE_DIR=$(PACKAGE_DIR)/$(patsubst %-wrapper,%,$(PACKAGE_NAME))-$(UPSTREAM_TYPE)
+
+# The source directories contained in the cloned repositories. These
+# are appended to SOURCE_DIRS.
+UPSTREAM_SOURCE_DIRS=$(CLONE_DIR)/src
+
+# The include directories contained in the cloned repositories. These
+# are appended to INCLUDE_DIRS.
+UPSTREAM_INCLUDE_DIRS=$(CLONE_DIR)/include
+
+# Patches to apply to the upstream codebase after cloning, if any
+WRAPPER_PATCHES:=
+
+# The version number to assign to the build artifacts
+PACKAGE_VERSION=$(VERSION)
+
+# Should the app version incorporate the version from the original
+# .app file?
+RETAIN_ORIGINAL_VERSION:=
+
+# The original version that should be incorporated into the package
+# version if RETAIN_ORIGINAL_VERSION is set. If empty, the original
+# version will be extracted from ORIGINAL_APP_FILE.
+ORIGINAL_VERSION:=
+
+# For customising construction of the build application directory.
+CONSTRUCT_APP_PREREQS:=
+construct_app_commands=
+
+package_rules=
+
+# Now let the package makefile fragment do its stuff
+include $(PACKAGE_DIR)/package.mk
+
+# package_rules provides a convenient way to force prompt expansion
+# of variables, including expansion in commands that would otherwise
+# be deferred.
+#
+# If package_rules is defined by the package makefile, we expand it
+# and eval it. The point here is to get around the fact that make
+# defers expansion of commands. But if we use package variables in
+# targets, as we naturally want to do, deferred expansion doesn't
+# work: They might have been trampled on by a later package. Because
+# we expand package_rules here, references to package variables will
+# get expanded with the values we expect.
+#
+# The downside is that any variable references for which expansion
+# really should be deferred need to be protected by doubling up the
+# dollar. E.g., inside package_rules, you should write $$@, not $@.
+#
+# We use the same trick again below.
+ifdef package_rules
+$(eval $(package_rules))
+endif
+
+# Some variables used for brevity below. Packages can't set these.
+APP_FILE=$(PACKAGE_DIR)/build/$(APP_NAME).app.$(PACKAGE_VERSION)
+APP_DONE=$(PACKAGE_DIR)/build/app/.done.$(PACKAGE_VERSION)
+APP_DIR=$(PACKAGE_DIR)/build/app/$(APP_NAME)-$(PACKAGE_VERSION)
+EZ_FILE=$(PACKAGE_DIR)/dist/$(APP_NAME)-$(PACKAGE_VERSION).ez
+DEPS_FILE=$(PACKAGE_DIR)/build/deps.mk
+
+
+# Convert the DEPS package names to canonical paths
+DEP_PATHS:=$(foreach DEP,$(DEPS),$(call package_to_path,$(DEP)))
+
+# Handle RETAIN_ORIGINAL_VERSION / ORIGINAL_VERSION
+ifdef RETAIN_ORIGINAL_VERSION
+
+# Automatically acquire ORIGINAL_VERSION from ORIGINAL_APP_FILE
+ifndef ORIGINAL_VERSION
+
+# The generated ORIGINAL_VERSION setting goes in build/version.mk
+$(eval $(call safe_include,$(PACKAGE_DIR)/build/version.mk))
+
+$(PACKAGE_DIR)/build/version.mk: $(ORIGINAL_APP_FILE)
+ sed -n -e 's|^.*{vsn, *"\([^"]*\)".*$$|ORIGINAL_VERSION:=\1|p' <$< >$@
+
+$(APP_FILE): $(PACKAGE_DIR)/build/version.mk
+
+endif # ifndef ORIGINAL_VERSION
+
+PACKAGE_VERSION:=$(ORIGINAL_VERSION)-rmq$(VERSION)
+
+endif # ifdef RETAIN_ORIGINAL_VERSION
+
+# Handle wrapper packages
+ifneq ($(UPSTREAM_TYPE),)
+
+SOURCE_DIRS+=$(UPSTREAM_SOURCE_DIRS)
+INCLUDE_DIRS+=$(UPSTREAM_INCLUDE_DIRS)
+
+define package_rules
+
+ifdef UPSTREAM_GIT
+$(CLONE_DIR)/.done:
+	rm -rf $(CLONE_DIR)
+	git clone $(UPSTREAM_GIT) $(CLONE_DIR)
+	# Work around weird github breakage (bug 25264)
+	cd $(CLONE_DIR) && git pull
+	$(if $(UPSTREAM_REVISION),cd $(CLONE_DIR) && git checkout $(UPSTREAM_REVISION))
+	$(if $(WRAPPER_PATCHES),$(foreach F,$(WRAPPER_PATCHES),patch -d $(CLONE_DIR) -p1 <$(PACKAGE_DIR)/$(F) &&) :)
+	touch $$@
+endif # UPSTREAM_GIT
+
+ifdef UPSTREAM_HG
+$(CLONE_DIR)/.done:
+	rm -rf $(CLONE_DIR)
+	hg clone -r $(or $(UPSTREAM_REVISION),default) $(UPSTREAM_HG) $(CLONE_DIR)
+	$(if $(WRAPPER_PATCHES),$(foreach F,$(WRAPPER_PATCHES),patch -d $(CLONE_DIR) -p1 <$(PACKAGE_DIR)/$(F) &&) :)
+	touch $$@
+endif # UPSTREAM_HG
+
+# When we clone, we need to remake anything derived from the app file
+# (e.g. build/version.mk).
+$(ORIGINAL_APP_FILE): $(CLONE_DIR)/.done
+
+# We include the commit hash into the package version, via hash.mk
+# (not in build/ because we want it to survive
+# make PRESERVE_CLONE_DIR=true clean
+# for obvious reasons)
+$(eval $(call safe_include,$(PACKAGE_DIR)/hash.mk))
+
+$(PACKAGE_DIR)/hash.mk: $(CLONE_DIR)/.done
+	@mkdir -p $$(@D)
+ifdef UPSTREAM_GIT
+	echo UPSTREAM_SHORT_HASH:=`git --git-dir=$(CLONE_DIR)/.git log -n 1 HEAD | grep commit | cut -b 8-14` >$$@
+endif
+ifdef UPSTREAM_HG
+	echo UPSTREAM_SHORT_HASH:=`hg id -R $(CLONE_DIR) -i | cut -c -7` >$$@
+endif
+
+$(APP_FILE): $(PACKAGE_DIR)/hash.mk
+
+PACKAGE_VERSION:=$(PACKAGE_VERSION)-$(UPSTREAM_TYPE)$(UPSTREAM_SHORT_HASH)
+
+# Remove the clone and the recorded hash unless PRESERVE_CLONE_DIR is
+# set. Note: make runs from the umbrella root, so hash.mk must be
+# addressed via $(PACKAGE_DIR) (it is created at $(PACKAGE_DIR)/hash.mk
+# above); a bare "hash.mk" would name a file in the umbrella root.
+$(PACKAGE_DIR)+clean::
+	[ "x" != "x$(PRESERVE_CLONE_DIR)" ] || rm -rf $(CLONE_DIR) $(PACKAGE_DIR)/hash.mk
+endef # package_rules
+$(eval $(package_rules))
+
+endif # UPSTREAM_TYPE
+
+# Generate a rule to compile .erl files from the directory $(1) into
+# directory $(2), taking extra erlc options from $(3)
+define package_source_dir_targets
+$(2)/%.beam: $(1)/%.erl $(PACKAGE_DIR)/build/dep-apps/.done | $(DEPS_FILE)
+ @mkdir -p $$(@D)
+ ERL_LIBS=$(PACKAGE_DIR)/build/dep-apps $(ERLC) $(PACKAGE_ERLC_OPTS) $(foreach D,$(INCLUDE_DIRS),-I $(D)) -pa $$(@D) -o $$(@D) $(3) $$<
+
+endef
+
+$(eval $(foreach D,$(SOURCE_DIRS),$(call package_source_dir_targets,$(D),$(EBIN_DIR),)))
+$(eval $(foreach D,$(TEST_SOURCE_DIRS),$(call package_source_dir_targets,$(D),$(TEST_EBIN_DIR),-pa $(EBIN_DIR))))
+
+# Commands to run the broker for tests
+#
+# $(1): The value for RABBITMQ_SERVER_START_ARGS
+# $(2): Extra env var settings when invoking the rabbitmq-server script
+# $(3): Extra .ezs to copy into the plugins dir
+define run_broker
+ rm -rf $(TEST_TMPDIR)
+ mkdir -p $(foreach D,log plugins $(NODENAME),$(TEST_TMPDIR)/$(D))
+ cp -p $(PACKAGE_DIR)/dist/*.ez $(TEST_TMPDIR)/plugins
+ $(call copy,$(3),$(TEST_TMPDIR)/plugins)
+ rm -f $(TEST_TMPDIR)/plugins/rabbit_common*.ez
+ for plugin in \
+ $$$$(RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+ RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
+ $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmq-plugins list -m); do \
+ RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+ RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
+ $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmq-plugins \
+ enable $$$$plugin; \
+ done
+ RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+ RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
+ RABBITMQ_LOG_BASE=$(TEST_TMPDIR)/log \
+ RABBITMQ_MNESIA_BASE=$(TEST_TMPDIR)/$(NODENAME) \
+ RABBITMQ_PID_FILE=$(TEST_TMPDIR)/$(NODENAME).pid \
+ RABBITMQ_NODENAME=$(NODENAME) \
+ RABBITMQ_SERVER_START_ARGS=$(1) \
+ $(2) $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmq-server
+endef
+
+# Commands to run the package's test suite
+#
+# $(1): Extra .ezs to copy into the plugins dir
+define run_with_broker_tests
+$(if $(WITH_BROKER_TEST_COMMANDS)$(WITH_BROKER_TEST_SCRIPTS),$(call run_with_broker_tests_aux,$1))
+endef
+
+define run_with_broker_tests_aux
+ $(call run_broker,'-pa $(TEST_EBIN_DIR) -coverage directories ["$(EBIN_DIR)"$(COMMA)"$(TEST_EBIN_DIR)"]',RABBITMQ_CONFIG_FILE=$(WITH_BROKER_TEST_CONFIG),$(1)) &
+ $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmqctl -n $(NODENAME) wait $(TEST_TMPDIR)/$(NODENAME).pid
+ echo > $(TEST_TMPDIR)/rabbit-test-output && \
+ if $(foreach SCRIPT,$(WITH_BROKER_SETUP_SCRIPTS),$(SCRIPT) &&) \
+ $(foreach CMD,$(WITH_BROKER_TEST_COMMANDS), \
+ echo >> $(TEST_TMPDIR)/rabbit-test-output && \
+ echo "$(CMD)." \
+ | tee -a $(TEST_TMPDIR)/rabbit-test-output \
+ | $(ERL_CALL) $(ERL_CALL_OPTS) \
+ | tee -a $(TEST_TMPDIR)/rabbit-test-output \
+ | egrep "{ok, (ok|passed)}" >/dev/null &&) \
+ $(foreach SCRIPT,$(WITH_BROKER_TEST_SCRIPTS),$(SCRIPT) &&) : ; \
+ then \
+ touch $(TEST_TMPDIR)/.passed ; \
+ echo "\nPASSED\n" ; \
+ else \
+ cat $(TEST_TMPDIR)/rabbit-test-output ; \
+ echo "\n\nFAILED\n" ; \
+ fi
+ sleep 1
+ echo "rabbit_misc:report_cover(), init:stop()." | $(ERL_CALL) $(ERL_CALL_OPTS)
+ sleep 1
+ test -f $(TEST_TMPDIR)/.passed
+endef
+
+# The targets common to all integrated packages
+define package_rules
+
+# Put all relevant ezs into the dist dir for this package, including
+# the main ez file produced by this package
+#
+# When the package version changes, our .ez filename will change, and
+# we need to regenerate the dist directory. So the dependency needs
+# to go via a stamp file that incorporates the version in its name.
+# But we need a target with a fixed name for other packages to depend
+# on. And it can't be a phony, as a phony will always get rebuilt.
+# Hence the need for two stamp files here.
+$(PACKAGE_DIR)/dist/.done: $(PACKAGE_DIR)/dist/.done.$(PACKAGE_VERSION)
+ touch $$@
+
+$(PACKAGE_DIR)/dist/.done.$(PACKAGE_VERSION): $(PACKAGE_DIR)/build/dep-ezs/.done $(APP_DONE)
+ rm -rf $$(@D)
+ mkdir -p $$(@D)
+ cd $(dir $(APP_DIR)) && zip -q -r $$(abspath $(EZ_FILE)) $(notdir $(APP_DIR))
+ $$(call copy,$$(wildcard $$(<D)/*.ez),$(PACKAGE_DIR)/dist)
+ touch $$@
+
+# Gather all the ezs from dependency packages
+$(PACKAGE_DIR)/build/dep-ezs/.done: $(foreach P,$(DEP_PATHS),$(P)/dist/.done)
+ rm -rf $$(@D)
+ mkdir -p $$(@D)
+ @echo [elided] copy dependent ezs
+ @$(if $(DEP_PATHS),$(foreach P,$(DEP_PATHS),$$(call copy,$$(wildcard $(P)/dist/*.ez),$$(@D),&&)) :)
+ touch $$@
+
+# Put together the main app tree for this package
+$(APP_DONE): $(EBIN_BEAMS) $(INCLUDE_HRLS) $(APP_FILE) $(CONSTRUCT_APP_PREREQS)
+ rm -rf $$(@D)
+ mkdir -p $(APP_DIR)/ebin $(APP_DIR)/include
+ @echo [elided] copy beams to ebin
+ @$(call copy,$(EBIN_BEAMS),$(APP_DIR)/ebin)
+ cp -p $(APP_FILE) $(APP_DIR)/ebin/$(APP_NAME).app
+ $(call copy,$(INCLUDE_HRLS),$(APP_DIR)/include)
+ $(construct_app_commands)
+ touch $$@
+
+# Copy the .app file into place, set its version number
+$(APP_FILE): $(ORIGINAL_APP_FILE)
+ @mkdir -p $$(@D)
+ sed -e 's|{vsn, *\"[^\"]*\"|{vsn,\"$(PACKAGE_VERSION)\"|' <$$< >$$@
+
+ifndef DO_NOT_GENERATE_APP_FILE
+
+# Generate the .app file. Note that this is a separate step from above
+# so that the plugin still works correctly when symlinked as a directory
+$(ORIGINAL_APP_FILE): $(ORIGINAL_APP_SOURCE) $(SOURCE_ERLS) $(UMBRELLA_BASE_DIR)/generate_app
+ @mkdir -p $$(@D)
+ escript $(UMBRELLA_BASE_DIR)/generate_app $$< $$@ $(SOURCE_DIRS)
+
+$(PACKAGE_DIR)+clean::
+ rm -f $(ORIGINAL_APP_FILE)
+
+endif
+
+# Unpack the ezs from dependency packages, so that their contents are
+# accessible to erlc
+$(PACKAGE_DIR)/build/dep-apps/.done: $(PACKAGE_DIR)/build/dep-ezs/.done
+ rm -rf $$(@D)
+ mkdir -p $$(@D)
+ @echo [elided] unzip ezs
+ @cd $$(@D) && $$(foreach EZ,$$(wildcard $(PACKAGE_DIR)/build/dep-ezs/*.ez),unzip -q $$(abspath $$(EZ)) &&) :
+ touch $$@
+
+# Dependency autogeneration. This is complicated slightly by the need
+# to generate a dependency file which is path-independent.
+$(DEPS_FILE): $(SOURCE_ERLS) $(INCLUDE_HRLS) $(TEST_SOURCE_ERLS)
+ @mkdir -p $$(@D)
+ @echo [elided] generate deps
+ @$$(if $$^,echo $$(subst : ,:,$$(foreach F,$$^,$$(abspath $$(F)):)) | escript $(abspath $(UMBRELLA_BASE_DIR)/generate_deps) $$@ '$$$$(EBIN_DIR)',echo >$$@)
+ @echo [elided] fix test deps
+ @$$(foreach F,$(TEST_EBIN_BEAMS),sed -e 's|^$$$$(EBIN_DIR)/$$(notdir $$(F)):|$$$$(TEST_EBIN_DIR)/$$(notdir $$(F)):|' $$@ > $$@.tmp && mv $$@.tmp $$@ && ) :
+ sed -e 's|$$@|$$$$(DEPS_FILE)|' $$@ > $$@.tmp && mv $$@.tmp $$@
+
+$(eval $(call safe_include,$(DEPS_FILE)))
+
+$(PACKAGE_DIR)/srcdist/.done: $(PACKAGE_DIR)/srcdist/.done.$(PACKAGE_VERSION)
+ touch $$@
+
+$(PACKAGE_DIR)/srcdist/.done.$(PACKAGE_VERSION):
+ mkdir -p $(PACKAGE_DIR)/build/srcdist/
+ rsync -a --exclude '.hg*' --exclude '.git*' --exclude 'build' $(PACKAGE_DIR) $(PACKAGE_DIR)/build/srcdist/$(APP_NAME)-$(PACKAGE_VERSION)
+ mkdir -p $(PACKAGE_DIR)/srcdist/
+ tar cjf $(PACKAGE_DIR)/srcdist/$(APP_NAME)-$(PACKAGE_VERSION)-src.tar.bz2 -C $(PACKAGE_DIR)/build/srcdist/ $(APP_NAME)-$(PACKAGE_VERSION)
+ touch $$@
+
+$(PACKAGE_DIR)+clean::
+ rm -rf $(EBIN_DIR)/*.beam $(TEST_EBIN_DIR)/*.beam $(PACKAGE_DIR)/dist $(PACKAGE_DIR)/srcdist $(PACKAGE_DIR)/build $(PACKAGE_DIR)/erl_crash.dump
+
+$(PACKAGE_DIR)+clean-with-deps:: $(foreach P,$(DEP_PATHS),$(P)+clean-with-deps)
+
+ifdef RELEASABLE
+all-releasable:: $(PACKAGE_DIR)/dist/.done
+
+copy-releasable:: $(PACKAGE_DIR)/dist/.done
+ cp $(PACKAGE_DIR)/dist/*.ez $(PLUGINS_DIST_DIR)
+
+copy-srcdist:: $(PLUGINS_SRC_DIST_DIR)/$(PACKAGE_DIR)/.srcdist_done
+
+endif
+
+$(PLUGINS_SRC_DIST_DIR)/$(PACKAGE_DIR)/.srcdist_done:: $(ORIGINAL_APP_FILE) $(foreach P,$(DEP_PATHS),$(PLUGINS_SRC_DIST_DIR)/$(P)/.srcdist_done)
+ rsync -a --exclude '.hg*' --exclude '.git*' $(PACKAGE_DIR) $(PLUGINS_SRC_DIST_DIR)/
+ [ -f $(PACKAGE_DIR)/license_info ] && cp $(PACKAGE_DIR)/license_info $(PLUGINS_SRC_DIST_DIR)/licensing/license_info_$(PACKAGE_NAME) || true
+ find $(PACKAGE_DIR) -maxdepth 1 -name 'LICENSE-*' -exec cp '{}' $(PLUGINS_SRC_DIST_DIR)/licensing/ \;
+ touch $(PLUGINS_SRC_DIST_DIR)/$(PACKAGE_DIR)/.srcdist_done
+
+# A hook to allow packages to verify that prerequisites are satisfied
+# before running.
+.PHONY: $(PACKAGE_DIR)+pre-run
+$(PACKAGE_DIR)+pre-run::
+
+# Run erlang with the package, its tests, and all its dependencies
+# available.
+.PHONY: $(PACKAGE_DIR)+run
+$(PACKAGE_DIR)+run: $(PACKAGE_DIR)/dist/.done $(TEST_EBIN_BEAMS) $(PACKAGE_DIR)+pre-run
+ ERL_LIBS=$(PACKAGE_DIR)/dist $(ERL) $(ERL_OPTS) -pa $(TEST_EBIN_DIR)
+
+# Run the broker with the package, its tests, and all its dependencies
+# available.
+.PHONY: $(PACKAGE_DIR)+run-in-broker
+$(PACKAGE_DIR)+run-in-broker: $(PACKAGE_DIR)/dist/.done $(RABBITMQ_SERVER_PATH)/dist/.done $(TEST_EBIN_BEAMS)
+ $(call run_broker,'-pa $(TEST_EBIN_DIR)',RABBITMQ_ALLOW_INPUT=true)
+
+# A hook to allow packages to verify that prerequisites are satisfied
+# before running tests.
+.PHONY: $(PACKAGE_DIR)+pre-test
+$(PACKAGE_DIR)+pre-test::
+
+# Runs the package's tests that operate within (or in conjunction with)
+# a running broker.
+.PHONY: $(PACKAGE_DIR)+in-broker-test
+$(PACKAGE_DIR)+in-broker-test: $(PACKAGE_DIR)/dist/.done $(RABBITMQ_SERVER_PATH)/dist/.done $(TEST_EBIN_BEAMS) $(PACKAGE_DIR)+pre-test $(PACKAGE_DIR)+standalone-test $(if $(RELEASABLE),$(call chain_test,$(PACKAGE_DIR)+in-broker-test))
+ $(call run_with_broker_tests)
+
+# Running the coverage tests requires Erlang/OTP R14. Note that
+# coverage only covers the in-broker tests.
+.PHONY: $(PACKAGE_DIR)+coverage
+$(PACKAGE_DIR)+coverage: $(PACKAGE_DIR)/dist/.done $(COVERAGE_PATH)/dist/.done $(TEST_EBIN_BEAMS) $(PACKAGE_DIR)+pre-test
+ $(call run_with_broker_tests,$(COVERAGE_PATH)/dist/*.ez)
+
+# Runs the package's tests that don't need a running broker
+.PHONY: $(PACKAGE_DIR)+standalone-test
+$(PACKAGE_DIR)+standalone-test: $(PACKAGE_DIR)/dist/.done $(TEST_EBIN_BEAMS) $(PACKAGE_DIR)+pre-test $(if $(RELEASABLE),$(call chain_test,$(PACKAGE_DIR)+standalone-test))
+ $$(if $(STANDALONE_TEST_COMMANDS),\
+ $$(foreach CMD,$(STANDALONE_TEST_COMMANDS),\
+ ERL_LIBS=$(PACKAGE_DIR)/dist $(ERL) -noinput $(ERL_OPTS) -pa $(TEST_EBIN_DIR) -sname standalone_test -eval "init:stop(case $$(CMD) of ok -> 0; passed -> 0; _Else -> 1 end)" &&\
+ )\
+ :)
+ $$(if $(STANDALONE_TEST_SCRIPTS),$$(foreach SCRIPT,$(STANDALONE_TEST_SCRIPTS),$$(SCRIPT) &&) :)
+
+# Run all the package's tests
+.PHONY: $(PACKAGE_DIR)+test
+$(PACKAGE_DIR)+test:: $(PACKAGE_DIR)+standalone-test $(PACKAGE_DIR)+in-broker-test
+
+.PHONY: $(PACKAGE_DIR)+check-xref
+$(PACKAGE_DIR)+check-xref: $(PACKAGE_DIR)/dist/.done
+ UNPACKDIR=$$$$(mktemp -d $(TMPDIR)/tmp.XXXXXXXXXX) && \
+ for ez in $$$$(find $(PACKAGE_DIR)/dist -type f -name "*.ez"); do \
+ unzip -q $$$${ez} -d $$$${UNPACKDIR}; \
+ done && \
+ rm -rf $$$${UNPACKDIR}/rabbit_common-* && \
+ ln -sf $$$$(pwd)/$(RABBITMQ_SERVER_PATH)/ebin $$$${UNPACKDIR} && \
+ OK=true && \
+ { $(UMBRELLA_BASE_DIR)/check_xref $(PACKAGE_DIR) $$$${UNPACKDIR} || OK=false; } && \
+ rm -rf $$$${UNPACKDIR} && \
+ $$$${OK}
+
+check-xref-packages:: $(PACKAGE_DIR)+check-xref
+
+endef
+$(eval $(package_rules))
+
+# Recursing into dependency packages has to be the last thing we do
+# because it will trample all over the per-package variables.
+
+# Recurse into dependency packages
+$(foreach DEP_PATH,$(DEP_PATHS),$(eval $(call do_package,$(DEP_PATH))))
+
+else # NON_INTEGRATED_$(PACKAGE_DIR)
+
+define package_rules
+
+# When the package version changes, our .ez filename will change, and
+# we need to regenerate the dist directory. So the dependency needs
+# to go via a stamp file that incorporates the version in its name.
+# But we need a target with a fixed name for other packages to depend
+# on. And it can't be a phony, as a phony will always get rebuilt.
+# Hence the need for two stamp files here.
+$(PACKAGE_DIR)/dist/.done: $(PACKAGE_DIR)/dist/.done.$(VERSION)
+ touch $$@
+
+# Non-integrated packages (rabbitmq-server and rabbitmq-erlang-client)
+# present a dilemma. We could re-make the package every time we need
+# it. But that will cause a huge amount of unnecessary rebuilding.
+# Or we could not worry about rebuilding non-integrated packages.
+# That's good for those developing plugins, but not for those who want
+# to work on the broker and erlang client in the context of the
+# plugins. So instead, we use a conservative approximation to the
+# dependency structure within the package, to tell when to re-run the
+# makefile.
+$(PACKAGE_DIR)/dist/.done.$(VERSION): $(PACKAGE_DIR)/Makefile $(wildcard $(PACKAGE_DIR)/*.mk) $(wildcard $(PACKAGE_DIR)/src/*.erl) $(wildcard $(PACKAGE_DIR)/include/*.hrl) $(wildcard $(PACKAGE_DIR)/*.py) $(foreach DEP,$(NON_INTEGRATED_DEPS_$(PACKAGE_DIR)),$(call package_to_path,$(DEP))/dist/.done)
+ rm -rf $$(@D)
+ $$(MAKE) -C $(PACKAGE_DIR)
+ mkdir -p $$(@D)
+ touch $$@
+
+# When building plugins-src we want to "make clean", but some
+# non-integrated packages will not be there. Don't fall over in that case.
+$(PACKAGE_DIR)+clean::
+ if [ -d $(PACKAGE_DIR) ] ; then $$(MAKE) -C $(PACKAGE_DIR) clean ; fi
+ rm -rf $(PACKAGE_DIR)/dist
+
+endef
+$(eval $(package_rules))
+
+endif # NON_INTEGRATED_$(PACKAGE_DIR)
--- /dev/null
+
+Copyright (c) 2010, Torbjorn Tornkvist
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
--- /dev/null
+# Standard plugin Makefile: all build/test/dist logic is supplied by the
+# umbrella's shared makefile fragment one directory up.
+include ../umbrella.mk
--- /dev/null
+diff -Naur eldap.orig/ebin/eldap.app eldap/ebin/eldap.app
+--- eldap.orig/ebin/eldap.app 1970-01-01 01:00:00.000000000 +0100
++++ eldap/ebin/eldap.app 2011-01-20 12:47:04.377399296 +0000
+@@ -0,0 +1,10 @@
++{application, eldap,
++ [{description, "LDAP Client Library"},
++ {vsn, "0.01"},
++ {modules, [
++ eldap,
++ 'ELDAPv3'
++ ]},
++ {registered, []},
++ {applications, [kernel, stdlib]}
++ ]}.
--- /dev/null
+
+Copyright (c) 2010, Torbjorn Tornkvist
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
--- /dev/null
+
+# Top-level build for the eldap library: both targets simply delegate
+# to the makefile in src/.
+#
+# Improvements over the original: declare the targets .PHONY so files
+# named "all"/"clean" can never mask them, and recurse with
+# "$(MAKE) -C" instead of "(cd src;$(MAKE))" so make's jobserver and
+# command-line flags propagate to the sub-make.
+.PHONY: all clean
+
+all:
+	$(MAKE) -C src
+
+clean:
+	$(MAKE) -C src clean
+
--- /dev/null
+Hi,
+
+This is 'eldap', the Erlang LDAP library.
+
+It exports an API that can do all possible operations
+you may want to do against an LDAP server. The code has
+been tested to work at some point, but only the bind
+and search operations are running daily in our products,
+so there may be bugs lurking in some parts of the code.
+
+To use eldap just for authentication, do as in the following:
+
+ {ok,X} = eldap:open(["ldap.mycorp.com"], []).
+ eldap:simple_bind(X, "uid=tobbe,ou=People,dc=mycorp,dc=com", "passwd").
+
+In the doc/README.example you'll find a trace from an
+Erlang shell session as an example of how to set up a
+connection, authenticate (bind) and perform a search.
+Note that by using the option {ssl, true}, you should
+be able to setup an SSL tunnel (LDAPS) if your Erlang
+system has been configured with SSL.
+
+In the test directory there are some hints and examples
+on how to test the code and how to setup and populate
+an OpenLDAP server. The 'eldap' code has been tested
+against OpenLDAP, IPlanet and ActiveDirectory servers.
+
+If you plan to incorporate this code into your system
+I suggest that you build a server/supervisor harness
+that uses 'eldap' (as we have done in our products).
+
+Good luck !
+/Tobbe
--- /dev/null
+1> {_,S} = eldap:open(["192.168.128.47"], []).
+{ok,<0.30.0>}
+2> eldap:simple_bind(S,"cn=Torbjorn Tornkvist,cn=Users,dc=bluetail,dc=com","qwe123").
+ok
+3> Base = {base, "dc=bluetail,dc=com"}.
+{base,"dc=bluetail,dc=com"}
+4> Scope = {scope, eldap:wholeSubtree()}.
+{scope,wholeSubtree}
+5> Filter = {filter, eldap:equalityMatch("sAMAccountName", "tobbe")}.
+{filter,{equalityMatch,{'AttributeValueAssertion',"sAMAccountName","tobbe"}}}
+6> Search = [Base, Scope, Filter].
+[{base,"dc=bluetail,dc=com"},
+ {scope,wholeSubtree},
+ {filter,{equalityMatch,{'AttributeValueAssertion',"sAMAccountName","tobbe"}}}]
+7> eldap:search(S, Search).
+{ok,{eldap_search_result,[{eldap_entry,
+ "CN=Torbjorn Tornkvist,CN=Users,DC=bluetail,DC=com",
+ [{"manager",
+ ["CN=Tord Larsson,CN=Users,DC=bluetail,DC=com"]},
+ {"memberOf",
+ ["CN=TestGroup2,CN=Users,DC=bluetail,DC=com",
+ "CN=TestGroup,CN=Users,DC=bluetail,DC=com",
+ "CN=Pre-Windows 2000 Compatible Access,CN=Builtin,DC=bluetail,DC=com",
+ "CN=Server Operators,CN=Builtin,DC=bluetail,DC=com"]},
+ {"accountExpires",["0"]},
+ {"adminCount",["1"]},
+ {"badPasswordTime",["127119104851642448"]},
+ {"badPwdCount",["0"]},
+ {"codePage",["0"]},
+ {"cn",["Torbjorn Tornkvist"]},
+ {"company",["Alteon Web Systems"]},
+ {"countryCode",["0"]},
+ {"department",["Bluetail"]},
+ {"displayName",["Torbjorn Tornkvist"]},
+ {"mail",["tobbe@bluetail.com"]},
+ {"givenName",["Torbjorn"]},
+ {"instanceType",["4"]},
+ {"lastLogoff",["0"]},
+ {"lastLogon",["127119109376267104"]},
+ {"logonCount",[...]},
+ {"msNPAllowDialin"|...},
+ {...}|...]}],
+ [["ldap://bluetail.com/CN=Configuration,DC=bluetail,DC=com"]]}}
+8>
--- /dev/null
+This is 'eldap', the Erlang LDAP library.
--- /dev/null
+{application, eldap,
+ [{description, "LDAP Client Library"},
+ {vsn, "0.01"},
+ {modules, [
+ eldap,
+ 'ELDAPv3'
+ ]},
+ {registered, []},
+ {applications, [kernel, stdlib]}
+ ]}.
--- /dev/null
+-ifndef( _ELDAP_HRL ).
+-define( _ELDAP_HRL , 1 ).
+
+%%% Record definitions shared between the eldap client module and its
+%%% callers: search parameters in, search results and entries out.
+
+%%%
+%%% Search input parameters
+%%%
+-record(eldap_search, {
+ base = [], % Baseobject
+ filter = [], % Search conditions
+ scope, % Search scope
+ attributes = [], % Attributes to be returned
+ types_only = false, % Return types+values or types
+ timeout = 0 % Timelimit for search
+ }).
+
+%%%
+%%% Returned search result
+%%%
+-record(eldap_search_result, {
+ entries = [], % List of #eldap_entry{} records
+ referrals = [] % List of referrals
+ }).
+
+%%%
+%%% LDAP entry
+%%%
+-record(eldap_entry, {
+ object_name = "", % The DN for the entry
+ attributes = [] % List of {Attribute, Value} pairs
+ }).
+
+-endif.
--- /dev/null
+-- LDAPv3 ASN.1 specification, taken from RFC 2251
+
+-- Lightweight-Directory-Access-Protocol-V3 DEFINITIONS
+ELDAPv3 DEFINITIONS
+IMPLICIT TAGS ::=
+
+BEGIN
+
+LDAPMessage ::= SEQUENCE {
+ messageID MessageID,
+ protocolOp CHOICE {
+ bindRequest BindRequest,
+ bindResponse BindResponse,
+ unbindRequest UnbindRequest,
+ searchRequest SearchRequest,
+ searchResEntry SearchResultEntry,
+ searchResDone SearchResultDone,
+ searchResRef SearchResultReference,
+ modifyRequest ModifyRequest,
+ modifyResponse ModifyResponse,
+ addRequest AddRequest,
+ addResponse AddResponse,
+ delRequest DelRequest,
+ delResponse DelResponse,
+ modDNRequest ModifyDNRequest,
+ modDNResponse ModifyDNResponse,
+ compareRequest CompareRequest,
+ compareResponse CompareResponse,
+ abandonRequest AbandonRequest,
+ extendedReq ExtendedRequest,
+ extendedResp ExtendedResponse },
+ controls [0] Controls OPTIONAL }
+
+MessageID ::= INTEGER (0 .. maxInt)
+
+maxInt INTEGER ::= 2147483647 -- (2^^31 - 1) --
+
+LDAPString ::= OCTET STRING
+
+LDAPOID ::= OCTET STRING
+
+LDAPDN ::= LDAPString
+
+RelativeLDAPDN ::= LDAPString
+
+AttributeType ::= LDAPString
+
+AttributeDescription ::= LDAPString
+
+
+
+
+-- Wahl, et. al. Standards Track [Page 44]
+-- \f
+-- RFC 2251 LDAPv3 December 1997
+
+
+AttributeDescriptionList ::= SEQUENCE OF
+ AttributeDescription
+
+AttributeValue ::= OCTET STRING
+
+AttributeValueAssertion ::= SEQUENCE {
+ attributeDesc AttributeDescription,
+ assertionValue AssertionValue }
+
+AssertionValue ::= OCTET STRING
+
+Attribute ::= SEQUENCE {
+ type AttributeDescription,
+ vals SET OF AttributeValue }
+
+MatchingRuleId ::= LDAPString
+
+LDAPResult ::= SEQUENCE {
+ resultCode ENUMERATED {
+ success (0),
+ operationsError (1),
+ protocolError (2),
+ timeLimitExceeded (3),
+ sizeLimitExceeded (4),
+ compareFalse (5),
+ compareTrue (6),
+ authMethodNotSupported (7),
+ strongAuthRequired (8),
+ -- 9 reserved --
+ referral (10), -- new
+ adminLimitExceeded (11), -- new
+ unavailableCriticalExtension (12), -- new
+ confidentialityRequired (13), -- new
+ saslBindInProgress (14), -- new
+ noSuchAttribute (16),
+ undefinedAttributeType (17),
+ inappropriateMatching (18),
+ constraintViolation (19),
+ attributeOrValueExists (20),
+ invalidAttributeSyntax (21),
+ -- 22-31 unused --
+ noSuchObject (32),
+ aliasProblem (33),
+ invalidDNSyntax (34),
+ -- 35 reserved for undefined isLeaf --
+ aliasDereferencingProblem (36),
+ -- 37-47 unused --
+ inappropriateAuthentication (48),
+
+-- Wahl, et. al. Standards Track [Page 45]
+-- \f
+-- RFC 2251 LDAPv3 December 1997
+
+
+ invalidCredentials (49),
+ insufficientAccessRights (50),
+ busy (51),
+ unavailable (52),
+ unwillingToPerform (53),
+ loopDetect (54),
+ -- 55-63 unused --
+ namingViolation (64),
+ objectClassViolation (65),
+ notAllowedOnNonLeaf (66),
+ notAllowedOnRDN (67),
+ entryAlreadyExists (68),
+ objectClassModsProhibited (69),
+ -- 70 reserved for CLDAP --
+ affectsMultipleDSAs (71), -- new
+ -- 72-79 unused --
+ other (80) },
+ -- 81-90 reserved for APIs --
+ matchedDN LDAPDN,
+ errorMessage LDAPString,
+ referral [3] Referral OPTIONAL }
+
+Referral ::= SEQUENCE OF LDAPURL
+
+LDAPURL ::= LDAPString -- limited to characters permitted in URLs
+
+Controls ::= SEQUENCE OF Control
+
+Control ::= SEQUENCE {
+ controlType LDAPOID,
+ criticality BOOLEAN DEFAULT FALSE,
+ controlValue OCTET STRING OPTIONAL }
+
+BindRequest ::= [APPLICATION 0] SEQUENCE {
+ version INTEGER (1 .. 127),
+ name LDAPDN,
+ authentication AuthenticationChoice }
+
+AuthenticationChoice ::= CHOICE {
+ simple [0] OCTET STRING,
+ -- 1 and 2 reserved
+ sasl [3] SaslCredentials }
+
+SaslCredentials ::= SEQUENCE {
+ mechanism LDAPString,
+ credentials OCTET STRING OPTIONAL }
+
+BindResponse ::= [APPLICATION 1] SEQUENCE {
+
+-- Wahl, et. al. Standards Track [Page 46]
+-- \f
+-- RFC 2251 LDAPv3 December 1997
+
+
+ COMPONENTS OF LDAPResult,
+ serverSaslCreds [7] OCTET STRING OPTIONAL }
+
+UnbindRequest ::= [APPLICATION 2] NULL
+
+SearchRequest ::= [APPLICATION 3] SEQUENCE {
+ baseObject LDAPDN,
+ scope ENUMERATED {
+ baseObject (0),
+ singleLevel (1),
+ wholeSubtree (2) },
+ derefAliases ENUMERATED {
+ neverDerefAliases (0),
+ derefInSearching (1),
+ derefFindingBaseObj (2),
+ derefAlways (3) },
+ sizeLimit INTEGER (0 .. maxInt),
+ timeLimit INTEGER (0 .. maxInt),
+ typesOnly BOOLEAN,
+ filter Filter,
+ attributes AttributeDescriptionList }
+
+Filter ::= CHOICE {
+ and [0] SET OF Filter,
+ or [1] SET OF Filter,
+ not [2] Filter,
+ equalityMatch [3] AttributeValueAssertion,
+ substrings [4] SubstringFilter,
+ greaterOrEqual [5] AttributeValueAssertion,
+ lessOrEqual [6] AttributeValueAssertion,
+ present [7] AttributeDescription,
+ approxMatch [8] AttributeValueAssertion,
+ extensibleMatch [9] MatchingRuleAssertion }
+
+SubstringFilter ::= SEQUENCE {
+ type AttributeDescription,
+ -- at least one must be present
+ substrings SEQUENCE OF CHOICE {
+ initial [0] LDAPString,
+ any [1] LDAPString,
+ final [2] LDAPString } }
+
+MatchingRuleAssertion ::= SEQUENCE {
+ matchingRule [1] MatchingRuleId OPTIONAL,
+ type [2] AttributeDescription OPTIONAL,
+ matchValue [3] AssertionValue,
+ dnAttributes [4] BOOLEAN DEFAULT FALSE }
+
+-- Wahl, et. al. Standards Track [Page 47]
+-- \f
+-- RFC 2251 LDAPv3 December 1997
+
+SearchResultEntry ::= [APPLICATION 4] SEQUENCE {
+ objectName LDAPDN,
+ attributes PartialAttributeList }
+
+PartialAttributeList ::= SEQUENCE OF SEQUENCE {
+ type AttributeDescription,
+ vals SET OF AttributeValue }
+
+SearchResultReference ::= [APPLICATION 19] SEQUENCE OF LDAPURL
+
+SearchResultDone ::= [APPLICATION 5] LDAPResult
+
+ModifyRequest ::= [APPLICATION 6] SEQUENCE {
+ object LDAPDN,
+ modification SEQUENCE OF SEQUENCE {
+ operation ENUMERATED {
+ add (0),
+ delete (1),
+ replace (2) },
+ modification AttributeTypeAndValues } }
+
+AttributeTypeAndValues ::= SEQUENCE {
+ type AttributeDescription,
+ vals SET OF AttributeValue }
+
+ModifyResponse ::= [APPLICATION 7] LDAPResult
+
+AddRequest ::= [APPLICATION 8] SEQUENCE {
+ entry LDAPDN,
+ attributes AttributeList }
+
+AttributeList ::= SEQUENCE OF SEQUENCE {
+ type AttributeDescription,
+ vals SET OF AttributeValue }
+
+AddResponse ::= [APPLICATION 9] LDAPResult
+
+DelRequest ::= [APPLICATION 10] LDAPDN
+
+DelResponse ::= [APPLICATION 11] LDAPResult
+
+ModifyDNRequest ::= [APPLICATION 12] SEQUENCE {
+ entry LDAPDN,
+ newrdn RelativeLDAPDN,
+ deleteoldrdn BOOLEAN,
+ newSuperior [0] LDAPDN OPTIONAL }
+
+ModifyDNResponse ::= [APPLICATION 13] LDAPResult
+
+-- Wahl, et. al. Standards Track [Page 48]
+-- \f
+-- RFC 2251 LDAPv3 December 1997
+
+
+CompareRequest ::= [APPLICATION 14] SEQUENCE {
+ entry LDAPDN,
+ ava AttributeValueAssertion }
+
+CompareResponse ::= [APPLICATION 15] LDAPResult
+
+AbandonRequest ::= [APPLICATION 16] MessageID
+
+ExtendedRequest ::= [APPLICATION 23] SEQUENCE {
+ requestName [0] LDAPOID,
+ requestValue [1] OCTET STRING OPTIONAL }
+
+ExtendedResponse ::= [APPLICATION 24] SEQUENCE {
+ COMPONENTS OF LDAPResult,
+ responseName [10] LDAPOID OPTIONAL,
+ response [11] OCTET STRING OPTIONAL }
+
+END
+
+
--- /dev/null
+
+# Build the eldap application: first generate and compile the ASN.1
+# module, then compile the hand-written Erlang sources into ../ebin.
+ERLC = erlc
+EBIN_DIR = ../ebin
+ERLC_FLAGS += -I ./src -I ../include +debug_info
+# NOTE(review): eldap_fsm.erl is not present in this source snapshot;
+# confirm it exists before relying on this object list.
+ERL_OBJECTS := ${EBIN_DIR}/eldap.beam ${EBIN_DIR}/ELDAPv3.beam ${EBIN_DIR}/eldap_fsm.beam
+
+.SUFFIXES: .asn .erl .beam
+
+# Compile a .erl file from this directory into EBIN_DIR.
+$(EBIN_DIR)/%.beam: %.erl
+	$(ERLC) $(ERLC_FLAGS) -o $(EBIN_DIR) $<
+
+.PHONY: all
+all: asn $(ERL_OBJECTS)
+
+.PHONY: asn
+asn: ELDAPv3.erl ../ebin/ELDAPv3.beam
+
+# erlc on the .asn spec runs the ASN.1 compiler, which emits
+# ELDAPv3.erl (plus the .hrl/.asn1db side files removed by clean) and a
+# compiled ELDAPv3.beam; the beam is moved into place here.
+ELDAPv3.erl: ELDAPv3.asn
+	${ERLC} ELDAPv3.asn
+	mv ELDAPv3.beam ${EBIN_DIR}
+
+.PHONY: clean
+clean:
+	-rm $(ERL_OBJECTS) ELDAPv3.erl ELDAPv3.asn1db ELDAPv3.hrl
+
+
--- /dev/null
+-module(eldap).
+%%% --------------------------------------------------------------------
+%%% Created: 12 Oct 2000 by Tobbe <tnt@home.se>
+%%% Function: Erlang client LDAP implementation according RFC 2251,2253
+%%% and 2255. The interface is based on RFC 1823, and
+%%% draft-ietf-asid-ldap-c-api-00.txt
+%%%
+%%% Copyright (c) 2010 Torbjorn Tornkvist
+%%% See MIT-LICENSE at the top dir for licensing information.
+%%% --------------------------------------------------------------------
+-vc('$Id$ ').
+-export([open/1,open/2,simple_bind/3,controlling_process/2,
+ baseObject/0,singleLevel/0,wholeSubtree/0,close/1,
+ equalityMatch/2,greaterOrEqual/2,lessOrEqual/2,
+ approxMatch/2,search/2,substrings/2,present/1,
+ 'and'/1,'or'/1,'not'/1,modify/3, mod_add/2, mod_delete/2,
+ mod_replace/2, add/3, delete/2, modify_dn/5,parse_dn/1,
+ parse_ldap_url/1]).
+
+-import(lists,[concat/1]).
+
+-include("ELDAPv3.hrl").
+-include("eldap.hrl").
+
%% Protocol constants.
-define(LDAP_VERSION, 3).
-define(LDAP_PORT, 389).    % default plain-TCP LDAP port
-define(LDAPS_PORT, 636).   % default LDAP-over-SSL port

%% Per-connection state, threaded through the worker process loop.
-record(eldap, {version = ?LDAP_VERSION,
		host,                % Host running LDAP server
		port = ?LDAP_PORT,   % The LDAP server port
		fd,                  % Socket filedescriptor.
		binddn = "",         % Name of the entry to bind as
		passwd,              % Password for (above) entry
		id = 0,              % LDAP Request ID
		log,                 % User provided log function
		timeout = infinity,  % Request timeout
		anon_auth = false,   % Allow anonymous authentication
		use_tls = false      % LDAP/LDAPS
	       }).

%%% For debug purposes
%%-define(PRINT(S, A), io:fwrite("~w(~w): " ++ S, [?MODULE,?LINE|A])).
-define(PRINT(S, A), true).

%% Info message through error_logger, tagged with module and line.
-define(elog(S, A), error_logger:info_msg("~w(~w): "++S,[?MODULE,?LINE|A])).
+
+%%% ====================================================================
+%%% Exported interface
+%%% ====================================================================
+
+%%% --------------------------------------------------------------------
+%%% open(Hosts [,Opts] )
+%%% --------------------
+%%% Setup a connection to on of the Hosts in the argument
+%%% list. Stop at the first successful connection attempt.
+%%% Valid Opts are: Where:
+%%%
+%%% {port, Port} - Port is the port number
+%%% {log, F} - F(LogLevel, FormatString, ListOfArgs)
+%%% {timeout, milliSec} - request timeout
+%%%
+%%% --------------------------------------------------------------------
+open(Hosts) ->
+ open(Hosts, []).
+
+open(Hosts, Opts) when list(Hosts), list(Opts) ->
+ Self = self(),
+ Pid = spawn_link(fun() -> init(Hosts, Opts, Self) end),
+ recv(Pid).
+
+%%% --------------------------------------------------------------------
+%%% Shutdown connection (and process) asynchronous.
+%%% --------------------------------------------------------------------
+
+close(Handle) when pid(Handle) ->
+ send(Handle, close).
+
+%%% --------------------------------------------------------------------
+%%% Set who we should link ourselves to
+%%% --------------------------------------------------------------------
+
+controlling_process(Handle, Pid) when pid(Handle),pid(Pid) ->
+ link(Pid),
+ send(Handle, {cnt_proc, Pid}),
+ recv(Handle).
+
+%%% --------------------------------------------------------------------
+%%% Authenticate ourselves to the Directory
+%%% using simple authentication.
+%%%
+%%% Dn - The name of the entry to bind as
+%%% Passwd - The password to be used
+%%%
+%%% Returns: ok | {error, Error}
+%%% --------------------------------------------------------------------
+simple_bind(Handle, Dn, Passwd) when pid(Handle) ->
+ send(Handle, {simple_bind, Dn, Passwd}),
+ recv(Handle).
+
+%%% --------------------------------------------------------------------
+%%% Add an entry. The entry field MUST NOT exist for the AddRequest
+%%% to succeed. The parent of the entry MUST exist.
+%%% Example:
+%%%
+%%% add(Handle,
+%%% "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+%%% [{"objectclass", ["person"]},
+%%% {"cn", ["Bill Valentine"]},
+%%% {"sn", ["Valentine"]},
+%%% {"telephoneNumber", ["545 555 00"]}]
+%%% )
+%%% --------------------------------------------------------------------
+add(Handle, Entry, Attributes) when pid(Handle),list(Entry),list(Attributes) ->
+ send(Handle, {add, Entry, add_attrs(Attributes)}),
+ recv(Handle).
+
%%% Sanity check: each attribute must be a {Type, Values} pair with
%%% both elements being lists. The tuples are tagged with
%%% 'AddRequest_attributes' — presumably the shape the generated ASN.1
%%% encoder expects (the original author was unsure too).
%%% Throws {error, attribute_values} on malformed input.
add_attrs(Attrs) ->
    F = fun({Type, Vals}) when is_list(Type), is_list(Vals) ->
		{'AddRequest_attributes', Type, Vals}
	end,
    try
	lists:map(F, Attrs)
    catch
	error:_ -> throw({error, attribute_values})
    end.
+
+%%% --------------------------------------------------------------------
+%%% Delete an entry. The entry consists of the DN of
+%%% the entry to be deleted.
+%%% Example:
+%%%
+%%% delete(Handle,
+%%% "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com"
+%%% )
+%%% --------------------------------------------------------------------
+delete(Handle, Entry) when pid(Handle), list(Entry) ->
+ send(Handle, {delete, Entry}),
+ recv(Handle).
+
+%%% --------------------------------------------------------------------
+%%% Modify an entry. Given an entry a number of modification
+%%% operations can be performed as one atomic operation.
+%%% Example:
+%%%
+%%% modify(Handle,
+%%% "cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+%%% [replace("telephoneNumber", ["555 555 00"]),
+%%% add("description", ["LDAP hacker"])]
+%%% )
+%%% --------------------------------------------------------------------
+modify(Handle, Object, Mods) when pid(Handle), list(Object), list(Mods) ->
+ send(Handle, {modify, Object, Mods}),
+ recv(Handle).
+
+%%%
+%%% Modification operations.
+%%% Example:
+%%% replace("telephoneNumber", ["555 555 00"])
+%%%
+mod_add(Type, Values) when list(Type), list(Values) -> m(add, Type, Values).
+mod_delete(Type, Values) when list(Type), list(Values) -> m(delete, Type, Values).
+mod_replace(Type, Values) when list(Type), list(Values) -> m(replace, Type, Values).
+
+m(Operation, Type, Values) ->
+ #'ModifyRequest_modification_SEQOF'{
+ operation = Operation,
+ modification = #'AttributeTypeAndValues'{
+ type = Type,
+ vals = Values}}.
+
+%%% --------------------------------------------------------------------
+%%% Modify an entry. Given an entry a number of modification
+%%% operations can be performed as one atomic operation.
+%%% Example:
+%%%
+%%% modify_dn(Handle,
+%%% "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+%%% "cn=Ben Emerson",
+%%% true,
+%%% ""
+%%% )
+%%% --------------------------------------------------------------------
+modify_dn(Handle, Entry, NewRDN, DelOldRDN, NewSup)
+ when pid(Handle),list(Entry),list(NewRDN),atom(DelOldRDN),list(NewSup) ->
+ send(Handle, {modify_dn, Entry, NewRDN,
+ bool_p(DelOldRDN), optional(NewSup)}),
+ recv(Handle).
+
%%% Sanity checks !

%% Only the two boolean atoms pass; anything else crashes the caller
%% with a function_clause, which is the intended sanity check.
bool_p(true)  -> true;
bool_p(false) -> false.

%% An empty string means "value not supplied" in the ASN.1 encoding.
optional([])    -> asn1_NOVALUE;
optional(Value) -> Value.
+
+%%% --------------------------------------------------------------------
+%%% Synchronous search of the Directory returning a
+%%% requested set of attributes.
+%%%
+%%% Example:
+%%%
+%%% Filter = eldap:substrings("sn", [{any,"o"}]),
+%%% eldap:search(S, [{base, "dc=bluetail, dc=com"},
+%%% {filter, Filter},
+%%% {attributes,["cn"]}])),
+%%%
+%%% Returned result: {ok, #eldap_search_result{}}
+%%%
+%%% Example:
+%%%
+%%% {ok,{eldap_search_result,
+%%% [{eldap_entry,
+%%% "cn=Magnus Froberg, dc=bluetail, dc=com",
+%%% [{"cn",["Magnus Froberg"]}]},
+%%% {eldap_entry,
+%%% "cn=Torbjorn Tornkvist, dc=bluetail, dc=com",
+%%% [{"cn",["Torbjorn Tornkvist"]}]}],
+%%% []}}
+%%%
+%%% --------------------------------------------------------------------
+search(Handle, A) when pid(Handle), record(A, eldap_search) ->
+ call_search(Handle, A);
+search(Handle, L) when pid(Handle), list(L) ->
+ case catch parse_search_args(L) of
+ {error, Emsg} -> {error, Emsg};
+ A when record(A, eldap_search) -> call_search(Handle, A)
+ end.
+
+call_search(Handle, A) ->
+ send(Handle, {search, A}),
+ recv(Handle).
+
%% Turn a search-option proplist into an #eldap_search{} record.
%% Unknown options are reported via throw({error,{unknown_arg,H}}).
parse_search_args(Args) ->
    parse_search_args(Args, #eldap_search{scope = wholeSubtree}).

parse_search_args([{base, Base}|T], A) ->
    parse_search_args(T, A#eldap_search{base = Base});
parse_search_args([{filter, Filter}|T], A) ->
    parse_search_args(T, A#eldap_search{filter = Filter});
parse_search_args([{scope, Scope}|T], A) ->
    parse_search_args(T, A#eldap_search{scope = Scope});
parse_search_args([{attributes, Attrs}|T], A) ->
    parse_search_args(T, A#eldap_search{attributes = Attrs});
parse_search_args([{types_only, TypesOnly}|T], A) ->
    parse_search_args(T, A#eldap_search{types_only = TypesOnly});
parse_search_args([{timeout, Timeout}|T], A) when is_integer(Timeout) ->
    parse_search_args(T, A#eldap_search{timeout = Timeout});
parse_search_args([H|_], _) ->
    throw({error, {unknown_arg, H}});
parse_search_args([], A) ->
    A.
+
+%%%
+%%% The Scope parameter
+%%%
+baseObject() -> baseObject.
+singleLevel() -> singleLevel.
+wholeSubtree() -> wholeSubtree.
+
+%%%
+%%% Boolean filter operations
+%%%
+'and'(ListOfFilters) when list(ListOfFilters) -> {'and',ListOfFilters}.
+'or'(ListOfFilters) when list(ListOfFilters) -> {'or', ListOfFilters}.
+'not'(Filter) when tuple(Filter) -> {'not',Filter}.
+
+%%%
+%%% The following Filter parameters consist of an attribute
+%%% and an attribute value. Example: F("uid","tobbe")
+%%%
+equalityMatch(Desc, Value) -> {equalityMatch, av_assert(Desc, Value)}.
+greaterOrEqual(Desc, Value) -> {greaterOrEqual, av_assert(Desc, Value)}.
+lessOrEqual(Desc, Value) -> {lessOrEqual, av_assert(Desc, Value)}.
+approxMatch(Desc, Value) -> {approxMatch, av_assert(Desc, Value)}.
+
+av_assert(Desc, Value) ->
+ #'AttributeValueAssertion'{attributeDesc = Desc,
+ assertionValue = Value}.
+
+%%%
+%%% Filter to check for the presence of an attribute
+%%%
+present(Attribute) when list(Attribute) ->
+ {present, Attribute}.
+
+
+%%%
+%%% A substring filter seem to be based on a pattern:
+%%%
+%%% InitValue*AnyValue*FinalValue
+%%%
+%%% where all three parts seem to be optional (at least when
+%%% talking with an OpenLDAP server). Thus, the arguments
+%%% to substrings/2 looks like this:
+%%%
+%%% Type ::= string( <attribute> )
+%%% SubStr ::= listof( {initial,Value} | {any,Value}, {final,Value})
+%%%
+%%% Example: substrings("sn",[{initial,"To"},{any,"kv"},{final,"st"}])
+%%% will match entries containing: 'sn: Tornkvist'
+%%%
+substrings(Type, SubStr) when list(Type), list(SubStr) ->
+ Ss = {'SubstringFilter_substrings',v_substr(SubStr)},
+ {substrings,#'SubstringFilter'{type = Type,
+ substrings = Ss}}.
+
+%%% --------------------------------------------------------------------
+%%% Worker process. We keep track of a controlling process to
+%%% be able to terminate together with it.
+%%% --------------------------------------------------------------------
+
+init(Hosts, Opts, Cpid) ->
+ Data = parse_args(Opts, Cpid, #eldap{}),
+ case try_connect(Hosts, Data) of
+ {ok,Data2} ->
+ send(Cpid, {ok,self()}),
+ put(req_timeout, Data#eldap.timeout), % kludge...
+ loop(Cpid, Data2);
+ Else ->
+ send(Cpid, Else),
+ unlink(Cpid),
+ exit(Else)
+ end.
+
%% Fold the user options into the #eldap{} state. Unknown options are
%% reported back to the client and abort the worker.
parse_args([{port, Port}|T], Cpid, Data) when is_integer(Port) ->
    parse_args(T, Cpid, Data#eldap{port = Port});
parse_args([{timeout, Timeout}|T], Cpid, Data) when is_integer(Timeout), Timeout > 0 ->
    parse_args(T, Cpid, Data#eldap{timeout = Timeout});
parse_args([{anon_auth, true}|T], Cpid, Data) ->
    %% BUG FIX: this clause used to set anon_auth = false, which made
    %% the {anon_auth, true} option impossible to enable.
    parse_args(T, Cpid, Data#eldap{anon_auth = true});
parse_args([{anon_auth, _}|T], Cpid, Data) ->
    parse_args(T, Cpid, Data);
parse_args([{ssl, true}|T], Cpid, Data) ->
    parse_args(T, Cpid, Data#eldap{use_tls = true});
parse_args([{ssl, _}|T], Cpid, Data) ->
    parse_args(T, Cpid, Data);
parse_args([{log, F}|T], Cpid, Data) when is_function(F) ->
    parse_args(T, Cpid, Data#eldap{log = F});
parse_args([{log, _}|T], Cpid, Data) ->
    parse_args(T, Cpid, Data);
parse_args([H|_], Cpid, _) ->
    send(Cpid, {error, {wrong_option, H}}),
    exit(wrong_option);
parse_args([], _, Data) ->
    Data.
+
%%% Try to connect to the hosts in the listed order,
%%% and stop with the first one to which a successful
%%% connection is made.

try_connect([Host|Hosts], Data) ->
    %% {packet, asn1}: the runtime delivers one complete BER message
    %% per recv; {active, false}: we read explicitly in recv_response/2.
    TcpOpts = [{packet, asn1}, {active,false}],
    case do_connect(Host, Data, TcpOpts) of
	{ok,Fd} -> {ok,Data#eldap{host = Host, fd = Fd}};
	_ -> try_connect(Hosts, Data)
    end;
try_connect([],_) ->
    {error,"connect failed"}.
+
%% Open the transport: plain TCP or SSL depending on the use_tls flag.
%% FIX: the SSL branch previously ignored the configured connect
%% timeout; it is now passed to ssl:connect/4 like the TCP branch.
%% NOTE(review): {verify,0} is a legacy ssl option — confirm against
%% the target OTP release.
do_connect(Host, Data, Opts) when Data#eldap.use_tls == false ->
    gen_tcp:connect(Host, Data#eldap.port, Opts, Data#eldap.timeout);
do_connect(Host, Data, Opts) when Data#eldap.use_tls == true ->
    ssl:connect(Host, Data#eldap.port, [{verify,0}|Opts], Data#eldap.timeout).
+
+
%% Main receive loop of the connection worker. Cpid is the controlling
%% (client) process; Data is the #eldap{} state. Each request is served
%% synchronously and the possibly-updated state carried to the next
%% iteration.
loop(Cpid, Data) ->
    receive

	{From, {search, A}} ->
	    {Res,NewData} = do_search(Data, A),
	    send(From,Res),
	    loop(Cpid, NewData);

	{From, {modify, Obj, Mod}} ->
	    {Res,NewData} = do_modify(Data, Obj, Mod),
	    send(From,Res),
	    loop(Cpid, NewData);

	{From, {modify_dn, Obj, NewRDN, DelOldRDN, NewSup}} ->
	    {Res,NewData} = do_modify_dn(Data, Obj, NewRDN, DelOldRDN, NewSup),
	    send(From,Res),
	    loop(Cpid, NewData);

	{From, {add, Entry, Attrs}} ->
	    {Res,NewData} = do_add(Data, Entry, Attrs),
	    send(From,Res),
	    loop(Cpid, NewData);

	{From, {delete, Entry}} ->
	    {Res,NewData} = do_delete(Data, Entry),
	    send(From,Res),
	    loop(Cpid, NewData);

	{From, {simple_bind, Dn, Passwd}} ->
	    {Res,NewData} = do_simple_bind(Data, Dn, Passwd),
	    send(From,Res),
	    loop(Cpid, NewData);

	%% Hand over to a new controlling process: unlink the old one
	%% (the new one was already linked in controlling_process/2).
	{From, {cnt_proc, NewCpid}} ->
	    unlink(Cpid),
	    send(From,ok),
	    ?PRINT("New Cpid is: ~p~n",[NewCpid]),
	    loop(NewCpid, Data);

	%% Asynchronous close: unlink first so our exit does not take
	%% the client down with us.
	{From, close} ->
	    unlink(Cpid),
	    exit(closed);

	%% Controlling process died; follow it down.
	{Cpid, 'EXIT', Reason} ->
	    ?PRINT("Got EXIT from Cpid, reason=~p~n",[Reason]),
	    exit(Reason);

	%% Drain anything unexpected so the mailbox cannot grow forever.
	_XX ->
	    ?PRINT("loop got: ~p~n",[_XX]),
	    loop(Cpid, Data)

    end.
+
+%%% --------------------------------------------------------------------
+%%% bindRequest
+%%% --------------------------------------------------------------------
+
+%%% Authenticate ourselves to the directory using
+%%% simple authentication.
+
+do_simple_bind(Data, anon, anon) -> %% For testing
+ do_the_simple_bind(Data, "", "");
+do_simple_bind(Data, Dn, _Passwd) when Dn=="",Data#eldap.anon_auth==false ->
+ {{error,anonymous_auth},Data};
+do_simple_bind(Data, _Dn, Passwd) when Passwd=="",Data#eldap.anon_auth==false ->
+ {{error,anonymous_auth},Data};
+do_simple_bind(Data, Dn, Passwd) ->
+ do_the_simple_bind(Data, Dn, Passwd).
+
%% Perform the bind with a bumped message id. On any failure the OLD
%% state is kept, so the connection can be retried with other
%% credentials.
do_the_simple_bind(Data, Dn, Passwd) ->
    case catch exec_simple_bind(Data#eldap{binddn = Dn,
					   passwd = Passwd,
					   id     = bump_id(Data)}) of
	{ok,NewData} -> {ok,NewData};
	{error,Emsg} -> {{error,Emsg},Data};
	Else -> {{error,Else},Data}
    end.
+
%% Encode and send the BindRequest, then interpret the reply.
exec_simple_bind(Data) ->
    Req = #'BindRequest'{version        = Data#eldap.version,
			 name           = Data#eldap.binddn,
			 authentication = {simple, Data#eldap.passwd}},
    log2(Data, "bind request = ~p~n", [Req]),
    Reply = request(Data#eldap.fd, Data, Data#eldap.id, {bindRequest, Req}),
    log2(Data, "bind reply = ~p~n", [Reply]),
    exec_simple_bind_reply(Data, Reply).

%% Accept only a bindResponse carrying our own message id; everything
%% else is turned into {error, ...}.
exec_simple_bind_reply(Data, {ok,Msg}) when
  Msg#'LDAPMessage'.messageID == Data#eldap.id ->
    case Msg#'LDAPMessage'.protocolOp of
	{bindResponse, Result} ->
	    case Result#'BindResponse'.resultCode of
		success -> {ok,Data};
		Error   -> {error, Error}
	    end;
	Other -> {error, Other}
    end;
exec_simple_bind_reply(_, Error) ->
    {error, Error}.
+
+
+%%% --------------------------------------------------------------------
+%%% searchRequest
+%%% --------------------------------------------------------------------
+
+do_search(Data, A) ->
+ case catch do_search_0(Data, A) of
+ {error,Emsg} -> {ldap_closed_p(Data, Emsg),Data};
+ {'EXIT',Error} -> {ldap_closed_p(Data, Error),Data};
+ {ok,Res,Ref,NewData} -> {{ok,polish(Res, Ref)},NewData};
+ Else -> {ldap_closed_p(Data, Else),Data}
+ end.
+
+%%%
+%%% Polish the returned search result
+%%%
+
+polish(Res, Ref) ->
+ R = polish_result(Res),
+ %%% No special treatment of referrals at the moment.
+ #eldap_search_result{entries = R,
+ referrals = Ref}.
+
+polish_result([H|T]) when record(H, 'SearchResultEntry') ->
+ ObjectName = H#'SearchResultEntry'.objectName,
+ F = fun({_,A,V}) -> {A,V} end,
+ Attrs = lists:map(F, H#'SearchResultEntry'.attributes),
+ [#eldap_entry{object_name = ObjectName,
+ attributes = Attrs}|
+ polish_result(T)];
+polish_result([]) ->
+ [].
+
%% Build the SearchRequest from the user's #eldap_search{}, validating
%% each field on the way, then collect the streamed responses.
do_search_0(Data, A) ->
    Req = #'SearchRequest'{baseObject   = A#eldap_search.base,
			   scope        = v_scope(A#eldap_search.scope),
			   derefAliases = neverDerefAliases,
			   sizeLimit    = 0, % no size limit
			   timeLimit    = v_timeout(A#eldap_search.timeout),
			   typesOnly    = v_bool(A#eldap_search.types_only),
			   filter       = v_filter(A#eldap_search.filter),
			   attributes   = v_attributes(A#eldap_search.attributes)
			  },
    Id = bump_id(Data),
    collect_search_responses(Data#eldap{id=Id}, Req, Id).
+
%%% The returned answers come in one packet per entry,
%%% mixed with possible referrals.

%% Send the request, then read packets until searchResDone arrives.
collect_search_responses(Data, Req, ID) ->
    S = Data#eldap.fd,
    log2(Data, "search request = ~p~n", [Req]),
    send_request(S, Data, ID, {searchRequest, Req}),
    Resp = recv_response(S, Data),
    log2(Data, "search reply = ~p~n", [Resp]),
    collect_search_responses(Data, S, ID, Resp, [], []).

%% Accumulate entries in Acc and referrals in Ref (both in reverse
%% arrival order) until the server signals completion.
collect_search_responses(Data, S, ID, {ok,Msg}, Acc, Ref)
  when record(Msg,'LDAPMessage') ->
    case Msg#'LDAPMessage'.protocolOp of
	{'searchResDone',R} when R#'LDAPResult'.resultCode == success ->
	    log2(Data, "search reply = searchResDone ~n", []),
	    {ok,Acc,Ref,Data};
	{'searchResEntry',R} when record(R,'SearchResultEntry') ->
	    Resp = recv_response(S, Data),
	    log2(Data, "search reply = ~p~n", [Resp]),
	    collect_search_responses(Data, S, ID, Resp, [R|Acc], Ref);
	{'searchResRef',R} ->
	    %% At the moment we don't do anything sensible here since
	    %% I haven't been able to trigger the server to generate
	    %% a response like this.
	    Resp = recv_response(S, Data),
	    log2(Data, "search reply = ~p~n", [Resp]),
	    collect_search_responses(Data, S, ID, Resp, Acc, [R|Ref]);
	Else ->
	    throw({error,Else})
    end;
collect_search_responses(_, _, _, Else, _, _) ->
    throw({error,Else}).
+
+%%% --------------------------------------------------------------------
+%%% addRequest
+%%% --------------------------------------------------------------------
+
+do_add(Data, Entry, Attrs) ->
+ case catch do_add_0(Data, Entry, Attrs) of
+ {error,Emsg} -> {ldap_closed_p(Data, Emsg),Data};
+ {'EXIT',Error} -> {ldap_closed_p(Data, Error),Data};
+ {ok,NewData} -> {ok,NewData};
+ Else -> {ldap_closed_p(Data, Else),Data}
+ end.
+
+do_add_0(Data, Entry, Attrs) ->
+ Req = #'AddRequest'{entry = Entry,
+ attributes = Attrs},
+ S = Data#eldap.fd,
+ Id = bump_id(Data),
+ log2(Data, "add request = ~p~n", [Req]),
+ Resp = request(S, Data, Id, {addRequest, Req}),
+ log2(Data, "add reply = ~p~n", [Resp]),
+ check_reply(Data#eldap{id = Id}, Resp, addResponse).
+
+
+%%% --------------------------------------------------------------------
+%%% deleteRequest
+%%% --------------------------------------------------------------------
+
+do_delete(Data, Entry) ->
+ case catch do_delete_0(Data, Entry) of
+ {error,Emsg} -> {ldap_closed_p(Data, Emsg),Data};
+ {'EXIT',Error} -> {ldap_closed_p(Data, Error),Data};
+ {ok,NewData} -> {ok,NewData};
+ Else -> {ldap_closed_p(Data, Else),Data}
+ end.
+
+do_delete_0(Data, Entry) ->
+ S = Data#eldap.fd,
+ Id = bump_id(Data),
+ log2(Data, "del request = ~p~n", [Entry]),
+ Resp = request(S, Data, Id, {delRequest, Entry}),
+ log2(Data, "del reply = ~p~n", [Resp]),
+ check_reply(Data#eldap{id = Id}, Resp, delResponse).
+
+
+%%% --------------------------------------------------------------------
+%%% modifyRequest
+%%% --------------------------------------------------------------------
+
+do_modify(Data, Obj, Mod) ->
+ case catch do_modify_0(Data, Obj, Mod) of
+ {error,Emsg} -> {ldap_closed_p(Data, Emsg),Data};
+ {'EXIT',Error} -> {ldap_closed_p(Data, Error),Data};
+ {ok,NewData} -> {ok,NewData};
+ Else -> {ldap_closed_p(Data, Else),Data}
+ end.
+
+do_modify_0(Data, Obj, Mod) ->
+ v_modifications(Mod),
+ Req = #'ModifyRequest'{object = Obj,
+ modification = Mod},
+ S = Data#eldap.fd,
+ Id = bump_id(Data),
+ log2(Data, "modify request = ~p~n", [Req]),
+ Resp = request(S, Data, Id, {modifyRequest, Req}),
+ log2(Data, "modify reply = ~p~n", [Resp]),
+ check_reply(Data#eldap{id = Id}, Resp, modifyResponse).
+
+%%% --------------------------------------------------------------------
+%%% modifyDNRequest
+%%% --------------------------------------------------------------------
+
+do_modify_dn(Data, Entry, NewRDN, DelOldRDN, NewSup) ->
+ case catch do_modify_dn_0(Data, Entry, NewRDN, DelOldRDN, NewSup) of
+ {error,Emsg} -> {ldap_closed_p(Data, Emsg),Data};
+ {'EXIT',Error} -> {ldap_closed_p(Data, Error),Data};
+ {ok,NewData} -> {ok,NewData};
+ Else -> {ldap_closed_p(Data, Else),Data}
+ end.
+
+do_modify_dn_0(Data, Entry, NewRDN, DelOldRDN, NewSup) ->
+ Req = #'ModifyDNRequest'{entry = Entry,
+ newrdn = NewRDN,
+ deleteoldrdn = DelOldRDN,
+ newSuperior = NewSup},
+ S = Data#eldap.fd,
+ Id = bump_id(Data),
+ log2(Data, "modify DN request = ~p~n", [Req]),
+ Resp = request(S, Data, Id, {modDNRequest, Req}),
+ log2(Data, "modify DN reply = ~p~n", [Resp]),
+ check_reply(Data#eldap{id = Id}, Resp, modDNResponse).
+
+%%% --------------------------------------------------------------------
+%%% Send an LDAP request and receive the answer
+%%% --------------------------------------------------------------------
+
+request(S, Data, ID, Request) ->
+ send_request(S, Data, ID, Request),
+ recv_response(S, Data).
+
+send_request(S, Data, ID, Request) ->
+ Message = #'LDAPMessage'{messageID = ID,
+ protocolOp = Request},
+ {ok,Bytes} = asn1rt:encode('ELDAPv3', 'LDAPMessage', Message),
+ case do_send(S, Data, Bytes) of
+ {error,Reason} -> throw({gen_tcp_error,Reason});
+ Else -> Else
+ end.
+
%% Transport shims: dispatch to gen_tcp or ssl based on the use_tls flag.
do_send(S, Data, Bytes) when Data#eldap.use_tls == false ->
    gen_tcp:send(S, Bytes);
do_send(S, Data, Bytes) when Data#eldap.use_tls == true ->
    ssl:send(S, Bytes).

do_recv(S, Data, Len, Timeout) when Data#eldap.use_tls == false ->
    gen_tcp:recv(S, Len, Timeout);
do_recv(S, Data, Len, Timeout) when Data#eldap.use_tls == true ->
    ssl:recv(S, Len, Timeout).
+
%% Read one complete packet ({packet, asn1} framing) and decode it into
%% an 'LDAPMessage'. Every failure is thrown to the caller's catch.
recv_response(S, Data) ->
    Timeout = get(req_timeout), % kludge: stashed in the process dict by init/3
    case do_recv(S, Data, 0, Timeout) of
	{ok, Packet} ->
	    check_tag(Packet),
	    case asn1rt:decode('ELDAPv3', 'LDAPMessage', Packet) of
		{ok,Resp} -> {ok,Resp};
		Error -> throw(Error)
	    end;
	{error,Reason} ->
	    throw({gen_tcp_error, Reason});
	Error ->
	    throw(Error)
    end.
+
%%% Sanity check of received packet
%% Decode just the outer BER tag and length to verify the packet looks
%% well-formed before handing it to the full ASN.1 decoder.
check_tag(Data) ->
    case asn1rt_ber_bin:decode_tag(b2l(Data)) of
	{_Tag, Data1, _Rb} ->
	    case asn1rt_ber_bin:decode_length(b2l(Data1)) of
		{{_Len, _Data2}, _Rb2} -> ok;
		_ -> throw({error,decoded_tag_length})
	    end;
	_ -> throw({error,decoded_tag})
    end.
+
%%% Check for expected kind of reply
%% Accept only the expected operation tag (e.g. addResponse) carrying
%% our message id with a success result code; anything else becomes
%% {error, ...}.
check_reply(Data, {ok,Msg}, Op) when
  Msg#'LDAPMessage'.messageID == Data#eldap.id ->
    case Msg#'LDAPMessage'.protocolOp of
	{Op, Result} ->
	    case Result#'LDAPResult'.resultCode of
		success -> {ok,Data};
		Error   -> {error, Error}
	    end;
	Other -> {error, Other}
    end;
check_reply(_, Error, _) ->
    {error, Error}.
+
+
+%%% --------------------------------------------------------------------
+%%% Verify the input data
+%%% --------------------------------------------------------------------
+
+v_filter({'and',L}) -> {'and',L};
+v_filter({'or', L}) -> {'or',L};
+v_filter({'not',L}) -> {'not',L};
+v_filter({equalityMatch,AV}) -> {equalityMatch,AV};
+v_filter({greaterOrEqual,AV}) -> {greaterOrEqual,AV};
+v_filter({lessOrEqual,AV}) -> {lessOrEqual,AV};
+v_filter({approxMatch,AV}) -> {approxMatch,AV};
+v_filter({present,A}) -> {present,A};
+v_filter({substrings,S}) when record(S,'SubstringFilter') -> {substrings,S};
+v_filter(_Filter) -> throw({error,concat(["unknown filter: ",_Filter])}).
+
%% Check that every modification element carries one of the three legal
%% operations; an illegal one aborts via throw({error,{mod_operation,Op}}).
v_modifications(Mods) ->
    Check = fun({_, Op, _}) ->
		    case lists:member(Op, [add, delete, replace]) of
			true  -> true;
			false -> throw({error, {mod_operation, Op}})
		    end
	    end,
    lists:foreach(Check, Mods).
+
%% Validate the substring specification: each element must be a
%% {initial|any|final, String} pair; anything else aborts via throw.
%% (Guard semantics kept exactly: `,' binds tighter than `;'.)
v_substr([{Key, Str} | Rest]) when is_list(Str), Key == initial; Key == any; Key == final ->
    [{Key, Str} | v_substr(Rest)];
v_substr([Bad | _]) ->
    throw({error, {substring_arg, Bad}});
v_substr([]) ->
    [].
%% Validate the search scope atom; any other value aborts the request.
v_scope(baseObject)   -> baseObject;
v_scope(singleLevel)  -> singleLevel;
v_scope(wholeSubtree) -> wholeSubtree;
v_scope(Scope)        -> throw({error, lists:concat(["unknown scope: ", Scope])}).
+
%% Validate a boolean field; any other value aborts the request.
v_bool(true)  -> true;
v_bool(false) -> false;
v_bool(Bool)  -> throw({error, lists:concat(["not Boolean: ", Bool])}).
+
%% Validate the search time limit: a non-negative integer (seconds).
v_timeout(I) when is_integer(I), I >= 0 -> I;
v_timeout(I) -> throw({error, lists:concat(["timeout not positive integer: ", I])}).
+
%% Validate the requested attribute list: every element must be a
%% string (list); anything else aborts via throw.
v_attributes(Attrs) ->
    Check = fun(Attr) when is_list(Attr) -> Attr;
	       (Attr) -> throw({error, lists:concat(["attribute not String: ", Attr])})
	    end,
    lists:map(Check, Attrs).
+
+
+%%% --------------------------------------------------------------------
+%%% Log routines. Call a user provided log routine F.
+%%% --------------------------------------------------------------------
+
+log1(Data, Str, Args) -> log(Data, Str, Args, 1).
+log2(Data, Str, Args) -> log(Data, Str, Args, 2).
+
+log(Data, Str, Args, Level) when function(Data#eldap.log) ->
+ catch (Data#eldap.log)(Level, Str, Args);
+log(_, _, _, _) ->
+ ok.
+
+
+%%% --------------------------------------------------------------------
+%%% Misc. routines
+%%% --------------------------------------------------------------------
+
+send(To,Msg) -> To ! {self(),Msg}.
+recv(From) -> receive {From,Msg} -> Msg end.
+
%% Distinguish "request failed" from "connection is gone": probe the
%% socket and report ldap_closed when it is dead, otherwise pass the
%% original error message through.
ldap_closed_p(Data, Emsg) when Data#eldap.use_tls == true ->
    %% Check if the SSL socket seems to be alive or not
    case catch ssl:sockname(Data#eldap.fd) of
	{error, _} ->
	    ssl:close(Data#eldap.fd),
	    {error, ldap_closed};
	{ok, _} ->
	    {error, Emsg};
	_ ->
	    %% sockname crashes if the socket pid is not alive
	    {error, ldap_closed}
    end;
ldap_closed_p(Data, Emsg) ->
    %% non-SSL socket: inet:port/1 errors when the socket is closed
    case inet:port(Data#eldap.fd) of
	{error,_} -> {error, ldap_closed};
	_ -> {error,Emsg}
    end.
+
%% Next LDAP message id; the caller stores it back via #eldap{id = ...}.
bump_id(Data) -> Data#eldap.id + 1.
+
+
+%%% --------------------------------------------------------------------
+%%% parse_dn/1 - Implementation of RFC 2253:
+%%%
+%%% "UTF-8 String Representation of Distinguished Names"
+%%%
+%%% Test cases:
+%%%
+%%% The simplest case:
+%%%
+%%% 1> eldap:parse_dn("CN=Steve Kille,O=Isode Limited,C=GB").
+%%% {ok,[[{attribute_type_and_value,"CN","Steve Kille"}],
+%%% [{attribute_type_and_value,"O","Isode Limited"}],
+%%% [{attribute_type_and_value,"C","GB"}]]}
+%%%
+%%% The first RDN is multi-valued:
+%%%
+%%% 2> eldap:parse_dn("OU=Sales+CN=J. Smith,O=Widget Inc.,C=US").
+%%% {ok,[[{attribute_type_and_value,"OU","Sales"},
+%%% {attribute_type_and_value,"CN","J. Smith"}],
+%%% [{attribute_type_and_value,"O","Widget Inc."}],
+%%% [{attribute_type_and_value,"C","US"}]]}
+%%%
+%%% Quoting a comma:
+%%%
+%%% 3> eldap:parse_dn("CN=L. Eagle,O=Sue\\, Grabbit and Runn,C=GB").
+%%% {ok,[[{attribute_type_and_value,"CN","L. Eagle"}],
+%%% [{attribute_type_and_value,"O","Sue\\, Grabbit and Runn"}],
+%%% [{attribute_type_and_value,"C","GB"}]]}
+%%%
+%%% A value contains a carriage return:
+%%%
+%%% 4> eldap:parse_dn("CN=Before
+%%% 4> After,O=Test,C=GB").
+%%% {ok,[[{attribute_type_and_value,"CN","Before\nAfter"}],
+%%% [{attribute_type_and_value,"O","Test"}],
+%%% [{attribute_type_and_value,"C","GB"}]]}
+%%%
+%%% 5> eldap:parse_dn("CN=Before\\0DAfter,O=Test,C=GB").
+%%% {ok,[[{attribute_type_and_value,"CN","Before\\0DAfter"}],
+%%% [{attribute_type_and_value,"O","Test"}],
+%%% [{attribute_type_and_value,"C","GB"}]]}
+%%%
+%%% An RDN in OID form:
+%%%
+%%% 6> eldap:parse_dn("1.3.6.1.4.1.1466.0=#04024869,O=Test,C=GB").
+%%% {ok,[[{attribute_type_and_value,"1.3.6.1.4.1.1466.0","#04024869"}],
+%%% [{attribute_type_and_value,"O","Test"}],
+%%% [{attribute_type_and_value,"C","GB"}]]}
+%%%
+%%%
+%%% --------------------------------------------------------------------
+
%% Parse an RFC 2253 distinguished-name string into a list of RDNs,
%% each RDN being a list of {attribute_type_and_value, Type, Value}.
%% Returns {ok, RDNs} | {parse_error, Why, Rest}.
parse_dn("") ->                          % empty DN string
    {ok,[]};
parse_dn([H|_] = Str) when H=/=$, ->     % 1:st name-component !
    case catch parse_name(Str,[]) of
	{'EXIT',Reason} -> {parse_error,internal_error,Reason};
	Else -> Else
    end.
+
%% Walk the DN, collecting one name component per iteration; the
%% components accumulate in reverse and are flipped at the end.
parse_name("",Acc) ->
    {ok,lists:reverse(Acc)};
parse_name([$,|T],Acc) ->                % N:th name-component !
    parse_name(T,Acc);
parse_name(Str,Acc) ->
    {Rest,NameComponent} = parse_name_component(Str),
    parse_name(Rest,[NameComponent|Acc]).
+
%% One RDN: a '+'-separated sequence of attribute type/value pairs.
parse_name_component(Str) ->
    parse_name_component(Str,[]).

parse_name_component(Str,Acc) ->
    case parse_attribute_type_and_value(Str) of
	{[$+|Rest], ATV} ->
	    %% multi-valued RDN: keep consuming pairs
	    parse_name_component(Rest,[ATV|Acc]);
	{Rest,ATV} ->
	    {Rest,lists:reverse([ATV|Acc])}
    end.
+
%% Parse one "type=value" pair; aborts via the local error/2 throw
%% helper on malformed input.
parse_attribute_type_and_value(Str) ->
    case parse_attribute_type(Str) of
	{Rest,[]} ->
	    error(expecting_attribute_type,Str);
	{Rest,Type} ->
	    Rest2 = parse_equal_sign(Rest),
	    {Rest3,Value} = parse_attribute_value(Rest2),
	    {Rest3,{attribute_type_and_value,Type,Value}}
    end.
+
%%% Character-class guard macros for the RFC 2253 grammar.
%%% NB: inside a guard `,' is AND and `;' separates alternatives (OR),
%%% so each macro expands to a guard *sequence*, not a boolean
%%% expression — they can only be used directly in guard position.
-define(IS_ALPHA(X) , X>=$a,X=<$z;X>=$A,X=<$Z ).
-define(IS_DIGIT(X) , X>=$0,X=<$9 ).
-define(IS_SPECIAL(X) , X==$,;X==$=;X==$+;X==$<;X==$>;X==$#;X==$; ).
-define(IS_QUOTECHAR(X) , X=/=$\\,X=/=$" ).
-define(IS_STRINGCHAR(X) ,
	X=/=$,,X=/=$=,X=/=$+,X=/=$<,X=/=$>,X=/=$#,X=/=$;,?IS_QUOTECHAR(X) ).
-define(IS_HEXCHAR(X) , ?IS_DIGIT(X);X>=$a,X=<$f;X>=$A,X=<$F ).
+
%% attributeType is either a keystring (leading letter) or a dotted OID.
parse_attribute_type([H|T]) when ?IS_ALPHA(H) ->
    %% NB: It must be an error in the RFC in the definition
    %% of 'attributeType', should be: (ALPHA *keychar)
    {Rest,KeyChars} = parse_keychars(T),
    {Rest,[H|KeyChars]};
parse_attribute_type([H|_] = Str) when ?IS_DIGIT(H) ->
    parse_oid(Str);
parse_attribute_type(Str) ->
    error(invalid_attribute_type,Str).
+
+
+
+%%% Is a hexstring !
+parse_attribute_value([$#,X,Y|T]) when ?IS_HEXCHAR(X),?IS_HEXCHAR(Y) ->
+ {Rest,HexString} = parse_hexstring(T),
+ {Rest,[$#,X,Y|HexString]};
+%%% Is a "quotation-sequence" !
+parse_attribute_value([$"|T]) ->
+ {Rest,Quotation} = parse_quotation(T),
+ {Rest,[$"|Quotation]};
+%%% Is a stringchar , pair or Empty !
+parse_attribute_value(Str) ->
+ parse_string(Str).
+
%% Consume pairs of hex digits, stopping at the first non-hex character;
%% returns {Rest, HexDigits}.
parse_hexstring(Str) ->
    parse_hexstring(Str,[]).

parse_hexstring([X,Y|T],Acc) when ?IS_HEXCHAR(X),?IS_HEXCHAR(Y) ->
    parse_hexstring(T,[Y,X|Acc]);
parse_hexstring(T,Acc) ->
    {T,lists:reverse(Acc)}.
+
%% A quoted value: everything up to the closing '"'. Backslash escapes
%% (special chars, '\\', '"', hex pairs) are kept verbatim in the output.
parse_quotation([$"|T]) ->               % an empty: "" is ok !
    {T,[$"]};
parse_quotation(Str) ->
    parse_quotation(Str,[]).

%%% Parse to end of quotation
parse_quotation([$"|T],Acc) ->
    {T,lists:reverse([$"|Acc])};
parse_quotation([X|T],Acc) when ?IS_QUOTECHAR(X) ->
    parse_quotation(T,[X|Acc]);
parse_quotation([$\\,X|T],Acc) when ?IS_SPECIAL(X) ->
    parse_quotation(T,[X,$\\|Acc]);
parse_quotation([$\\,$\\|T],Acc) ->
    parse_quotation(T,[$\\,$\\|Acc]);
parse_quotation([$\\,$"|T],Acc) ->
    parse_quotation(T,[$",$\\|Acc]);
parse_quotation([$\\,X,Y|T],Acc) when ?IS_HEXCHAR(X),?IS_HEXCHAR(Y) ->
    parse_quotation(T,[Y,X,$\\|Acc]);
parse_quotation(T,_) ->
    error(expecting_double_quote_mark,T).
+
%% An unquoted value: stringchars plus backslash-escaped pairs; escape
%% sequences are kept verbatim. Stops at the first character that fits
%% neither category.
parse_string(Str) ->
    parse_string(Str,[]).

parse_string("",Acc) ->
    {"",lists:reverse(Acc)};
parse_string([H|T],Acc) when ?IS_STRINGCHAR(H) ->
    parse_string(T,[H|Acc]);
parse_string([$\\,X|T],Acc) when ?IS_SPECIAL(X) ->     % is a pair !
    parse_string(T,[X,$\\|Acc]);
parse_string([$\\,$\\|T],Acc) ->                       % is a pair !
    parse_string(T,[$\\,$\\|Acc]);
parse_string([$\\,$" |T],Acc) ->                       % is a pair !
    parse_string(T,[$" ,$\\|Acc]);
parse_string([$\\,X,Y|T],Acc) when ?IS_HEXCHAR(X),?IS_HEXCHAR(Y) -> % is a pair!
    parse_string(T,[Y,X,$\\|Acc]);
parse_string(T,Acc) ->
    {T,lists:reverse(Acc)}.
+
%% Require and consume the '=' separating type and value.
parse_equal_sign([$=|T]) -> T;
parse_equal_sign(T) -> error(expecting_equal_sign,T).
+
%% keychar*: letters, digits and '-'; stops at anything else.
parse_keychars(Str) -> parse_keychars(Str,[]).

parse_keychars([H|T],Acc) when ?IS_ALPHA(H) -> parse_keychars(T,[H|Acc]);
parse_keychars([H|T],Acc) when ?IS_DIGIT(H) -> parse_keychars(T,[H|Acc]);
parse_keychars([$-|T],Acc) -> parse_keychars(T,[$-|Acc]);
parse_keychars(T,Acc) -> {T,lists:reverse(Acc)}.
+
%% A dotted-decimal OID such as "1.3.6.1"; stops at the first
%% character that is neither a digit nor a digit followed by '.'.
parse_oid(Str) -> parse_oid(Str,[]).

parse_oid([H,$.|T], Acc) when ?IS_DIGIT(H) ->
    parse_oid(T,[$.,H|Acc]);
parse_oid([H|T], Acc) when ?IS_DIGIT(H) ->
    parse_oid(T,[H|Acc]);
parse_oid(T, Acc) ->
    {T,lists:reverse(Acc)}.
+
%% Abort parsing with a throw; callers run under `catch' and turn this
%% into a {parse_error, ...} return.
%% NOTE(review): this local error/2 clashes with the auto-imported BIF
%% erlang:error/2 — modern compilers require
%% -compile({no_auto_import,[error/2]}); confirm against the build's
%% OTP version.
error(Emsg,Rest) ->
    throw({parse_error,Emsg,Rest}).
+
+
+%%% --------------------------------------------------------------------
+%%% Parse LDAP url according to RFC 2255
+%%%
+%%% Test case:
+%%%
+%%% 2> eldap:parse_ldap_url("ldap://10.42.126.33:389/cn=Administrative%20CA,o=Post%20Danmark,c=DK?certificateRevokationList;binary").
+%%% {ok,{{10,42,126,33},389},
+%%% [[{attribute_type_and_value,"cn","Administrative%20CA"}],
+%%% [{attribute_type_and_value,"o","Post%20Danmark"}],
+%%% [{attribute_type_and_value,"c","DK"}]],
+%%% {attributes,["certificateRevokationList;binary"]}}
+%%%
+%%% --------------------------------------------------------------------
+
+%%% Parse "ldap://host[:port]/dn[?attributes...]".
+%%% Returns {ok,HostPort,DN,Attributes} on success, or
+%%% {parse_error,Emsg,{Head,Tail}} pointing at where the DN parse
+%%% failed. NOTE(review): %XX URL escapes are not decoded (see the
+%%% transcript above, where "%20" survives into the parsed values).
+parse_ldap_url("ldap://" ++ Rest1 = Str) ->
+ {Rest2,HostPort} = parse_hostport(Rest1),
+ %% Split the string into DN and Attributes+etc
+ {Sdn,Rest3} = split_string(rm_leading_slash(Rest2),$?),
+ case parse_dn(Sdn) of
+ {parse_error,internal_error,_Reason} ->
+ {parse_error,internal_error,{Str,[]}};
+ {parse_error,Emsg,Tail} ->
+ Head = get_head(Str,Tail),
+ {parse_error,Emsg,{Head,Tail}};
+ {ok,DN} ->
+ %% We stop parsing here for now and leave
+ %% 'scope', 'filter' and 'extensions' to
+ %% be implemented later if needed.
+ {_Rest4,Attributes} = parse_attributes(Rest3),
+ {ok,HostPort,DN,Attributes}
+ end.
+
+%%% Drop a single leading "/" when present; otherwise return the
+%%% string unchanged.
+rm_leading_slash("/" ++ Remainder) -> Remainder;
+rm_leading_slash(Str) -> Str.
+
+%%% Parse the attribute-list part of an LDAP URL: a mandatory $?
+%%% followed by a comma-separated attribute list, terminated by the
+%%% next $? (introducing scope/filter) or end of string.
+%%% Returns {Rest,{attributes,[Attr]}}.
+%%% NOTE(review): the {[],Attributes} case fires when Tail begins
+%%% with another $?, so Attributes then still carries that leading
+%%% $? into string:tokens/2 - looks like the intended pattern was
+%%% {Attributes,[]}; confirm against RFC 2255 before relying on it.
+parse_attributes([$?|Tail]) ->
+ case split_string(Tail,$?) of
+ {[],Attributes} ->
+ {[],{attributes,string:tokens(Attributes,",")}};
+ {Attributes,Rest} ->
+ {Rest,{attributes,string:tokens(Attributes,",")}}
+ end.
+
+%%% Split "host[:port]/rest" into {Rest,{Host,Port}}. The port
+%%% defaults to ?LDAP_PORT when no ":port" part is present; Rest
+%%% still carries its leading "/" (removed later by
+%%% rm_leading_slash/1) and is also used as error context for
+%%% parse_host/parse_port.
+parse_hostport(Str) ->
+ {HostPort,Rest} = split_string(Str,$/),
+ case split_string(HostPort,$:) of
+ {Shost,[]} ->
+ {Rest,{parse_host(Rest,Shost),?LDAP_PORT}};
+ {Shost,[$:|Sport]} ->
+ {Rest,{parse_host(Rest,Shost),
+ parse_port(Rest,Sport)}}
+ end.
+
+%%% Convert the port substring to an integer; raises a parsing_port
+%%% parse error (throw, see error/2) when Sport is not all digits.
+%%% Fix: the original used the removed old-style guard `integer(Port)'
+%%% and its fallback clause was unreachable - list_to_integer/1 never
+%%% returns a non-integer, it exits with badarg, so a bad port crashed
+%%% the caller instead of reporting parsing_port. Catch the badarg and
+%%% report it properly.
+parse_port(Rest,Sport) ->
+ case catch list_to_integer(Sport) of
+ Port when is_integer(Port) -> Port;
+ _ -> error(parsing_port,Rest)
+ end.
+
+%%% Validate the host part; on failure re-raise the parse error with
+%%% Rest (the remainder of the URL) as context instead of Shost.
+%%% The old-style `catch' converts the throw from validate_host/1
+%%% into its thrown {parse_error,...} value; any other result (the
+%%% validated host, or an {'EXIT',_} from a crash) falls through the
+%%% Host clause unchanged.
+parse_host(Rest,Shost) ->
+ case catch validate_host(Shost) of
+ {parse_error,Emsg,_} -> error(Emsg,Rest);
+ Host -> Host
+ end.
+
+%%% Accept HostStr either as a textual IP address (returned as an
+%%% inet address tuple) or as a syntactically valid domain name
+%%% (returned unchanged). Anything else raises a parsing_host parse
+%%% error via throw (see error/2).
+validate_host(HostStr) ->
+ case inet_parse:address(HostStr) of
+ {ok,Addr} ->
+ Addr;
+ _NotAnAddress ->
+ case inet_parse:domain(HostStr) of
+ true -> HostStr;
+ _ -> error(parsing_host,HostStr)
+ end
+ end.
+
+
+%%% Split Str at the first occurrence of the character Key.
+%%% Returns {Before,Suffix} where Suffix starts with Key itself, or
+%%% {Str,[]} when Key does not occur - the separator is NOT removed.
+split_string(Str,Key) ->
+ lists:splitwith(fun(Ch) -> Ch /= Key end, Str).
+
+%%% Return the prefix of Str that precedes the given suffix Tail
+%%% (everything up to, and including, the character just before the
+%%% point where the remainder equals Tail), in original order.
+get_head(Str,Tail) ->
+ get_head(Str,Tail,[]).
+
+%%% Should always succeed ! (only called with Tail a proper suffix
+%%% of Str, so the first clause is guaranteed to match eventually)
+get_head([C|Tail],Tail,RevHead) -> lists:reverse([C|RevHead]);
+get_head([C|Rest],Tail,RevHead) -> get_head(Rest,Tail,[C|RevHead]).
+
+%%% Normalise a string/binary to a binary: binaries pass through,
+%%% lists are converted with list_to_binary/1.
+%%% NOTE(review): despite the name "b2l" this produces a Binary, not
+%%% a list; kept as-is because callers rely on the binary result.
+%%% Fix: the old-style guards `binary(B)'/`list(L)' are not accepted
+%%% by modern compilers - replaced with is_binary/1 and is_list/1
+%%% (behaviour identical).
+b2l(B) when is_binary(B) -> B;
+b2l(L) when is_list(L) -> list_to_binary(L).
+
--- /dev/null
+%%% $Id$
+
+%%% --------------------------------------------------------------------
+%%% Init setup
+%%% --------------------------------------------------------------------
+
+I set up the OpenLDAP (2.0.6) server using the following
+/usr/local/etc/openldap/slapd.conf file:
+
+ include /usr/local/etc/openldap/schema/core.schema
+ pidfile /var/run/slapd.pid
+ argsfile /var/run/slapd.args
+ database ldbm
+ suffix "dc=bluetail, dc=com"
+ rootdn "dc=bluetail, dc=com"
+ rootpw hejsan
+ directory /usr/local/var/openldap-ldbm
+ index objectClass eq
+
+
+%%% I started it on the console with some debug output:
+
+ /usr/local/libexec/slapd -d 255 -f /usr/local/etc/openldap/slapd.conf
+
+%%% Then I defined the following data in: bluetail.ldif
+
+ dn: dc=bluetail, dc=com
+ objectclass: organization
+ objectclass: dcObject
+ dc: bluetail
+ o: Bluetail AB
+
+%%% and in: tobbe.ldif
+
+ dn: cn=Torbjorn Tornkvist, dc=bluetail, dc=com
+ objectclass: person
+ cn: Torbjorn Tornkvist
+ sn: Tornkvist
+
+%%% I load the data with:
+
+ ldapadd -D "dc=bluetail, dc=com" -w hejsan < bluetail.ldif
+ ldapadd -D "dc=bluetail, dc=com" -w hejsan < people.ldif
+
+%%%% To search from a Unix shell:
+
+ ldapsearch -L -b "dc=bluetail, dc=com" -w hejsan "(objectclass=*)"
+ ldapsearch -L -b "dc=bluetail, dc=com" -w hejsan "cn=Torbjorn Tornkvist"
+ ldapsearch -L -b "dc=bluetail, dc=com" -w hejsan "cn=Torb*kvist"
+
+%%% --------------------------------------------------------------------
+%%% Example with certificateRevocationList
+%%% --------------------------------------------------------------------
+
+%%% Using two ldif files:
+
+%%% post_danmark.ldif
+
+dn: o=Post Danmark, c=DK
+objectclass: country
+objectclass: organization
+c: DK
+o: Post Danmark
+
+%%% crl.ldif
+
+dn: cn=Administrative CA, o=Post Danmark, c=DK
+objectclass: cRLDistributionPoint
+cn: Administrative CA
+certificateRevocationList;binary:< file:/home/tobbe/erlang/eldap/server1.crl
+
+%%% Note the definition of the CRL file !!
+
+%%% To add the definitions
+
+ldapadd -D "o=Post Danmark, c=DK" -w hejsan < post_danmark.ldif
+ldapadd -D "o=Post Danmark, c=DK" -w hejsan < crl.ldif
+
+%%% And to retrieve the CRL
+
+ldapsearch -L -b "o=Post Danmark, c=DK" -w hejsan "(objectclass=*)"
+ldapsearch -L -b "o=Post Danmark, c=DK" -w hejsan "(cn=Administrative CA)" \
+ certificateRevocationList
+
+### Put the retrieved binary in a file (tmp) with
+### the following header and footer
+
+-----BEGIN X509 CRL-----
+ <...binary....>
+-----END X509 CRL-----
+
+### To verify it with openssl
+
+ openssl crl -inform PEM -in tmp -text
+
+ldapsearch -L -D "cn=Torbjorn Tornkvist,o=Post Danmark,c=DK" -b "o=Post Danmark, c=DK" -w qwe123 "(cn=Torbjorn Tornkvist)" cn
--- /dev/null
+dn: mail=bill@bluetail.com, dc=bluetail, dc=com
+objectclass: posixAccount
+mail: bill@bluetail.com
+cn: Bill Valentine
+sn: Valentine
+uid: bill
+uidNumber: 400
+gidNumber: 400
+homeDirectory: /home/accounts/bill
+mailDirectory: /home/accounts/bill/INBOX
+userPassword: baltazar
+birMailAccept: accept
+birCluster: bc1
--- /dev/null
+dn: dc=bluetail, dc=com
+objectclass: dcObject
+dc: bluetail
+
+dn: o=Bluetail AB, dc=bluetail, dc=com
+objectclass: organization
+o: Bluetail AB
+street: St.Eriksgatan 44
+postalCode: 112 34
+
+dn: ou=people, o=Bluetail AB, dc=bluetail, dc=com
+objectclass: organizationalUnit
+ou: people
+description: People working at Bluetail
+
+
+
+
--- /dev/null
+dn: cn=Administrative CA,o=Post Danmark,c=DK
+objectclass: cRLDistributionPoint
+cn: Administrative CA
+certificateRevocationList;binary:< file:/home/tobbe/erlang/eldap/server1.crl
+
--- /dev/null
+-module(eldap_test).
+%%% --------------------------------------------------------------------
+%%% Created: 12 Oct 2000 by Tobbe
+%%% Function: Test code for the eldap module
+%%%
+%%% Copyright (C) 2000 Torbjörn Törnkvist
+%%% Copyright (c) 2010 Torbjorn Tornkvist <tobbe@tornkvist.org>
+%%% See MIT-LICENSE at the top dir for licensing information.
+%%%
+%%% --------------------------------------------------------------------
+-vc('$Id$ ').
+-export([topen_bind/1,topen_bind/2,all/0,t10/0,t20/0,t21/0,t22/0,
+ t23/0,t24/0,t25/0,t26/0,t27/0,debug/1,t30/0,t31/0,
+ t40/0,t41/0,t50/0,t51/0]).
+-export([crl1/0]).
+-export([switch/1]).
+-export([junk/0]).
+
+-include("ELDAPv3.hrl").
+-include("eldap.hrl").
+
+%%% Scratch/debug helper: hand-build and BER-encode a delRequest
+%%% LDAPMessage for the test user's DN.
+%%% NOTE(review): asn1rt:encode/3 has been deprecated and removed in
+%%% modern OTP; current code would call the generated module
+%%% ('ELDAPv3':encode/2) instead - confirm against the OTP version
+%%% this is built with.
+junk() ->
+ DN = "cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ Msg = #'LDAPMessage'{messageID = 1,
+ protocolOp = {delRequest,DN}},
+ asn1rt:encode('ELDAPv3', 'LDAPMessage', Msg).
+
+%%% --------------------------------------------------------------------
+%%% TEST STUFF
+%%% ----------
+%%% When adding a new test case it can be useful to
+%%% switch on debugging, i.e debug(t) in the call to
+%%% topen_bind/2.
+%%% --------------------------------------------------------------------
+
+%%% Run every test case in sequence against a local slapd. t26 is
+%%% re-run after each modifying test (t30..t51) with the "Check the
+%%% result" banner, so the effect of the modification can be
+%%% inspected by eye in the printed output.
+all() ->
+ Check = "=== Check the result of the previous test case !~n",
+ t10(),
+ t20(),t21(),t22(),t23(),t24(),t25(),t26(),t27(),
+ t30(),t26(Check),t31(),t26(Check),
+ t40(),t26(Check),t41(),t26(Check),
+ t50(),t26(Check),t51(),t26(Check),
+ ok.
+
+%%% NOTE(review): t10-t27 all follow one pattern: pause, print a
+%%% banner, open+bind to the local server (topen_bind/2, logging
+%%% off), run a single eldap operation, print and return its result
+%%% via the go/1 runner. They are read-only searches; the modifying
+%%% tests start at t30.
+%%%
+%%% Setup a connection and bind using simple authentication
+%%%
+t10() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 10 (connection setup + simple auth)~n"),
+ line(),
+ X = topen_bind("localhost", debug(f)),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do an equality match: sn = Tornkvist
+%%%
+t20() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 20 (equality match)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Filter = eldap:equalityMatch("sn","Tornkvist"),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, Filter}])),
+
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do a substring match: sn = To*kv*st
+%%%
+t21() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 21 (substring match)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Filter = eldap:substrings("sn", [{initial,"To"},
+ {any,"kv"},
+ {final,"st"}]),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, Filter}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do a substring match: sn = *o*
+%%% and do only retrieve the cn attribute
+%%%
+t22() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 22 (substring match + return 'cn' only)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Filter = eldap:substrings("sn", [{any,"o"}]),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, Filter},
+ {attributes,["cn"]}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+
+%%%
+%%% Do a present search for the attribute 'objectclass'
+%%% on the base level.
+%%%
+t23() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 23 (objectclass=* , base level)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, eldap:present("objectclass")},
+ {scope,eldap:baseObject()}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do a present search for the attribute 'objectclass'
+%%% on a single level.
+%%%
+t24() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 24 (objectclass=* , single level)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, eldap:present("objectclass")},
+ {scope,eldap:singleLevel()}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do a present search for the attribute 'objectclass'
+%%% on the whole subtree.
+%%%
+t25() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 25 (objectclass=* , whole subtree)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, eldap:present("objectclass")},
+ {scope,eldap:wholeSubtree()}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do a present search for the attributes
+%%% 'objectclass' and 'sn' on the whole subtree.
+%%%
+t26() -> t26([]).
+t26(Heading) ->
+ F = fun() ->
+ sleep(),
+ line(),
+ heading(Heading,
+ "=== TEST 26 (objectclass=* and sn=*)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Filter = eldap:'and'([eldap:present("objectclass"),
+ eldap:present("sn")]),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, Filter},
+ {scope,eldap:wholeSubtree()}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Do a present search for the attributes
+%%% 'objectclass' and (not 'sn') on the whole subtree.
+%%%
+t27() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== TEST 27 (objectclass=* and (not sn))~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Filter = eldap:'and'([eldap:present("objectclass"),
+ eldap:'not'(eldap:present("sn"))]),
+ X=(catch eldap:search(S, [{base, "dc=bluetail, dc=com"},
+ {filter, Filter},
+ {scope,eldap:wholeSubtree()}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Replace the 'telephoneNumber' attribute and
+%%% add a new attribute 'description'
+%%%
+t30() -> t30([]).
+t30(Heading) ->
+ F = fun() ->
+ sleep(),
+ %% erlang:now/0 is deprecated; os:timestamp/0 returns the
+ %% same {Mega,Sec,Micro} shape and serves the same purpose
+ %% here: a pseudo-unique number to make the test data vary.
+ {_,_,Tno} = os:timestamp(),
+ Stno = integer_to_list(Tno),
+ Desc = "LDAP hacker " ++ Stno,
+ line(),
+ heading(Heading,
+ "=== TEST 30 (replace telephoneNumber/"
+ ++ Stno ++ " add description/" ++ Desc
+ ++ ")~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Obj = "cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ Mod = [eldap:mod_replace("telephoneNumber", [Stno]),
+ eldap:mod_add("description", [Desc])],
+ X=(catch eldap:modify(S, Obj, Mod)),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Delete attribute 'description'
+%%%
+%%% (removed an unused `{_,_,Tno} = erlang:now()' binding: Tno was
+%%% never used - compiler warning - and erlang:now/0 is deprecated)
+t31() -> t31([]).
+t31(Heading) ->
+ F = fun() ->
+ sleep(),
+ line(),
+ heading(Heading,
+ "=== TEST 31 (delete 'description' attribute)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Obj = "cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ Mod = [eldap:mod_delete("description", [])],
+ X=(catch eldap:modify(S, Obj, Mod)),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Add an entry
+%%%
+%%% (removed an unused `{_,_,Tno} = erlang:now()' binding: Tno was
+%%% never used - compiler warning - and erlang:now/0 is deprecated)
+t40() -> t40([]).
+t40(Heading) ->
+ F = fun() ->
+ sleep(),
+ line(),
+ heading(Heading,
+ "=== TEST 40 (add entry 'Bill Valentine')~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Entry = "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ X=(catch eldap:add(S, Entry,
+ [{"objectclass", ["person"]},
+ {"cn", ["Bill Valentine"]},
+ {"sn", ["Valentine"]},
+ {"telephoneNumber", ["545 555 00"]}])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Delete an entry
+%%%
+%%% (removed an unused `{_,_,Tno} = erlang:now()' binding: Tno was
+%%% never used - compiler warning - and erlang:now/0 is deprecated)
+t41() -> t41([]).
+t41(Heading) ->
+ F = fun() ->
+ sleep(),
+ line(),
+ heading(Heading,
+ "=== TEST 41 (delete entry 'Bill Valentine')~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Entry = "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ X=(catch eldap:delete(S, Entry)),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Modify the DN of an entry
+%%%
+%%% (removed an unused `{_,_,Tno} = erlang:now()' binding: Tno was
+%%% never used - compiler warning - and erlang:now/0 is deprecated)
+t50() -> t50([]).
+t50(Heading) ->
+ F = fun() ->
+ sleep(),
+ line(),
+ heading(Heading,
+ "=== TEST 50 (modify DN to: 'Torbjorn M.Tornkvist')~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Entry = "cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ X=(catch eldap:modify_dn(S, Entry,
+ "cn=Torbjorn M.Tornkvist",
+ false,
+ [])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%%
+%%% Modify the DN of an entry and remove the RDN attribute.
+%%% NB: Must be run after: 't50' !
+%%%
+%%% (removed an unused `{_,_,Tno} = erlang:now()' binding: Tno was
+%%% never used - compiler warning - and erlang:now/0 is deprecated)
+t51() -> t51([]).
+t51(Heading) ->
+ F = fun() ->
+ sleep(),
+ line(),
+ heading(Heading,
+ "=== TEST 51 (modify DN, remove the RDN attribute)~n"),
+ line(),
+ {ok,S} = topen_bind("localhost", debug(f)),
+ Entry = "cn=Torbjorn M.Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+ X=(catch eldap:modify_dn(S, Entry,
+ "cn=Torbjorn Tornkvist",
+ true,
+ [])),
+ io:format("~p~n",[X]),
+ X
+ end,
+ go(F).
+
+%%% --------------------------------------------------------------------
+%%% Test cases for certificate revocation lists
+%%% --------------------------------------------------------------------
+
+%%% Fetch the certificateRevocationList attribute of the
+%%% "Administrative CA" entry (see the crl.ldif notes) and dump the
+%%% raw value to test-crl1.result via dump_to_file/2. Uses the
+%%% Post Danmark bind credentials from crl_open_bind/2.
+crl1() ->
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== CRL-TEST 1 ~n"),
+ line(),
+ {ok,S} = crl_open_bind("localhost", debug(f)),
+ Filter = eldap:equalityMatch("cn","Administrative CA"),
+ X=(catch eldap:search(S, [{base, "o=Post Danmark, c=DK"},
+ {filter, Filter},
+ {attributes,["certificateRevocationList"]}])),
+ dump_to_file("test-crl1.result",X),
+ ok
+ end,
+ go(F).
+
+
+%%% Write the first attribute value of the first entry in a search
+%%% result to Fname; logs an error line when the result holds no
+%%% entries or no attributes. Only accepts {ok,Res}; anything else
+%%% makes the caller crash with function_clause, which is acceptable
+%%% for a test helper.
+%%% Fix: the two `Else' variables were bound but never used
+%%% (compiler warnings) - renamed to `_Else'.
+dump_to_file(Fname,{ok,Res}) ->
+ case Res#eldap_search_result.entries of
+ [Entry|_] ->
+ case Entry#eldap_entry.attributes of
+ [{Attribute,Value}|_] ->
+ file:write_file(Fname,list_to_binary(Value)),
+ io:format("Value of '~s' dumped to file: ~s~n",
+ [Attribute,Fname]);
+ _Else ->
+ io:format("ERROR(dump_to_file): no attributes found~n",[])
+ end;
+ _Else ->
+ io:format("ERROR(dump_to_file): no entries found~n",[])
+ end.
+
+%%% Exercise the "switch" test server on host "korp" (port 9779 and
+%%% credentials come from sw_open_bind/2), with logging on:
+%%% 1 = search, 2 = add an entry, 3 = search for the added entry,
+%%% 4 = delete it again. Intended to be run in order 1..4.
+switch(1) ->
+ %%
+ %% SEARCH
+ %%
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== SWITCH-TEST 1 (short-search)~n"),
+ line(),
+ {ok,S} = sw_open_bind("korp", debug(t)),
+ Filter = eldap:equalityMatch("cn","Administrative CA"),
+ X=(catch eldap:search(S, [{base, "o=Post Danmark, c=DK"},
+ {filter, Filter},
+ {attributes,["cn"]}])),
+ io:format("RESULT: ~p~n", [X]),
+ %%dump_to_file("test-switch-1.result",X),
+ eldap:close(S),
+ ok
+ end,
+ go(F);
+switch(2) ->
+ %%
+ %% ADD AN ENTRY
+ %%
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== SWITCH-TEST 2 (add-entry)~n"),
+ line(),
+ {ok,S} = sw_open_bind("korp", debug(t)),
+ Entry = "cn=Bill Valentine, o=Post Danmark, c=DK",
+ X=(catch eldap:add(S, Entry,
+ [{"objectclass", ["person"]},
+ {"cn", ["Bill Valentine"]},
+ {"sn", ["Valentine"]}
+ ])),
+ io:format("~p~n",[X]),
+ eldap:close(S),
+ X
+ end,
+ go(F);
+switch(3) ->
+ %%
+ %% SEARCH FOR THE NEWLY ADDED ENTRY
+ %%
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== SWITCH-TEST 3 (search-added)~n"),
+ line(),
+ {ok,S} = sw_open_bind("korp", debug(t)),
+ Filter = eldap:equalityMatch("cn","Bill Valentine"),
+ X=(catch eldap:search(S, [{base, "o=Post Danmark, c=DK"},
+ {filter, Filter},
+ {attributes,["cn"]}])),
+ io:format("RESULT: ~p~n", [X]),
+ %%dump_to_file("test-switch-1.result",X),
+ eldap:close(S),
+ ok
+ end,
+ go(F);
+switch(4) ->
+ %%
+ %% DELETE THE NEWLY ADDED ENTRY
+ %%
+ F = fun() ->
+ sleep(),
+ line(),
+ io:format("=== SWITCH-TEST 4 (delete-added)~n"),
+ line(),
+ {ok,S} = sw_open_bind("korp", debug(t)),
+ Entry = "cn=Bill Valentine, o=Post Danmark, c=DK",
+ X=(catch eldap:delete(S, Entry)),
+ io:format("RESULT: ~p~n", [X]),
+ %%dump_to_file("test-switch-1.result",X),
+ eldap:close(S),
+ ok
+ end,
+ go(F).
+
+
+
+%%% ---------------
+%%% Misc. functions
+%%% ---------------
+
+%%% Open+bind against the "switch" test server (port 9779) as the
+%%% Torbjorn Tornkvist test user; returns {BindResult,Handle}.
+sw_open_bind(Host) ->
+ sw_open_bind(Host, debug(t)).
+
+sw_open_bind(Host, Dbg) ->
+ sw_open_bind(Host, Dbg, "cn=Torbjorn Tornkvist,o=Post Danmark,c=DK", "qwe123").
+
+sw_open_bind(Host, LogFun, RootDN, Passwd) ->
+ Opts = [{log,LogFun},{port,9779}],
+ {ok,Handle} = eldap:open([Host], Opts),
+ {eldap:simple_bind(Handle, RootDN, Passwd),
+ Handle}.
+
+%%% Open+bind for the CRL test cases (Post Danmark root DN).
+crl_open_bind(Host) ->
+ crl_open_bind(Host, debug(t)).
+
+crl_open_bind(Host, Dbg) ->
+ do_open_bind(Host, Dbg, "o=Post Danmark, c=DK", "hejsan").
+
+%%% Open+bind for the ordinary test cases (bluetail root DN).
+topen_bind(Host) ->
+ topen_bind(Host, debug(t)).
+
+topen_bind(Host, Dbg) ->
+ do_open_bind(Host, Dbg, "dc=bluetail, dc=com", "hejsan").
+
+%%% Common open+bind on the default port. Returns the simple_bind
+%%% result paired with the handle, so callers that match {ok,S} get
+%%% the handle in S exactly when the bind succeeded.
+do_open_bind(Host, LogFun, RootDN, Passwd) ->
+ Opts = [{log,LogFun}],
+ {ok,Handle} = eldap:open([Host], Opts),
+ {eldap:simple_bind(Handle, RootDN, Passwd),
+ Handle}.
+
+%%% Build the log fun handed to eldap:open/2:
+%%% t - log everything; 1/2 - log only events with level =< 1/2;
+%%% f - logging disabled (the 'false' log option).
+%%% NOTE(review): the level-filtered funs have no catch-all clause,
+%%% so a call with a higher level raises function_clause - presumably
+%%% swallowed by the caller in eldap; confirm.
+%%% Fix: `L' was bound but unused in the debug(t) fun (compiler
+%%% warning) - renamed to `_L'.
+debug(t) -> fun(_L,S,A) -> io:format("--- " ++ S, A) end;
+debug(1) -> fun(L,S,A) when L =< 1 -> io:format("--- " ++ S, A) end;
+debug(2) -> fun(L,S,A) when L =< 2 -> io:format("--- " ++ S, A) end;
+debug(f) -> false.
+
+%% Fixed pre-test delay, giving slapd a moment between cases.
+sleep() -> msleep(400).
+%% Block the calling process for Millis milliseconds; returns true.
+msleep(Millis) -> receive after Millis -> true end.
+
+%% Print a horizontal separator line; returns ok (from io:format/1).
+line() ->
+ io:format("==============================================================\n").
+
+%% Print the caller-supplied heading when one is given ([] means
+%% "none"), otherwise print the default banner.
+heading([], Default) -> io:format(Default);
+heading(Heading, _Default) -> io:format(Heading).
+
+%%%
+%%% Process to run the test case
+%%%
+%%% Spawn a throw-away process to evaluate F and wait for it to post
+%%% back; the test funs print their own results, so go/1 discards the
+%%% posted value and returns ok.
+%%% Fix: the received `X' was bound but unused (compiler warning) -
+%%% renamed to `_Result'.
+go(F) ->
+ Self = self(),
+ Pid = spawn(fun() -> run(F,Self) end),
+ receive {Pid, _Result} -> ok end.
+
+%%% Evaluate F, catching any exception, and post the result back.
+run(F, Pid) ->
+ Pid ! {self(),catch F()}.
--- /dev/null
+#!/bin/sh
+#
+# ldap This shell script takes care of starting and stopping
+# ldap servers (slapd and slurpd).
+#
+# chkconfig: - 39 61
+# description: LDAP stands for Lightweight Directory Access Protocol, used \
+# for implementing the industry standard directory services.
+# processname: slapd
+# config: /etc/openldap/slapd.conf
+# pidfile: /var/run/slapd.pid
+
+# Source function library.
+. /etc/init.d/functions
+
+# Source networking configuration and check that networking is up.
+if [ -r /etc/sysconfig/network ] ; then
+ . /etc/sysconfig/network
+ # Quote the expansion: with NETWORKING unset or empty the unquoted
+ # form expanded to `[ = "no" ]', a test syntax error.
+ [ "${NETWORKING}" = "no" ] && exit 0
+fi
+
+
+slapd=/usr/sbin/slapd
+slurpd=/usr/sbin/slurpd
+[ -x ${slapd} ] || exit 0
+[ -x ${slurpd} ] || exit 0
+
+RETVAL=0
+
+# Start slapd and - when replication is configured (a "replogfile"
+# directive in slapd.conf) - slurpd as well. On success, create the
+# subsys lock file so condrestart knows the service was running.
+# Sets and returns RETVAL.
+function start() {
+ # Start daemons.
+ echo -n "Starting slapd:"
+ daemon ${slapd}
+ RETVAL=$?
+ echo
+ if [ $RETVAL -eq 0 ]; then
+ if grep -q "^replogfile" /etc/openldap/slapd.conf; then
+ echo -n "Starting slurpd:"
+ daemon ${slurpd}
+ RETVAL=$?
+ echo
+ fi
+ fi
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/ldap
+ return $RETVAL
+}
+
+# Stop slapd (and slurpd when replication is configured), then clean
+# up the subsys lock file and the slapd args file on success.
+# Sets and returns RETVAL.
+function stop() {
+ # Stop daemons.
+ echo -n "Shutting down ldap: "
+ killproc ${slapd}
+ RETVAL=$?
+ if [ $RETVAL -eq 0 ]; then
+ if grep -q "^replogfile" /etc/openldap/slapd.conf; then
+ killproc ${slurpd}
+ RETVAL=$?
+ fi
+ fi
+ echo
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/ldap /var/run/slapd.args
+ return $RETVAL
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status ${slapd}
+ if grep -q "^replogfile" /etc/openldap/slapd.conf ; then
+ status ${slurpd}
+ fi
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ reload)
+ killall -HUP ${slapd}
+ RETVAL=$?
+ if [ $RETVAL -eq 0 ]; then
+ if grep -q "^replogfile" /etc/openldap/slapd.conf; then
+ killall -HUP ${slurpd}
+ RETVAL=$?
+ fi
+ fi
+ ;;
+ condrestart)
+ if [ -f /var/lock/subsys/ldap ] ; then
+ stop
+ start
+ fi
+ ;;
+ *)
+ # Fixed usage text: it was missing the opening brace and did not
+ # list the supported "reload" action.
+ echo "Usage: $0 {start|stop|restart|status|reload|condrestart}"
+ RETVAL=1
+esac
+
+exit $RETVAL
--- /dev/null
+dn: cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com
+objectclass: person
+cn: Torbjorn Tornkvist
+sn: Tornkvist
+telephoneNumber: 545 550 23
+
+dn: cn=Magnus Froberg, ou=people, o=Bluetail AB, dc=bluetail, dc=com
+objectclass: person
+cn: Magnus Froberg
+sn: Froberg
+telephoneNumber: 545 550 26
--- /dev/null
+dn: o=Post Danmark,c=DK
+objectclass: country
+objectclass: organization
+c: DK
+o: Post Danmark
--- /dev/null
+# $OpenLDAP: pkg/ldap/servers/slapd/slapd.conf,v 1.8.8.4 2000/08/26 17:06:18 kurt Exp $
+#
+# See slapd.conf(5) for details on configuration options.
+# This file should NOT be world readable.
+#
+include /usr/etc/openldap/schema/core.schema
+
+# Define global ACLs to disable default read access.
+
+# Do not enable referrals until AFTER you have a working directory
+# service AND an understanding of referrals.
+#referral ldap://root.openldap.org
+
+pidfile /var/run/slapd.pid
+argsfile /var/run/slapd.args
+
+# Load dynamic backend modules:
+# modulepath /usr/libexec/openldap
+# moduleload back_ldap.la
+# moduleload back_ldbm.la
+# moduleload back_passwd.la
+# moduleload back_shell.la
+
+#######################################################################
+# ldbm database definitions
+#######################################################################
+
+database ldbm
+suffix "dc=bluetail, dc=com"
+#suffix "o=My Organization Name, c=US"
+rootdn "dc=bluetail, dc=com"
+#rootdn "cn=Manager, o=My Organization Name, c=US"
+# Cleartext passwords, especially for the rootdn, should
+# be avoided. See slappasswd(8) and slapd.conf(5) for details.
+# Use of strong authentication encouraged.
+rootpw hejsan
+# The database directory MUST exist prior to running slapd AND
+# should only be accessible by the slapd/tools. Mode 700 recommended.
+directory /usr/var/openldap-ldbm
+# Indices to maintain
+index objectClass eq
--- /dev/null
+dn: cn=Torbjorn Tornkvist,o=Post Danmark,c=DK
+objectclass: person
+cn: Torbjorn Tornkvist
+sn: Tornkvist
+userPassword: qwe123
+
--- /dev/null
+diff --git a/src/eldap.erl b/src/eldap.erl
+index 9a78270..b0cdb2e 100644
+--- a/src/eldap.erl
++++ b/src/eldap.erl
+@@ -363,12 +363,5 @@ do_connect(Host, Data, Opts) when Data#eldap.use_tls == false ->
+ gen_tcp:connect(Host, Data#eldap.port, Opts, Data#eldap.timeout);
+ do_connect(Host, Data, Opts) when Data#eldap.use_tls == true ->
+- Vsn = erlang:system_info(version),
+- if Vsn >= "5.3" ->
+- %% In R9C, but not in R9B
+- {_,_,X} = erlang:now(),
+- ssl:seed("bkrlnateqqo" ++ integer_to_list(X));
+- true -> true
+- end,
+ ssl:connect(Host, Data#eldap.port, [{verify,0}|Opts]).
+
+
--- /dev/null
+UPSTREAM_SHORT_HASH:=e309de4
--- /dev/null
+Eldap is "Copyright (c) 2010, Torbjorn Tornkvist" and is covered by
+the MIT license. It was downloaded from https://github.com/etnt/eldap
+
--- /dev/null
+# Plugin-wrapper makefile for the eldap Erlang LDAP client.
+APP_NAME:=eldap
+
+# Upstream repository, the exact commit tracked, and the local
+# patches applied on top of it.
+UPSTREAM_GIT:=https://github.com/rabbitmq/eldap.git
+UPSTREAM_REVISION:=e309de4db4b78d67d623
+WRAPPER_PATCHES:=eldap-appify.patch remove-eldap-fsm.patch eldap-no-ssl-seed.patch remove-ietf-doc.patch
+
+# Use the .app file shipped by upstream instead of generating one.
+ORIGINAL_APP_FILE:=$(CLONE_DIR)/ebin/$(APP_NAME).app
+DO_NOT_GENERATE_APP_FILE=true
+
+# The ASN.1 compiler output (ELDAPv3 hrl/beam) is kept in a separate
+# "generated" directory and wired into the include/beam lists.
+GENERATED_DIR:=$(CLONE_DIR)/generated
+PACKAGE_ERLC_OPTS+=-I $(GENERATED_DIR)
+INCLUDE_HRLS+=$(GENERATED_DIR)/ELDAPv3.hrl
+EBIN_BEAMS+=$(GENERATED_DIR)/ELDAPv3.beam
+
+define package_rules
+
+$(CLONE_DIR)/src/ELDAPv3.asn: $(CLONE_DIR)/.done
+
+# Compile the ASN.1 spec into the generated hrl/beam pair.
+$(GENERATED_DIR)/ELDAPv3.hrl $(GENERATED_DIR)/ELDAPv3.beam: $(CLONE_DIR)/src/ELDAPv3.asn
+ @mkdir -p $(GENERATED_DIR)
+ $(ERLC) $(PACKAGE_ERLC_OPTS) -o $(GENERATED_DIR) $$<
+
+$(PACKAGE_DIR)+clean::
+ rm -rf $(GENERATED_DIR) $(EBIN_DIR)
+
+# This rule is run *before* the one in do_package.mk
+$(PLUGINS_SRC_DIST_DIR)/$(PACKAGE_DIR)/.srcdist_done::
+ cp $(CLONE_DIR)/LICENSE $(PACKAGE_DIR)/LICENSE-MIT-eldap
+
+endef
--- /dev/null
+diff --git a/src/eldap_fsm.erl b/src/eldap_fsm.erl
+deleted file mode 100644
+index 381ce69..0000000
+--- a/src/eldap_fsm.erl
++++ /dev/null
+@@ -1,946 +0,0 @@
+--module(eldap_fsm).
+-%%% --------------------------------------------------------------------
+-%%% Created: 12 Oct 2000 by Tobbe
+-%%% Function: Erlang client LDAP implementation according RFC 2251.
+-%%% The interface is based on RFC 1823, and
+-%%% draft-ietf-asid-ldap-c-api-00.txt
+-%%%
+-%%% Copyright (C) 2000 Torbjörn Törnkvist
+-%%% Copyright (c) 2010 Torbjorn Tornkvist <tobbe@tornkvist.org>
+-%%% See MIT-LICENSE at the top dir for licensing information.
+-%%%
+-%%% Modified by Sean Hinde <shinde@iee.org> 7th Dec 2000
+-%%% Turned into gen_fsm, made non-blocking, added timers etc to support this.
+-%%% Now has the concept of a name (string() or atom()) per instance which allows
+-%%% multiple users to call by name if so desired.
+-%%%
+-%%% Can be configured with start_link parameters or use a config file to get
+-%%% host to connect to, dn, password, log function etc.
+-%%% --------------------------------------------------------------------
+-
+-
+-%%%----------------------------------------------------------------------
+-%%% LDAP Client state machine.
+-%%% Possible states are:
+-%%% connecting - actually disconnected, but retrying periodically
+-%%% wait_bind_response - connected and sent bind request
+-%%% active - bound to LDAP Server and ready to handle commands
+-%%%----------------------------------------------------------------------
+-
+-%%-compile(export_all).
+-%%-export([Function/Arity, ...]).
+-
+--behaviour(gen_fsm).
+-
+-%% External exports
+--export([start_link/1, start_link/5, start_link/6]).
+-
+--export([baseObject/0,singleLevel/0,wholeSubtree/0,close/1,
+- equalityMatch/2,greaterOrEqual/2,lessOrEqual/2,
+- approxMatch/2,search/2,substrings/2,present/1,
+- 'and'/1,'or'/1,'not'/1,modify/3, mod_add/2, mod_delete/2,
+- mod_replace/2, add/3, delete/2, modify_dn/5]).
+--export([debug_level/2, get_status/1]).
+-
+-%% gen_fsm callbacks
+--export([init/1, connecting/2,
+- connecting/3, wait_bind_response/3, active/3, handle_event/3,
+- handle_sync_event/4, handle_info/3, terminate/3, code_change/4]).
+-
+-
+--import(lists,[concat/1]).
+-
+--include("ELDAPv3.hrl").
+--include("eldap.hrl").
+-
+--define(LDAP_VERSION, 3).
+--define(RETRY_TIMEOUT, 5000).
+--define(BIND_TIMEOUT, 10000).
+--define(CMD_TIMEOUT, 5000).
+--define(MAX_TRANSACTION_ID, 65535).
+--define(MIN_TRANSACTION_ID, 0).
+-
+--record(eldap, {version = ?LDAP_VERSION,
+- hosts, % Possible hosts running LDAP servers
+- host = null, % Connected Host LDAP server
+- port = 389 , % The LDAP server port
+- fd = null, % Socket filedescriptor.
+- rootdn = "", % Name of the entry to bind as
+- passwd, % Password for (above) entry
+- id = 0, % LDAP Request ID
+- log, % User provided log function
+- bind_timer, % Ref to bind timeout
+- dict, % dict holding operation params and results
+- debug_level % Integer debug/logging level
+- }).
+-
+-%%%----------------------------------------------------------------------
+-%%% API
+-%%%----------------------------------------------------------------------
+-start_link(Name) ->
+- Reg_name = list_to_atom("eldap_" ++ Name),
+- gen_fsm:start_link({local, Reg_name}, ?MODULE, [], []).
+-
+-start_link(Name, Hosts, Port, Rootdn, Passwd) ->
+- Log = fun(N, Fmt, Args) -> io:format("---- " ++ Fmt, [Args]) end,
+- Reg_name = list_to_atom("eldap_" ++ Name),
+- gen_fsm:start_link({local, Reg_name}, ?MODULE, {Hosts, Port, Rootdn, Passwd, Log}, []).
+-
+-start_link(Name, Hosts, Port, Rootdn, Passwd, Log) ->
+- Reg_name = list_to_atom("eldap_" ++ Name),
+- gen_fsm:start_link({local, Reg_name}, ?MODULE, {Hosts, Port, Rootdn, Passwd, Log}, []).
+-
+-%%% --------------------------------------------------------------------
+-%%% Set Debug Level. 0 - none, 1 - errors, 2 - ldap events
+-%%% --------------------------------------------------------------------
+-debug_level(Handle, N) when integer(N) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_all_state_event(Handle1, {debug_level,N}).
+-
+-%%% --------------------------------------------------------------------
+-%%% Get status of connection.
+-%%% --------------------------------------------------------------------
+-get_status(Handle) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_all_state_event(Handle1, get_status).
+-
+-%%% --------------------------------------------------------------------
+-%%% Shutdown connection (and process) asynchronous.
+-%%% --------------------------------------------------------------------
+-close(Handle) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:send_all_state_event(Handle1, close).
+-
+-%%% --------------------------------------------------------------------
+-%%% Add an entry. The entry field MUST NOT exist for the AddRequest
+-%%% to succeed. The parent of the entry MUST exist.
+-%%% Example:
+-%%%
+-%%% add(Handle,
+-%%% "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+-%%% [{"objectclass", ["person"]},
+-%%% {"cn", ["Bill Valentine"]},
+-%%% {"sn", ["Valentine"]},
+-%%% {"telephoneNumber", ["545 555 00"]}]
+-%%% )
+-%%% --------------------------------------------------------------------
+-add(Handle, Entry, Attributes) when list(Entry),list(Attributes) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_event(Handle1, {add, Entry, add_attrs(Attributes)}).
+-
+-%%% Do sanity check !
+-add_attrs(Attrs) ->
+- F = fun({Type,Vals}) when list(Type),list(Vals) ->
+- %% Confused ? Me too... :-/
+- {'AddRequest_attributes',Type, Vals}
+- end,
+- case catch lists:map(F, Attrs) of
+- {'EXIT', _} -> throw({error, attribute_values});
+- Else -> Else
+- end.
+-
+-
+-%%% --------------------------------------------------------------------
+-%%% Delete an entry. The entry consists of the DN of
+-%%% the entry to be deleted.
+-%%% Example:
+-%%%
+-%%% delete(Handle,
+-%%% "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com"
+-%%% )
+-%%% --------------------------------------------------------------------
+-delete(Handle, Entry) when list(Entry) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_event(Handle1, {delete, Entry}).
+-
+-%%% --------------------------------------------------------------------
+-%%% Modify an entry. Given an entry a number of modification
+-%%% operations can be performed as one atomic operation.
+-%%% Example:
+-%%%
+-%%% modify(Handle,
+-%%% "cn=Torbjorn Tornkvist, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+-%%% [replace("telephoneNumber", ["555 555 00"]),
+-%%% add("description", ["LDAP hacker"])]
+-%%% )
+-%%% --------------------------------------------------------------------
+-modify(Handle, Object, Mods) when list(Object), list(Mods) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_event(Handle1, {modify, Object, Mods}).
+-
+-%%%
+-%%% Modification operations.
+-%%% Example:
+-%%% replace("telephoneNumber", ["555 555 00"])
+-%%%
+-mod_add(Type, Values) when list(Type), list(Values) -> m(add, Type, Values).
+-mod_delete(Type, Values) when list(Type), list(Values) -> m(delete, Type, Values).
+-mod_replace(Type, Values) when list(Type), list(Values) -> m(replace, Type, Values).
+-
+-m(Operation, Type, Values) ->
+- #'ModifyRequest_modification_SEQOF'{
+- operation = Operation,
+- modification = #'AttributeTypeAndValues'{
+- type = Type,
+- vals = Values}}.
+-
+-%%% --------------------------------------------------------------------
+-%%% Modify an entry. Given an entry a number of modification
+-%%% operations can be performed as one atomic operation.
+-%%% Example:
+-%%%
+-%%% modify_dn(Handle,
+-%%% "cn=Bill Valentine, ou=people, o=Bluetail AB, dc=bluetail, dc=com",
+-%%% "cn=Ben Emerson",
+-%%% true,
+-%%% ""
+-%%% )
+-%%% --------------------------------------------------------------------
+-modify_dn(Handle, Entry, NewRDN, DelOldRDN, NewSup)
+- when list(Entry),list(NewRDN),atom(DelOldRDN),list(NewSup) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_event(Handle1, {modify_dn, Entry, NewRDN, bool_p(DelOldRDN), optional(NewSup)}).
+-
+-%%% Sanity checks !
+-
+-bool_p(Bool) when Bool==true;Bool==false -> Bool.
+-
+-optional([]) -> asn1_NOVALUE;
+-optional(Value) -> Value.
+-
+-%%% --------------------------------------------------------------------
+-%%% Synchronous search of the Directory returning a
+-%%% requested set of attributes.
+-%%%
+-%%% Example:
+-%%%
+-%%% Filter = eldap:substrings("sn", [{any,"o"}]),
+-%%% eldap:search(S, [{base, "dc=bluetail, dc=com"},
+-%%% {filter, Filter},
+-%%% {attributes,["cn"]}])),
+-%%%
+-%%% Returned result: {ok, #eldap_search_result{}}
+-%%%
+-%%% Example:
+-%%%
+-%%% {ok,{eldap_search_result,
+-%%% [{eldap_entry,
+-%%% "cn=Magnus Froberg, dc=bluetail, dc=com",
+-%%% [{"cn",["Magnus Froberg"]}]},
+-%%% {eldap_entry,
+-%%% "cn=Torbjorn Tornkvist, dc=bluetail, dc=com",
+-%%% [{"cn",["Torbjorn Tornkvist"]}]}],
+-%%% []}}
+-%%%
+-%%% --------------------------------------------------------------------
+-search(Handle, A) when record(A, eldap_search) ->
+- call_search(Handle, A);
+-search(Handle, L) when list(Handle), list(L) ->
+- case catch parse_search_args(L) of
+- {error, Emsg} -> {error, Emsg};
+- {'EXIT', Emsg} -> {error, Emsg};
+- A when record(A, eldap_search) -> call_search(Handle, A)
+- end.
+-
+-call_search(Handle, A) ->
+- Handle1 = get_handle(Handle),
+- gen_fsm:sync_send_event(Handle1, {search, A}).
+-
+-parse_search_args(Args) ->
+- parse_search_args(Args, #eldap_search{scope = wholeSubtree}).
+-
+-parse_search_args([{base, Base}|T],A) ->
+- parse_search_args(T,A#eldap_search{base = Base});
+-parse_search_args([{filter, Filter}|T],A) ->
+- parse_search_args(T,A#eldap_search{filter = Filter});
+-parse_search_args([{scope, Scope}|T],A) ->
+- parse_search_args(T,A#eldap_search{scope = Scope});
+-parse_search_args([{attributes, Attrs}|T],A) ->
+- parse_search_args(T,A#eldap_search{attributes = Attrs});
+-parse_search_args([{types_only, TypesOnly}|T],A) ->
+- parse_search_args(T,A#eldap_search{types_only = TypesOnly});
+-parse_search_args([{timeout, Timeout}|T],A) when integer(Timeout) ->
+- parse_search_args(T,A#eldap_search{timeout = Timeout});
+-parse_search_args([H|T],A) ->
+- throw({error,{unknown_arg, H}});
+-parse_search_args([],A) ->
+- A.
+-
+-%%%
+-%%% The Scope parameter
+-%%%
+-baseObject() -> baseObject.
+-singleLevel() -> singleLevel.
+-wholeSubtree() -> wholeSubtree.
+-
+-%%%
+-%%% Boolean filter operations
+-%%%
+-'and'(ListOfFilters) when list(ListOfFilters) -> {'and',ListOfFilters}.
+-'or'(ListOfFilters) when list(ListOfFilters) -> {'or', ListOfFilters}.
+-'not'(Filter) when tuple(Filter) -> {'not',Filter}.
+-
+-%%%
+-%%% The following Filter parameters consist of an attribute
+-%%% and an attribute value. Example: F("uid","tobbe")
+-%%%
+-equalityMatch(Desc, Value) -> {equalityMatch, av_assert(Desc, Value)}.
+-greaterOrEqual(Desc, Value) -> {greaterOrEqual, av_assert(Desc, Value)}.
+-lessOrEqual(Desc, Value) -> {lessOrEqual, av_assert(Desc, Value)}.
+-approxMatch(Desc, Value) -> {approxMatch, av_assert(Desc, Value)}.
+-
+-av_assert(Desc, Value) ->
+- #'AttributeValueAssertion'{attributeDesc = Desc,
+- assertionValue = Value}.
+-
+-%%%
+-%%% Filter to check for the presence of an attribute
+-%%%
+-present(Attribute) when list(Attribute) ->
+- {present, Attribute}.
+-
+-
+-%%%
+-%%% A substring filter seem to be based on a pattern:
+-%%%
+-%%% InitValue*AnyValue*FinalValue
+-%%%
+-%%% where all three parts seem to be optional (at least when
+-%%% talking with an OpenLDAP server). Thus, the arguments
+-%%% to substrings/2 looks like this:
+-%%%
+-%%% Type ::= string( <attribute> )
+-%%% SubStr ::= listof( {initial,Value} | {any,Value}, {final,Value})
+-%%%
+-%%% Example: substrings("sn",[{initial,"To"},{any,"kv"},{final,"st"}])
+-%%% will match entries containing: 'sn: Tornkvist'
+-%%%
+-substrings(Type, SubStr) when list(Type), list(SubStr) ->
+- Ss = {'SubstringFilter_substrings',v_substr(SubStr)},
+- {substrings,#'SubstringFilter'{type = Type,
+- substrings = Ss}}.
+-
+-
+-get_handle(Pid) when pid(Pid) -> Pid;
+-get_handle(Atom) when atom(Atom) -> Atom;
+-get_handle(Name) when list(Name) -> list_to_atom("eldap_" ++ Name).
+-%%%----------------------------------------------------------------------
+-%%% Callback functions from gen_fsm
+-%%%----------------------------------------------------------------------
+-
+-%%----------------------------------------------------------------------
+-%% Func: init/1
+-%% Returns: {ok, StateName, StateData} |
+-%% {ok, StateName, StateData, Timeout} |
+-%% ignore |
+-%% {stop, StopReason}
+-%% I use the trick of setting a timeout of 0 to pass control into the
+-%% process.
+-%%----------------------------------------------------------------------
+-init([]) ->
+- case get_config() of
+- {ok, Hosts, Rootdn, Passwd, Log} ->
+- init({Hosts, Rootdn, Passwd, Log});
+- {error, Reason} ->
+- {stop, Reason}
+- end;
+-init({Hosts, Port, Rootdn, Passwd, Log}) ->
+- {ok, connecting, #eldap{hosts = Hosts,
+- port = Port,
+- rootdn = Rootdn,
+- passwd = Passwd,
+- id = 0,
+- log = Log,
+- dict = dict:new(),
+- debug_level = 0}, 0}.
+-
+-%%----------------------------------------------------------------------
+-%% Func: StateName/2
+-%% Called when gen_fsm:send_event/2,3 is invoked (async)
+-%% Returns: {next_state, NextStateName, NextStateData} |
+-%% {next_state, NextStateName, NextStateData, Timeout} |
+-%% {stop, Reason, NewStateData}
+-%%----------------------------------------------------------------------
+-connecting(timeout, S) ->
+- {ok, NextState, NewS} = connect_bind(S),
+- {next_state, NextState, NewS}.
+-
+-%%----------------------------------------------------------------------
+-%% Func: StateName/3
+-%% Called when gen_fsm:sync_send_event/2,3 is invoked.
+-%% Returns: {next_state, NextStateName, NextStateData} |
+-%% {next_state, NextStateName, NextStateData, Timeout} |
+-%% {reply, Reply, NextStateName, NextStateData} |
+-%% {reply, Reply, NextStateName, NextStateData, Timeout} |
+-%% {stop, Reason, NewStateData} |
+-%% {stop, Reason, Reply, NewStateData}
+-%%----------------------------------------------------------------------
+-connecting(Event, From, S) ->
+- Reply = {error, connecting},
+- {reply, Reply, connecting, S}.
+-
+-wait_bind_response(Event, From, S) ->
+- Reply = {error, wait_bind_response},
+- {reply, Reply, wait_bind_response, S}.
+-
+-active(Event, From, S) ->
+- case catch send_command(Event, From, S) of
+- {ok, NewS} ->
+- {next_state, active, NewS};
+- {error, Reason} ->
+- {reply, {error, Reason}, active, S};
+- {'EXIT', Reason} ->
+- {reply, {error, Reason}, active, S}
+- end.
+-
+-%%----------------------------------------------------------------------
+-%% Func: handle_event/3
+-%% Called when gen_fsm:send_all_state_event/2 is invoked.
+-%% Returns: {next_state, NextStateName, NextStateData} |
+-%% {next_state, NextStateName, NextStateData, Timeout} |
+-%% {stop, Reason, NewStateData}
+-%%----------------------------------------------------------------------
+-handle_event(close, StateName, S) ->
+- gen_tcp:close(S#eldap.fd),
+- {stop, closed, S};
+-
+-handle_event(Event, StateName, S) ->
+- {next_state, StateName, S}.
+-
+-%%----------------------------------------------------------------------
+-%% Func: handle_sync_event/4
+-%% Called when gen_fsm:sync_send_all_state_event/2,3 is invoked
+-%% Returns: {next_state, NextStateName, NextStateData} |
+-%% {next_state, NextStateName, NextStateData, Timeout} |
+-%% {reply, Reply, NextStateName, NextStateData} |
+-%% {reply, Reply, NextStateName, NextStateData, Timeout} |
+-%% {stop, Reason, NewStateData} |
+-%% {stop, Reason, Reply, NewStateData}
+-%%----------------------------------------------------------------------
+-handle_sync_event({debug_level, N}, From, StateName, S) ->
+- {reply, ok, StateName, S#eldap{debug_level = N}};
+-
+-handle_sync_event(Event, From, StateName, S) ->
+- {reply, {StateName, S}, StateName, S};
+-
+-handle_sync_event(Event, From, StateName, S) ->
+- Reply = ok,
+- {reply, Reply, StateName, S}.
+-
+-%%----------------------------------------------------------------------
+-%% Func: handle_info/3
+-%% Returns: {next_state, NextStateName, NextStateData} |
+-%% {next_state, NextStateName, NextStateData, Timeout} |
+-%% {stop, Reason, NewStateData}
+-%%----------------------------------------------------------------------
+-
+-%%
+-%% Packets arriving in various states
+-%%
+-handle_info({tcp, Socket, Data}, connecting, S) ->
+- log1("eldap. tcp packet received when disconnected!~n~p~n", [Data], S),
+- {next_state, connecting, S};
+-
+-handle_info({tcp, Socket, Data}, wait_bind_response, S) ->
+- cancel_timer(S#eldap.bind_timer),
+- case catch recvd_wait_bind_response(Data, S) of
+- bound -> {next_state, active, S};
+- {fail_bind, Reason} -> close_and_retry(S),
+- {next_state, connecting, S#eldap{fd = null}};
+- {'EXIT', Reason} -> close_and_retry(S),
+- {next_state, connecting, S#eldap{fd = null}};
+- {error, Reason} -> close_and_retry(S),
+- {next_state, connecting, S#eldap{fd = null}}
+- end;
+-
+-handle_info({tcp, Socket, Data}, active, S) ->
+- case catch recvd_packet(Data, S) of
+- {reply, Reply, To, NewS} -> gen_fsm:reply(To, Reply),
+- {next_state, active, NewS};
+- {ok, NewS} -> {next_state, active, NewS};
+- {'EXIT', Reason} -> {next_state, active, S};
+- {error, Reason} -> {next_state, active, S}
+- end;
+-
+-handle_info({tcp_closed, Socket}, All_fsm_states, S) ->
+- F = fun(Id, [{Timer, From, Name}|Res]) ->
+- gen_fsm:reply(From, {error, tcp_closed}),
+- cancel_timer(Timer)
+- end,
+- dict:map(F, S#eldap.dict),
+- retry_connect(),
+- {next_state, connecting, S#eldap{fd = null,
+- dict = dict:new()}};
+-
+-handle_info({tcp_error, Socket, Reason}, Fsm_state, S) ->
+- log1("eldap received tcp_error: ~p~nIn State: ~p~n", [Reason, Fsm_state], S),
+- {next_state, Fsm_state, S};
+-%%
+-%% Timers
+-%%
+-handle_info({timeout, Timer, {cmd_timeout, Id}}, active, S) ->
+- case cmd_timeout(Timer, Id, S) of
+- {reply, To, Reason, NewS} -> gen_fsm:reply(To, Reason),
+- {next_state, active, NewS};
+- {error, Reason} -> {next_state, active, S}
+- end;
+-
+-handle_info({timeout, retry_connect}, connecting, S) ->
+- {ok, NextState, NewS} = connect_bind(S),
+- {next_state, NextState, NewS};
+-
+-handle_info({timeout, Timer, bind_timeout}, wait_bind_response, S) ->
+- close_and_retry(S),
+- {next_state, connecting, S#eldap{fd = null}};
+-
+-%%
+-%% Make sure we don't fill the message queue with rubbish
+-%%
+-handle_info(Info, StateName, S) ->
+- log1("eldap. Unexpected Info: ~p~nIn state: ~p~n when StateData is: ~p~n",
+- [Info, StateName, S], S),
+- {next_state, StateName, S}.
+-
+-%%----------------------------------------------------------------------
+-%% Func: terminate/3
+-%% Purpose: Shutdown the fsm
+-%% Returns: any
+-%%----------------------------------------------------------------------
+-terminate(Reason, StateName, StatData) ->
+- ok.
+-
+-%%----------------------------------------------------------------------
+-%% Func: code_change/4
+-%% Purpose: Convert process state when code is changed
+-%% Returns: {ok, NewState, NewStateData}
+-%%----------------------------------------------------------------------
+-code_change(OldVsn, StateName, S, Extra) ->
+- {ok, StateName, S}.
+-
+-%%%----------------------------------------------------------------------
+-%%% Internal functions
+-%%%----------------------------------------------------------------------
+-send_command(Command, From, S) ->
+- Id = bump_id(S),
+- {Name, Request} = gen_req(Command),
+- Message = #'LDAPMessage'{messageID = Id,
+- protocolOp = {Name, Request}},
+- log2("~p~n",[{Name, Request}], S),
+- {ok, Bytes} = asn1rt:encode('ELDAPv3', 'LDAPMessage', Message),
+- ok = gen_tcp:send(S#eldap.fd, Bytes),
+- Timer = erlang:start_timer(?CMD_TIMEOUT, self(), {cmd_timeout, Id}),
+- New_dict = dict:store(Id, [{Timer, From, Name}], S#eldap.dict),
+- {ok, S#eldap{id = Id,
+- dict = New_dict}}.
+-
+-gen_req({search, A}) ->
+- {searchRequest,
+- #'SearchRequest'{baseObject = A#eldap_search.base,
+- scope = v_scope(A#eldap_search.scope),
+- derefAliases = neverDerefAliases,
+- sizeLimit = 0, % no size limit
+- timeLimit = v_timeout(A#eldap_search.timeout),
+- typesOnly = v_bool(A#eldap_search.types_only),
+- filter = v_filter(A#eldap_search.filter),
+- attributes = v_attributes(A#eldap_search.attributes)
+- }};
+-gen_req({add, Entry, Attrs}) ->
+- {addRequest,
+- #'AddRequest'{entry = Entry,
+- attributes = Attrs}};
+-gen_req({delete, Entry}) ->
+- {delRequest, Entry};
+-gen_req({modify, Obj, Mod}) ->
+- v_modifications(Mod),
+- {modifyRequest,
+- #'ModifyRequest'{object = Obj,
+- modification = Mod}};
+-gen_req({modify_dn, Entry, NewRDN, DelOldRDN, NewSup}) ->
+- {modDNRequest,
+- #'ModifyDNRequest'{entry = Entry,
+- newrdn = NewRDN,
+- deleteoldrdn = DelOldRDN,
+- newSuperior = NewSup}}.
+-
+-%%-----------------------------------------------------------------------
+-%% recvd_packet
+-%% Deals with incoming packets in the active state
+-%% Will return one of:
+-%% {ok, NewS} - Don't reply to client yet as this is part of a search
+-%% result and we haven't got all the answers yet.
+-%% {reply, Result, From, NewS} - Reply with result to client From
+-%% {error, Reason}
+-%% {'EXIT', Reason} - Broke
+-%%-----------------------------------------------------------------------
+-recvd_packet(Pkt, S) ->
+- check_tag(Pkt),
+- case asn1rt:decode('ELDAPv3', 'LDAPMessage', Pkt) of
+- {ok,Msg} ->
+- Op = Msg#'LDAPMessage'.protocolOp,
+- log2("~p~n",[Op], S),
+- Dict = S#eldap.dict,
+- Id = Msg#'LDAPMessage'.messageID,
+- {Timer, From, Name, Result_so_far} = get_op_rec(Id, Dict),
+- case {Name, Op} of
+- {searchRequest, {searchResEntry, R}} when
+- record(R,'SearchResultEntry') ->
+- New_dict = dict:append(Id, R, Dict),
+- {ok, S#eldap{dict = New_dict}};
+- {searchRequest, {searchResDone, Result}} ->
+- case Result#'LDAPResult'.resultCode of
+- success ->
+- {Res, Ref} = polish(Result_so_far),
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- {reply, #eldap_search_result{entries = Res,
+- referrals = Ref}, From,
+- S#eldap{dict = New_dict}};
+- Reason ->
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- {reply, {error, Reason}, From, S#eldap{dict = New_dict}}
+- end;
+- {searchRequest, {searchResRef, R}} ->
+- New_dict = dict:append(Id, R, Dict),
+- {ok, S#eldap{dict = New_dict}};
+- {addRequest, {addResponse, Result}} ->
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- Reply = check_reply(Result, From),
+- {reply, Reply, From, S#eldap{dict = New_dict}};
+- {delRequest, {delResponse, Result}} ->
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- Reply = check_reply(Result, From),
+- {reply, Reply, From, S#eldap{dict = New_dict}};
+- {modifyRequest, {modifyResponse, Result}} ->
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- Reply = check_reply(Result, From),
+- {reply, Reply, From, S#eldap{dict = New_dict}};
+- {modDNRequest, {modDNResponse, Result}} ->
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- Reply = check_reply(Result, From),
+- {reply, Reply, From, S#eldap{dict = New_dict}};
+- {OtherName, OtherResult} ->
+- New_dict = dict:erase(Id, Dict),
+- cancel_timer(Timer),
+- {reply, {error, {invalid_result, OtherName, OtherResult}},
+- From, S#eldap{dict = New_dict}}
+- end;
+- Error -> Error
+- end.
+-
+-check_reply(#'LDAPResult'{resultCode = success}, From) ->
+- ok;
+-check_reply(#'LDAPResult'{resultCode = Reason}, From) ->
+- {error, Reason};
+-check_reply(Other, From) ->
+- {error, Other}.
+-
+-get_op_rec(Id, Dict) ->
+- case dict:find(Id, Dict) of
+- {ok, [{Timer, From, Name}|Res]} ->
+- {Timer, From, Name, Res};
+- error ->
+- throw({error, unkown_id})
+- end.
+-
+-%%-----------------------------------------------------------------------
+-%% recvd_wait_bind_response packet
+-%% Deals with incoming packets in the wait_bind_response state
+-%% Will return one of:
+-%% bound - Success - move to active state
+-%% {fail_bind, Reason} - Failed
+-%% {error, Reason}
+-%% {'EXIT', Reason} - Broken packet
+-%%-----------------------------------------------------------------------
+-recvd_wait_bind_response(Pkt, S) ->
+- check_tag(Pkt),
+- case asn1rt:decode('ELDAPv3', 'LDAPMessage', Pkt) of
+- {ok,Msg} ->
+- log2("~p", [Msg], S),
+- check_id(S#eldap.id, Msg#'LDAPMessage'.messageID),
+- case Msg#'LDAPMessage'.protocolOp of
+- {bindResponse, Result} ->
+- case Result#'LDAPResult'.resultCode of
+- success -> bound;
+- Error -> {fail_bind, Error}
+- end
+- end;
+- Else ->
+- {fail_bind, Else}
+- end.
+-
+-check_id(Id, Id) -> ok;
+-check_id(_, _) -> throw({error, wrong_bind_id}).
+-
+-%%-----------------------------------------------------------------------
+-%% General Helpers
+-%%-----------------------------------------------------------------------
+-
+-cancel_timer(Timer) ->
+- erlang:cancel_timer(Timer),
+- receive
+- {timeout, Timer, _} ->
+- ok
+- after 0 ->
+- ok
+- end.
+-
+-
+-%%% Sanity check of received packet
+-check_tag(Data) ->
+- case asn1rt_ber:decode_tag(Data) of
+- {Tag, Data1, Rb} ->
+- case asn1rt_ber:decode_length(Data1) of
+- {{Len,Data2}, Rb2} -> ok;
+- _ -> throw({error,decoded_tag_length})
+- end;
+- _ -> throw({error,decoded_tag})
+- end.
+-
+-close_and_retry(S) ->
+- gen_tcp:close(S#eldap.fd),
+- retry_connect().
+-
+-retry_connect() ->
+- erlang:send_after(?RETRY_TIMEOUT, self(),
+- {timeout, retry_connect}).
+-
+-
+-%%-----------------------------------------------------------------------
+-%% Sort out timed out commands
+-%%-----------------------------------------------------------------------
+-cmd_timeout(Timer, Id, S) ->
+- Dict = S#eldap.dict,
+- case dict:find(Id, Dict) of
+- {ok, [{Id, Timer, From, Name}|Res]} ->
+- case Name of
+- searchRequest ->
+- {Res1, Ref1} = polish(Res),
+- New_dict = dict:erase(Id, Dict),
+- {reply, From, {timeout,
+- #eldap_search_result{entries = Res1,
+- referrals = Ref1}},
+- S#eldap{dict = New_dict}};
+- Others ->
+- New_dict = dict:erase(Id, Dict),
+- {reply, From, {error, timeout}, S#eldap{dict = New_dict}}
+- end;
+- error ->
+- {error, timed_out_cmd_not_in_dict}
+- end.
+-
+-%%-----------------------------------------------------------------------
+-%% Common stuff for results
+-%%-----------------------------------------------------------------------
+-%%%
+-%%% Polish the returned search result
+-%%%
+-
+-polish(Entries) ->
+- polish(Entries, [], []).
+-
+-polish([H|T], Res, Ref) when record(H, 'SearchResultEntry') ->
+- ObjectName = H#'SearchResultEntry'.objectName,
+- F = fun({_,A,V}) -> {A,V} end,
+- Attrs = lists:map(F, H#'SearchResultEntry'.attributes),
+- polish(T, [#eldap_entry{object_name = ObjectName,
+- attributes = Attrs}|Res], Ref);
+-polish([H|T], Res, Ref) -> % No special treatment of referrals at the moment.
+- polish(T, Res, [H|Ref]);
+-polish([], Res, Ref) ->
+- {Res, Ref}.
+-
+-%%-----------------------------------------------------------------------
+-%% Connect to next server in list and attempt to bind to it.
+-%%-----------------------------------------------------------------------
+-connect_bind(S) ->
+- Host = next_host(S#eldap.host, S#eldap.hosts),
+- TcpOpts = [{packet, asn1}, {active, true}],
+- case gen_tcp:connect(Host, S#eldap.port, TcpOpts) of
+- {ok, Socket} ->
+- case bind_request(Socket, S) of
+- {ok, NewS} ->
+- Timer = erlang:start_timer(?BIND_TIMEOUT, self(),
+- {timeout, bind_timeout}),
+- {ok, wait_bind_response, NewS#eldap{fd = Socket,
+- host = Host,
+- bind_timer = Timer}};
+- {error, Reason} ->
+- gen_tcp:close(Socket),
+- erlang:send_after(?RETRY_TIMEOUT, self(),
+- {timeout, retry_connect}),
+- {ok, connecting, S#eldap{host = Host}}
+- end;
+- {error, Reason} ->
+- erlang:send_after(?RETRY_TIMEOUT, self(),
+- {timeout, retry_connect}),
+- {ok, connecting, S#eldap{host = Host}}
+- end.
+-
+-bind_request(Socket, S) ->
+- Id = bump_id(S),
+- Req = #'BindRequest'{version = S#eldap.version,
+- name = S#eldap.rootdn,
+- authentication = {simple, S#eldap.passwd}},
+- Message = #'LDAPMessage'{messageID = Id,
+- protocolOp = {bindRequest, Req}},
+- log2("Message:~p~n",[Message], S),
+- {ok, Bytes} = asn1rt:encode('ELDAPv3', 'LDAPMessage', Message),
+- ok = gen_tcp:send(Socket, Bytes),
+- {ok, S#eldap{id = Id}}.
+-
+-%% Given last tried Server, find next one to try
+-next_host(null, [H|_]) -> H; % First time, take first
+-next_host(Host, Hosts) -> % Find next in turn
+- next_host(Host, Hosts, Hosts).
+-
+-next_host(Host, [Host], Hosts) -> hd(Hosts); % Wrap back to first
+-next_host(Host, [Host|Tail], Hosts) -> hd(Tail); % Take next
+-next_host(Host, [], Hosts) -> hd(Hosts); % Never connected before? (shouldn't happen)
+-next_host(Host, [H|T], Hosts) -> next_host(Host, T, Hosts).
+-
+-
+-%%% --------------------------------------------------------------------
+-%%% Verify the input data
+-%%% --------------------------------------------------------------------
+-
+-v_filter({'and',L}) -> {'and',L};
+-v_filter({'or', L}) -> {'or',L};
+-v_filter({'not',L}) -> {'not',L};
+-v_filter({equalityMatch,AV}) -> {equalityMatch,AV};
+-v_filter({greaterOrEqual,AV}) -> {greaterOrEqual,AV};
+-v_filter({lessOrEqual,AV}) -> {lessOrEqual,AV};
+-v_filter({approxMatch,AV}) -> {approxMatch,AV};
+-v_filter({present,A}) -> {present,A};
+-v_filter({substrings,S}) when record(S,'SubstringFilter') -> {substrings,S};
+-v_filter(_Filter) -> throw({error,concat(["unknown filter: ",_Filter])}).
+-
+-v_modifications(Mods) ->
+- F = fun({_,Op,_}) ->
+- case lists:member(Op,[add,delete,replace]) of
+- true -> true;
+- _ -> throw({error,{mod_operation,Op}})
+- end
+- end,
+- lists:foreach(F, Mods).
+-
+-v_substr([{Key,Str}|T]) when list(Str),Key==initial;Key==any;Key==final ->
+- [{Key,Str}|v_substr(T)];
+-v_substr([H|T]) ->
+- throw({error,{substring_arg,H}});
+-v_substr([]) ->
+- [].
+-v_scope(baseObject) -> baseObject;
+-v_scope(singleLevel) -> singleLevel;
+-v_scope(wholeSubtree) -> wholeSubtree;
+-v_scope(_Scope) -> throw({error,concat(["unknown scope: ",_Scope])}).
+-
+-v_bool(true) -> true;
+-v_bool(false) -> false;
+-v_bool(_Bool) -> throw({error,concat(["not Boolean: ",_Bool])}).
+-
+-v_timeout(I) when integer(I), I>=0 -> I;
+-v_timeout(_I) -> throw({error,concat(["timeout not positive integer: ",_I])}).
+-
+-v_attributes(Attrs) ->
+- F = fun(A) when list(A) -> A;
+- (A) -> throw({error,concat(["attribute not String: ",A])})
+- end,
+- lists:map(F,Attrs).
+-
+-
+-%%% --------------------------------------------------------------------
+-%%% Get and Validate the initial configuration
+-%%% --------------------------------------------------------------------
+-get_config() ->
+- Priv_dir = code:priv_dir(eldap),
+- File = filename:join(Priv_dir, "eldap.conf"),
+- case file:consult(File) of
+- {ok, Entries} ->
+- case catch parse(Entries) of
+- {ok, Hosts, Port, Rootdn, Passwd, Log} ->
+- {ok, Hosts, Port, Rootdn, Passwd, Log};
+- {error, Reason} ->
+- {error, Reason};
+- {'EXIT', Reason} ->
+- {error, Reason}
+- end;
+- {error, Reason} ->
+- {error, Reason}
+- end.
+-
+-parse(Entries) ->
+- {ok,
+- get_hosts(host, Entries),
+- get_integer(port, Entries),
+- get_list(rootdn, Entries),
+- get_list(passwd, Entries),
+- get_log(log, Entries)}.
+-
+-get_integer(Key, List) ->
+- case lists:keysearch(Key, 1, List) of
+- {value, {Key, Value}} when integer(Value) ->
+- Value;
+- {value, {Key, Value}} ->
+- throw({error, "Bad Value in Config for " ++ atom_to_list(Key)});
+- false ->
+- throw({error, "No Entry in Config for " ++ atom_to_list(Key)})
+- end.
+-
+-get_list(Key, List) ->
+- case lists:keysearch(Key, 1, List) of
+- {value, {Key, Value}} when list(Value) ->
+- Value;
+- {value, {Key, Value}} ->
+- throw({error, "Bad Value in Config for " ++ atom_to_list(Key)});
+- false ->
+- throw({error, "No Entry in Config for " ++ atom_to_list(Key)})
+- end.
+-
+-get_log(Key, List) ->
+- case lists:keysearch(Key, 1, List) of
+- {value, {Key, Value}} when function(Value) ->
+- Value;
+- {value, {Key, Else}} ->
+- false;
+- false ->
+- fun(Level, Format, Args) -> io:format("--- " ++ Format, Args) end
+- end.
+-
+-get_hosts(Key, List) ->
+- lists:map(fun({Key1, {A,B,C,D}}) when integer(A),
+- integer(B),
+- integer(C),
+- integer(D),
+- Key == Key1->
+- {A,B,C,D};
+- ({Key1, Value}) when list(Value),
+- Key == Key1->
+- Value;
+- ({Else, Value}) ->
+- throw({error, "Bad Hostname in config"})
+- end, List).
+-
+-%%% --------------------------------------------------------------------
+-%%% Other Stuff
+-%%% --------------------------------------------------------------------
+-bump_id(#eldap{id = Id}) when Id > ?MAX_TRANSACTION_ID ->
+- ?MIN_TRANSACTION_ID;
+-bump_id(#eldap{id = Id}) ->
+- Id + 1.
+-
+-%%% --------------------------------------------------------------------
+-%%% Log routines. Call a user provided log routine Fun.
+-%%% --------------------------------------------------------------------
+-
+-log1(Str, Args, #eldap{log = Fun, debug_level = N}) -> log(Fun, Str, Args, 1, N).
+-log2(Str, Args, #eldap{log = Fun, debug_level = N}) -> log(Fun, Str, Args, 2, N).
+-
+-log(Fun, Str, Args, This_level, Status) when function(Fun), This_level =< Status ->
+- catch Fun(This_level, Str, Args);
+-log(_, _, _, _, _) ->
+- ok.
--- /dev/null
+diff --git a/doc/draft-ietf-asid-ldap-c-api-00.txt b/doc/draft-ietf-asid-ldap-c-api-00.txt
+deleted file mode 100755
+index 5f2e856..0000000
+--- a/doc/draft-ietf-asid-ldap-c-api-00.txt
++++ /dev/null
+@@ -1,3030 +0,0 @@
+-
+-
+-
+-
+-
+-
+-Network Working Group T. Howes
+-INTERNET-DRAFT Netscape Communications Corp.
+-Intended Category: Standards Track M. Smith
+-Obsoletes: RFC 1823 Netscape Communications Corp.
+-Expires: January 1998 A. Herron
+- Microsoft Corp.
+- C. Weider
+- Microsoft Corp.
+- M. Wahl
+- Critical Angle, Inc.
+-
+- 29 July 1997
+-
+-
+- The C LDAP Application Program Interface
+- <draft-ietf-asid-ldap-c-api-00.txt>
+-
+-
+-
+-1. Status of this Memo
+-
+-This draft document will be submitted to the RFC Editor as a Standards
+-Track document. Distribution of this memo is unlimited. Please send com-
+-ments to the authors.
+-
+-This document is an Internet-Draft. Internet-Drafts are working docu-
+-ments of the Internet Engineering Task Force (IETF), its areas, and its
+-working groups. Note that other groups may also distribute working
+-documents as Internet-Drafts.
+-
+-Internet-Drafts are draft documents valid for a maximum of six months
+-and may be updated, replaced, or obsoleted by other documents at any
+-time. It is inappropriate to use Internet-Drafts as reference material
+-or to cite them other than as ``work in progress.''
+-
+-To learn the current status of any Internet-Draft, please check the
+-``1id-abstracts.txt'' listing contained in the Internet-Drafts Shadow
+-Directories on ds.internic.net (US East Coast), nic.nordu.net (Europe),
+-ftp.isi.edu (US West Coast), or munnari.oz.au (Pacific Rim).
+-
+-2. Introduction
+-
+-This document defines a C language application program interface to the
+-lightweight directory access protocol (LDAP). This document replaces the
+-previous definition of this API, defined in RFC 1823, updating it to
+-include support for features found in version 3 of the LDAP protocol.
+-New extended operation functions were added to support LDAPv3 features
+-such as controls. In addition, other LDAP API changes were made to
+-
+-
+-
+-Expires: January 1998 [Page 1]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-support information hiding and thread safety.
+-
+-The C LDAP API is designed to be powerful, yet simple to use. It defines
+-compatible synchronous and asynchronous interfaces to LDAP to suit a
+-wide variety of applications. This document gives a brief overview of
+-the LDAP model, then an overview of how the API is used by an applica-
+-tion program to obtain LDAP information. The API calls are described in
+-detail, followed by an appendix that provides some example code demon-
+-strating the use of the API. This document provides information to the
+-Internet community. It does not specify any standard.
+-
+-3. Overview of the LDAP Model
+-
+-LDAP is the lightweight directory access protocol, described in [2] and
+-[6]. It can provide a lightweight frontend to the X.500 directory [1],
+-or a stand-alone service. In either mode, LDAP is based on a client-
+-server model in which a client makes a TCP connection to an LDAP server,
+-over which it sends requests and receives responses.
+-
+-The LDAP information model is based on the entry, which contains infor-
+-mation about some object (e.g., a person). Entries are composed of
+-attributes, which have a type and one or more values. Each attribute has
+-a syntax that determines what kinds of values are allowed in the attri-
+-bute (e.g., ASCII characters, a jpeg photograph, etc.) and how those
+-values behave during directory operations (e.g., is case significant
+-during comparisons).
+-
+-Entries may be organized in a tree structure, usually based on politi-
+-cal, geographical, and organizational boundaries. Each entry is uniquely
+-named relative to its sibling entries by its relative distinguished name
+-(RDN) consisting of one or more distinguished attribute values from the
+-entry. At most one value from each attribute may be used in the RDN.
+-For example, the entry for the person Babs Jensen might be named with
+-the "Barbara Jensen" value from the commonName attribute.
+-
+-A globally unique name for an entry, called a distinguished name or DN,
+-is constructed by concatenating the sequence of RDNs from the entry up
+-to the root of the tree. For example, if Babs worked for the University
+-of Michigan, the DN of her U-M entry might be "cn=Barbara Jensen,
+-o=University of Michigan, c=US". The DN format used by LDAP is defined
+-in [4].
+-
+-Operations are provided to authenticate, search for and retrieve infor-
+-mation, modify information, and add and delete entries from the tree.
+-The next sections give an overview of how the API is used and detailed
+-descriptions of the LDAP API calls that implement all of these func-
+-tions.
+-
+-
+-
+-
+-Expires: January 1998 [Page 2]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-4. Overview of LDAP API Use
+-
+-An application generally uses the C LDAP API in four simple steps.
+-
+-- Initialize an LDAP session with a default LDAP server. The
+- ldap_init() function returns a handle to the session, allowing mul-
+- tiple connections to be open at once.
+-
+-- Authenticate to the LDAP server. The ldap_bind() function and
+- friends support a variety of authentication methods.
+-
+-- Perform some LDAP operations and obtain some results. ldap_search()
+- and friends return results which can be parsed by
+- ldap_result2error(), ldap_first_entry(), ldap_next_entry(), etc.
+-
+-- Close the session. The ldap_unbind() function closes the connec-
+- tion.
+-
+-Operations can be performed either synchronously or asynchronously. The
+-names of the synchronous functions end in _s. For example, a synchronous
+-search can be completed by calling ldap_search_s(). An asynchronous
+-search can be initiated by calling ldap_search(). All synchronous rou-
+-tines return an indication of the outcome of the operation (e.g., the
+-constant LDAP_SUCCESS or some other error code). The asynchronous rou-
+-tines return the message id of the operation initiated. This id can be
+-used in subsequent calls to ldap_result() to obtain the result(s) of the
+-operation. An asynchronous operation can be abandoned by calling
+-ldap_abandon().
+-
+-Results and errors are returned in an opaque structure called LDAPMes-
+-sage. Routines are provided to parse this structure, step through
+-entries and attributes returned, etc. Routines are also provided to
+-interpret errors. Later sections of this document describe these rou-
+-tines in more detail.
+-
+-LDAP version 3 servers may return referrals to other servers. By
+-default, implementations of this API will attempt to follow referrals
+-automatically for the application. This behavior can be disabled glo-
+-bally (using the ldap_set_option() call) or on a per-request basis
+-through the use of a client control.
+-
+-As in the LDAPv3 protocol itself, all DNs and string values that are
+-passed into or produced by the C LDAP API are represented as UTF-8[10]
+-characters.
+-
+-For compatibility with existing applications, implementations of this
+-API will by default use version 2 of the LDAP protocol. Applications
+-that intend to take advantage of LDAP version 3 features will need to
+-
+-
+-
+-Expires: January 1998 [Page 3]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-use the ldap_set_option() call with a LDAP_OPT_PROTOCOL_VERSION to
+-switch to version 3.
+-
+-
+-5. Common Data Structures
+-
+-Some data structures that are common to several LDAP API functions are
+-defined here:
+-
+- typedef struct ldap LDAP;
+-
+- typedef struct ldapmsg LDAPMessage;
+-
+- struct berval {
+- unsigned long bv_len;
+- char *bv_val;
+- };
+-
+- struct timeval {
+- long tv_sec;
+- long tv_usec;
+- };
+-
+-The LDAP structure is an opaque data type that represents an LDAP ses-
+-sion. Typically this corresponds to a connection to a single server, but
+-it may encompass several server connections in the face of LDAPv3 refer-
+-rals.
+-
+-The LDAPMessage structure is an opaque data type that is used to return
+-results and error information.
+-
+-The berval structure is used to represent arbitrary binary data and its
+-fields have the following meanings:
+-
+-bv_len Length of data in bytes.
+-
+-bv_val A pointer to the data itself.
+-
+-
+-The timeval structure is used to represent an interval of time and its
+-fields have the following meanings:
+-
+-tv_sec Seconds component of time interval.
+-
+-tv_usec Microseconds component of time interval.
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 4]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-6. LDAP Error Codes
+-
+-Many of the LDAP API routines return LDAP error codes, some of which
+-indicate local errors and some of which may be returned by servers.
+-Supported error codes are (hexadecimal values are given in parentheses
+-after the constant):
+-
+- LDAP_SUCCESS (0x00)
+-        LDAP_OPERATIONS_ERROR (0x01)
+- LDAP_PROTOCOL_ERROR (0x02)
+- LDAP_TIMELIMIT_EXCEEDED (0x03)
+- LDAP_SIZELIMIT_EXCEEDED (0x04)
+- LDAP_COMPARE_FALSE (0x05)
+- LDAP_COMPARE_TRUE (0x06)
+- LDAP_STRONG_AUTH_NOT_SUPPORTED (0x07)
+- LDAP_STRONG_AUTH_REQUIRED (0x08)
+- LDAP_REFERRAL (0x0a) -- new in LDAPv3
+- LDAP_ADMINLIMIT_EXCEEDED (0x0b) -- new in LDAPv3
+- LDAP_UNAVAILABLE_CRITICAL_EXTENSION (0x0c) -- new in LDAPv3
+- LDAP_CONFIDENTIALITY_REQUIRED (0x0d) -- new in LDAPv3
+- LDAP_NO_SUCH_ATTRIBUTE (0x10)
+- LDAP_UNDEFINED_TYPE (0x11)
+- LDAP_INAPPROPRIATE_MATCHING (0x12)
+- LDAP_CONSTRAINT_VIOLATION (0x13)
+- LDAP_TYPE_OR_VALUE_EXISTS (0x14)
+- LDAP_INVALID_SYNTAX (0x15)
+- LDAP_NO_SUCH_OBJECT (0x20)
+- LDAP_ALIAS_PROBLEM (0x21)
+- LDAP_INVALID_DN_SYNTAX (0x22)
+- LDAP_IS_LEAF (0x23) -- not used in LDAPv3
+- LDAP_ALIAS_DEREF_PROBLEM (0x24)
+- LDAP_INAPPROPRIATE_AUTH (0x30)
+- LDAP_INVALID_CREDENTIALS (0x31)
+- LDAP_INSUFFICIENT_ACCESS (0x32)
+- LDAP_BUSY (0x33)
+- LDAP_UNAVAILABLE (0x34)
+- LDAP_UNWILLING_TO_PERFORM (0x35)
+- LDAP_LOOP_DETECT (0x36)
+- LDAP_NAMING_VIOLATION (0x40)
+- LDAP_OBJECT_CLASS_VIOLATION (0x41)
+- LDAP_NOT_ALLOWED_ON_NONLEAF (0x42)
+- LDAP_NOT_ALLOWED_ON_RDN (0x43)
+- LDAP_ALREADY_EXISTS (0x44)
+- LDAP_NO_OBJECT_CLASS_MODS (0x45)
+- LDAP_RESULTS_TOO_LARGE (0x46)
+- LDAP_AFFECTS_MULTIPLE_DSAS (0x47) -- new in LDAPv3
+- LDAP_OTHER (0x50)
+- LDAP_SERVER_DOWN (0x51)
+-
+-
+-
+-Expires: January 1998 [Page 5]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- LDAP_LOCAL_ERROR (0x52)
+- LDAP_ENCODING_ERROR (0x53)
+- LDAP_DECODING_ERROR (0x54)
+- LDAP_TIMEOUT (0x55)
+- LDAP_AUTH_UNKNOWN (0x56)
+- LDAP_FILTER_ERROR (0x57)
+- LDAP_USER_CANCELLED (0x58)
+- LDAP_PARAM_ERROR (0x59)
+- LDAP_NO_MEMORY (0x5a)
+- LDAP_CONNECT_ERROR (0x5b)
+- LDAP_NOT_SUPPORTED (0x5c)
+- LDAP_CONTROL_NOT_FOUND (0x5d)
+- LDAP_NO_RESULTS_RETURNED (0x5e)
+- LDAP_MORE_RESULTS_TO_RETURN (0x5f)
+- LDAP_CLIENT_LOOP (0x60)
+- LDAP_REFERRAL_LIMIT_EXCEEDED (0x61)
+-
+-
+-7. Performing LDAP Operations
+-
+-This section describes each LDAP operation API call in detail. All func-
+-tions take a "session handle," a pointer to an LDAP structure containing
+-per-connection information. Many routines return results in an LDAPMes-
+-sage structure. These structures and others are described as needed
+-below.
+-
+-
+-7.1. Initializing an LDAP Session
+-
+-ldap_init() initializes a session with an LDAP server. The server is not
+-actually contacted until an operation is performed that requires it,
+-allowing various options to be set after initialization.
+-
+- LDAP *ldap_init(
+- char *hostname,
+- int portno
+- );
+-
+-Use of the following routine is deprecated.
+-
+- LDAP *ldap_open(
+- char *hostname,
+- int portno
+- );
+-
+-Parameters are:
+-
+-hostname Contains a space-separated list of hostnames or dotted strings
+-
+-
+-
+-Expires: January 1998 [Page 6]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- representing the IP address of hosts running an LDAP server to
+- connect to. Each hostname in the list can include an optional
+- port number which is separated from the host itself with a
+- colon (:) character. The hosts are tried in the order listed,
+- stopping with the first one to which a successful connection is
+- made. Note that only ldap_open() attempts to make the connec-
+- tion before returning to the caller. ldap_init() does not con-
+- nect to the LDAP server.
+-
+-portno Contains the TCP port number to connect to. The default LDAP
+- port of 389 can be obtained by supplying the constant
+- LDAP_PORT. If a host includes a port number then this parame-
+- ter is ignored.
+-
+-ldap_init() and ldap_open() both return a "session handle," a pointer to
+-an opaque structure that should be passed to subsequent calls pertaining
+-to the session. These routines return NULL if the session cannot be ini-
+-tialized in which case the operating system error reporting mechanism
+-can be checked to see why the call failed.
+-
+-Note that if you connect to an LDAPv2 server, one of the ldap_bind()
+-calls described below must be completed before other operations can be
+-performed on the session. LDAPv3 does not require that a bind operation
+-be completed before other operations can be performed.
+-
+-The calling program can set various attributes of the session by calling
+-the routines described in the next section.
+-
+-
+-7.2. LDAP Session Handle Options
+-
+-The LDAP session handle returned by ldap_init() is a pointer to an
+-opaque data type representing an LDAP session. Formerly, this data type
+-was a structure exposed to the caller, and various fields in the struc-
+-ture could be set to control aspects of the session, such as size and
+-time limits on searches.
+-
+-In the interest of insulating callers from inevitable changes to this
+-structure, these aspects of the session are now accessed through a pair
+-of accessor functions, described below.
+-
+-ldap_get_option() is used to access the current value of various
+-session-wide parameters. ldap_set_option() is used to set the value of
+-these parameters.
+-
+- int ldap_get_option(
+- LDAP *ld,
+- int option,
+-
+-
+-
+-Expires: January 1998 [Page 7]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- void *outvalue
+- );
+-
+- int ldap_set_option(
+- LDAP *ld,
+- int option,
+- void *invalue
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-option The name of the option being accessed or set. This parameter
+- should be one of the following constants, which have the indi-
+- cated meanings. After the constant the actual value of the con-
+- stant is listed in hexadecimal in parentheses followed by the
+- type of the corresponding outvalue or invalue parameter.
+-
+- LDAP_OPT_DESC (0x01) int *
+- The underlying socket descriptor corresponding to the default
+- LDAP connection.
+-
+- LDAP_OPT_DEREF (0x02) int *
+- Controls how aliases are handled during search. It can have
+- one of the following values: LDAP_DEREF_NEVER (0x00),
+- LDAP_DEREF_SEARCHING (0x01), LDAP_DEREF_FINDING (0x02), or
+- LDAP_DEREF_ALWAYS (0x03). The LDAP_DEREF_SEARCHING value
+- means aliases should be dereferenced during the search but not
+- when locating the base object of the search. The
+- LDAP_DEREF_FINDING value means aliases should be dereferenced
+- when locating the base object but not during the search.
+-
+- LDAP_OPT_SIZELIMIT (0x03) int *
+- A limit on the number of entries to return from a search. A
+- value of zero means no limit.
+-
+- LDAP_OPT_TIMELIMIT (0x04) int *
+- A limit on the number of seconds to spend on a search. A value
+-          of zero means no limit.
+-
+- LDAP_OPT_REBIND_FN (0x06) function pointer
+- See the discussion of ldap_bind() and friends below.
+-
+- LDAP_OPT_REBIND_ARG (0x07) void *
+- See the discussion of ldap_bind() and friends below.
+-
+- LDAP_OPT_REFERRALS (0x08) void *
+-
+-
+-
+-Expires: January 1998 [Page 8]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- This option controls whether the LDAP library automatically
+- follows referrals returned by LDAP servers or not. It can be
+- set to one of the constants LDAP_OPT_ON or LDAP_OPT_OFF.
+-
+- LDAP_OPT_RESTART (0x09) void *
+- This option controls whether LDAP I/O operations should
+- automatically be restarted if they abort prematurely. It
+- should be set to one of the constants LDAP_OPT_ON or
+- LDAP_OPT_OFF. This option is useful if an LDAP I/O operation
+- may be interrupted prematurely, for example by a timer going
+-          off, or other interrupt.
+-
+- LDAP_OPT_PROTOCOL_VERSION (0x11) int *
+- This option indicates the version of the default LDAP server.
+- It can be one of the constants LDAP_VERSION2 or LDAP_VERSION3.
+- If no version is set the default is LDAP_VERSION2.
+-
+- LDAP_OPT_SERVER_CONTROLS (0x12) LDAPControl **
+- A default list of LDAP server controls to be sent with each
+- request. See the Using Controls section below.
+-
+- LDAP_OPT_CLIENT_CONTROLS (0x13) LDAPControl **
+- A default list of client controls that affect the LDAP ses-
+- sion. See the Using Controls section below.
+-
+- LDAP_OPT_HOST_NAME (0x30) char **
+- The host name of the default LDAP server.
+-
+- LDAP_OPT_ERROR_NUMBER (0x31) int *
+- The code of the most recent LDAP error that occurred for this
+- session.
+-
+- LDAP_OPT_ERROR_STRING (0x32) char **
+- The message returned with the most recent LDAP error that
+- occurred for this session.
+-
+-
+-outvalue The address of a place to put the value of the option. The
+- actual type of this parameter depends on the setting of the
+- option parameter.
+-
+-invalue A pointer to the value the option is to be given. The actual
+- type of this parameter depends on the setting of the option
+- parameter. The constants LDAP_OPT_ON and LDAP_OPT_OFF can be
+- given for options that have on or off settings.
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 9]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-7.3. Working with controls
+-
+-LDAPv3 operations can be extended through the use of controls. Controls
+-may be sent to a server or returned to the client with any LDAP message.
+-These controls are referred to as server controls.
+-
+-The LDAP API also supports a client-side extension mechanism through the
+-use of client controls. These controls affect the behavior of the LDAP
+-API only and are never sent to a server. A common data structure is
+-used to represent both types of controls:
+-
+- typedef struct ldapcontrol {
+- char *ldctl_oid;
+- struct berval ldctl_value;
+- char ldctl_iscritical;
+- } LDAPControl, *PLDAPControl;
+-
+-The fields in the ldapcontrol structure have the following meanings:
+-
+-ldctl_oid The control type, represented as a string.
+-
+-ldctl_value The data associated with the control (if any).
+-
+-ldctl_iscritical  Indicates whether the control is critical or not. If
+- this field is non-zero, the operation will only be car-
+- ried out if the control is recognized by the server
+- and/or client.
+-
+-Some LDAP API calls allocate an ldapcontrol structure or a NULL-
+-terminated array of ldapcontrol structures. The following routines can
+-be used to dispose of a single control or an array of controls:
+-
+- void ldap_control_free( LDAPControl *ctrl );
+- void ldap_controls_free( LDAPControl **ctrls );
+-
+-A set of controls that affect the entire session can be set using the
+-ldap_set_option() function (see above). A list of controls can also be
+-passed directly to some LDAP API calls such as ldap_search_ext(), in
+-which case any controls set for the session through the use of
+-ldap_set_option() are ignored. Control lists are represented as a NULL-
+-terminated array of pointers to ldapcontrol structures.
+-
+-Server controls are defined by LDAPv3 protocol extension documents; for
+-example, a control has been proposed to support server-side sorting of
+-search results [7].
+-
+-No client controls are defined by this document but they may be defined
+-in future revisions or in any document that extends this API.
+-
+-
+-
+-Expires: January 1998 [Page 10]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-7.4. Authenticating to the directory
+-
+-The following functions are used to authenticate an LDAP client to an
+-LDAP directory server.
+-
+-The ldap_sasl_bind() and ldap_sasl_bind_s() functions can be used to do
+-general and extensible authentication over LDAP through the use of the
+-Simple Authentication Security Layer [8]. The routines both take the dn
+-to bind as, the method to use, as a dotted-string representation of an
+-OID identifying the method, and a struct berval holding the credentials.
+-The special constant value LDAP_SASL_SIMPLE ("") can be passed to
+-request simple authentication, or the simplified routines
+-ldap_simple_bind() or ldap_simple_bind_s() can be used.
+-
+- int ldap_sasl_bind(
+- LDAP *ld,
+- char *dn,
+- char *mechanism,
+- struct berval *cred,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+- );
+-
+- int ldap_sasl_bind_s(
+- LDAP *ld,
+- char *dn,
+- char *mechanism,
+- struct berval *cred,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- struct berval **servercredp
+- );
+-
+- int ldap_simple_bind(
+- LDAP *ld,
+- char *dn,
+- char *passwd
+- );
+-
+- int ldap_simple_bind_s(
+- LDAP *ld,
+- char *dn,
+- char *passwd
+- );
+-
+- The use of the following routines is deprecated:
+-
+-
+-
+-
+-Expires: January 1998 [Page 11]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- int ldap_bind( LDAP *ld, char *dn, char *cred, int method );
+-
+- int ldap_bind_s( LDAP *ld, char *dn, char *cred, int method );
+-
+- int ldap_kerberos_bind( LDAP *ld, char *dn );
+-
+- int ldap_kerberos_bind_s( LDAP *ld, char *dn );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-dn The name of the entry to bind as.
+-
+-mechanism Either LDAP_AUTH_SIMPLE_OID to get simple authentication,
+- or a dotted text string representing an OID identifying the
+- SASL method.
+-
+-cred The credentials with which to authenticate. Arbitrary
+- credentials can be passed using this parameter. The format
+- and content of the credentials depends on the setting of
+- the mechanism parameter.
+-
+-passwd For ldap_simple_bind(), the password to compare to the
+- entry's userPassword attribute.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_sasl_bind() call succeeds.
+-
+-servercredp  This result parameter will be set to the credentials
+-             returned by the server. This should be freed by calling
+-             ber_bvfree(). If no credentials are returned it will be
+-             set to NULL.
+-
+-Additional parameters for the deprecated routines are not described.
+-Interested readers are referred to RFC 1823.
+-
+-The ldap_sasl_bind() function initiates an asynchronous bind operation
+-and returns the constant LDAP_SUCCESS if the request was successfully
+-sent, or another LDAP error code if not. See the section below on error
+-handling for more information about possible errors and how to interpret
+-them. If successful, ldap_sasl_bind() places the message id of the
+-request in *msgidp. A subsequent call to ldap_result(), described below,
+-can be used to obtain the result of the bind.
+-
+-
+-
+-
+-Expires: January 1998 [Page 12]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-The ldap_simple_bind() function initiates a simple asynchronous bind
+-operation and returns the message id of the operation initiated. A sub-
+-sequent call to ldap_result(), described below, can be used to obtain
+-the result of the bind. In case of error, ldap_simple_bind() will return
+--1, setting the session error parameters in the LDAP structure appropri-
+-ately.
+-
+-The synchronous ldap_sasl_bind_s() and ldap_simple_bind_s() functions
+-both return the result of the operation, either the constant
+-LDAP_SUCCESS if the operation was successful, or another LDAP error code
+-if it was not. See the section below on error handling for more informa-
+-tion about possible errors and how to interpret them.
+-
+-Note that if an LDAPv2 server is contacted, no other operations over the
+-connection should be attempted before a bind call has successfully com-
+-pleted.
+-
+-Subsequent bind calls can be used to re-authenticate over the same con-
+-nection, and multistep SASL sequences can be accomplished through a
+-sequence of calls to ldap_sasl_bind() or ldap_sasl_bind_s().
+-
+-
+-7.5. Closing the session
+-
+-The following functions are used to unbind from the directory, close the
+-connection, and dispose of the session handle.
+-
+- int ldap_unbind( LDAP *ld );
+-
+- int ldap_unbind_s( LDAP *ld );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-ldap_unbind() and ldap_unbind_s() both work synchronously, unbinding
+-from the directory, closing the connection, and freeing up the ld struc-
+-ture before returning. There is no server response to an unbind opera-
+-tion. ldap_unbind() returns LDAP_SUCCESS (or another LDAP error code if
+-the request cannot be sent to the LDAP server). After a call to
+-ldap_unbind() or ldap_unbind_s(), the session handle ld is invalid.
+-
+-
+-7.6. Searching
+-
+-The following functions are used to search the LDAP directory, returning
+-a requested set of attributes for each entry matched. There are five
+-variations.
+-
+-
+-
+-Expires: January 1998 [Page 13]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- int ldap_search_ext(
+- LDAP *ld,
+- char *base,
+- int scope,
+- char *filter,
+- char **attrs,
+- int attrsonly,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- struct timeval *timeoutp,
+- int sizelimit,
+- int *msgidp
+- );
+-
+- int ldap_search_ext_s(
+- LDAP *ld,
+- char *base,
+- int scope,
+- char *filter,
+- char **attrs,
+- int attrsonly,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- struct timeval *timeoutp,
+- int sizelimit,
+- LDAPMessage **res
+- );
+-
+- int ldap_search(
+- LDAP *ld,
+- char *base,
+- int scope,
+- char *filter,
+- char **attrs,
+- int attrsonly
+- );
+-
+- int ldap_search_s(
+- LDAP *ld,
+- char *base,
+- int scope,
+- char *filter,
+- char **attrs,
+- int attrsonly,
+- LDAPMessage **res
+- );
+-
+- int ldap_search_st(
+-
+-
+-
+-Expires: January 1998 [Page 14]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- LDAP *ld,
+- char *base,
+- int scope,
+- char *filter,
+- char **attrs,
+- int attrsonly,
+- struct timeval *timeout,
+- LDAPMessage **res
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-base The dn of the entry at which to start the search.
+-
+-scope One of LDAP_SCOPE_BASE (0x00), LDAP_SCOPE_ONELEVEL (0x01),
+- or LDAP_SCOPE_SUBTREE (0x02), indicating the scope of the
+- search.
+-
+-filter A character string as described in [3], representing the
+- search filter.
+-
+-attrs A NULL-terminated array of strings indicating which attri-
+- butes to return for each matching entry. Passing NULL for
+- this parameter causes all available attributes to be
+- retrieved.
+-
+-attrsonly A boolean value that should be zero if both attribute types
+- and values are to be returned, non-zero if only types are
+- wanted.
+-
+-timeout For the ldap_search_st() function, this specifies the local
+- search timeout value. For the ldap_search_ext() and
+- ldap_search_ext_s() functions, this specifies both the
+- local search timeout value and the operation time limit
+- that is sent to the server within the search request.
+-
+-res For the synchronous calls, this is a result parameter which
+- will contain the results of the search upon completion of
+- the call.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_search_ext() call succeeds.
+-
+-
+-
+-Expires: January 1998 [Page 15]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-There are three options in the session handle ld which potentially
+-affect how the search is performed. They are:
+-
+-LDAP_OPT_SIZELIMIT
+- A limit on the number of entries to return from the search.
+- A value of zero means no limit. Note that the value from
+- the session handle is ignored when using the
+- ldap_search_ext() or ldap_search_ext_s() functions.
+-
+-LDAP_OPT_TIMELIMIT
+- A limit on the number of seconds to spend on the search. A
+- value of zero means no limit. Note that the value from the
+- session handle is ignored when using the ldap_search_ext()
+- or ldap_search_ext_s() functions.
+-
+-LDAP_OPT_DEREF
+- One of LDAP_DEREF_NEVER (0x00), LDAP_DEREF_SEARCHING
+- (0x01), LDAP_DEREF_FINDING (0x02), or LDAP_DEREF_ALWAYS
+- (0x03), specifying how aliases should be handled during the
+- search. The LDAP_DEREF_SEARCHING value means aliases should
+- be dereferenced during the search but not when locating the
+- base object of the search. The LDAP_DEREF_FINDING value
+- means aliases should be dereferenced when locating the base
+- object but not during the search.
+-
+-The ldap_search_ext() function initiates an asynchronous search opera-
+-tion and returns the constant LDAP_SUCCESS if the request was success-
+-fully sent, or another LDAP error code if not. See the section below on
+-error handling for more information about possible errors and how to
+-interpret them. If successful, ldap_search_ext() places the message id
+-of the request in *msgidp. A subsequent call to ldap_result(), described
+-below, can be used to obtain the results from the search. These results
+-can be parsed using the result parsing routines described in detail
+-later.
+-
+-Similar to ldap_search_ext(), the ldap_search() function initiates an
+-asynchronous search operation and returns the message id of the opera-
+-tion initiated. As for ldap_search_ext(), a subsequent call to
+-ldap_result(), described below, can be used to obtain the result of the
+-search. In case of error, ldap_search() will return -1, setting the ses-
+-sion error parameters in the LDAP structure appropriately.
+-
+-The synchronous ldap_search_ext_s(), ldap_search_s(), and
+-ldap_search_st() functions all return the result of the operation,
+-either the constant LDAP_SUCCESS if the operation was successful, or
+-another LDAP error code if it was not. See the section below on error
+-handling for more information about possible errors and how to interpret
+-them. Entries returned from the search (if any) are contained in the
+-
+-
+-
+-Expires: January 1998 [Page 16]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-res parameter. This parameter is opaque to the caller. Entries, attri-
+-butes, values, etc., should be extracted by calling the parsing routines
+-described below. The results contained in res should be freed when no
+-longer in use by calling ldap_msgfree(), described later.
+-
+-The ldap_search_ext() and ldap_search_ext_s() functions support LDAPv3
+-server controls, client controls, and allow varying size and time limits
+-to be easily specified for each search operation. The ldap_search_st()
+-function is identical to ldap_search_s() except that it takes an addi-
+-tional parameter specifying a local timeout for the search.
+-
+-7.7. Reading an Entry
+-
+-LDAP does not support a read operation directly. Instead, this operation
+-is emulated by a search with base set to the DN of the entry to read,
+-scope set to LDAP_SCOPE_BASE, and filter set to "(objectclass=*)". attrs
+-contains the list of attributes to return.
+-
+-
+-7.8. Listing the Children of an Entry
+-
+-LDAP does not support a list operation directly. Instead, this operation
+-is emulated by a search with base set to the DN of the entry to list,
+-scope set to LDAP_SCOPE_ONELEVEL, and filter set to "(objectclass=*)".
+-attrs contains the list of attributes to return for each child entry.
+-
+-7.9. Comparing a Value Against an Entry
+-
+-The following routines are used to compare a given attribute value
+-assertion against an LDAP entry. There are four variations:
+-
+- int ldap_compare_ext(
+- LDAP *ld,
+- char *dn,
+- char *attr,
+-                struct berval   *bvalue,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+- );
+-
+- int ldap_compare_ext_s(
+- LDAP *ld,
+- char *dn,
+- char *attr,
+- struct berval *bvalue,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls
+-
+-
+-
+-Expires: January 1998 [Page 17]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- );
+-
+- int ldap_compare(
+- LDAP *ld,
+- char *dn,
+- char *attr,
+- char *value
+- );
+-
+- int ldap_compare_s(
+- LDAP *ld,
+- char *dn,
+- char *attr,
+- char *value
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-dn The name of the entry to compare against.
+-
+-attr The attribute to compare against.
+-
+-bvalue The attribute value to compare against those found in the
+- given entry. This parameter is used in the extended rou-
+- tines and is a pointer to a struct berval so it is possible
+- to compare binary values.
+-
+-value A string attribute value to compare against, used by the
+- ldap_compare() and ldap_compare_s() functions. Use
+- ldap_compare_ext() or ldap_compare_ext_s() if you need to
+- compare binary values.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_compare_ext() call succeeds.
+-
+-The ldap_compare_ext() function initiates an asynchronous compare opera-
+-tion and returns the constant LDAP_SUCCESS if the request was success-
+-fully sent, or another LDAP error code if not. See the section below on
+-error handling for more information about possible errors and how to
+-interpret them. If successful, ldap_compare_ext() places the message id
+-of the request in *msgidp. A subsequent call to ldap_result(), described
+-below, can be used to obtain the result of the compare.
+-
+-
+-
+-Expires: January 1998 [Page 18]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-Similar to ldap_compare_ext(), the ldap_compare() function initiates an
+-asynchronous compare operation and returns the message id of the opera-
+-tion initiated. As for ldap_compare_ext(), a subsequent call to
+-ldap_result(), described below, can be used to obtain the result of the
+-compare. In case of error, ldap_compare() will return -1, setting the ses-
+-sion error parameters in the LDAP structure appropriately.
+-
+-The synchronous ldap_compare_ext_s() and ldap_compare_s() functions both
+-return the result of the operation, either the constant LDAP_SUCCESS if
+-the operation was successful, or another LDAP error code if it was not.
+-See the section below on error handling for more information about pos-
+-sible errors and how to interpret them.
+-
+-The ldap_compare_ext() and ldap_compare_ext_s() functions support LDAPv3
+-server controls and client controls.
+-
+-
+-7.10. Modifying an entry
+-
+-The following routines are used to modify an existing LDAP entry. There
+-are four variations:
+-
+- typedef struct ldapmod {
+- int mod_op;
+- char *mod_type;
+- union {
+- char **modv_strvals;
+- struct berval **modv_bvals;
+- } mod_vals;
+- } LDAPMod;
+- #define mod_values mod_vals.modv_strvals
+- #define mod_bvalues mod_vals.modv_bvals
+-
+- int ldap_modify_ext(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **mods,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+- );
+-
+- int ldap_modify_ext_s(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **mods,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls
+-
+-
+-
+-Expires: January 1998 [Page 19]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- );
+-
+- int ldap_modify(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **mods
+- );
+-
+- int ldap_modify_s(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **mods
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-dn The name of the entry to modify.
+-
+-mods A NULL-terminated array of modifications to make to the
+- entry.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_modify_ext() call succeeds.
+-
+-The fields in the LDAPMod structure have the following meanings:
+-
+-mod_op The modification operation to perform. It should be one of
+- LDAP_MOD_ADD (0x00), LDAP_MOD_DELETE (0x01), or
+- LDAP_MOD_REPLACE (0x02). This field also indicates the
+- type of values included in the mod_vals union. It is logi-
+- cally ORed with LDAP_MOD_BVALUES (0x80) to select the
+- mod_bvalues form. Otherwise, the mod_values form is used.
+-
+-mod_type The type of the attribute to modify.
+-
+-mod_vals The values (if any) to add, delete, or replace. Only one of
+- the mod_values or mod_bvalues variants should be used,
+- selected by ORing the mod_op field with the constant
+- LDAP_MOD_BVALUES. mod_values is a NULL-terminated array of
+- zero-terminated strings and mod_bvalues is a NULL-
+- terminated array of berval structures that can be used to
+- pass binary values such as images.
+-
+-
+-
+-Expires: January 1998 [Page 20]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-For LDAP_MOD_ADD modifications, the given values are added to the
+-entry, creating the attribute if necessary.
+-
+-For LDAP_MOD_DELETE modifications, the given values are deleted from the
+-entry, removing the attribute if no values remain. If the entire attri-
+-bute is to be deleted, the mod_vals field should be set to NULL.
+-
+-For LDAP_MOD_REPLACE modifications, the attribute will have the listed
+-values after the modification, having been created if necessary, or
+-removed if the mod_vals field is NULL. All modifications are performed
+-in the order in which they are listed.
+-
+-The ldap_modify_ext() function initiates an asynchronous modify opera-
+-tion and returns the constant LDAP_SUCCESS if the request was success-
+-fully sent, or another LDAP error code if not. See the section below on
+-error handling for more information about possible errors and how to
+-interpret them. If successful, ldap_modify_ext() places the message id
+-of the request in *msgidp. A subsequent call to ldap_result(), described
+-below, can be used to obtain the result of the modify.
+-
+-Similar to ldap_modify_ext(), the ldap_modify() function initiates an
+-asynchronous modify operation and returns the message id of the opera-
+-tion initiated. As for ldap_modify_ext(), a subsequent call to
+-ldap_result(), described below, can be used to obtain the result of the
+-modify. In case of error, ldap_modify() will return -1, setting the ses-
+-sion error parameters in the LDAP structure appropriately.
+-
+-The synchronous ldap_modify_ext_s() and ldap_modify_s() functions both
+-return the result of the operation, either the constant LDAP_SUCCESS if
+-the operation was successful, or another LDAP error code if it was not.
+-See the section below on error handling for more information about pos-
+-sible errors and how to interpret them.
+-
+-The ldap_modify_ext() and ldap_modify_ext_s() functions support LDAPv3
+-server controls and client controls.
+-
+-
+-7.11. Modifying the Name of an Entry
+-
+-In LDAPv2, the ldap_modrdn() and ldap_modrdn_s() routines were used to
+-change the name of an LDAP entry. They could only be used to change the
+-least significant component of a name (the RDN or relative distinguished
+-name). LDAPv3 provides the Modify DN protocol operation that allows more
+-general name change access. The ldap_rename() and ldap_rename_s() rou-
+-tines are used to change the name of an entry, and the use of the
+-ldap_modrdn() and ldap_modrdn_s() routines is deprecated.
+-
+- int ldap_rename(
+-
+-
+-
+-Expires: January 1998 [Page 21]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- LDAP *ld,
+- char *dn,
+- char *newrdn,
+- char *newparent,
+- int deleteoldrdn,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+-
+- );
+- int ldap_rename_s(
+- LDAP *ld,
+- char *dn,
+- char *newrdn,
+- char *newparent,
+- int deleteoldrdn,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls
+- );
+-
+- Use of the following routines is deprecated.
+-
+- int ldap_modrdn(
+- LDAP *ld,
+- char *dn,
+- char *newrdn,
+- int deleteoldrdn
+- );
+- int ldap_modrdn_s(
+- LDAP *ld,
+- char *dn,
+- char *newrdn,
+- int deleteoldrdn
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-dn The name of the entry whose DN is to be changed.
+-
+-newrdn The new RDN to give the entry.
+-
+-newparent The new parent, or superior entry. If this parameter is
+- NULL, only the RDN of the entry is changed. The root DN
+- may be specified by passing a zero length string, "". The
+- newparent parameter should always be NULL when using ver-
+- sion 2 of the LDAP protocol; otherwise the server's
+-
+-
+-
+-Expires: January 1998 [Page 22]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- behavior is undefined.
+-
+-deleteoldrdn This parameter only has meaning on the rename routines if
+- newrdn is different than the old RDN. It is a boolean
+- value, if non-zero indicating that the old RDN value(s)
+- should be removed, if zero indicating that the old RDN
+- value(s) should be retained as non-distinguished values of
+- the entry.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_rename() call succeeds.
+-
+-The ldap_rename() function initiates an asynchronous modify DN operation
+-and returns the constant LDAP_SUCCESS if the request was successfully
+-sent, or another LDAP error code if not. See the section below on error
+-handling for more information about possible errors and how to interpret
+-them. If successful, ldap_rename() places the DN message id of the
+-request in *msgidp. A subsequent call to ldap_result(), described below,
+-can be used to obtain the result of the rename.
+-
+-The synchronous ldap_rename_s() returns the result of the operation,
+-either the constant LDAP_SUCCESS if the operation was successful, or
+-another LDAP error code if it was not. See the section below on error
+-handling for more information about possible errors and how to interpret
+-them.
+-
+-The ldap_rename() and ldap_rename_s() functions both support LDAPv3
+-server controls and client controls.
+-
+-
+-7.12. Adding an entry
+-
+-The following functions are used to add entries to the LDAP directory.
+-There are four variations:
+-
+- int ldap_add_ext(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **attrs,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+- );
+-
+-
+-
+-
+-Expires: January 1998 [Page 23]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- int ldap_add_ext_s(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **attrs,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls
+- );
+-
+- int ldap_add(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **attrs
+- );
+-
+- int ldap_add_s(
+- LDAP *ld,
+- char *dn,
+- LDAPMod **attrs
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-dn The name of the entry to add.
+-
+-attrs The entry's attributes, specified using the LDAPMod struc-
+- ture defined for ldap_modify(). The mod_type and mod_vals
+- fields should be filled in. The mod_op field is ignored
+- unless ORed with the constant LDAP_MOD_BVALUES, used to
+- select the mod_bvalues case of the mod_vals union.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_add_ext() call succeeds.
+-
+-Note that the parent of the entry being added must already exist or the
+-parent must be empty (i.e., equal to the root DN) for an add to succeed.
+-
+-The ldap_add_ext() function initiates an asynchronous add operation and
+-returns the constant LDAP_SUCCESS if the request was successfully sent,
+-or another LDAP error code if not. See the section below on error han-
+-dling for more information about possible errors and how to interpret
+-them. If successful, ldap_add_ext() places the message id of the
+-request in *msgidp. A subsequent call to ldap_result(), described below,
+-
+-
+-
+-Expires: January 1998 [Page 24]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-can be used to obtain the result of the add.
+-
+-Similar to ldap_add_ext(), the ldap_add() function initiates an asyn-
+-chronous add operation and returns the message id of the operation ini-
+-tiated. As for ldap_add_ext(), a subsequent call to ldap_result(),
+-described below, can be used to obtain the result of the add. In case of
+-error, ldap_add() will return -1, setting the session error parameters
+-in the LDAP structure appropriately.
+-
+-The synchronous ldap_add_ext_s() and ldap_add_s() functions both return
+-the result of the operation, either the constant LDAP_SUCCESS if the
+-operation was successful, or another LDAP error code if it was not. See
+-the section below on error handling for more information about possible
+-errors and how to interpret them.
+-
+-The ldap_add_ext() and ldap_add_ext_s() functions support LDAPv3 server
+-controls and client controls.
+-
+-
+-
+-7.13. Deleting an entry
+-
+-The following functions are used to delete a leaf entry from the LDAP
+-directory. There are four variations:
+-
+- int ldap_delete_ext(
+- LDAP *ld,
+- char *dn,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+- );
+-
+- int ldap_delete_ext_s(
+- LDAP *ld,
+- char *dn,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls
+- );
+-
+- int ldap_delete(
+- LDAP *ld,
+- char *dn
+- );
+-
+- int ldap_delete_s(
+- LDAP *ld,
+- char *dn
+-
+-
+-
+-Expires: January 1998 [Page 25]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-dn The name of the entry to delete.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_delete_ext() call succeeds.
+-
+-Note that the entry to delete must be a leaf entry (i.e., it must have
+-no children). Deletion of entire subtrees in a single operation is not
+-supported by LDAP.
+-
+-The ldap_delete_ext() function initiates an asynchronous delete opera-
+-tion and returns the constant LDAP_SUCCESS if the request was success-
+-fully sent, or another LDAP error code if not. See the section below on
+-error handling for more information about possible errors and how to
+-interpret them. If successful, ldap_delete_ext() places the message id
+-of the request in *msgidp. A subsequent call to ldap_result(), described
+-below, can be used to obtain the result of the delete.
+-
+-Similar to ldap_delete_ext(), the ldap_delete() function initiates an
+-asynchronous delete operation and returns the message id of the opera-
+-tion initiated. As for ldap_delete_ext(), a subsequent call to
+-ldap_result(), described below, can be used to obtain the result of the
+-delete. In case of error, ldap_delete() will return -1, setting the ses-
+-sion error parameters in the LDAP structure appropriately.
+-
+-The synchronous ldap_delete_ext_s() and ldap_delete_s() functions both
+-return the result of the operation, either the constant LDAP_SUCCESS if
+-the operation was successful, or another LDAP error code if it was not.
+-See the section below on error handling for more information about pos-
+-sible errors and how to interpret them.
+-
+-The ldap_delete_ext() and ldap_delete_ext_s() functions support LDAPv3
+-server controls and client controls.
+-
+-
+-7.14. Extended Operations
+-
+-The ldap_extended_operation() and ldap_extended_operation_s() routines
+-allow extended LDAP operations to be passed to the server, providing a
+-
+-
+-
+-Expires: January 1998 [Page 26]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-general protocol extensibility mechanism.
+-
+- int ldap_extended_operation(
+- LDAP *ld,
+- char *exoid,
+- struct berval *exdata,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- int *msgidp
+- );
+-
+- int ldap_extended_operation_s(
+- LDAP *ld,
+- char *exoid,
+- struct berval *exdata,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls,
+- char **retoidp,
+- struct berval **retdatap
+- );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-exoid The dotted-OID text string naming the request.
+-
+-exdata The arbitrary data required by the operation (if NULL, no
+- data is sent to the server).
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-msgidp This result parameter will be set to the message id of the
+- request if the ldap_extended_operation() call succeeds.
+-
+-retoidp Pointer to a character string that will be set to an allo-
+- cated, dotted-OID text string returned by the server. This
+- string should be disposed of using the ldap_memfree() func-
+- tion. If no OID was returned, *retoidp is set to NULL.
+-
+-retdatap Pointer to a berval structure pointer that will be set to an
+- allocated copy of the data returned by the server. This
+- struct berval should be disposed of using ber_bvfree(). If
+- no data is returned, *retdatap is set to NULL.
+-
+-The ldap_extended_operation() function initiates an asynchronous
+-
+-
+-
+-Expires: January 1998 [Page 27]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-extended operation and returns the constant LDAP_SUCCESS if the request
+-was successfully sent, or another LDAP error code if not. See the sec-
+-tion below on error handling for more information about possible errors
+-and how to interpret them. If successful, ldap_extended_operation()
+-places the message id of the request in *msgidp. A subsequent call to
+-ldap_result(), described below, can be used to obtain the result of the
+-extended operation which can be passed to ldap_parse_extended_result()
+-to obtain the OID and data contained in the response.
+-
+-The synchronous ldap_extended_operation_s() function returns the result
+-of the operation, either the constant LDAP_SUCCESS if the operation was
+-successful, or another LDAP error code if it was not. See the section
+-below on error handling for more information about possible errors and
+-how to interpret them. The retoid and retdata parameters are filled in
+-with the OID and data from the response. If no OID or data was
+-returned, these parameters are set to NULL.
+-
+-The ldap_extended_operation() and ldap_extended_operation_s() functions
+-both support LDAPv3 server controls and client controls.
+-
+-
+-8. Abandoning An Operation
+-
+-The following calls are used to abandon an operation in progress:
+-
+- int ldap_abandon_ext(
+- LDAP *ld,
+- int msgid,
+- LDAPControl **serverctrls,
+- LDAPControl **clientctrls
+- );
+-
+- int ldap_abandon(
+- LDAP *ld,
+- int msgid
+- );
+-
+-
+-ld The session handle.
+-
+-msgid The message id of the request to be abandoned.
+-
+-serverctrls List of LDAP server controls.
+-
+-clientctrls List of client controls.
+-
+-ldap_abandon_ext() abandons the operation with message id msgid and
+-returns the constant LDAP_SUCCESS if the abandon was successful or
+-
+-
+-
+-Expires: January 1998 [Page 28]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-another LDAP error code if not. See the section below on error handling
+-for more information about possible errors and how to interpret them.
+-
+-ldap_abandon() is identical to ldap_abandon_ext() except that it returns
+-zero if the abandon was successful, -1 otherwise and does not support
+-LDAPv3 server controls or client controls.
+-
+-After a successful call to ldap_abandon() or ldap_abandon_ext(), results
+-with the given message id are never returned from a subsequent call to
+-ldap_result(). There is no server response to LDAP abandon operations.
+-
+-
+-9. Obtaining Results and Peeking Inside LDAP Messages
+-
+-ldap_result() is used to obtain the result of a previous asynchronously
+-initiated operation. Note that depending on how it is called,
+-ldap_result() may actually return a list or "chain" of messages.
+-
+-ldap_msgfree() frees the results obtained from a previous call to
+-ldap_result(), or a synchronous search routine.
+-
+-ldap_msgtype() returns the type of an LDAP message. ldap_msgid()
+-returns the message ID of an LDAP message.
+-
+- int ldap_result(
+- LDAP *ld,
+- int msgid,
+- int all,
+- struct timeval *timeout,
+- LDAPMessage **res
+- );
+-
+- int ldap_msgfree( LDAPMessage *res );
+-
+- int ldap_msgtype( LDAPMessage *res );
+-
+- int ldap_msgid( LDAPMessage *res );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-msgid The message id of the operation whose results are to be
+- returned, or the constant LDAP_RES_ANY (-1) if any result is
+- desired.
+-
+-all Specifies how many messages will be retrieved in a single call
+- to ldap_result(). This parameter only has meaning for search
+-
+-
+-
+-Expires: January 1998 [Page 29]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- results. Pass the constant LDAP_MSG_ONE (0x00) to retrieve one
+- message at a time. Pass LDAP_MSG_ALL (0x01) to request that
+- all results of a search be received before returning all
+- results in a single chain. Pass LDAP_MSG_RECEIVED (0x02) to
+- indicate that all results retrieved so far should be returned
+- in the result chain.
+-
+-timeout A timeout specifying how long to wait for results to be
+- returned. A NULL value causes ldap_result() to block until
+- results are available. A timeout value of zero seconds speci-
+- fies a polling behavior.
+-
+-res For ldap_result(), a result parameter that will contain the
+- result(s) of the operation. For ldap_msgfree(), the result
+- chain to be freed, obtained from a previous call to
+- ldap_result(), ldap_search_s(), or ldap_search_st().
+-
+-Upon successful completion, ldap_result() returns the type of the first
+-result returned in the res parameter. This will be one of the following
+-constants.
+-
+- LDAP_RES_BIND (0x61)
+- LDAP_RES_SEARCH_ENTRY (0x64)
+- LDAP_RES_SEARCH_REFERENCE (0x73) -- new in LDAPv3
+- LDAP_RES_SEARCH_RESULT (0x65)
+- LDAP_RES_MODIFY (0x67)
+- LDAP_RES_ADD (0x69)
+- LDAP_RES_DELETE (0x6B)
+- LDAP_RES_MODDN (0x6D)
+- LDAP_RES_COMPARE (0x6F)
+- LDAP_RES_EXTENDED (0x78) -- new in LDAPv3
+-
+-ldap_result() returns 0 if the timeout expired and -1 if an error
+-occurs, in which case the error parameters of the LDAP session handle
+-will be set accordingly.
+-
+-ldap_msgfree() frees the result structure pointed to by res and returns
+-the type of the message it freed.
+-
+-ldap_msgtype() returns the type of the LDAP message it is passed as a
+-parameter. The type will be one of the types listed above, or -1 on
+-error.
+-
+-ldap_msgid() returns the message ID associated with the LDAP message
+-passed as a parameter.
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 30]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-10. Handling Errors and Parsing Results
+-
+-The following calls are used to extract information from results and
+-handle errors returned by other LDAP API routines.
+-
+- int ldap_parse_result(
+- LDAP *ld,
+- LDAPMessage *res,
+- int *errcodep,
+- char **matcheddnp,
+- char **errmsgp,
+- char ***referralsp,
+- LDAPControl ***serverctrlsp,
+- int freeit
+- );
+-
+- int ldap_parse_sasl_bind_result(
+- LDAP *ld,
+- LDAPMessage *res,
+- struct berval **servercredp,
+- int freeit
+- );
+-
+- int ldap_parse_extended_result(
+- LDAP *ld,
+- LDAPMessage *res,
+- char **resultoidp,
+- struct berval **resultdata,
+- int freeit
+- );
+-
+- char *ldap_err2string( int err );
+-
+- The use of the following routines is deprecated.
+-
+- int ldap_result2error(
+- LDAP *ld,
+- LDAPMessage *res,
+- int freeit
+- );
+-
+- void ldap_perror( LDAP *ld, char *msg );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-res The result of an LDAP operation as returned by
+-
+-
+-
+-Expires: January 1998 [Page 31]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- ldap_result() or one of the synchronous API operation
+- calls.
+-
+-errcodep This result parameter will be filled in with the LDAP error
+- code field from the LDAPResult message. This is the indi-
+- cation from the server of the outcome of the operation.
+- NULL may be passed to ignore this field.
+-
+-matcheddnp In the case of a return of LDAP_NO_SUCH_OBJECT, this result
+- parameter will be filled in with a DN indicating how much
+- of the name in the request was recognized. NULL may be
+- passed to ignore this field. The matched DN string should
+- be freed by calling ldap_memfree() which is described later
+- in this document.
+-
+-errmsgp This result parameter will be filled in with the contents
+- of the error message field from the LDAPResult message.
+- The error message string should be freed by calling
+- ldap_memfree() which is described later in this document.
+- NULL may be passed to ignore this field.
+-
+-referralsp This result parameter will be filled in with the contents
+- of the referrals field from the LDAPResult message, indi-
+- cating zero or more alternate LDAP servers where the
+- request should be retried. The referrals array should be
+- freed by calling ldap_value_free() which is described later
+- in this document. NULL may be passed to ignore this field.
+-
+-serverctrlsp This result parameter will be filled in with an allocated
+- array of controls copied out of the LDAPResult message.
+- The control array should be freed by calling
+- ldap_controls_free() which was described earlier.
+-
+-freeit A boolean that determines whether the res parameter is
+- disposed of or not. Pass any non-zero value to have these
+- routines free res after extracting the requested informa-
+- tion. This is provided as a convenience; you can also use
+- ldap_msgfree() to free the result later.
+-
+-servercredp For SASL bind results, this result parameter will be filled
+- in with the credentials passed back by the server for
+- mutual authentication, if given. An allocated berval struc-
+- ture is returned that should be disposed of by calling
+- ldap_ber_free(). NULL may be passed to ignore this field.
+-
+-resultoidp For extended results, this result parameter will be filled
+- in with the dotted-OID text representation of the name of
+- the extended operation response. This string should be
+-
+-
+-
+-Expires: January 1998 [Page 32]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- disposed of by calling ldap_memfree(). NULL may be passed
+- to ignore this field.
+-
+-resultdatap For extended results, this result parameter will be filled
+- in with a pointer to a struct berval containing the data in
+- the extended operation response. It should be disposed of
+- by calling ldap_ber_free(). NULL may be passed to ignore
+- this field.
+-
+-err For ldap_err2string(), an LDAP error code, as returned by
+- ldap_result2error() or another LDAP API call.
+-
+-Additional parameters for the deprecated routines are not described.
+-Interested readers are referred to RFC 1823.
+-
+-All of the ldap_parse_*_result() routines skip over messages of type
+-LDAP_RES_SEARCH_ENTRY and LDAP_RES_SEARCH_REFERENCE when looking for a
+-result message to parse. They return the constant LDAP_SUCCESS if the
+-result was successfully parsed and another LDAP error code if not. Note
+-that the LDAP error code that indicates the outcome of the operation
+-performed by the server is placed in the errcodep ldap_parse_result()
+-parameter.
+-
+-ldap_err2string() is used to convert a numeric LDAP error code, as
+-returned by one of the ldap_parse_*_result() routines, or one of the
+-synchronous API operation calls, into an informative NULL-terminated
+-character string message describing the error. It returns a pointer to
+-static data.
+-
+-
+-11. Stepping Through a List of Results
+-
+-The ldap_first_message() and ldap_next_message() routines are used to
+-step through the list of messages in a result chain returned by
+-ldap_result(). For search operations, the result chain may actually
+-include referral messages, entry messages, and result messages.
+-ldap_count_messages() is used to count the number of messages returned.
+-The ldap_msgtype() function, described above, can be used to distinguish
+-between the different message types.
+-
+- LDAPMessage *ldap_first_message( LDAP *ld, LDAPMessage *res );
+-
+- LDAPMessage *ldap_next_message( LDAP *ld, LDAPMessage *msg );
+-
+- int ldap_count_messages( LDAP *ld, LDAPMessage *res );
+-
+-Parameters are:
+-
+-
+-
+-
+-Expires: January 1998 [Page 33]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-ld The session handle.
+-
+-res The result chain, as obtained by a call to one of the synchronous
+- search routines or ldap_result().
+-
+-msg The message returned by a previous call to ldap_first_message()
+- or ldap_next_message().
+-
+-ldap_first_message() and ldap_next_message() will return NULL when no
+-more messages exist in the result set to be returned. NULL is also
+-returned if an error occurs while stepping through the entries, in which
+-case the error parameters in the session handle ld will be set to indi-
+-cate the error.
+-
+-ldap_count_messages() returns the number of messages contained in a
+-chain of results. It can also be used to count the number of messages
+-that remain in a chain if called with a message, entry, or reference
+-returned by ldap_first_message(), ldap_next_message(),
+-ldap_first_entry(), ldap_next_entry(), ldap_first_reference(),
+-ldap_next_reference().
+-
+-
+-12. Parsing Search Results
+-
+-The following calls are used to parse the entries and references
+-returned by ldap_search() and friends. These results are returned in an
+-opaque structure that should only be accessed by calling the routines
+-described below. Routines are provided to step through the entries and
+-references returned, step through the attributes of an entry, retrieve
+-the name of an entry, and retrieve the values associated with a given
+-attribute in an entry.
+-
+-
+-12.1. Stepping Through a List of Entries
+-
+-The ldap_first_entry() and ldap_next_entry() routines are used to step
+-through and retrieve the list of entries from a search result chain.
+-The ldap_first_reference() and ldap_next_reference() routines are used
+-to step through and retrieve the list of continuation references from a
+-search result chain. ldap_count_entries() is used to count the number
+-of entries returned. ldap_count_references() is used to count the number
+-of references returned.
+-
+- LDAPMessage *ldap_first_entry( LDAP *ld, LDAPMessage *res );
+-
+- LDAPMessage *ldap_next_entry( LDAP *ld, LDAPMessage *entry );
+-
+- LDAPMessage *ldap_first_reference( LDAP *ld, LDAPMessage *res );
+-
+-
+-
+-Expires: January 1998 [Page 34]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- LDAPMessage *ldap_next_reference( LDAP *ld, LDAPMessage *ref );
+-
+- int ldap_count_entries( LDAP *ld, LDAPMessage *res );
+-
+- int ldap_count_references( LDAP *ld, LDAPMessage *res );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-res The search result, as obtained by a call to one of the synchro-
+- nous search routines or ldap_result().
+-
+-entry The entry returned by a previous call to ldap_first_entry() or
+- ldap_next_entry().
+-
+-ldap_first_entry() and ldap_next_entry() will return NULL when no more
+-entries or references exist in the result set to be returned. NULL is
+-also returned if an error occurs while stepping through the entries, in
+-which case the error parameters in the session handle ld will be set to
+-indicate the error.
+-
+-ldap_count_entries() returns the number of entries contained in a chain
+-of entries. It can also be used to count the number of entries that
+-remain in a chain if called with a message, entry or reference returned
+-by ldap_first_message(), ldap_next_message(), ldap_first_entry(),
+-ldap_next_entry(), ldap_first_reference(), ldap_next_reference().
+-
+-ldap_count_references() returns the number of references contained in a
+-chain of search results. It can also be used to count the number of
+-references that remain in a chain.
+-
+-
+-12.2. Stepping Through the Attributes of an Entry
+-
+-The ldap_first_attribute() and ldap_next_attribute() calls are used to
+-step through the list of attribute types returned with an entry.
+-
+- char *ldap_first_attribute(
+- LDAP *ld,
+- LDAPMessage *entry,
+- BerElement **ptr
+- );
+-
+- char *ldap_next_attribute(
+- LDAP *ld,
+- LDAPMessage *entry,
+- BerElement *ptr
+-
+-
+-
+-Expires: January 1998 [Page 35]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- );
+-
+- void ldap_memfree( char *mem );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-entry The entry whose attributes are to be stepped through, as returned
+- by ldap_first_entry() or ldap_next_entry().
+-
+-ptr In ldap_first_attribute(), the address of a pointer used inter-
+- nally to keep track of the current position in the entry. In
+- ldap_next_attribute(), the pointer returned by a previous call to
+- ldap_first_attribute().
+-
+-mem A pointer to memory allocated by the LDAP library, such as the
+- attribute names returned by ldap_first_attribute() and
+- ldap_next_attribute, or the DN returned by ldap_get_dn().
+-
+-ldap_first_attribute() and ldap_next_attribute() will return NULL when
+-the end of the attributes is reached, or if there is an error, in which
+-case the error parameters in the session handle ld will be set to indi-
+-cate the error.
+-
+-Both routines return a pointer to an allocated buffer containing the
+-current attribute name. This should be freed when no longer in use by
+-calling ldap_memfree().
+-
+-ldap_first_attribute() will allocate and return in ptr a pointer to a
+-BerElement used to keep track of the current position. This pointer
+-should be passed in subsequent calls to ldap_next_attribute() to step
+-through the entry's attributes. After a set of calls to
+-ldap_first_attribute() and ldap_next_attribute(), if ptr is non-NULL, it
+-should be freed by calling ldap_ber_free( ptr, 0 ). Note that it is very
+-important to pass the second parameter as 0 (zero) in this call.
+-
+-The attribute names returned are suitable for passing in a call to
+-ldap_get_values() and friends to retrieve the associated values.
+-
+-
+-12.3. Retrieving the Values of an Attribute
+-
+-ldap_get_values() and ldap_get_values_len() are used to retrieve the
+-values of a given attribute from an entry. ldap_count_values() and
+-ldap_count_values_len() are used to count the returned values.
+-ldap_value_free() and ldap_value_free_len() are used to free the values.
+-
+-
+-
+-
+-Expires: January 1998 [Page 36]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- char **ldap_get_values(
+- LDAP *ld,
+- LDAPMessage *entry,
+- char *attr
+- );
+-
+- struct berval **ldap_get_values_len(
+- LDAP *ld,
+- LDAPMessage *entry,
+- char *attr
+- );
+-
+- int ldap_count_values( char **vals );
+-
+- int ldap_count_values_len( struct berval **vals );
+-
+- int ldap_value_free( char **vals );
+-
+- int ldap_value_free_len( struct berval **vals );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-entry The entry from which to retrieve values, as returned by
+- ldap_first_entry() or ldap_next_entry().
+-
+-attr The attribute whose values are to be retrieved, as returned by
+- ldap_first_attribute() or ldap_next_attribute(), or a caller-
+- supplied string (e.g., "mail").
+-
+-vals The values returned by a previous call to ldap_get_values() or
+- ldap_get_values_len().
+-
+-Two forms of the various calls are provided. The first form is only
+-suitable for use with non-binary character string data. The second _len
+-form is used with any kind of data.
+-
+-Note that the values returned are dynamically allocated and should be
+-freed by calling either ldap_value_free() or ldap_value_free_len() when
+-no longer in use.
+-
+-
+-12.4. Retrieving the name of an entry
+-
+-ldap_get_dn() is used to retrieve the name of an entry.
+-ldap_explode_dn() and ldap_explode_rdn() are used to break up a name
+-into its component parts. ldap_dn2ufn() is used to convert the name into
+-
+-
+-
+-Expires: January 1998 [Page 37]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-a more "user friendly" format.
+-
+- char *ldap_get_dn( LDAP *ld, LDAPMessage *entry );
+-
+- char **ldap_explode_dn( char *dn, int notypes );
+-
+- char **ldap_explode_rdn( char *rdn, int notypes );
+-
+- char *ldap_dn2ufn( char *dn );
+-
+-Parameters are:
+-
+-ld The session handle.
+-
+-entry The entry whose name is to be retrieved, as returned by
+- ldap_first_entry() or ldap_next_entry().
+-
+-dn The dn to explode, such as returned by ldap_get_dn().
+-
+-rdn The rdn to explode, such as returned in the components of the
+- array returned by ldap_explode_dn().
+-
+-notypes A boolean parameter, if non-zero indicating that the dn or rdn
+- components should have their type information stripped off
+- (i.e., "cn=Babs" would become "Babs").
+-
+-ldap_get_dn() will return NULL if there is some error parsing the dn,
+-setting error parameters in the session handle ld to indicate the error.
+-It returns a pointer to malloc'ed space that the caller should free by
+-calling ldap_memfree() when it is no longer in use. Note the format of
+-the DNs returned is given by [4].
+-
+-ldap_explode_dn() returns a NULL-terminated char * array containing the
+-RDN components of the DN supplied, with or without types as indicated by
+-the notypes parameter. The array returned should be freed when it is no
+-longer in use by calling ldap_value_free().
+-
+-ldap_explode_rdn() returns a NULL-terminated char * array containing the
+-components of the RDN supplied, with or without types as indicated by
+-the notypes parameter. The array returned should be freed when it is no
+-longer in use by calling ldap_value_free().
+-
+-ldap_dn2ufn() converts the DN into the user friendly format described in
+-[5]. The UFN returned is malloc'ed space that should be freed by a call
+-to ldap_memfree() when no longer in use.
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 38]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-13. Encoded ASN.1 Value Manipulation
+-
+-This section describes routines which may be used to encode and decode
+-BER-encoded ASN.1 values, which are often used inside of control and
+-extension values.
+-
+-With the exceptions of two new functions ber_flatten() and ber_init(),
+-these functions are compatible with the University of Michigan LDAP 3.3
+-implementation of BER.
+-
+-
+-13.1. General
+-
+- struct berval {
+- unsigned long bv_len;
+- char *bv_val;
+- };
+-
+-A struct berval contains a sequence of bytes and an indication of its
+-length. The bv_val is not null terminated. bv_len must always be a
+-nonnegative number. Applications may allocate their own berval struc-
+-tures.
+-
+- typedef struct berelement {
+- /* opaque */
+- } BerElement;
+-
+-The BerElement structure contains not only a copy of the encoded value,
+-but also state information used in encoding or decoding. Applications
+-cannot allocate their own BerElement structures. The internal state is
+-neither thread-specific nor locked, so two threads should not manipulate
+-the same BerElement value simultaneously.
+-
+-A single BerElement value cannot be used for both encoding and decoding.
+-
+- void ber_bvfree ( struct berval *bv);
+-
+-ber_bvfree() frees a berval returned from this API. Both the bv->bv_val
+-string and the berval itself are freed. Applications should not use
+-ber_bvfree() with bervals which the application has allocated.
+-
+- void ber_bvecfree ( struct berval **bv );
+-
+-ber_bvecfree() frees an array of bervals returned from this API. Each
+-of the bervals in the array are freed using ber_bvfree(), then the array
+-itself is freed.
+-
+- struct berval *ber_bvdup (struct berval *bv );
+-
+-
+-
+-Expires: January 1998 [Page 39]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-ber_bvdup() returns a copy of a berval. The bv_val field in the
+-returned berval points to a different area of memory as the bv_val field
+-in the argument berval. The null pointer is returned on error (e.g. out
+-of memory).
+-
+- void ber_free ( BerElement *ber, int fbuf );
+-
+-ber_free() frees a BerElement which is returned from the API calls
+-ber_alloc_t() or ber_init(). Each BerElement must be freed by the
+-caller. The second argument fbuf should always be set to 1.
+-
+-
+-13.2. Encoding
+-
+- BerElement *ber_alloc_t(int options);
+-
+-ber_alloc_t() constructs and returns BerElement. The null pointer is
+-returned on error. The options field contains a bitwise-or of options
+-which are to be used when generating the encoding of this BerElement.
+-One option is defined and must always be supplied:
+-
+- #define LBER_USE_DER 0x01
+-
+-When this option is present, lengths will always be encoded in the
+-minimum number of octets. Note that this option does not cause values
+-of sets and sequences to be rearranged in tag and byte order, so these
+-functions are not suitable for generating DER output as defined in X.509
+-and X.680.
+-
+-Unrecognized option bits are ignored.
+-
+-The BerElement returned by ber_alloc_t() is initially empty. Calls to
+-ber_printf() will append bytes to the end of the ber_alloc_t().
+-
+- int ber_printf(BerElement *ber, char *fmt, ... )
+-
+-The ber_printf() routine is used to encode a BER element in much the
+-same way that sprintf() works. One important difference, though, is
+-that state information is kept in the ber argument so that multiple
+-calls can be made to ber_printf() to append to the end of the BER ele-
+-ment. ber must be a pointer to a BerElement returned by ber_alloc_t().
+-ber_printf() interprets and formats its arguments according to the for-
+-mat string fmt. ber_printf() returns -1 if there is an error during
+-encoding. As with sprintf(), each character in fmt refers to an argu-
+-ment to ber_printf().
+-
+-The format string can contain the following format characters:
+-
+-
+-
+-
+-Expires: January 1998 [Page 40]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-'t' Tag. The next argument is an int specifying the tag to override
+- the next element to be written to the ber. This works across
+- calls. The int value must contain the tag class, constructed
+- bit, and tag value. The tag value must fit in a single octet
+- (tag value is less than 32). For example, a tag of "[3]" for a
+- constructed type is 0xA3.
+-
+-'b' Boolean. The next argument is an int, containing either 0 for
+- FALSE or 0xff for TRUE. A boolean element is output. If this
+- format character is not preceded by the 't' format modifier, the
+- tag 0x01 is used for the element.
+-
+-'i' Integer. The next argument is an int, containing the integer in
+- the host's byte order. An integer element is output. If this
+- format character is not preceded by the 't' format modifier, the
+- tag 0x02 is used for the element.
+-
+-'X' Bitstring. The next two arguments are a char * pointer to the
+- start of the bitstring, followed by an int containing the number
+- of bits in the bitstring. A bitstring element is output, in
+- primitive form. If this format character is not preceded by the
+- 't' format modifier, the tag 0x03 is used for the element.
+-
+-'n' Null. No argument is required. An ASN.1 NULL element is out-
+- put. If this format character is not preceded by the 't' format
+- modifier, the tag 0x05 is used for the element.
+-
+-'o' Octet string. The next two arguments are a char *, followed by
+- an int with the length of the string. The string may contain
+- null bytes and need not by null-terminated. An octet string
+- element is output, in primitive form. If this format character
+- is not preceded by the 't' format modifier, the tag 0x04 is used
+- for the element.
+-
+-'s' Octet string. The next argument is a char * pointing to a
+- null-terminated string. An octet string element in primitive
+- form is output, which does not include the trailing NUL byte. If
+- this format character is not preceded by the 't' format modif-
+- ier, the tag 0x04 is used for the element.
+-
+-'v' Several octet strings. The next argument is a char **, an array
+- of char * pointers to null-terminated strings. The last element
+- in the array must be a null pointer. The octet strings do not
+- include the trailing SEQUENCE OF octet strings. The 't' format
+- modifier cannot be used with this format character.
+-
+-'V' Several octet strings. A null-terminated array of berval *'s is
+- supplied. Note that a construct like '{V}' is required to get an
+-
+-
+-
+-Expires: January 1998 [Page 41]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- actual SEQUENCE OF octet strings. The 't' format modifier cannot
+- be used with this format character.
+-
+-'{' Begin sequence. No argument is required. If this format char-
+- acter is not preceded by the 't' format modifier, the tag 0x30
+- is used.
+-
+-'}' End sequence. No argument is required. The 't' format modifier
+- cannot be used with this format character.
+-
+-'[' Begin set. No argument is required. If this format character
+- is not preceded by the 't' format modifier, the tag 0x31 is
+- used.
+-
+-']' End set. No argument is required. The 't' format modifier can-
+- not be used with this format character.
+-
+-Each use of a '{' format character must be matched by a '}' character,
+-either later in the format string, or in the format string of a subse-
+-quent call to ber_printf() for that BerElement. The same applies to the
+-'[' and ']' format characters.
+-
+-Sequences and sets nest, and implementations of this API must maintain
+-internal state to be able to properly calculate the lengths.
+-
+- int ber_flatten (BerElement *ber, struct berval **bvPtr);
+-
+-The ber_flatten routine allocates a struct berval whose contents are a
+-BER encoding taken from the ber argument. The bvPtr pointer points to
+-the returned berval, which must be freed using ber_bvfree(). This rou-
+-tine returns 0 on success and -1 on error.
+-
+-The ber_flatten API call is not present in U-M LDAP 3.3.
+-
+-The use of ber_flatten on a BerElement in which all '{' and '}' format
+-modifiers have not been properly matched can result in a berval whose
+-contents are not a valid BER encoding.
+-
+-
+-13.3. Encoding Example
+-
+-The following is an example of encoding the following ASN.1 data type:
+-
+- Example1Request ::= SEQUENCE {
+- s OCTET STRING, -- must be printable
+- val1 INTEGER,
+- val2 [0] INTEGER DEFAULT 0
+- }
+-
+-
+-
+-Expires: January 1998 [Page 42]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- int encode_example1(char *s,int val1,int val2,struct berval **bvPtr)
+- {
+- BerElement *ber;
+- int rc;
+-
+- ber = ber_alloc_t(LBER_USE_DER);
+-
+- if (ber == NULL) return -1;
+-
+- if (ber_printf(ber,"{si",s,val1) == -1) {
+- ber_free(ber,1);
+- return -1;
+- }
+-
+- if (val2 != 0) {
+- if (ber_printf(ber,"ti",0x80,val2) == -1) {
+- ber_free(ber,1);
+- return -1;
+- }
+- }
+-
+- if (ber_printf(ber,"}") == -1) {
+- ber_free(ber,1);
+- return -1;
+- }
+-
+- rc = ber_flatten(ber,bvPtr);
+- ber_free(ber,1);
+-      return rc;
+- }
+-
+-
+-13.4. Decoding
+-
+-The following two symbols are available to applications.
+-
+- #define LBER_ERROR 0xffffffffL
+- #define LBER_DEFAULT 0xffffffffL
+-
+- BerElement *ber_init (struct berval *bv);
+-
+-The ber_init function constructs and returns a new BerElement
+-containing a copy of the data in the bv argument. ber_init returns the
+-null pointer on error.
+-
+- unsigned long ber_scanf (BerElement *ber, char *fmt, ... );
+-
+-The ber_scanf() routine is used to decode a BER element in much the same
+-
+-
+-
+-Expires: January 1998 [Page 43]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-way that sscanf() works. One important difference, though, is that some
+-state information is kept with the ber argument so that multiple calls
+-can be made to ber_scanf() to sequentially read from the BER element.
+-The ber argument must be a pointer to a BerElement returned by
+-ber_init(). ber_scanf interprets the bytes according to the format
+-string fmt, and stores the results in its additional arguments.
+-ber_scanf() returns LBER_ERROR on error, and a nonnegative number on
+-success.
+-
+-The format string contains conversion specifications which are used to
+-direct the interpretation of the BER element. The format string can
+-contain the following characters:
+-
+-'a' Octet string. A char ** argument should be supplied. Memory is
+- allocated, filled with the contents of the octet string, null-
+- terminated, and the pointer to the string is stored in the argu-
+- ment. The returned value must be freed using ldap_memfree. The
+- tag of the element must indicate the primitive form (constructed
+- strings are not supported) but is otherwise ignored and dis-
+- carded during the decoding. This format cannot be used with
+- octet strings which could contain null bytes.
+-
+-'O' Octet string. A struct berval ** argument should be supplied,
+- which upon return points to a allocated struct berval containing
+- the octet string and its length. ber_bvfree() must be called to
+- free the allocated memory. The tag of the element must indicate
+- the primitive form (constructed strings are not supported) but
+- is otherwise ignored during the decoding.
+-
+-'b' Boolean. A pointer to an int should be supplied. The int value
+- stored will be 0 for FALSE or nonzero for TRUE. The tag of the
+- element must indicate the primitive form but is otherwise
+- ignored during the decoding.
+-
+-'i' Integer. A pointer to an int should be supplied. The int value
+- stored will be in host byte order. The tag of the element must
+- indicate the primitive form but is otherwise ignored during the
+- decoding. ber_scanf() will return an error if the integer can-
+- not be stored in an int.
+-
+-'B' Bitstring. A char ** argument should be supplied which will
+- point to the allocated bits, followed by an unsigned long *
+- argument, which will point to the length (in bits) of the bit-
+- string returned. ldap_memfree must be called to free the bit-
+- string. The tag of the element must indicate the primitive form
+- (constructed bitstrings are not supported) but is otherwise
+- ignored during the decoding.
+-
+-
+-
+-
+-Expires: January 1998 [Page 44]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-'n' Null. No argument is required. The element is simply skipped
+- if it is recognized as a zero-length element. The tag is
+- ignored.
+-
+-'v' Several octet strings. A char *** argument should be supplied,
+- which upon return points to a allocated null-terminated array of
+- char *'s containing the octet strings. NULL is stored if the
+- sequence is empty. ldap_memfree must be called to free each
+- element of the array and the array itself. The tag of the
+- sequence and of the octet strings are ignored.
+-
+-'V' Several octet strings (which could contain null bytes). A
+- struct berval *** should be supplied, which upon return points
+- to a allocated null-terminated array of struct berval *'s con-
+- taining the octet strings and their lengths. NULL is stored if
+- the sequence is empty. ber_bvecfree() can be called to free the
+- allocated memory. The tag of the sequence and of the octet
+- strings are ignored.
+-
+-'x' Skip element. The next element is skipped. No argument is
+- required.
+-
+-'{' Begin sequence. No argument is required. The initial sequence
+- tag and length are skipped.
+-
+-'}' End sequence. No argument is required.
+-
+-'[' Begin set. No argument is required. The initial set tag and
+- length are skipped.
+-
+-']' End set. No argument is required.
+-
+- unsigned long ber_peek_tag (BerElement *ber, unsigned long *lenPtr);
+-
+-ber_peek_tag() returns the tag of the next element to be parsed in the
+-BerElement argument. The length of this element is stored in the
+-*lenPtr argument. LBER_DEFAULT is returned if there is no further data
+-to be read. The ber argument is not modified.
+-
+- unsigned long ber_skip_tag (BerElement *ber, unsigned long *lenPtr);
+-
+-ber_skip_tag() is similar to ber_peek_tag(), except that the state
+-pointer in the BerElement argument is advanced past the first tag and
+-length, and is pointed to the value part of the next element. This rou-
+-tine should only be used with constructed types and situations when a
+-BER encoding is used as the value of an OCTET STRING. The length of the
+-value is stored in *lenPtr.
+-
+-
+-
+-
+-Expires: January 1998 [Page 45]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- unsigned long ber_first_element(BerElement *ber,
+- unsigned long *lenPtr, char **opaquePtr);
+-
+- unsigned long ber_next_element (BerElement *ber,
+- unsigned long *lenPtr, char *opaque);
+-
+-ber_first_element() and ber_next_element() are used to traverse a SET,
+-SET OF, SEQUENCE or SEQUENCE OF data value. ber_first_element() calls
+-ber_skip_tag(), stores internal information in *lenPtr and *opaquePtr,
+-and calls ber_peek_tag() for the first element inside the constructed
+-value. LBER_DEFAULT is returned if the constructed value is empty.
+-ber_next_element() positions the state at the start of the next element
+-in the constructed type. LBER_DEFAULT is returned if there are no
+-further values.
+-
+-The len and opaque values should not be used by applications other than
+-as arguments to ber_next_element(), as shown in the example below.
+-
+-
+-13.5. Decoding Example
+-
+-The following is an example of decoding an ASN.1 data type:
+-
+- Example2Request ::= SEQUENCE {
+- dn OCTET STRING, -- must be printable
+- scope ENUMERATED { b (0), s (1), w (2) },
+- ali ENUMERATED { n (0), s (1), f (2), a (3) },
+- size INTEGER,
+- time INTEGER,
+- tonly BOOLEAN,
+- attrs SEQUENCE OF OCTET STRING, -- must be printable
+- [0] SEQUENCE OF SEQUENCE {
+- type OCTET STRING -- must be printable,
+- crit BOOLEAN DEFAULT FALSE,
+- value OCTET STRING
+- } OPTIONAL }
+-
+- #define LDAP_TAG_CONTROL_LIST 0xA0L /* context specific cons 0 */
+-
+- int decode_example2(struct berval *bv)
+- {
+- BerElement *ber;
+- unsigned long len;
+- int scope, ali, size, time, tonly;
+- char *dn = NULL, **attrs = NULL;
+- int res,i,rc = 0;
+-
+- ber = ber_init(bv);
+-
+-
+-
+-Expires: January 1998 [Page 46]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- if (ber == NULL) {
+-          printf("ERROR ber_init failed\n");
+- return -1;
+- }
+-
+- res = ber_scanf(ber,"{aiiiiib{v}",&dn,&scope,&ali,
+- &size,&time,&tonly,&attrs);
+-
+- if (res == -1) {
+-          printf("ERROR ber_scanf failed\n");
+- ber_free(ber,1);
+- return -1;
+- }
+-
+- /* *** use dn */
+- ldap_memfree(dn);
+-
+- for (i = 0; attrs != NULL && attrs[i] != NULL; i++) {
+- /* *** use attrs[i] */
+- ldap_memfree(attrs[i]);
+- }
+- ldap_memfree(attrs);
+-
+- if (ber_peek_tag(ber,&len) == LDAP_TAG_CONTROL_LIST) {
+- char *opaque;
+- unsigned long tag;
+-
+- for (tag = ber_first_element(ber,&len,&opaque);
+- tag != LBER_DEFAULT;
+- tag = ber_next_element (ber,&len,opaque)) {
+-
+- unsigned long ttag, tlen;
+- char *type;
+- int crit;
+- struct berval *value;
+-
+- if (ber_scanf(ber,"{a",&type) == LBER_ERROR) {
+-                  printf("ERROR cannot parse type\n");
+- break;
+- }
+- /* *** use type */
+- ldap_memfree(type);
+-
+- ttag = ber_peek_tag(ber,&tlen);
+- if (ttag == 0x01) { /* boolean */
+- if (ber_scanf(ber,"b",
+- &crit) == LBER_ERROR) {
+-                      printf("ERROR cannot parse crit\n");
+-
+-
+-
+-Expires: January 1998 [Page 47]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- rc = -1;
+- break;
+- }
+- } else if (ttag == 0x04) { /* octet string */
+- crit = 0;
+- } else {
+-                  printf("ERROR extra field in controls\n");
+- break;
+- }
+-
+- if (ber_scanf(ber,"O}",&value) == LBER_ERROR) {
+-                  printf("ERROR cannot parse value\n");
+- rc = -1;
+- break;
+- }
+- /* *** use value */
+-              ber_bvfree(value);
+- }
+- }
+-
+- ber_scanf(ber,"}");
+-
+- ber_free(ber,1);
+-
+- return rc;
+- }
+-
+-
+-
+-14. Security Considerations
+-
+-LDAPv2 supports security through protocol-level authentication using
+-clear-text passwords. LDAPv3 adds support for SASL [8] (Simple Authen-
+-tication Security Layer) methods. LDAPv3 also supports operation over a
+-secure transport layer using Transport Layer Security TLS [8]. Readers
+-are referred to the protocol documents for discussion of related secu-
+-rity considerations.
+-
+-Implementations of this API should be cautious when handling authentica-
+-tion credentials. In particular, keeping long-lived copies of creden-
+-tials without the application's knowledge is discouraged.
+-
+-
+-15. Acknowledgements
+-
+-Many members of the IETF ASID working group as well as members of the
+-Internet at large have provided useful comments and suggestions that
+-have been incorporated into this revision.
+-
+-
+-
+-Expires: January 1998 [Page 48]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-This original material upon which this revision is based was based upon
+-work supported by the National Science Foundation under Grant No. NCR-
+-9416667.
+-
+-
+-16. Bibliography
+-
+-[1] The Directory: Selected Attribute Syntaxes. CCITT, Recommendation
+- X.520.
+-
+-[2] M. Wahl, A. Coulbeck, T. Howes, S. Kille, W. Yeong, C. Robbins,
+- "Lightweight Directory Access Protocol Attribute Syntax Defini-
+- tions", INTERNET-DRAFT <draft-ietf-asid-ldapv3-attributes-06.txt>,
+- 11 July 1997.
+-
+-[3] T. Howes, "A String Representation of LDAP Search Filters,"
+- INTERNET-DRAFT <draft-ietf-asid-ldapv3-filter-02.txt>, May 1997.
+-
+-[4] S. Kille, M. Wahl, "A UTF-8 String Representation of Distinguished
+- Names", INTERNET-DRAFT <draft-ietf-asid-ldapv3-dn-03.txt>, 29 April
+- 1997.
+-
+-[5] S. Kille, "Using the OSI Directory to Achieve User Friendly Nam-
+- ing," RFC 1781, March 1995.
+-
+-[6] M. Wahl, T. Howes, S. Kille, "Lightweight Directory Access Protocol
+- (v3)", INTERNET-DRAFT <draft-ietf-asid-ldapv3-protocol-06.txt>, 11
+- July 1997.
+-
+-[7] A. Herron, T. Howes, M. Wahl, "LDAP Control Extension for Server
+- Side Sorting of Search Result," INTERNET-DRAFT <draft-ietf-asid-
+- ldapv3-sorting-00.txt>, 16 April 1997.
+-
+-[8] J. Meyers, "Simple Authentication and Security Layer", INTERNET-
+- DRAFT <draft-myers-auth-sasl-11.txt>, April 1997.
+-
+-[9] "Lightweight Directory Access Protocol (v3) Extension for Transport
+- Layer Security", INTERNET-DRAFT <draft-ietf-asid-ldapv3-tls-
+- 01.txt>, June 1997.
+-
+-[10] "UTF-8, a transformation format of Unicode and ISO 10646", RFC
+- 2044, October 1996.
+-
+-[11] "IP Version 6 Addressing Architecture,", RFC 1884, December 1995.
+-
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 49]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-17. Author's Addresses
+-
+- Tim Howes
+- Netscape Communications Corp.
+- 501 E. Middlefield Rd., Mailstop MV068
+- Mountain View, CA 94043
+- USA
+- +1 415 937-3419
+- howes@netscape.com
+-
+-
+- Mark Smith
+- Netscape Communications Corp.
+- 501 E. Middlefield Rd., Mailstop MV068
+- Mountain View, CA 94043
+- USA
+- +1 415 937-3477
+- mcs@netscape.com
+-
+- Andy Herron
+- Microsoft Corp.
+- 1 Microsoft Way
+- Redmond, WA 98052
+- USA
+- +1 425 882-8080
+- andyhe@microsoft.com
+-
+- Chris Weider
+- Microsoft Corp.
+- 1 Microsoft Way
+- Redmond, WA 98052
+- USA
+- +1 425 882-8080
+- cweider@microsoft.com
+-
+- Mark Wahl
+- Critical Angle Inc.
+- 4815 W Braker Lane #502-385
+- Austin, TX 78759
+- USA
+- M.Wahl@critical-angle.com
+-
+-
+-18. Appendix A - Sample LDAP API Code
+-
+- #include <ldap.h>
+-
+- main()
+-
+-
+-
+-Expires: January 1998 [Page 50]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- {
+- LDAP *ld;
+- LDAPMessage *res, *e;
+- int i;
+- char *a, *dn;
+- BerElement *ptr;
+- char **vals;
+-
+- /* open an LDAP session */
+- if ( (ld = ldap_init( "dotted.host.name", LDAP_PORT )) == NULL )
+- exit( 1 );
+-
+- /* authenticate as nobody */
+- if ( ldap_simple_bind_s( ld, NULL, NULL ) != LDAP_SUCCESS ) {
+- ldap_perror( ld, "ldap_simple_bind_s" );
+- exit( 1 );
+- }
+-
+- /* search for entries with cn of "Babs Jensen", return all attrs */
+- if ( ldap_search_s( ld, "o=University of Michigan, c=US",
+- LDAP_SCOPE_SUBTREE, "(cn=Babs Jensen)", NULL, 0, &res )
+- != LDAP_SUCCESS ) {
+- ldap_perror( ld, "ldap_search_s" );
+- exit( 1 );
+- }
+-
+- /* step through each entry returned */
+- for ( e = ldap_first_entry( ld, res ); e != NULL;
+- e = ldap_next_entry( ld, e ) ) {
+- /* print its name */
+- dn = ldap_get_dn( ld, e );
+- printf( "dn: %s\n", dn );
+- ldap_memfree( dn );
+-
+- /* print each attribute */
+- for ( a = ldap_first_attribute( ld, e, &ptr ); a != NULL;
+- a = ldap_next_attribute( ld, e, ptr ) ) {
+- printf( "attribute: %s\n", a );
+-
+- /* print each value */
+- vals = ldap_get_values( ld, e, a );
+- for ( i = 0; vals[i] != NULL; i++ ) {
+- printf( "value: %s\n", vals[i] );
+- }
+- ldap_value_free( vals );
+- }
+- if ( ptr != NULL ) {
+- ldap_ber_free( ptr, 0 );
+-
+-
+-
+-Expires: January 1998 [Page 51]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+- }
+- }
+- /* free the search results */
+- ldap_msgfree( res );
+-
+- /* close and free connection resources */
+- ldap_unbind( ld );
+- }
+-
+-
+-
+-19. Appendix B - Outstanding Issues
+-
+-
+-19.1. Support for multithreaded applications
+-
+-In order to support multithreaded applications in a platform-independent
+-way, some additions to the LDAP API are needed. Different implementors
+-have taken different paths to solve this problem in the past. A common
+-set of thread-related API calls must be defined so that application
+-developers are not unduly burdened. These will be added to a future
+-revision of this specification.
+-
+-
+-19.2. Using Transport Layer Security (TLS)
+-
+-The API calls used to support TLS must be specified. They will be added
+-to a future revision of this specification.
+-
+-
+-19.3. Client control for chasing referrals
+-
+-A client control has been defined that can be used to specify on a per-
+-operation basis whether references and external referrals are automati-
+-cally chased by the client library. This will be added to a future
+-revision of this specification.
+-
+-
+-19.4. Potential confusion between hostname:port and IPv6 addresses
+-
+-String representations of IPv6 network addresses [11] can contain colon
+-characters. The ldap_init() call is specified to take strings of the
+-form "hostname:port" or "ipaddress:port". If IPv6 addresses are used,
+-the latter could be ambiguous. A future revision of this specification
+-will resolve this issue.
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 52]
+-\f
+-C LDAP API The C LDAP Application Program Interface 29 July 1997
+-
+-
+-19.5. Need to track SASL API standardization efforts
+-
+-If a standard Simple Authentication and Security Layer API is defined,
+-it may be necessary to modify the LDAP API to accommodate it.
+-
+-
+-19.6. Support for character sets other than UTF-8?
+-
+-Some application developers would prefer to pass string data using a
+-character set other than UTF-8. This could be accommodated by adding a
+-new option to ldap_set_option() that supports choosing a character set.
+-If this feature is added, the number of different character sets sup-
+-ported should definitely be minimized.
+-
+-
+-19.7. Use of UTF-8 with LDAPv2 servers
+-
+-Strings are always passed as UTF-8 in this API but LDAP version 2
+-servers do not support the full range of UTF-8 characters. The expected
+-behavior of this API when using LDAP version 2 with unsupported charac-
+-ters should be specified.
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-Expires: January 1998 [Page 53]
+-\f
+-
+-
+-1. Status of this Memo............................................1
+-2. Introduction...................................................1
+-3. Overview of the LDAP Model.....................................2
+-4. Overview of LDAP API Use.......................................3
+-5. Common Data Structures.........................................4
+-6. LDAP Error Codes...............................................5
+-7. Performing LDAP Operations.....................................6
+-7.1. Initializing an LDAP Session................................6
+-7.2. LDAP Session Handle Options.................................7
+-7.3. Working with controls.......................................10
+-7.4. Authenticating to the directory.............................11
+-7.5. Closing the session.........................................13
+-7.6. Searching...................................................13
+-7.7. Reading an Entry............................................17
+-7.8. Listing the Children of an Entry............................17
+-7.9. Comparing a Value Against an Entry..........................17
+-7.10. Modifying an entry..........................................19
+-7.11. Modifying the Name of an Entry..............................21
+-7.12. Adding an entry.............................................23
+-7.13. Deleting an entry...........................................25
+-7.14. Extended Operations.........................................26
+-8. Abandoning An Operation........................................28
+-9. Obtaining Results and Peeking Inside LDAP Messages.............29
+-10. Handling Errors and Parsing Results............................31
+-11. Stepping Through a List of Results.............................33
+-12. Parsing Search Results.........................................34
+-12.1. Stepping Through a List of Entries..........................34
+-12.2. Stepping Through the Attributes of an Entry.................35
+-12.3. Retrieving the Values of an Attribute.......................36
+-12.4. Retrieving the name of an entry.............................37
+-13. Encoded ASN.1 Value Manipulation...............................39
+-13.1. General.....................................................39
+-13.2. Encoding....................................................40
+-13.3. Encoding Example............................................42
+-13.4. Decoding....................................................43
+-13.5. Decoding Example............................................46
+-14. Security Considerations........................................48
+-15. Acknowledgements...............................................48
+-16. Bibliography...................................................49
+-17. Author's Addresses.............................................50
+-18. Appendix A - Sample LDAP API Code..............................50
+-19. Appendix B - Outstanding Issues................................52
+-19.1. Support for multithreaded applications......................52
+-19.2. Using Transport Layer Security (TLS)........................52
+-19.3. Client control for chasing referrals........................52
+-19.4. Potential confusion between hostname:port and IPv6 addresses52
+-19.5. Need to track SASL API standardization efforts..............53
+-19.6. Support for character sets other than UTF-8?................53
+-19.7. Use of UTF-8 with LDAPv2 servers............................53
+-
+-
+-
+-
+-
+-
+-
+-
--- /dev/null
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+%% Fill in the {modules, ...} entry of an OTP .app file from the .erl
+%% sources found in the given source directories.
+%%
+%% Usage: <script> InFile OutFile SrcDir...
+main([InFile, OutFile | SrcDirs]) ->
+    %% One module name per .erl file found in any of the source dirs.
+    Modules = [list_to_atom(filename:basename(F, ".erl")) ||
+                  SrcDir <- SrcDirs,
+                  F <- filelib:wildcard("*.erl", SrcDir)],
+    {ok, [{application, Application, Properties}]} = file:consult(InFile),
+    %% Populate the module list only when the template leaves it empty
+    %% (default to [] so a template that omits the key entirely is also
+    %% populated).  keystore, unlike keyreplace, inserts the tuple when
+    %% the key is absent rather than silently doing nothing.
+    NewProperties =
+        case proplists:get_value(modules, Properties, []) of
+            [] -> lists:keystore(modules, 1, Properties, {modules, Modules});
+            _  -> Properties
+        end,
+    %% Assert the write succeeded instead of discarding the result.
+    ok = file:write_file(
+           OutFile,
+           io_lib:format("~p.~n", [{application, Application, NewProperties}])).
--- /dev/null
+#!/usr/bin/env escript
+%% -*- erlang -*-
+-mode(compile).
+
+%% We expect the list of Erlang source and header files to arrive on
+%% stdin, with the entries colon-separated.
+%% Write a Makefile dependency fragment to TargetFile: one rule per .erl
+%% source mapping EbinDir/<module>.beam to its source file plus any
+%% local behaviours, parse transforms and included headers.
+main([TargetFile, EbinDir]) ->
+    %% A single line on stdin carries the colon-separated list of .erl
+    %% and .hrl paths; strip the leading blanks make leaves on entries.
+    ErlsAndHrls = [ string:strip(S,left) ||
+                        S <- string:tokens(io:get_line(""), ":\n")],
+    ErlFiles = [F || F <- ErlsAndHrls, lists:suffix(".erl", F)],
+    %% Set of module names that belong to this project: only these can
+    %% become .beam dependencies.
+    Modules = sets:from_list(
+                [list_to_atom(filename:basename(FileName, ".erl")) ||
+                    FileName <- ErlFiles]),
+    HrlFiles = [F || F <- ErlsAndHrls, lists:suffix(".hrl", F)],
+    %% Search path for the preprocessor: every directory holding a
+    %% supplied header.
+    IncludeDirs = lists:usort([filename:dirname(Path) || Path <- HrlFiles]),
+    Headers = sets:from_list(HrlFiles),
+    %% Map each source path to the set of dependencies detected in it.
+    Deps = lists:foldl(
+             fun (Path, Deps1) ->
+                     dict:store(Path, detect_deps(IncludeDirs, EbinDir,
+                                                  Modules, Headers, Path),
+                                Deps1)
+             end, dict:new(), ErlFiles),
+    {ok, Hdl} = file:open(TargetFile, [write, delayed_write]),
+    %% Emit one "ebin/mod.beam: src dep dep ..." rule per source file.
+    %% The 'ok' accumulator threads through so every write is asserted.
+    %% NOTE(review): detect_deps returns a set, never the list [], so
+    %% the first clause looks unreachable; sources with no deps still
+    %% get a rule via the second clause -- confirm that is the intent.
+    dict:fold(
+      fun (_Path, [], ok) ->
+              ok;
+          (Path, Dep, ok) ->
+              Module = filename:basename(Path, ".erl"),
+              ok = file:write(Hdl, [EbinDir, "/", Module, ".beam: ",
+                                    Path]),
+              ok = sets:fold(fun (E, ok) -> file:write(Hdl, [" ", E]) end,
+                             ok, Dep),
+              file:write(Hdl, ["\n"])
+      end, ok, Deps),
+    %% Regenerate the fragment whenever this script itself changes.
+    ok = file:write(Hdl, [TargetFile, ": ", escript:script_name(), "\n"]),
+    ok = file:sync(Hdl),
+    ok = file:close(Hdl).
+
+%% Preprocess Path with epp and fold over the resulting forms, collecting
+%% this file's build dependencies into a set: .beam paths for project-local
+%% behaviours/parse_transforms, plus any known .hrl files the preprocessor
+%% reports via 'file' attributes.
+detect_deps(IncludeDirs, EbinDir, Modules, Headers, Path) ->
+    {ok, Forms} = epp:parse_file(Path, IncludeDirs, [{use_specs, true}]),
+    CollectForm =
+        fun (Form, Acc) ->
+                case Form of
+                    %% Both spellings of the behaviour attribute count.
+                    {attribute, _Line, behaviour, Behaviour} ->
+                        maybe_add_to_deps(EbinDir, Modules, Behaviour, Acc);
+                    {attribute, _Line, behavior, Behaviour} ->
+                        maybe_add_to_deps(EbinDir, Modules, Behaviour, Acc);
+                    {attribute, _Line, compile, {parse_transform, Transform}} ->
+                        maybe_add_to_deps(EbinDir, Modules, Transform, Acc);
+                    {attribute, _Line, file, {FileName, _LineNumber}} ->
+                        %% Only headers we were told about are dependencies.
+                        case sets:is_element(FileName, Headers) of
+                            true  -> sets:add_element(FileName, Acc);
+                            false -> Acc
+                        end;
+                    _Other ->
+                        Acc
+                end
+        end,
+    lists:foldl(CollectForm, sets:new(), Forms).
+
+%% If Module is one of the project's own modules, add its .beam path
+%% (built as an iolist under EbinDir) to the dependency set; otherwise
+%% return Deps unchanged.
+maybe_add_to_deps(EbinDir, Modules, Module, Deps) ->
+    case sets:is_element(Module, Modules) of
+        false ->
+            Deps;
+        true ->
+            BeamPath = [EbinDir, "/", atom_to_list(Module), ".beam"],
+            sets:add_element(BeamPath, Deps)
+    end.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
--- /dev/null
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * http://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
--- /dev/null
+Copyright (c) 2011, Brandon Jones
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the
+ distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+EJS - Embedded JavaScript
+
+Copyright (c) 2007 Edward Benson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
--- /dev/null
+Copyright (c) 2007-2013 IOLA and Ole Laursen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+
+
--- /dev/null
+
+Copyright (c) 2010, Torbjorn Tornkvist
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
--- /dev/null
+Copyright (c) 2011 John Resig, http://jquery.com/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+Eldap is "Copyright (c) 2010, Torbjorn Tornkvist" and is covered by
+the MIT license. It was downloaded from https://github.com/etnt/eldap
+
--- /dev/null
+Mochiweb is "Copyright (c) 2007 Mochi Media, Inc." and is covered by
+the MIT license. It was downloaded from
+http://github.com/mochi/mochiweb/
+
--- /dev/null
+jQuery is "Copyright (c) 2010 John Resig" and is covered by the MIT
+license. It was downloaded from http://jquery.com/
+
+EJS is "Copyright (c) 2007 Edward Benson" and is covered by the MIT
+license. It was downloaded from http://embeddedjs.com/
+
+Sammy is "Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC" and is
+covered by the MIT license. It was downloaded from
+http://code.quirkey.com/sammy/
+
+ExplorerCanvas is "Copyright 2006 Google Inc" and is covered by the
+Apache License version 2.0. It was downloaded from
+http://code.google.com/p/explorercanvas/
+
+Flot is "Copyright (c) 2007-2013 IOLA and Ole Laursen" and is covered
+by the MIT license. It was downloaded from
+http://www.flotcharts.org/
--- /dev/null
+glMatrix is "Copyright (c) 2011, Brandon Jones" and is covered by the
+BSD 2-Clause license. It was downloaded from
+http://code.google.com/p/glmatrix/
+
--- /dev/null
+Webmachine is Copyright (c) Basho Technologies and is covered by the
+Apache License 2.0. It was downloaded from http://webmachine.basho.com/
+
--- /dev/null
+diff --git a/src/mochiglobal.erl b/src/mochiglobal.erl
+index ea645b0..6b20e41 100644
+--- a/src/mochiglobal.erl
++++ b/src/mochiglobal.erl
+@@ -6,12 +6,12 @@
+ -author("Bob Ippolito <bob@mochimedia.com>").
+ -export([get/1, get/2, put/2, delete/1]).
+
+--spec get(atom()) -> any() | undefined.
++%% -spec get(atom()) -> any() | undefined.
+ %% @equiv get(K, undefined)
+ get(K) ->
+ get(K, undefined).
+
+--spec get(atom(), T) -> any() | T.
++%% -spec get(atom(), T) -> any() | T.
+ %% @doc Get the term for K or return Default.
+ get(K, Default) ->
+ get(K, Default, key_to_module(K)).
+@@ -22,7 +22,7 @@ get(_K, Default, Mod) ->
+ Default
+ end.
+
+--spec put(atom(), any()) -> ok.
++%% -spec put(atom(), any()) -> ok.
+ %% @doc Store term V at K, replaces an existing term if present.
+ put(K, V) ->
+ put(K, V, key_to_module(K)).
+@@ -33,7 +33,7 @@ put(_K, V, Mod) ->
+ {module, Mod} = code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
+ ok.
+
+--spec delete(atom()) -> boolean().
++%% -spec delete(atom()) -> boolean().
+ %% @doc Delete term stored at K, no-op if non-existent.
+ delete(K) ->
+ delete(K, key_to_module(K)).
+@@ -42,21 +42,21 @@ delete(_K, Mod) ->
+ code:purge(Mod),
+ code:delete(Mod).
+
+--spec key_to_module(atom()) -> atom().
++%% -spec key_to_module(atom()) -> atom().
+ key_to_module(K) ->
+ list_to_atom("mochiglobal:" ++ atom_to_list(K)).
+
+--spec compile(atom(), any()) -> binary().
++%% -spec compile(atom(), any()) -> binary().
+ compile(Module, T) ->
+ {ok, Module, Bin} = compile:forms(forms(Module, T),
+ [verbose, report_errors]),
+ Bin.
+
+--spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
++%% -spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
+ forms(Module, T) ->
+ [erl_syntax:revert(X) || X <- term_to_abstract(Module, term, T)].
+
+--spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
++%% -spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
+ term_to_abstract(Module, Getter, T) ->
+ [%% -module(Module).
+ erl_syntax:attribute(
+diff --git a/src/mochiutf8.erl b/src/mochiutf8.erl
+index 28f28c1..c9d2751 100644
+--- a/src/mochiutf8.erl
++++ b/src/mochiutf8.erl
+@@ -11,11 +11,11 @@
+
+ %% External API
+
+--type unichar_low() :: 0..16#d7ff.
+--type unichar_high() :: 16#e000..16#10ffff.
+--type unichar() :: unichar_low() | unichar_high().
++%% -type unichar_low() :: 0..16#d7ff.
++%% -type unichar_high() :: 16#e000..16#10ffff.
++%% -type unichar() :: unichar_low() | unichar_high().
+
+--spec codepoint_to_bytes(unichar()) -> binary().
++%% -spec codepoint_to_bytes(unichar()) -> binary().
+ %% @doc Convert a unicode codepoint to UTF-8 bytes.
+ codepoint_to_bytes(C) when (C >= 16#00 andalso C =< 16#7f) ->
+ %% U+0000 - U+007F - 7 bits
+@@ -40,12 +40,12 @@ codepoint_to_bytes(C) when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>.
+
+--spec codepoints_to_bytes([unichar()]) -> binary().
++%% -spec codepoints_to_bytes([unichar()]) -> binary().
+ %% @doc Convert a list of codepoints to a UTF-8 binary.
+ codepoints_to_bytes(L) ->
+ <<<<(codepoint_to_bytes(C))/binary>> || C <- L>>.
+
+--spec read_codepoint(binary()) -> {unichar(), binary(), binary()}.
++%% -spec read_codepoint(binary()) -> {unichar(), binary(), binary()}.
+ read_codepoint(Bin = <<2#0:1, C:7, Rest/binary>>) ->
+ %% U+0000 - U+007F - 7 bits
+ <<B:1/binary, _/binary>> = Bin,
+@@ -82,32 +82,32 @@ read_codepoint(Bin = <<2#11110:5, B3:3,
+ {C, B, Rest}
+ end.
+
+--spec codepoint_foldl(fun((unichar(), _) -> _), _, binary()) -> _.
++%% -spec codepoint_foldl(fun((unichar(), _) -> _), _, binary()) -> _.
+ codepoint_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+ codepoint_foldl(F, Acc, Bin) ->
+ {C, _, Rest} = read_codepoint(Bin),
+ codepoint_foldl(F, F(C, Acc), Rest).
+
+--spec bytes_foldl(fun((binary(), _) -> _), _, binary()) -> _.
++%% -spec bytes_foldl(fun((binary(), _) -> _), _, binary()) -> _.
+ bytes_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+ bytes_foldl(F, Acc, Bin) ->
+ {_, B, Rest} = read_codepoint(Bin),
+ bytes_foldl(F, F(B, Acc), Rest).
+
+--spec bytes_to_codepoints(binary()) -> [unichar()].
++%% -spec bytes_to_codepoints(binary()) -> [unichar()].
+ bytes_to_codepoints(B) ->
+ lists:reverse(codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], B)).
+
+--spec len(binary()) -> non_neg_integer().
++%% -spec len(binary()) -> non_neg_integer().
+ len(<<>>) ->
+ 0;
+ len(B) ->
+ {_, _, Rest} = read_codepoint(B),
+ 1 + len(Rest).
+
+--spec valid_utf8_bytes(B::binary()) -> binary().
++%% -spec valid_utf8_bytes(B::binary()) -> binary().
+ %% @doc Return only the bytes in B that represent valid UTF-8. Uses
+ %% the following recursive algorithm: skip one byte if B does not
+ %% follow UTF-8 syntax (a 1-4 byte encoding of some number),
+@@ -118,7 +118,7 @@ valid_utf8_bytes(B) when is_binary(B) ->
+
+ %% Internal API
+
+--spec binary_skip_bytes(binary(), [non_neg_integer()]) -> binary().
++%% -spec binary_skip_bytes(binary(), [non_neg_integer()]) -> binary().
+ %% @doc Return B, but skipping the 0-based indexes in L.
+ binary_skip_bytes(B, []) ->
+ B;
+@@ -126,7 +126,7 @@ binary_skip_bytes(B, L) ->
+ binary_skip_bytes(B, L, 0, []).
+
+ %% @private
+--spec binary_skip_bytes(binary(), [non_neg_integer()], non_neg_integer(), iolist()) -> binary().
++%% -spec binary_skip_bytes(binary(), [non_neg_integer()], non_neg_integer(), iolist()) -> binary().
+ binary_skip_bytes(B, [], _N, Acc) ->
+ iolist_to_binary(lists:reverse([B | Acc]));
+ binary_skip_bytes(<<_, RestB/binary>>, [N | RestL], N, Acc) ->
+@@ -134,13 +134,13 @@ binary_skip_bytes(<<_, RestB/binary>>, [N | RestL], N, Acc) ->
+ binary_skip_bytes(<<C, RestB/binary>>, L, N, Acc) ->
+ binary_skip_bytes(RestB, L, 1 + N, [C | Acc]).
+
+--spec invalid_utf8_indexes(binary()) -> [non_neg_integer()].
++%% -spec invalid_utf8_indexes(binary()) -> [non_neg_integer()].
+ %% @doc Return the 0-based indexes in B that are not valid UTF-8.
+ invalid_utf8_indexes(B) ->
+ invalid_utf8_indexes(B, 0, []).
+
+ %% @private.
+--spec invalid_utf8_indexes(binary(), non_neg_integer(), [non_neg_integer()]) -> [non_neg_integer()].
++%% -spec invalid_utf8_indexes(binary(), non_neg_integer(), [non_neg_integer()]) -> [non_neg_integer()].
+ invalid_utf8_indexes(<<C, Rest/binary>>, N, Acc) when C < 16#80 ->
+ %% U+0000 - U+007F - 7 bits
+ invalid_utf8_indexes(Rest, 1 + N, Acc);
+diff --git a/src/mochiweb_charref.erl b/src/mochiweb_charref.erl
+index 193c7c7..665d0f9 100644
+--- a/src/mochiweb_charref.erl
++++ b/src/mochiweb_charref.erl
+@@ -11,7 +11,7 @@
+ %% codepoint, or return undefined on failure.
+ %% The input should not include an ampersand or semicolon.
+ %% charref("#38") = 38, charref("#x26") = 38, charref("amp") = 38.
+--spec charref(binary() | string()) -> integer() | [integer()] | undefined.
++%% -spec charref(binary() | string()) -> integer() | [integer()] | undefined.
+ charref(B) when is_binary(B) ->
+ charref(binary_to_list(B));
+ charref([$#, C | L]) when C =:= $x orelse C =:= $X ->
+diff --git a/src/mochiweb_http.erl b/src/mochiweb_http.erl
+index 931ecd0..ae6410f 100644
+--- a/src/mochiweb_http.erl
++++ b/src/mochiweb_http.erl
+@@ -121,12 +121,12 @@ call_body({M, F}, Req) ->
+ call_body(Body, Req) ->
+ Body(Req).
+
+--spec handle_invalid_request(term()) -> no_return().
++%% -spec handle_invalid_request(term()) -> no_return().
+ handle_invalid_request(Socket) ->
+ handle_invalid_request(Socket, {'GET', {abs_path, "/"}, {0,9}}, []),
+ exit(normal).
+
+--spec handle_invalid_request(term(), term(), term()) -> no_return().
++%% -spec handle_invalid_request(term(), term(), term()) -> no_return().
+ handle_invalid_request(Socket, Request, RevHeaders) ->
+ Req = new_request(Socket, Request, RevHeaders),
+ Req:respond({400, [], []}),
+diff --git a/src/mochiweb_session.erl b/src/mochiweb_session.erl
+index ac5d66b..ddf7c46 100644
+--- a/src/mochiweb_session.erl
++++ b/src/mochiweb_session.erl
+@@ -21,11 +21,11 @@
+
+ %% @doc Generates a secure encrypted binary convining all the parameters. The
+ %% expiration time must be a 32-bit integer.
+--spec generate_session_data(
+- ExpirationTime :: expiration_time(),
+- Data :: iolist(),
+- FSessionKey :: key_fun(),
+- ServerKey :: iolist()) -> binary().
++%% -spec generate_session_data(
++%% ExpirationTime :: expiration_time(),
++%% Data :: iolist(),
++%% FSessionKey :: key_fun(),
++%% ServerKey :: iolist()) -> binary().
+ generate_session_data(ExpirationTime, Data, FSessionKey, ServerKey)
+ when is_integer(ExpirationTime), is_function(FSessionKey)->
+ BData = ensure_binary(Data),
+@@ -39,11 +39,11 @@ generate_session_data(ExpirationTime, Data, FSessionKey, ServerKey)
+ %% @doc Convenience wrapper for generate_session_data that returns a
+ %% mochiweb cookie with "id" as the key, a max_age of 20000 seconds,
+ %% and the current local time as local time.
+--spec generate_session_cookie(
+- ExpirationTime :: expiration_time(),
+- Data :: iolist(),
+- FSessionKey :: key_fun(),
+- ServerKey :: iolist()) -> header().
++%% -spec generate_session_cookie(
++%% ExpirationTime :: expiration_time(),
++%% Data :: iolist(),
++%% FSessionKey :: key_fun(),
++%% ServerKey :: iolist()) -> header().
+ generate_session_cookie(ExpirationTime, Data, FSessionKey, ServerKey)
+ when is_integer(ExpirationTime), is_function(FSessionKey)->
+ CookieData = generate_session_data(ExpirationTime, Data,
+@@ -55,13 +55,13 @@ generate_session_cookie(ExpirationTime, Data, FSessionKey, ServerKey)
+ calendar:universal_time())}]).
+
+ %% TODO: This return type is messy to express in the type system.
+--spec check_session_cookie(
+- ECookie :: binary(),
+- ExpirationTime :: string(),
+- FSessionKey :: key_fun(),
+- ServerKey :: iolist()) ->
+- {Success :: boolean(),
+- ExpTimeAndData :: [integer() | binary()]}.
++%% -spec check_session_cookie(
++ %% ECookie :: binary(),
++ %% ExpirationTime :: string(),
++ %% FSessionKey :: key_fun(),
++ %% ServerKey :: iolist()) ->
++ %% {Success :: boolean(),
++ %% ExpTimeAndData :: [integer() | binary()]}.
+ check_session_cookie(ECookie, ExpirationTime, FSessionKey, ServerKey)
+ when is_binary(ECookie), is_integer(ExpirationTime),
+ is_function(FSessionKey) ->
+@@ -83,7 +83,7 @@ check_session_cookie(_ECookie, _ExpirationTime, _FSessionKey, _ServerKey) ->
+ {false, []}.
+
+ %% 'Constant' time =:= operator for binary, to mitigate timing attacks.
+--spec eq(binary(), binary()) -> boolean().
++%% -spec eq(binary(), binary()) -> boolean().
+ eq(A, B) when is_binary(A) andalso is_binary(B) ->
+ eq(A, B, 0).
+
+@@ -94,27 +94,27 @@ eq(<<>>, <<>>, 0) ->
+ eq(_As, _Bs, _Acc) ->
+ false.
+
+--spec ensure_binary(iolist()) -> binary().
++%% -spec ensure_binary(iolist()) -> binary().
+ ensure_binary(B) when is_binary(B) ->
+ B;
+ ensure_binary(L) when is_list(L) ->
+ iolist_to_binary(L).
+
+--spec encrypt_data(binary(), binary()) -> binary().
++%% -spec encrypt_data(binary(), binary()) -> binary().
+ encrypt_data(Data, Key) ->
+ IV = crypto:rand_bytes(16),
+ Crypt = crypto:aes_cfb_128_encrypt(Key, IV, Data),
+ <<IV/binary, Crypt/binary>>.
+
+--spec decrypt_data(binary(), binary()) -> binary().
++%% -spec decrypt_data(binary(), binary()) -> binary().
+ decrypt_data(<<IV:16/binary, Crypt/binary>>, Key) ->
+ crypto:aes_cfb_128_decrypt(Key, IV, Crypt).
+
+--spec gen_key(iolist(), iolist()) -> binary().
++%% -spec gen_key(iolist(), iolist()) -> binary().
+ gen_key(ExpirationTime, ServerKey)->
+ crypto:md5_mac(ServerKey, [ExpirationTime]).
+
+--spec gen_hmac(iolist(), binary(), iolist(), binary()) -> binary().
++%% -spec gen_hmac(iolist(), binary(), iolist(), binary()) -> binary().
+ gen_hmac(ExpirationTime, Data, SessionKey, Key) ->
+ crypto:sha_mac(Key, [ExpirationTime, Data, SessionKey]).
+
--- /dev/null
+diff --git a/src/mochiweb_request.erl b/src/mochiweb_request.erl
+index 5d89662..6765ab0 100644
+--- a/src/mochiweb_request.erl
++++ b/src/mochiweb_request.erl
+@@ -42,7 +42,7 @@
+ -define(IDLE_TIMEOUT, 300000).
+
+ % Maximum recv_body() length of 1MB
+--define(MAX_RECV_BODY, (1024*1024)).
++-define(MAX_RECV_BODY, 104857600).
+
+ %% @spec get_header_value(K) -> undefined | Value
+ %% @doc Get the value of a given request header.
--- /dev/null
+diff --git a/src/mochitemp.erl b/src/mochitemp.erl
+index dda7863..f64876d 100644
+--- a/src/mochitemp.erl
++++ b/src/mochitemp.erl
+@@ -1,7 +1,7 @@
+ %% @author Bob Ippolito <bob@mochimedia.com>
+ %% @copyright 2010 Mochi Media, Inc.
+
+-%% @doc Create temporary files and directories. Requires crypto to be started.
++%% @doc Create temporary files and directories.
+
+ -module(mochitemp).
+ -export([gettempdir/0]).
+@@ -87,7 +87,7 @@ rngchars(N) ->
+ [rngchar() | rngchars(N - 1)].
+
+ rngchar() ->
+- rngchar(crypto:rand_uniform(0, tuple_size(?SAFE_CHARS))).
++ rngchar(mochiweb_util:rand_uniform(0, tuple_size(?SAFE_CHARS))).
+
+ rngchar(C) ->
+ element(1 + C, ?SAFE_CHARS).
+@@ -177,7 +177,6 @@ gettempdir_cwd_test() ->
+ ok.
+
+ rngchars_test() ->
+- crypto:start(),
+ ?assertEqual(
+ "",
+ rngchars(0)),
+@@ -199,7 +198,6 @@ rngchar_test() ->
+ ok.
+
+ mkdtemp_n_failonce_test() ->
+- crypto:start(),
+ D = mkdtemp(),
+ Path = filename:join([D, "testdir"]),
+ %% Toggle the existence of a dir so that it fails
+@@ -246,7 +244,6 @@ make_dir_fail_test() ->
+ ok.
+
+ mkdtemp_test() ->
+- crypto:start(),
+ D = mkdtemp(),
+ ?assertEqual(
+ true,
+@@ -257,7 +254,6 @@ mkdtemp_test() ->
+ ok.
+
+ rmtempdir_test() ->
+- crypto:start(),
+ D1 = mkdtemp(),
+ ?assertEqual(
+ true,
+diff --git a/src/mochiweb.app.src b/src/mochiweb.app.src
+index 8d75a3a..c98d8a0 100644
+--- a/src/mochiweb.app.src
++++ b/src/mochiweb.app.src
+@@ -5,5 +5,5 @@
+ {modules, []},
+ {registered, []},
+ {env, []},
+- {applications, [kernel, stdlib, crypto, inets, ssl, xmerl,
++ {applications, [kernel, stdlib, inets, xmerl,
+ compiler, syntax_tools]}]}.
+diff --git a/src/mochiweb_multipart.erl b/src/mochiweb_multipart.erl
+index a83a88c..a4857d6 100644
+--- a/src/mochiweb_multipart.erl
++++ b/src/mochiweb_multipart.erl
+@@ -38,7 +38,7 @@ parts_to_body([{Start, End, Body}], ContentType, Size) ->
+ {HeaderList, Body};
+ parts_to_body(BodyList, ContentType, Size) when is_list(BodyList) ->
+ parts_to_multipart_body(BodyList, ContentType, Size,
+- mochihex:to_hex(crypto:rand_bytes(8))).
++ mochihex:to_hex(mochiweb_util:rand_bytes(8))).
+
+ %% @spec parts_to_multipart_body([bodypart()], ContentType::string(),
+ %% Size::integer(), Boundary::string()) ->
+diff --git a/src/mochiweb_util.erl b/src/mochiweb_util.erl
+index 4d39990..a0bc2bc 100644
+--- a/src/mochiweb_util.erl
++++ b/src/mochiweb_util.erl
+@@ -13,7 +13,7 @@
+ -export([record_to_proplist/2, record_to_proplist/3]).
+ -export([safe_relative_path/1, partition/2]).
+ -export([parse_qvalues/1, pick_accepted_encodings/3]).
+--export([make_io/1]).
++-export([make_io/1, rand_bytes/1, rand_uniform/2]).
+
+ -define(PERCENT, 37). % $\%
+ -define(FULLSTOP, 46). % $\.
+@@ -581,6 +581,12 @@ make_io(Integer) when is_integer(Integer) ->
+ make_io(Io) when is_list(Io); is_binary(Io) ->
+ Io.
+
++rand_bytes(Count) ->
++ list_to_binary([rand_uniform(0, 16#FF + 1) || _ <- lists:seq(1, Count)]).
++
++rand_uniform(Lo, Hi) ->
++ random:uniform(Hi - Lo) + Lo - 1.
++
+ %%
+ %% Tests
+ %%
--- /dev/null
+diff --git a/src/mochiglobal.erl b/src/mochiglobal.erl
+deleted file mode 100644
+index 6b20e41..0000000
+--- a/src/mochiglobal.erl
++++ /dev/null
+@@ -1,107 +0,0 @@
+-%% @author Bob Ippolito <bob@mochimedia.com>
+-%% @copyright 2010 Mochi Media, Inc.
+-%% @doc Abuse module constant pools as a "read-only shared heap" (since erts 5.6)
+-%% <a href="http://www.erlang.org/pipermail/erlang-questions/2009-March/042503.html">[1]</a>.
+--module(mochiglobal).
+--author("Bob Ippolito <bob@mochimedia.com>").
+--export([get/1, get/2, put/2, delete/1]).
+-
+-%% -spec get(atom()) -> any() | undefined.
+-%% @equiv get(K, undefined)
+-get(K) ->
+- get(K, undefined).
+-
+-%% -spec get(atom(), T) -> any() | T.
+-%% @doc Get the term for K or return Default.
+-get(K, Default) ->
+- get(K, Default, key_to_module(K)).
+-
+-get(_K, Default, Mod) ->
+- try Mod:term()
+- catch error:undef ->
+- Default
+- end.
+-
+-%% -spec put(atom(), any()) -> ok.
+-%% @doc Store term V at K, replaces an existing term if present.
+-put(K, V) ->
+- put(K, V, key_to_module(K)).
+-
+-put(_K, V, Mod) ->
+- Bin = compile(Mod, V),
+- code:purge(Mod),
+- {module, Mod} = code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
+- ok.
+-
+-%% -spec delete(atom()) -> boolean().
+-%% @doc Delete term stored at K, no-op if non-existent.
+-delete(K) ->
+- delete(K, key_to_module(K)).
+-
+-delete(_K, Mod) ->
+- code:purge(Mod),
+- code:delete(Mod).
+-
+-%% -spec key_to_module(atom()) -> atom().
+-key_to_module(K) ->
+- list_to_atom("mochiglobal:" ++ atom_to_list(K)).
+-
+-%% -spec compile(atom(), any()) -> binary().
+-compile(Module, T) ->
+- {ok, Module, Bin} = compile:forms(forms(Module, T),
+- [verbose, report_errors]),
+- Bin.
+-
+-%% -spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
+-forms(Module, T) ->
+- [erl_syntax:revert(X) || X <- term_to_abstract(Module, term, T)].
+-
+-%% -spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
+-term_to_abstract(Module, Getter, T) ->
+- [%% -module(Module).
+- erl_syntax:attribute(
+- erl_syntax:atom(module),
+- [erl_syntax:atom(Module)]),
+- %% -export([Getter/0]).
+- erl_syntax:attribute(
+- erl_syntax:atom(export),
+- [erl_syntax:list(
+- [erl_syntax:arity_qualifier(
+- erl_syntax:atom(Getter),
+- erl_syntax:integer(0))])]),
+- %% Getter() -> T.
+- erl_syntax:function(
+- erl_syntax:atom(Getter),
+- [erl_syntax:clause([], none, [erl_syntax:abstract(T)])])].
+-
+-%%
+-%% Tests
+-%%
+--ifdef(TEST).
+--include_lib("eunit/include/eunit.hrl").
+-get_put_delete_test() ->
+- K = '$$test$$mochiglobal',
+- delete(K),
+- ?assertEqual(
+- bar,
+- get(K, bar)),
+- try
+- ?MODULE:put(K, baz),
+- ?assertEqual(
+- baz,
+- get(K, bar)),
+- ?MODULE:put(K, wibble),
+- ?assertEqual(
+- wibble,
+- ?MODULE:get(K))
+- after
+- delete(K)
+- end,
+- ?assertEqual(
+- bar,
+- get(K, bar)),
+- ?assertEqual(
+- undefined,
+- ?MODULE:get(K)),
+- ok.
+--endif.
+diff --git a/src/mochiweb.app.src b/src/mochiweb.app.src
+index c98d8a0..4a6808e 100644
+--- a/src/mochiweb.app.src
++++ b/src/mochiweb.app.src
+@@ -5,5 +5,4 @@
+ {modules, []},
+ {registered, []},
+ {env, []},
+- {applications, [kernel, stdlib, inets, xmerl,
+- compiler, syntax_tools]}]}.
++ {applications, [kernel, stdlib, inets, xmerl]}]}.
--- /dev/null
+diff --git a/src/mochijson2.erl b/src/mochijson2.erl
+deleted file mode 100644
+index 2b8d16e..0000000
+--- a/src/mochijson2.erl
++++ /dev/null
+@@ -1,889 +0,0 @@
+-%% @author Bob Ippolito <bob@mochimedia.com>
+-%% @copyright 2007 Mochi Media, Inc.
+-
+-%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
+-%% with binaries as strings, arrays as lists (without an {array, _})
+-%% wrapper and it only knows how to decode UTF-8 (and ASCII).
+-%%
+-%% JSON terms are decoded as follows (javascript -> erlang):
+-%% <ul>
+-%% <li>{"key": "value"} ->
+-%% {struct, [{<<"key">>, <<"value">>}]}</li>
+-%% <li>["array", 123, 12.34, true, false, null] ->
+-%% [<<"array">>, 123, 12.34, true, false, null]
+-%% </li>
+-%% </ul>
+-%% <ul>
+-%% <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
+-%% <li>Objects decode to {struct, PropList}</li>
+-%% <li>Numbers decode to integer or float</li>
+-%% <li>true, false, null decode to their respective terms.</li>
+-%% </ul>
+-%% The encoder will accept the same format that the decoder will produce,
+-%% but will also allow additional cases for leniency:
+-%% <ul>
+-%% <li>atoms other than true, false, null will be considered UTF-8
+-%% strings (even as a proplist key)
+-%% </li>
+-%% <li>{json, IoList} will insert IoList directly into the output
+-%% with no validation
+-%% </li>
+-%% <li>{array, Array} will be encoded as Array
+-%% (legacy mochijson style)
+-%% </li>
+-%% <li>A non-empty raw proplist will be encoded as an object as long
+-%% as the first pair does not have an atom key of json, struct,
+-%% or array
+-%% </li>
+-%% </ul>
+-
+--module(mochijson2).
+--author('bob@mochimedia.com').
+--export([encoder/1, encode/1]).
+--export([decoder/1, decode/1, decode/2]).
+-
+-%% This is a macro to placate syntax highlighters..
+--define(Q, $\").
+--define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
+- column=N+S#decoder.column}).
+--define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
+- column=1+S#decoder.column}).
+--define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
+- column=1,
+- line=1+S#decoder.line}).
+--define(INC_CHAR(S, C),
+- case C of
+- $\n ->
+- S#decoder{column=1,
+- line=1+S#decoder.line,
+- offset=1+S#decoder.offset};
+- _ ->
+- S#decoder{column=1+S#decoder.column,
+- offset=1+S#decoder.offset}
+- end).
+--define(IS_WHITESPACE(C),
+- (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+-
+-%% @type json_string() = atom | binary()
+-%% @type json_number() = integer() | float()
+-%% @type json_array() = [json_term()]
+-%% @type json_object() = {struct, [{json_string(), json_term()}]}
+-%% @type json_eep18_object() = {[{json_string(), json_term()}]}
+-%% @type json_iolist() = {json, iolist()}
+-%% @type json_term() = json_string() | json_number() | json_array() |
+-%% json_object() | json_eep18_object() | json_iolist()
+-
+--record(encoder, {handler=null,
+- utf8=false}).
+-
+--record(decoder, {object_hook=null,
+- offset=0,
+- line=1,
+- column=1,
+- state=null}).
+-
+-%% @spec encoder([encoder_option()]) -> function()
+-%% @doc Create an encoder/1 with the given options.
+-%% @type encoder_option() = handler_option() | utf8_option()
+-%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
+-encoder(Options) ->
+- State = parse_encoder_options(Options, #encoder{}),
+- fun (O) -> json_encode(O, State) end.
+-
+-%% @spec encode(json_term()) -> iolist()
+-%% @doc Encode the given as JSON to an iolist.
+-encode(Any) ->
+- json_encode(Any, #encoder{}).
+-
+-%% @spec decoder([decoder_option()]) -> function()
+-%% @doc Create a decoder/1 with the given options.
+-decoder(Options) ->
+- State = parse_decoder_options(Options, #decoder{}),
+- fun (O) -> json_decode(O, State) end.
+-
+-%% @spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term()
+-%% @doc Decode the given iolist to Erlang terms using the given object format
+-%% for decoding, where proplist returns JSON objects as [{binary(), json_term()}]
+-%% proplists, eep18 returns JSON objects as {[binary(), json_term()]}, and struct
+-%% returns them as-is.
+-decode(S, Options) ->
+- json_decode(S, parse_decoder_options(Options, #decoder{})).
+-
+-%% @spec decode(iolist()) -> json_term()
+-%% @doc Decode the given iolist to Erlang terms.
+-decode(S) ->
+- json_decode(S, #decoder{}).
+-
+-%% Internal API
+-
+-parse_encoder_options([], State) ->
+- State;
+-parse_encoder_options([{handler, Handler} | Rest], State) ->
+- parse_encoder_options(Rest, State#encoder{handler=Handler});
+-parse_encoder_options([{utf8, Switch} | Rest], State) ->
+- parse_encoder_options(Rest, State#encoder{utf8=Switch}).
+-
+-parse_decoder_options([], State) ->
+- State;
+-parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+- parse_decoder_options(Rest, State#decoder{object_hook=Hook});
+-parse_decoder_options([{format, Format} | Rest], State)
+- when Format =:= struct orelse Format =:= eep18 orelse Format =:= proplist ->
+- parse_decoder_options(Rest, State#decoder{object_hook=Format}).
+-
+-json_encode(true, _State) ->
+- <<"true">>;
+-json_encode(false, _State) ->
+- <<"false">>;
+-json_encode(null, _State) ->
+- <<"null">>;
+-json_encode(I, _State) when is_integer(I) ->
+- integer_to_list(I);
+-json_encode(F, _State) when is_float(F) ->
+- mochinum:digits(F);
+-json_encode(S, State) when is_binary(S); is_atom(S) ->
+- json_encode_string(S, State);
+-json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
+- K =/= array andalso
+- K =/= json) ->
+- json_encode_proplist(Props, State);
+-json_encode({struct, Props}, State) when is_list(Props) ->
+- json_encode_proplist(Props, State);
+-json_encode({Props}, State) when is_list(Props) ->
+- json_encode_proplist(Props, State);
+-json_encode({}, State) ->
+- json_encode_proplist([], State);
+-json_encode(Array, State) when is_list(Array) ->
+- json_encode_array(Array, State);
+-json_encode({array, Array}, State) when is_list(Array) ->
+- json_encode_array(Array, State);
+-json_encode({json, IoList}, _State) ->
+- IoList;
+-json_encode(Bad, #encoder{handler=null}) ->
+- exit({json_encode, {bad_term, Bad}});
+-json_encode(Bad, State=#encoder{handler=Handler}) ->
+- json_encode(Handler(Bad), State).
+-
+-json_encode_array([], _State) ->
+- <<"[]">>;
+-json_encode_array(L, State) ->
+- F = fun (O, Acc) ->
+- [$,, json_encode(O, State) | Acc]
+- end,
+- [$, | Acc1] = lists:foldl(F, "[", L),
+- lists:reverse([$\] | Acc1]).
+-
+-json_encode_proplist([], _State) ->
+- <<"{}">>;
+-json_encode_proplist(Props, State) ->
+- F = fun ({K, V}, Acc) ->
+- KS = json_encode_string(K, State),
+- VS = json_encode(V, State),
+- [$,, VS, $:, KS | Acc]
+- end,
+- [$, | Acc1] = lists:foldl(F, "{", Props),
+- lists:reverse([$\} | Acc1]).
+-
+-json_encode_string(A, State) when is_atom(A) ->
+- L = atom_to_list(A),
+- case json_string_is_safe(L) of
+- true ->
+- [?Q, L, ?Q];
+- false ->
+- json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
+- end;
+-json_encode_string(B, State) when is_binary(B) ->
+- case json_bin_is_safe(B) of
+- true ->
+- [?Q, B, ?Q];
+- false ->
+- json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
+- end;
+-json_encode_string(I, _State) when is_integer(I) ->
+- [?Q, integer_to_list(I), ?Q];
+-json_encode_string(L, State) when is_list(L) ->
+- case json_string_is_safe(L) of
+- true ->
+- [?Q, L, ?Q];
+- false ->
+- json_encode_string_unicode(L, State, [?Q])
+- end.
+-
+-json_string_is_safe([]) ->
+- true;
+-json_string_is_safe([C | Rest]) ->
+- case C of
+- ?Q ->
+- false;
+- $\\ ->
+- false;
+- $\b ->
+- false;
+- $\f ->
+- false;
+- $\n ->
+- false;
+- $\r ->
+- false;
+- $\t ->
+- false;
+- C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+- false;
+- C when C < 16#7f ->
+- json_string_is_safe(Rest);
+- _ ->
+- false
+- end.
+-
+-json_bin_is_safe(<<>>) ->
+- true;
+-json_bin_is_safe(<<C, Rest/binary>>) ->
+- case C of
+- ?Q ->
+- false;
+- $\\ ->
+- false;
+- $\b ->
+- false;
+- $\f ->
+- false;
+- $\n ->
+- false;
+- $\r ->
+- false;
+- $\t ->
+- false;
+- C when C >= 0, C < $\s; C >= 16#7f ->
+- false;
+- C when C < 16#7f ->
+- json_bin_is_safe(Rest)
+- end.
+-
+-json_encode_string_unicode([], _State, Acc) ->
+- lists:reverse([$\" | Acc]);
+-json_encode_string_unicode([C | Cs], State, Acc) ->
+- Acc1 = case C of
+- ?Q ->
+- [?Q, $\\ | Acc];
+- %% Escaping solidus is only useful when trying to protect
+- %% against "</script>" injection attacks which are only
+- %% possible when JSON is inserted into a HTML document
+- %% in-line. mochijson2 does not protect you from this, so
+- %% if you do insert directly into HTML then you need to
+- %% uncomment the following case or escape the output of encode.
+- %%
+- %% $/ ->
+- %% [$/, $\\ | Acc];
+- %%
+- $\\ ->
+- [$\\, $\\ | Acc];
+- $\b ->
+- [$b, $\\ | Acc];
+- $\f ->
+- [$f, $\\ | Acc];
+- $\n ->
+- [$n, $\\ | Acc];
+- $\r ->
+- [$r, $\\ | Acc];
+- $\t ->
+- [$t, $\\ | Acc];
+- C when C >= 0, C < $\s ->
+- [unihex(C) | Acc];
+- C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
+- [xmerl_ucs:to_utf8(C) | Acc];
+- C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
+- [unihex(C) | Acc];
+- C when C < 16#7f ->
+- [C | Acc];
+- _ ->
+- exit({json_encode, {bad_char, C}})
+- end,
+- json_encode_string_unicode(Cs, State, Acc1).
+-
+-hexdigit(C) when C >= 0, C =< 9 ->
+- C + $0;
+-hexdigit(C) when C =< 15 ->
+- C + $a - 10.
+-
+-unihex(C) when C < 16#10000 ->
+- <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+- Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+- [$\\, $u | Digits];
+-unihex(C) when C =< 16#10FFFF ->
+- N = C - 16#10000,
+- S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+- S2 = 16#dc00 bor (N band 16#3ff),
+- [unihex(S1), unihex(S2)].
+-
+-json_decode(L, S) when is_list(L) ->
+- json_decode(iolist_to_binary(L), S);
+-json_decode(B, S) ->
+- {Res, S1} = decode1(B, S),
+- {eof, _} = tokenize(B, S1#decoder{state=trim}),
+- Res.
+-
+-decode1(B, S=#decoder{state=null}) ->
+- case tokenize(B, S#decoder{state=any}) of
+- {{const, C}, S1} ->
+- {C, S1};
+- {start_array, S1} ->
+- decode_array(B, S1);
+- {start_object, S1} ->
+- decode_object(B, S1)
+- end.
+-
+-make_object(V, #decoder{object_hook=N}) when N =:= null orelse N =:= struct ->
+- V;
+-make_object({struct, P}, #decoder{object_hook=eep18}) ->
+- {P};
+-make_object({struct, P}, #decoder{object_hook=proplist}) ->
+- P;
+-make_object(V, #decoder{object_hook=Hook}) ->
+- Hook(V).
+-
+-decode_object(B, S) ->
+- decode_object(B, S#decoder{state=key}, []).
+-
+-decode_object(B, S=#decoder{state=key}, Acc) ->
+- case tokenize(B, S) of
+- {end_object, S1} ->
+- V = make_object({struct, lists:reverse(Acc)}, S1),
+- {V, S1#decoder{state=null}};
+- {{const, K}, S1} ->
+- {colon, S2} = tokenize(B, S1),
+- {V, S3} = decode1(B, S2#decoder{state=null}),
+- decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
+- end;
+-decode_object(B, S=#decoder{state=comma}, Acc) ->
+- case tokenize(B, S) of
+- {end_object, S1} ->
+- V = make_object({struct, lists:reverse(Acc)}, S1),
+- {V, S1#decoder{state=null}};
+- {comma, S1} ->
+- decode_object(B, S1#decoder{state=key}, Acc)
+- end.
+-
+-decode_array(B, S) ->
+- decode_array(B, S#decoder{state=any}, []).
+-
+-decode_array(B, S=#decoder{state=any}, Acc) ->
+- case tokenize(B, S) of
+- {end_array, S1} ->
+- {lists:reverse(Acc), S1#decoder{state=null}};
+- {start_array, S1} ->
+- {Array, S2} = decode_array(B, S1),
+- decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+- {start_object, S1} ->
+- {Array, S2} = decode_object(B, S1),
+- decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+- {{const, Const}, S1} ->
+- decode_array(B, S1#decoder{state=comma}, [Const | Acc])
+- end;
+-decode_array(B, S=#decoder{state=comma}, Acc) ->
+- case tokenize(B, S) of
+- {end_array, S1} ->
+- {lists:reverse(Acc), S1#decoder{state=null}};
+- {comma, S1} ->
+- decode_array(B, S1#decoder{state=any}, Acc)
+- end.
+-
+-tokenize_string(B, S=#decoder{offset=O}) ->
+- case tokenize_string_fast(B, O) of
+- {escape, O1} ->
+- Length = O1 - O,
+- S1 = ?ADV_COL(S, Length),
+- <<_:O/binary, Head:Length/binary, _/binary>> = B,
+- tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
+- O1 ->
+- Length = O1 - O,
+- <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
+- {{const, String}, ?ADV_COL(S, Length + 1)}
+- end.
+-
+-tokenize_string_fast(B, O) ->
+- case B of
+- <<_:O/binary, ?Q, _/binary>> ->
+- O;
+- <<_:O/binary, $\\, _/binary>> ->
+- {escape, O};
+- <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+- tokenize_string_fast(B, 1 + O);
+- <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+- C2 >= 128, C2 =< 191 ->
+- tokenize_string_fast(B, 2 + O);
+- <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+- C2 >= 128, C2 =< 191,
+- C3 >= 128, C3 =< 191 ->
+- tokenize_string_fast(B, 3 + O);
+- <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+- C2 >= 128, C2 =< 191,
+- C3 >= 128, C3 =< 191,
+- C4 >= 128, C4 =< 191 ->
+- tokenize_string_fast(B, 4 + O);
+- _ ->
+- throw(invalid_utf8)
+- end.
+-
+-tokenize_string(B, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, ?Q, _/binary>> ->
+- {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
+- <<_:O/binary, "\\\"", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
+- <<_:O/binary, "\\\\", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
+- <<_:O/binary, "\\/", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
+- <<_:O/binary, "\\b", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
+- <<_:O/binary, "\\f", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
+- <<_:O/binary, "\\n", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
+- <<_:O/binary, "\\r", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
+- <<_:O/binary, "\\t", _/binary>> ->
+- tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
+- <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
+- C = erlang:list_to_integer([C3, C2, C1, C0], 16),
+- if C > 16#D7FF, C < 16#DC00 ->
+- %% coalesce UTF-16 surrogate pair
+- <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
+- D = erlang:list_to_integer([D3,D2,D1,D0], 16),
+- [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
+- D:16/big-unsigned-integer>>),
+- Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+- tokenize_string(B, ?ADV_COL(S, 12), Acc1);
+- true ->
+- Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
+- tokenize_string(B, ?ADV_COL(S, 6), Acc1)
+- end;
+- <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+- tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
+- <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+- C2 >= 128, C2 =< 191 ->
+- tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
+- <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+- C2 >= 128, C2 =< 191,
+- C3 >= 128, C3 =< 191 ->
+- tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
+- <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+- C2 >= 128, C2 =< 191,
+- C3 >= 128, C3 =< 191,
+- C4 >= 128, C4 =< 191 ->
+- tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
+- _ ->
+- throw(invalid_utf8)
+- end.
+-
+-tokenize_number(B, S) ->
+- case tokenize_number(B, sign, S, []) of
+- {{int, Int}, S1} ->
+- {{const, list_to_integer(Int)}, S1};
+- {{float, Float}, S1} ->
+- {{const, list_to_float(Float)}, S1}
+- end.
+-
+-tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
+- case B of
+- <<_:O/binary, $-, _/binary>> ->
+- tokenize_number(B, int, ?INC_COL(S), [$-]);
+- _ ->
+- tokenize_number(B, int, S, [])
+- end;
+-tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, $0, _/binary>> ->
+- tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
+- <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
+- tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
+- end;
+-tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+- tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
+- _ ->
+- tokenize_number(B, frac, S, Acc)
+- end;
+-tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
+- tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+- <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+- tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+- _ ->
+- {{int, lists:reverse(Acc)}, S}
+- end;
+-tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+- tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
+- <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+- tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
+- _ ->
+- {{float, lists:reverse(Acc)}, S}
+- end;
+-tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
+- tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
+- _ ->
+- tokenize_number(B, eint, S, Acc)
+- end;
+-tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+- tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
+- end;
+-tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
+- case B of
+- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+- tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
+- _ ->
+- {{float, lists:reverse(Acc)}, S}
+- end.
+-
+-tokenize(B, S=#decoder{offset=O}) ->
+- case B of
+- <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+- tokenize(B, ?INC_CHAR(S, C));
+- <<_:O/binary, "{", _/binary>> ->
+- {start_object, ?INC_COL(S)};
+- <<_:O/binary, "}", _/binary>> ->
+- {end_object, ?INC_COL(S)};
+- <<_:O/binary, "[", _/binary>> ->
+- {start_array, ?INC_COL(S)};
+- <<_:O/binary, "]", _/binary>> ->
+- {end_array, ?INC_COL(S)};
+- <<_:O/binary, ",", _/binary>> ->
+- {comma, ?INC_COL(S)};
+- <<_:O/binary, ":", _/binary>> ->
+- {colon, ?INC_COL(S)};
+- <<_:O/binary, "null", _/binary>> ->
+- {{const, null}, ?ADV_COL(S, 4)};
+- <<_:O/binary, "true", _/binary>> ->
+- {{const, true}, ?ADV_COL(S, 4)};
+- <<_:O/binary, "false", _/binary>> ->
+- {{const, false}, ?ADV_COL(S, 5)};
+- <<_:O/binary, "\"", _/binary>> ->
+- tokenize_string(B, ?INC_COL(S));
+- <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
+- orelse C =:= $- ->
+- tokenize_number(B, S);
+- <<_:O/binary>> ->
+- trim = S#decoder.state,
+- {eof, S}
+- end.
+-%%
+-%% Tests
+-%%
+--ifdef(TEST).
+--include_lib("eunit/include/eunit.hrl").
+-
+-
+-%% testing constructs borrowed from the Yaws JSON implementation.
+-
+-%% Create an object from a list of Key/Value pairs.
+-
+-obj_new() ->
+- {struct, []}.
+-
+-is_obj({struct, Props}) ->
+- F = fun ({K, _}) when is_binary(K) -> true end,
+- lists:all(F, Props).
+-
+-obj_from_list(Props) ->
+- Obj = {struct, Props},
+- ?assert(is_obj(Obj)),
+- Obj.
+-
+-%% Test for equivalence of Erlang terms.
+-%% Due to arbitrary order of construction, equivalent objects might
+-%% compare unequal as erlang terms, so we need to carefully recurse
+-%% through aggregates (tuples and objects).
+-
+-equiv({struct, Props1}, {struct, Props2}) ->
+- equiv_object(Props1, Props2);
+-equiv(L1, L2) when is_list(L1), is_list(L2) ->
+- equiv_list(L1, L2);
+-equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+-equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
+-equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
+-
+-%% Object representation and traversal order is unknown.
+-%% Use the sledgehammer and sort property lists.
+-
+-equiv_object(Props1, Props2) ->
+- L1 = lists:keysort(1, Props1),
+- L2 = lists:keysort(1, Props2),
+- Pairs = lists:zip(L1, L2),
+- true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+- equiv(K1, K2) and equiv(V1, V2)
+- end, Pairs).
+-
+-%% Recursively compare tuple elements for equivalence.
+-
+-equiv_list([], []) ->
+- true;
+-equiv_list([V1 | L1], [V2 | L2]) ->
+- equiv(V1, V2) andalso equiv_list(L1, L2).
+-
+-decode_test() ->
+- [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
+- <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
+-
+-e2j_vec_test() ->
+- test_one(e2j_test_vec(utf8), 1).
+-
+-test_one([], _N) ->
+- %% io:format("~p tests passed~n", [N-1]),
+- ok;
+-test_one([{E, J} | Rest], N) ->
+- %% io:format("[~p] ~p ~p~n", [N, E, J]),
+- true = equiv(E, decode(J)),
+- true = equiv(E, decode(encode(E))),
+- test_one(Rest, 1+N).
+-
+-e2j_test_vec(utf8) ->
+- [
+- {1, "1"},
+- {3.1416, "3.14160"}, %% text representation may truncate, trail zeroes
+- {-1, "-1"},
+- {-3.1416, "-3.14160"},
+- {12.0e10, "1.20000e+11"},
+- {1.234E+10, "1.23400e+10"},
+- {-1.234E-10, "-1.23400e-10"},
+- {10.0, "1.0e+01"},
+- {123.456, "1.23456E+2"},
+- {10.0, "1e1"},
+- {<<"foo">>, "\"foo\""},
+- {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
+- {<<"">>, "\"\""},
+- {<<"\n\n\n">>, "\"\\n\\n\\n\""},
+- {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+- {obj_new(), "{}"},
+- {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
+- {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
+- "{\"foo\":\"bar\",\"baz\":123}"},
+- {[], "[]"},
+- {[[]], "[[]]"},
+- {[1, <<"foo">>], "[1,\"foo\"]"},
+-
+- %% json array in a json object
+- {obj_from_list([{<<"foo">>, [123]}]),
+- "{\"foo\":[123]}"},
+-
+- %% json object in a json object
+- {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
+- "{\"foo\":{\"bar\":true}}"},
+-
+- %% fold evaluation order
+- {obj_from_list([{<<"foo">>, []},
+- {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
+- {<<"alice">>, <<"bob">>}]),
+- "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+-
+- %% json object in a json array
+- {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
+- "[-123,\"foo\",{\"bar\":[]},null]"}
+- ].
+-
+-%% test utf8 encoding
+-encoder_utf8_test() ->
+- %% safe conversion case (default)
+- [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
+- encode(<<1,"\321\202\320\265\321\201\321\202">>),
+-
+- %% raw utf8 output (optional)
+- Enc = mochijson2:encoder([{utf8, true}]),
+- [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
+- Enc(<<1,"\321\202\320\265\321\201\321\202">>).
+-
+-input_validation_test() ->
+- Good = [
+- {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
+- {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
+- {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
+- ],
+- lists:foreach(fun({CodePoint, UTF8}) ->
+- Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
+- Expect = decode(UTF8)
+- end, Good),
+-
+- Bad = [
+- %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
+- <<?Q, 16#80, ?Q>>,
+- %% missing continuations, last byte in each should be 80-BF
+- <<?Q, 16#C2, 16#7F, ?Q>>,
+- <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
+- <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
+- %% we don't support code points > 10FFFF per RFC 3629
+- <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
+- %% escape characters trigger a different code path
+- <<?Q, $\\, $\n, 16#80, ?Q>>
+- ],
+- lists:foreach(
+- fun(X) ->
+- ok = try decode(X) catch invalid_utf8 -> ok end,
+- %% could be {ucs,{bad_utf8_character_code}} or
+- %% {json_encode,{bad_char,_}}
+- {'EXIT', _} = (catch encode(X))
+- end, Bad).
+-
+-inline_json_test() ->
+- ?assertEqual(<<"\"iodata iodata\"">>,
+- iolist_to_binary(
+- encode({json, [<<"\"iodata">>, " iodata\""]}))),
+- ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
+- decode(
+- encode({struct,
+- [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
+- ok.
+-
+-big_unicode_test() ->
+- UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
+- ?assertEqual(
+- <<"\"\\ud834\\udd20\"">>,
+- iolist_to_binary(encode(UTF8Seq))),
+- ?assertEqual(
+- UTF8Seq,
+- decode(iolist_to_binary(encode(UTF8Seq)))),
+- ok.
+-
+-custom_decoder_test() ->
+- ?assertEqual(
+- {struct, [{<<"key">>, <<"value">>}]},
+- (decoder([]))("{\"key\": \"value\"}")),
+- F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
+- ?assertEqual(
+- win,
+- (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
+- ok.
+-
+-atom_test() ->
+- %% JSON native atoms
+- [begin
+- ?assertEqual(A, decode(atom_to_list(A))),
+- ?assertEqual(iolist_to_binary(atom_to_list(A)),
+- iolist_to_binary(encode(A)))
+- end || A <- [true, false, null]],
+- %% Atom to string
+- ?assertEqual(
+- <<"\"foo\"">>,
+- iolist_to_binary(encode(foo))),
+- ?assertEqual(
+- <<"\"\\ud834\\udd20\"">>,
+- iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
+- ok.
+-
+-key_encode_test() ->
+- %% Some forms are accepted as keys that would not be strings in other
+- %% cases
+- ?assertEqual(
+- <<"{\"foo\":1}">>,
+- iolist_to_binary(encode({struct, [{foo, 1}]}))),
+- ?assertEqual(
+- <<"{\"foo\":1}">>,
+- iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
+- ?assertEqual(
+- <<"{\"foo\":1}">>,
+- iolist_to_binary(encode({struct, [{"foo", 1}]}))),
+- ?assertEqual(
+- <<"{\"foo\":1}">>,
+- iolist_to_binary(encode([{foo, 1}]))),
+- ?assertEqual(
+- <<"{\"foo\":1}">>,
+- iolist_to_binary(encode([{<<"foo">>, 1}]))),
+- ?assertEqual(
+- <<"{\"foo\":1}">>,
+- iolist_to_binary(encode([{"foo", 1}]))),
+- ?assertEqual(
+- <<"{\"\\ud834\\udd20\":1}">>,
+- iolist_to_binary(
+- encode({struct, [{[16#0001d120], 1}]}))),
+- ?assertEqual(
+- <<"{\"1\":1}">>,
+- iolist_to_binary(encode({struct, [{1, 1}]}))),
+- ok.
+-
+-unsafe_chars_test() ->
+- Chars = "\"\\\b\f\n\r\t",
+- [begin
+- ?assertEqual(false, json_string_is_safe([C])),
+- ?assertEqual(false, json_bin_is_safe(<<C>>)),
+- ?assertEqual(<<C>>, decode(encode(<<C>>)))
+- end || C <- Chars],
+- ?assertEqual(
+- false,
+- json_string_is_safe([16#0001d120])),
+- ?assertEqual(
+- false,
+- json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
+- ?assertEqual(
+- [16#0001d120],
+- xmerl_ucs:from_utf8(
+- binary_to_list(
+- decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
+- ?assertEqual(
+- false,
+- json_string_is_safe([16#110000])),
+- ?assertEqual(
+- false,
+- json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
+- %% solidus can be escaped but isn't unsafe by default
+- ?assertEqual(
+- <<"/">>,
+- decode(<<"\"\\/\"">>)),
+- ok.
+-
+-int_test() ->
+- ?assertEqual(0, decode("0")),
+- ?assertEqual(1, decode("1")),
+- ?assertEqual(11, decode("11")),
+- ok.
+-
+-large_int_test() ->
+- ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
+- iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
+- ?assertEqual(<<"2147483649214748364921474836492147483649">>,
+- iolist_to_binary(encode(2147483649214748364921474836492147483649))),
+- ok.
+-
+-float_test() ->
+- ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
+- ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
+- ok.
+-
+-handler_test() ->
+- ?assertEqual(
+- {'EXIT',{json_encode,{bad_term,{x,y}}}},
+- catch encode({x,y})),
+- F = fun ({x,y}) -> [] end,
+- ?assertEqual(
+- <<"[]">>,
+- iolist_to_binary((encoder([{handler, F}]))({x, y}))),
+- ok.
+-
+-encode_empty_test_() ->
+- [{A, ?_assertEqual(<<"{}">>, iolist_to_binary(encode(B)))}
+- || {A, B} <- [{"eep18 {}", {}},
+- {"eep18 {[]}", {[]}},
+- {"{struct, []}", {struct, []}}]].
+-
+-encode_test_() ->
+- P = [{<<"k">>, <<"v">>}],
+- JSON = iolist_to_binary(encode({struct, P})),
+- [{atom_to_list(F),
+- ?_assertEqual(JSON, iolist_to_binary(encode(decode(JSON, [{format, F}]))))}
+- || F <- [struct, eep18, proplist]].
+-
+-format_test_() ->
+- P = [{<<"k">>, <<"v">>}],
+- JSON = iolist_to_binary(encode({struct, P})),
+- [{atom_to_list(F),
+- ?_assertEqual(A, decode(JSON, [{format, F}]))}
+- || {F, A} <- [{struct, {struct, P}},
+- {eep18, {P}},
+- {proplist, P}]].
+-
+--endif.
+diff --git a/src/mochinum.erl b/src/mochinum.erl
+deleted file mode 100644
+index c52b15c..0000000
+--- a/src/mochinum.erl
++++ /dev/null
+@@ -1,354 +0,0 @@
+-%% @copyright 2007 Mochi Media, Inc.
+-%% @author Bob Ippolito <bob@mochimedia.com>
+-
+-%% @doc Useful numeric algorithms for floats that cover some deficiencies
+-%% in the math module. More interesting is digits/1, which implements
+-%% the algorithm from:
+-%% http://www.cs.indiana.edu/~burger/fp/index.html
+-%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+-%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+-%% Design and Implementation.
+-
+--module(mochinum).
+--author("Bob Ippolito <bob@mochimedia.com>").
+--export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+-
+-%% IEEE 754 Float exponent bias
+--define(FLOAT_BIAS, 1022).
+--define(MIN_EXP, -1074).
+--define(BIG_POW, 4503599627370496).
+-
+-%% External API
+-
+-%% @spec digits(number()) -> string()
+-%% @doc Returns a string that accurately represents the given integer or float
+-%% using a conservative amount of digits. Great for generating
+-%% human-readable output, or compact ASCII serializations for floats.
+-digits(N) when is_integer(N) ->
+- integer_to_list(N);
+-digits(0.0) ->
+- "0.0";
+-digits(Float) ->
+- {Frac1, Exp1} = frexp_int(Float),
+- [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
+- {Place, Digits} = transform_digits(Place0, Digits0),
+- R = insert_decimal(Place, Digits),
+- case Float < 0 of
+- true ->
+- [$- | R];
+- _ ->
+- R
+- end.
+-
+-%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
+-%% @doc Return the fractional and exponent part of an IEEE 754 double,
+-%% equivalent to the libc function of the same name.
+-%% F = Frac * pow(2, Exp).
+-frexp(F) ->
+- frexp1(unpack(F)).
+-
+-%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+-%% @doc Moderately efficient way to exponentiate integers.
+-%% int_pow(10, 2) = 100.
+-int_pow(_X, 0) ->
+- 1;
+-int_pow(X, N) when N > 0 ->
+- int_pow(X, N, 1).
+-
+-%% @spec int_ceil(F::float()) -> integer()
+-%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+-%% F when F == trunc(F);
+-%% trunc(F) when F < 0;
+-%% trunc(F) + 1 when F > 0.
+-int_ceil(X) ->
+- T = trunc(X),
+- case (X - T) of
+- Pos when Pos > 0 -> T + 1;
+- _ -> T
+- end.
+-
+-
+-%% Internal API
+-
+-int_pow(X, N, R) when N < 2 ->
+- R * X;
+-int_pow(X, N, R) ->
+- int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+-
+-insert_decimal(0, S) ->
+- "0." ++ S;
+-insert_decimal(Place, S) when Place > 0 ->
+- L = length(S),
+- case Place - L of
+- 0 ->
+- S ++ ".0";
+- N when N < 0 ->
+- {S0, S1} = lists:split(L + N, S),
+- S0 ++ "." ++ S1;
+- N when N < 6 ->
+- %% More places than digits
+- S ++ lists:duplicate(N, $0) ++ ".0";
+- _ ->
+- insert_decimal_exp(Place, S)
+- end;
+-insert_decimal(Place, S) when Place > -6 ->
+- "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+-insert_decimal(Place, S) ->
+- insert_decimal_exp(Place, S).
+-
+-insert_decimal_exp(Place, S) ->
+- [C | S0] = S,
+- S1 = case S0 of
+- [] ->
+- "0";
+- _ ->
+- S0
+- end,
+- Exp = case Place < 0 of
+- true ->
+- "e-";
+- false ->
+- "e+"
+- end,
+- [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+-
+-
+-digits1(Float, Exp, Frac) ->
+- Round = ((Frac band 1) =:= 0),
+- case Exp >= 0 of
+- true ->
+- BExp = 1 bsl Exp,
+- case (Frac =/= ?BIG_POW) of
+- true ->
+- scale((Frac * BExp * 2), 2, BExp, BExp,
+- Round, Round, Float);
+- false ->
+- scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+- Round, Round, Float)
+- end;
+- false ->
+- case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+- true ->
+- scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+- Round, Round, Float);
+- false ->
+- scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+- Round, Round, Float)
+- end
+- end.
+-
+-scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+- Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+- %% Note that the scheme implementation uses a 326 element look-up table
+- %% for int_pow(10, N) where we do not.
+- case Est >= 0 of
+- true ->
+- fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+- LowOk, HighOk);
+- false ->
+- Scale = int_pow(10, -Est),
+- fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+- LowOk, HighOk)
+- end.
+-
+-fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+- TooLow = case HighOk of
+- true ->
+- (R + MPlus) >= S;
+- false ->
+- (R + MPlus) > S
+- end,
+- case TooLow of
+- true ->
+- [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+- false ->
+- [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+- end.
+-
+-generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+- D = R0 div S,
+- R = R0 rem S,
+- TC1 = case LowOk of
+- true ->
+- R =< MMinus;
+- false ->
+- R < MMinus
+- end,
+- TC2 = case HighOk of
+- true ->
+- (R + MPlus) >= S;
+- false ->
+- (R + MPlus) > S
+- end,
+- case TC1 of
+- false ->
+- case TC2 of
+- false ->
+- [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+- LowOk, HighOk)];
+- true ->
+- [D + 1]
+- end;
+- true ->
+- case TC2 of
+- false ->
+- [D];
+- true ->
+- case R * 2 < S of
+- true ->
+- [D];
+- false ->
+- [D + 1]
+- end
+- end
+- end.
+-
+-unpack(Float) ->
+- <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+- {Sign, Exp, Frac}.
+-
+-frexp1({_Sign, 0, 0}) ->
+- {0.0, 0};
+-frexp1({Sign, 0, Frac}) ->
+- Exp = log2floor(Frac),
+- <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+- {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+-frexp1({Sign, Exp, Frac}) ->
+- <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+- {Frac1, Exp - ?FLOAT_BIAS}.
+-
+-log2floor(Int) ->
+- log2floor(Int, 0).
+-
+-log2floor(0, N) ->
+- N;
+-log2floor(Int, N) ->
+- log2floor(Int bsr 1, 1 + N).
+-
+-
+-transform_digits(Place, [0 | Rest]) ->
+- transform_digits(Place, Rest);
+-transform_digits(Place, Digits) ->
+- {Place, [$0 + D || D <- Digits]}.
+-
+-
+-frexp_int(F) ->
+- case unpack(F) of
+- {_Sign, 0, Frac} ->
+- {Frac, ?MIN_EXP};
+- {_Sign, Exp, Frac} ->
+- {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
+- end.
+-
+-%%
+-%% Tests
+-%%
+--ifdef(TEST).
+--include_lib("eunit/include/eunit.hrl").
+-
+-int_ceil_test() ->
+- ?assertEqual(1, int_ceil(0.0001)),
+- ?assertEqual(0, int_ceil(0.0)),
+- ?assertEqual(1, int_ceil(0.99)),
+- ?assertEqual(1, int_ceil(1.0)),
+- ?assertEqual(-1, int_ceil(-1.5)),
+- ?assertEqual(-2, int_ceil(-2.0)),
+- ok.
+-
+-int_pow_test() ->
+- ?assertEqual(1, int_pow(1, 1)),
+- ?assertEqual(1, int_pow(1, 0)),
+- ?assertEqual(1, int_pow(10, 0)),
+- ?assertEqual(10, int_pow(10, 1)),
+- ?assertEqual(100, int_pow(10, 2)),
+- ?assertEqual(1000, int_pow(10, 3)),
+- ok.
+-
+-digits_test() ->
+- ?assertEqual("0",
+- digits(0)),
+- ?assertEqual("0.0",
+- digits(0.0)),
+- ?assertEqual("1.0",
+- digits(1.0)),
+- ?assertEqual("-1.0",
+- digits(-1.0)),
+- ?assertEqual("0.1",
+- digits(0.1)),
+- ?assertEqual("0.01",
+- digits(0.01)),
+- ?assertEqual("0.001",
+- digits(0.001)),
+- ?assertEqual("1.0e+6",
+- digits(1000000.0)),
+- ?assertEqual("0.5",
+- digits(0.5)),
+- ?assertEqual("4503599627370496.0",
+- digits(4503599627370496.0)),
+- %% small denormalized number
+- %% 4.94065645841246544177e-324 =:= 5.0e-324
+- <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+- ?assertEqual("5.0e-324",
+- digits(SmallDenorm)),
+- ?assertEqual(SmallDenorm,
+- list_to_float(digits(SmallDenorm))),
+- %% large denormalized number
+- %% 2.22507385850720088902e-308
+- <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+- ?assertEqual("2.225073858507201e-308",
+- digits(BigDenorm)),
+- ?assertEqual(BigDenorm,
+- list_to_float(digits(BigDenorm))),
+- %% small normalized number
+- %% 2.22507385850720138309e-308
+- <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+- ?assertEqual("2.2250738585072014e-308",
+- digits(SmallNorm)),
+- ?assertEqual(SmallNorm,
+- list_to_float(digits(SmallNorm))),
+- %% large normalized number
+- %% 1.79769313486231570815e+308
+- <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+- ?assertEqual("1.7976931348623157e+308",
+- digits(LargeNorm)),
+- ?assertEqual(LargeNorm,
+- list_to_float(digits(LargeNorm))),
+- %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+- ?assertEqual("5.0e-324",
+- digits(math:pow(2, -1074))),
+- ok.
+-
+-frexp_test() ->
+- %% zero
+- ?assertEqual({0.0, 0}, frexp(0.0)),
+- %% one
+- ?assertEqual({0.5, 1}, frexp(1.0)),
+- %% negative one
+- ?assertEqual({-0.5, 1}, frexp(-1.0)),
+- %% small denormalized number
+- %% 4.94065645841246544177e-324
+- <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+- ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
+- %% large denormalized number
+- %% 2.22507385850720088902e-308
+- <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+- ?assertEqual(
+- {0.99999999999999978, -1022},
+- frexp(BigDenorm)),
+- %% small normalized number
+- %% 2.22507385850720138309e-308
+- <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+- ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
+- %% large normalized number
+- %% 1.79769313486231570815e+308
+- <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+- ?assertEqual(
+- {0.99999999999999989, 1024},
+- frexp(LargeNorm)),
+- %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+- ?assertEqual(
+- {0.5, -1073},
+- frexp(math:pow(2, -1074))),
+- ok.
+-
+--endif.
--- /dev/null
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+UPSTREAM_SHORT_HASH:=680dba8
--- /dev/null
+Mochiweb is "Copyright (c) 2007 Mochi Media, Inc." and is covered by
+the MIT license. It was downloaded from
+http://github.com/mochi/mochiweb/
+
--- /dev/null
+language: erlang
+notifications:
+ email: false
+otp_release:
+ - R15B02
+ - R15B03
+ - R16B
--- /dev/null
+Version 2.7.0 released XXXX-XX-XX
+
+* `mochiweb_socket_server:stop/1` is now a synchronous
+ call instead of an asynchronous cast
+* `mochiweb_html:parse_tokens/1` (and `parse/1`) will now create a
+ html element to wrap documents that have a HTML5 doctype
+ (`<!doctype html>`) but no html element
+ https://github.com/mochi/mochiweb/issues/110
+
+Version 2.6.0 released 2013-04-15
+
+* Enable R15B gen_tcp workaround only on R15B
+ https://github.com/mochi/mochiweb/pull/107
+
+Version 2.5.0 released 2013-03-04
+
+* Replace now() with os:timestamp() in acceptor (optimization)
+ https://github.com/mochi/mochiweb/pull/102
+* New mochiweb_session module for managing session cookies.
+ NOTE: this module is only supported on R15B02 and later!
+ https://github.com/mochi/mochiweb/pull/94
+* New mochiweb_base64url module for base64url encoding
+ (URL and Filename safe alphabet, see RFC 4648).
+* Fix rebar.config in mochiwebapp_skel to use {branch, "master"}
+ https://github.com/mochi/mochiweb/issues/105
+
+Version 2.4.2 released 2013-02-05
+
+* Fixed issue in mochiweb_response introduced in v2.4.0
+ https://github.com/mochi/mochiweb/pull/100
+
+Version 2.4.1 released 2013-01-30
+
+* Fixed issue in mochiweb_request introduced in v2.4.0
+ https://github.com/mochi/mochiweb/issues/97
+* Fixed issue in mochifmt_records introduced in v2.4.0
+ https://github.com/mochi/mochiweb/issues/96
+
+Version 2.4.0 released 2013-01-23
+
+* Switch from parameterized modules to explicit tuple module calls for
+ R16 compatibility (#95)
+* Fix for mochiweb_acceptor crash with extra-long HTTP headers under
+ R15B02 (#91)
+* Fix case in handling range headers (#85)
+* Handle combined Content-Length header (#88)
+* Windows security fix for `safe_relative_path`, any path with a
+ backslash on any platform is now considered unsafe (#92)
+
+Version 2.3.2 released 2012-07-27
+
+* Case insensitive match for "Connection: close" (#81)
+
+Version 2.3.1 released 2012-03-31
+
+* Fix edoc warnings (#63)
+* Fix mochiweb_html handling of invalid charref sequences (unescaped &) (#69).
+* Add a manual garbage collection between requests to avoid worst case behavior
+ on keep-alive sockets.
+* Fix dst cookie bug (#73)
+* Removed unnecessary template_dir option, see
+ https://github.com/basho/rebar/issues/203
+
+Version 2.3.0 released 2011-10-14
+
+* Handle ssl_closed message in mochiweb_http (#59)
+* Added support for new MIME types (otf, eot, m4v, svg, svgz, ttc, ttf,
+ vcf, webm, webp, woff) (#61)
+* Updated mochiweb_charref to support all HTML5 entities. Note that
+ if you are using this module directly, the spec has changed to return
+ `[integer()]` for some entities. (#64)
+
+Version 2.2.1 released 2011-08-31
+
+* Removed `mochiweb_skel` module from the pre-rebar era
+
+Version 2.2.0 released 2011-08-29
+
+* Added new `mochiweb_http:start_link/1` and
+ `mochiweb_socket_server:start_link/1` APIs to explicitly start linked
+ servers. Also added `{link, false}` option to the `start/1` variants
+ to explicitly start unlinked. This is in expectation that we will
+ eventually change the default behavior of `start/1` to be unlinked as you
+ would expect it to. See https://github.com/mochi/mochiweb/issues/58 for
+ discussion.
+
+Version 2.1.0 released 2011-08-29
+
+* Added new `mochijson2:decode/2` with `{format, struct | proplist | eep18}`
+ options for easy decoding to various proplist formats. Also added encoding
+ support for eep18 style objects.
--- /dev/null
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+
+# Build driver for a mochiweb project, delegating everything to rebar.
+# PREFIX/DEST control where `make app` drops a freshly generated project
+# skeleton (override with: make app PROJECT=name PREFIX=/some/dir/).
+PREFIX:=../
+DEST:=$(PREFIX)$(PROJECT)
+
+REBAR=./rebar
+
+# NOTE(review): all of these targets are phony (none produce a file of the
+# same name); consider declaring `.PHONY: all edoc test clean build_plt
+# dialyzer app` so a stray file named e.g. `test` cannot mask a target.
+
+# Fetch dependencies and compile the application (default target).
+all:
+	@$(REBAR) get-deps compile
+
+# Generate EDoc API documentation.
+edoc:
+	@$(REBAR) doc
+
+# Run the EUnit suite in a clean .eunit scratch directory.
+test:
+	@rm -rf .eunit
+	@mkdir -p .eunit
+	@$(REBAR) skip_deps=true eunit
+
+clean:
+	@$(REBAR) clean
+
+# Build the dialyzer persistent lookup table (one-off, slow).
+build_plt:
+	@$(REBAR) build-plt
+
+# Run dialyzer static analysis against the PLT built above.
+dialyzer:
+	@$(REBAR) dialyze
+
+# Generate a new mochiweb-based application skeleton at $(DEST).
+app:
+	@$(REBAR) create template=mochiwebapp dest=$(DEST) appid=$(PROJECT)
+
--- /dev/null
+MochiWeb is an Erlang library for building lightweight HTTP servers.
+
+The latest version of MochiWeb is available at http://github.com/mochi/mochiweb
+
+The mailing list for MochiWeb is at http://groups.google.com/group/mochiweb/
+
+R12B compatibility:
+The master of MochiWeb is tested with R14A and later. A branch compatible
+with R12B is maintained separately at http://github.com/lemenkov/mochiweb
+The R12B branch of that repository is mirrored in the official repository
+occasionally for convenience.
+
+To create a new mochiweb using project:
+ make app PROJECT=project_name
+
+To create a new mochiweb using project in a specific directory:
+ make app PROJECT=project_name PREFIX=$HOME/projects/
--- /dev/null
+Introduction
+------------
+
+This example shows how to make an Amazon-style HMAC authentication system for an API with mochiweb.
+
+Purpose
+-------
+
+The purpose of this example is to:
+* make it easy to implement an API in mochiweb
+ - using a proven approach so that 'amateurs' don't have to reinvent crypto
+* make it easy to generate client libraries for that API so that client-side implementers can:
+ - reuse closely related code examples
+ - build compatibility unit tests instead of fiddling around debugging their library against live implementations of the system
+
+Scope
+-----
+
+The scope of this document is:
+* a description of the client-server exchange
+* a reference implementation of
+ - the server-side implementation of the exchange
+ - the client-side implementation of the exchange
+* developing a custom implementation of an API
+* deploying that implementation to new client-side users to build their client libraries
+
+Contents
+--------
+
+Subsequent sections of this document are:
+* the client-server exchange
+* the reference implementation in this example
+* building a custom implementation
+* deploying a custom implementation
+
+The Client-Server Exchange
+--------------------------
+
+OVERVIEW
+
+This section describes the client-server exchange for an Amazon-style API authentication schema. It has the following characteristics:
+* based on a public key/private key
+* used to authenticate non-SSL api requests
+* not a full once-use schema and is vulnerable to replay attacks within a short time window
+
+TYPE OF API
+
+The api described in this document is:
+* suitable for machine-machine communication
+
+The api described in this document is NOT:
+* an implementation of 2-legged OAUTH
+ - see https://github.com/tim/erlang-oauth
+* an implementation of 3-legged OAUTH
+
+It is not suitable for use in applications where an end user has to log into a service and piggy-back on top of a keypair security system.
+
+THE CLIENT LIBRARY HERE IS **NOT** AN AMAZON CLIENT LIBRARY. AMAZON DOES FUNKY STUFF WITH HOSTNAMES AND PUSHES THEM ONTO THE URL IN CANONICALIZATION! THE CLIENT LIBRARY IS AMAZON-A-LIKE ENOUGH TO USE THE AMAZON DOCOS TO BUILD A TEST SUITE.
+
+STEP 1
+
+The client is issued with a pair of keys, one public, one private, for example:
+* public: "bcaa49f2a4f7d4f92ac36c8bf66d5bb6"
+* private: "92bc93d6b8aaec1cde772f903e06daf5"
+
+In the Amazon docs these are referred to as:
+* AWSAccessKeyId (public)
+* AWSSecretAccessKey (private)
+
+These can be generated by the function:
+hmac_api_lib:get_api_keypair/0
+
+This function returns cryptographically strong random numbers using the openSSL crypto library under the covers.
+
+The public key is used as a declaration of identity, "I am bcaa49..."
+
+The private key is never passed over the wire and is used to construct the same hash on both the client- and the server-side.
+
+STEP 2
+
+The client prepares their request:
+* url
+* time of request
+* action (GET, POST, etc)
+* type of request (application/json, etc)
+* contents of request
+* etc, etc
+
+These components are then turned into a string called the canonical form.
+
+The HTTP protocol is permissive; it treats different requests as if they were the same. For instance it doesn't care about the order in which headers are sent, and allows the same header to contain multiple values as a list or be specified multiple times as a key-value pair.
+
+Intermediate machines between the client and server MAY pack and repack the HTTP request as long as they don't alter its meaning in a narrow sense. This means that the format of the HTTP request is not guaranteed to be maintained.
+
+The canonical form simply ensures that all the valid ways of making the same request are represented by the same string - irrespective of how this is done.
+
+The canonical form handles POST bodies and query parameters and silently discards anchors in URLs.
+
+A hash of this string is made with the private key.
+
+STEP 3
+
+The client makes the request to the server:
+* the signature is included in the request in the standard HTTPAuthorization header. (As the Amazon documentation points out this is infelicitous as it is being used for Authentication not Authorization, but hey!).
+
+The Authorization header constructed has the form:
+<schema name><space><public key><colon><signature>
+
+An Amazon one looks like:
+Authorization: AWS 0PN5J17HBGZHT7JJ3X82:frJIUN8DYpKDtOLCwo//yllqDzg=
+ --- -------------------- ----------------------------
+ sch public key signature
+
+The HTTP request is made.
+
+STEP 4
+
+The request is processed:
+* the server receives the request
+* the server constructs the canonical form from the attributes of the request:
+ - url
+ - date header
+ - action (GET, POST, etc)
+ - content type of request (application/json, etc)
+ - some custom headers
+ - etc, etc
+* the server takes the client's public key from the HTTPAuthorization header and looks up the client's private key
+* the server signs the canonical form with the private key
+* the server compares:
+ - the signature in the request to the signature it has just generated
+ - the time encoded in the request with the server time
+* the request is accepted or denied
+
+The time comparison is 'fuzzy'. Different servers' clocks will be out of sync to a degree, the request may have acquired a time from an intermediate machine along the way, etc, etc. Normally a 'clock skew' time is allowed - in Amazon's case this is 15 minutes.
+
+NOTA BENE: THIS CLOCK SKEW TIME ALLOWS FOR REPLAY ATTACKS WHERE A BAD GUY SIMPLY CAPTURES AND REPLAYS TRAFFIC.
+
+EXTENSION
+
+It is possible to extend this schema to prevent replay attacks. The server issues a nonce token (a random string) which is included in the signature. When the server authorizes the request it stores the token and prevents any request with that token (ie a replay) being authorized again.
+
+The client receives its next nonce token in the response to a successful request.
+
+The Reference Implementation In This Example
+--------------------------------------------
+
+The reference implementation used in this example is that described in the Amazon documentation here:
+http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAuthentication.html
+
+To try out the reference implementation:
+* create a new mochiweb project as per the mochiweb README
+ - make app PROJECT=project_name
+* copy hmac_api_lib.erl and hmac_api_client.erl into project_name/src
+* copy hmac_api.hrl into project_name/include
+* edit project_name_web.erl and add a call to hmac_api_lib:authorize_request/1
+
+hmac_api_lib:authorize_request/1 should be called in the loop of project_name_web.erl as per:
+
+ loop(Req, DocRoot) ->
+ Auth = hmac_api_lib:authorize_request(Req),
+ io:format("Auth is ~p~n", [Auth]),
+ "/" ++ Path = Req:get(path),
+ ...
+
+When this is done you are ready to test the api:
+* run 'make' in project_name/ to build the Erlang
+* start the web server with 'start-dev.sh' in project_name/ (this will also open an Erlang shell to the Erlang VM)
+
+To test the api run this command in the Erlang shell:
+* hmac_api_client:fire().
+
+The reference implementation uses 5 constants defined in hmac_api.hrl.
+* schema
+* headerprefix
+* dateheader
+* publickey
+* privatekey
+
+Building A Custom Implementation
+--------------------------------
+
+The simplest custom implementation is to simply take the existing code and change the values of the following constants:
+* schema
+* headerprefix
+* dateheader
+
+If the API is to be used 'as is', please use the values which are commented out in hmac_api.hrl. This will make it easier for software developers to work out which version of which client-side libraries they can use.
+
+Client libraries written in languages other than Erlang can reuse the test suite in hmac_api_lib.erl.
+
+More sophisticated changes will involve changes to the canonicalization functions.
+
+Use of a generic schema should make reuse of client libraries easier across different platforms.
+
+If you develop an 'as-is' client-side library in another language please consider submitting its code to this example.
+
+Deploying A Custom Implementation
+---------------------------------
+
+When deploying a custom implementation, the server-side code should be released with unit tests so the client-side developer can easily build a robust client.
+
+In addition to that you will need to specify:
+* description of how the API works:
+ - ie the acceptable methods and urls
+ - custom headers and their usage (if appropriate)
+
--- /dev/null
+-author("Hypernumbers Ltd <gordon@hypernumbers.com>").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%                                                                          %%%
+%%% Reference values for testing against Amazon documents                    %%%
+%%%                                                                          %%%
+%%% These need to be changed in production!                                  %%%
+%%%                                                                          %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% schema name prepended to the Authorization header value, e.g. "AWS pub:sig"
+-define(schema, "AWS").
+%% defines the prefix for headers to be included in the signature
+-define(headerprefix, "x-amz-").
+%% defines the date header
+-define(dateheader, "x-amz-date").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%                                                                          %%%
+%%% Default values for defining a generic API                                %%%
+%%%                                                                          %%%
+%%% Only change these if you alter the canonicalisation                      %%%
+%%%                                                                          %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%-define(schema, "MOCHIAPI").
+%%-define(headerprefix, "x-mochiapi-").
+%%-define(dateheader, "x-mochiapi-date").
+
+%% a couple of keys for testing
+%% these are taken from the document
+%% % http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAuthentication.html
+%% they are not valid keys!
+-define(publickey, "0PN5J17HBGZHT7JJ3X82").
+-define(privatekey, "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o").
+
+
+%% Components of a request that are folded into the canonical
+%% string-to-sign (see hmac_api_lib:make_signature_string/1).
+-record(hmac_signature,
+ {
+ method,      % HTTP method as a lower-case atom, e.g. post
+ contentmd5,  % value of the content-md5 header, or "" / []
+ contenttype, % value of the content-type header, or "" / []
+ date,        % value of the date header (ignored if an x-amz-date header exists)
+ headers,     % full header proplist; only ?headerprefix ones are signed
+ resource     % request URL or path to canonicalise
+ }).
--- /dev/null
+-module(hmac_api_client).
+
+%% Example client for the HMAC-signed API: builds a signed POST request
+%% and fires it at a locally running server via httpc.
+-export([
+ fire/0
+ ]).
+
+-include("hmac_api.hrl").
+-author("Hypernumbers Ltd <gordon@hypernumbers.com>").
+
+%% @doc Sign a sample POST request with the test ?privatekey and send it
+%% to http://127.0.0.1:8080/ using inets httpc. Returns whatever
+%% httpc:request/4 returns ({ok, Result} | {error, Reason}).
+fire() ->
+ URL = "http://127.0.0.1:8080/some/page/yeah/",
+ %% Dates SHOULD conform to Section 3.3 of RFC2616
+ %% the examples from the RFC are:
+ %% Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
+ %% Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
+ %% Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
+
+ %% Dates can be conveniently generated using dh_date.erl
+ %% https://github.com/daleharvey/dh_date
+ %% which is largely compatible with
+ %% http://uk.php.net/date
+
+ %% You MIGHT find it convenient to insist on times in UTC only
+ %% as it reduces the errors caused by summer time and other
+ %% conversion issues
+ Method = post,
+ %% NOTE(review): this example date carries no timezone designator,
+ %% unlike the RFC 2616 formats cited above — confirm intended.
+ Headers = [{"content-type", "application/json"},
+ {"date", "Sun, 10 Jul 2011 05:07:19"}],
+ ContentType = "application/json",
+ Body = "blah",
+ %% sign/5 returns the {"Authorization", ...} header tuple
+ HTTPAuthHeader = hmac_api_lib:sign(?privatekey, Method, URL,
+ Headers, ContentType),
+ httpc:request(Method, {URL, [HTTPAuthHeader | Headers],
+ ContentType, Body}, [], []).
--- /dev/null
+-module(hmac_api_lib).
+
+-include("hmac_api.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-author("Hypernumbers Ltd <gordon@hypernumbers.com>").
+
+%%% this library supports the hmac_sha api on both the client-side
+%%% AND the server-side
+%%%
+%%% sign/5 is used client-side to sign a request
+%%% - it returns an HTTPAuthorization header
+%%%
+%%% authorize_request/1 takes a mochiweb Request as an argument
+%%% and checks that the request matches the signature
+%%%
+%%% get_api_keypair/0 creates a pair of public/private keys
+%%%
+%%% THIS LIB DOESN'T IMPLEMENT THE AMAZON API IT ONLY IMPLEMENTS
+%%% ENOUGH OF IT TO GENERATE A TEST SUITE.
+%%%
+%%% THE AMAZON API MUNGES HOSTNAME AND PATHS IN A CUSTOM WAY
+%%% THIS IMPLEMENTATION DOESN'T
+-export([
+ authorize_request/1,
+ sign/5,
+ get_api_keypair/0
+ ]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% %%%
+%%% API %%%
+%%% %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% @doc Server-side verification of a signed mochiweb request.
+%% Rebuilds the canonical signature from the request's method, path and
+%% headers, signs it with the private key, and compares the resulting
+%% Authorization header against the one the client sent.
+%% Returns the string "match" or "no_match".
+%% NOTE(review): the signature comparison is an ordinary term match, not
+%% a constant-time compare, and the clock-skew check described in the
+%% accompanying README is not implemented here — confirm acceptable.
+authorize_request(Req) ->
+ Method = Req:get(method),
+ Path = Req:get(path),
+ Headers = normalise(mochiweb_headers:to_list(Req:get(headers))),
+ ContentMD5 = get_header(Headers, "content-md5"),
+ ContentType = get_header(Headers, "content-type"),
+ Date = get_header(Headers, "date"),
+ IncAuth = get_header(Headers, "authorization"),
+ %% breakout/1 splits "Schema Public:Sig"; the public key would normally
+ %% drive a private-key lookup (see comment below)
+ {_Schema, _PublicKey, _Sig} = breakout(IncAuth),
+ %% normally you would use the public key to look up the private key
+ PrivateKey = ?privatekey,
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = Path},
+ Signed = sign_data(PrivateKey, Signature),
+ {_, AuthHeader} = make_HTTPAuth_header(Signed),
+ case AuthHeader of
+ IncAuth -> "match";
+ _ -> "no_match"
+ end.
+
+%% @doc Client-side entry point: build the canonical signature for a
+%% request and return the {"Authorization", Value} header tuple to send.
+%% PrivateKey is the shared secret; Method is a lower-case atom (post);
+%% URL is the full request URL; Headers is a proplist; ContentType is
+%% the content-type string.
+%% NOTE(review): the #hmac_signature is built with the raw Headers, not
+%% the normalised Headers2 used for the md5/date lookups — the server
+%% side uses normalised headers throughout; confirm intended.
+sign(PrivateKey, Method, URL, Headers, ContentType) ->
+ Headers2 = normalise(Headers),
+ ContentMD5 = get_header(Headers2, "content-md5"),
+ Date = get_header(Headers2, "date"),
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ SignedSig = sign_data(PrivateKey, Signature),
+ make_HTTPAuth_header(SignedSig).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% %%%
+%%% Internal Functions %%%
+%%% %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Split an Authorization header "Schema Public:Signature" into its
+%% three parts; crashes (badmatch) on any other shape, which rejects
+%% malformed headers by crashing the handling process.
+breakout(Header) ->
+ [Schema, Tail] = string:tokens(Header, " "),
+ [PublicKey, Signature] = string:tokens(Tail, ":"),
+ {Schema, PublicKey, Signature}.
+
+%% Generate a {Public, Private} keypair as 32-char hex strings from
+%% 16 cryptographically strong random bytes each (openssl-backed).
+get_api_keypair() ->
+ Public = mochihex:to_hex(binary_to_list(crypto:strong_rand_bytes(16))),
+ Private = mochihex:to_hex(binary_to_list(crypto:strong_rand_bytes(16))),
+ {Public, Private}.
+
+%% Build the {"Authorization", "Schema Public:Signature"} header tuple.
+%% NOTE(review): the public key is the hard-coded test ?publickey; a real
+%% deployment would thread the caller's public key through instead.
+make_HTTPAuth_header(Signature) ->
+ {"Authorization", ?schema ++ " "
+ ++ ?publickey ++ ":" ++ Signature}.
+
+%% Build the Amazon-style canonical string-to-sign:
+%% METHOD \n content-md5 \n content-type \n date \n
+%% canonical-amz-headers canonical-resource
+%% The date line is emptied when an ?dateheader header is present
+%% (see get_date/2), mirroring the Amazon scheme.
+make_signature_string(#hmac_signature{} = S) ->
+ Date = get_date(S#hmac_signature.headers, S#hmac_signature.date),
+ string:to_upper(atom_to_list(S#hmac_signature.method)) ++ "\n"
+ ++ S#hmac_signature.contentmd5 ++ "\n"
+ ++ S#hmac_signature.contenttype ++ "\n"
+ ++ Date ++ "\n"
+ ++ canonicalise_headers(S#hmac_signature.headers)
+ ++ canonicalise_resource(S#hmac_signature.resource).
+
+%% Canonicalise then HMAC-sign a #hmac_signature with PrivateKey.
+sign_data(PrivateKey, #hmac_signature{} = Signature) ->
+ Str = make_signature_string(Signature),
+ sign2(PrivateKey, Str).
+
+%% this fn is the entry point for a unit test which is why it is broken out...
+%% if yer encryption and utf8 and base64 doo-dahs don't work then
+%% yer Donald is well and truly Ducked so ye may as weel test it...
+%% Returns the base64 string of HMAC-SHA1(PrivateKey, utf8(Str)).
+%% NOTE(review): crypto:sha_mac/2 was removed in modern OTP releases;
+%% crypto:mac(hmac, sha, Key, Data) is the current API — confirm the
+%% target OTP version before reusing this module.
+sign2(PrivateKey, Str) ->
+ Sign = xmerl_ucs:to_utf8(Str),
+ binary_to_list(base64:encode(crypto:sha_mac(PrivateKey, Sign))).
+
+%% Canonicalise the ?headerprefix ("x-amz-*") headers: lower-case the
+%% names, sort, merge duplicate names into one "k:v1;v2" entry, and join
+%% with newlines. Always ends with "\n" so it slots into the signature
+%% string even when there are no such headers.
+canonicalise_headers([]) -> "\n";
+canonicalise_headers(List) when is_list(List) ->
+ List2 = [{string:to_lower(K), V} || {K, V} <- lists:sort(List)],
+ c_headers2(consolidate(List2, []), []).
+
+%% Keep only headers whose (lower-cased) name starts with ?headerprefix,
+%% formatting each as "name:value" with surrounding spaces stripped.
+c_headers2([], Acc) -> string:join(Acc, "\n") ++ "\n";
+c_headers2([{?headerprefix ++ Rest, Key} | T], Acc) ->
+ Hd = string:strip(?headerprefix ++ Rest) ++ ":" ++ string:strip(Key),
+ c_headers2(T, [Hd | Acc]);
+c_headers2([_H | T], Acc) -> c_headers2(T, Acc).
+
+%% Merge adjacent (i.e. sorted) entries with the same name into a single
+%% ";"-joined value, rectifying whitespace as it goes. The accumulator
+%% reverses the sorted order; c_headers2 reverses it back.
+%% NOTE(review): only the *second* element of each pair is passed through
+%% rectify/1, so the first header in sorted order is never rectified —
+%% confirm whether that is intended.
+consolidate([H | []], Acc) -> [H | Acc];
+consolidate([{H, K1}, {H, K2} | Rest], Acc) ->
+ consolidate([{H, join(K1, K2)} | Rest], Acc);
+consolidate([{H1, K1}, {H2, K2} | Rest], Acc) ->
+ consolidate([{rectify(H2), rectify(K2)} | Rest], [{H1, K1} | Acc]).
+
+%% Join two header values as "v1;v2" with outer spaces stripped.
+join(A, B) -> string:strip(A) ++ ";" ++ string:strip(B).
+
+%% removes line spacing as per RFC 2616 Section 4.2
+%% (collapses runs of spaces/tabs into a single space)
+rectify(String) ->
+ Re = "[\x20* | \t*]+",
+ re:replace(String, Re, " ", [{return, list}, global]).
+
+%% Canonicalise the resource part of the signature: strip any
+%% http(s)://host prefix, drop the "#anchor", sort the query-string
+%% parameters, and lower-case the result.
+canonicalise_resource("http://" ++ Rest) -> c_res2(Rest);
+canonicalise_resource("https://" ++ Rest) -> c_res2(Rest);
+canonicalise_resource(X) -> c_res3(X).
+
+%% Drop everything up to and including the first "/" after the host,
+%% leaving the absolute path.
+c_res2(Rest) ->
+ N = string:str(Rest, "/"),
+ {_, Tail} = lists:split(N, Rest),
+ c_res3("/" ++ Tail).
+
+%% Discard any "#anchor", canonicalise the query string (if any), and
+%% lower-case the whole path.
+c_res3(Tail) ->
+ URL = case string:str(Tail, "#") of
+ 0 -> Tail;
+ N -> {U, _Anchor} = lists:split(N, Tail),
+ U
+ end,
+ U3 = case string:str(URL, "?") of
+ 0 -> URL;
+ N2 -> {U2, Q} = lists:split(N2, URL),
+ U2 ++ canonicalise_query(Q)
+ end,
+ string:to_lower(U3).
+
+%% Lower-case the query string and sort its "&"-separated parameters so
+%% equivalent queries canonicalise identically.
+canonicalise_query(List) ->
+ List1 = string:to_lower(List),
+ List2 = string:tokens(List1, "&"),
+ string:join(lists:sort(List2), "&").
+
+%% If an ?dateheader ("x-amz-date") header is present, the date line of
+%% the canonical string is emptied ([]); otherwise the plain date header
+%% value is used. This matches the Amazon scheme (and tests 8/9 below).
+get_date([], Date) -> Date;
+get_date([{K, _V} | T], Date) -> case string:to_lower(K) of
+ ?dateheader -> [];
+ _ -> get_date(T, Date)
+ end.
+
+%% Lower-case atom header names (mochiweb delivers well-known headers as
+%% atoms) so all lookups can use string keys. Order is reversed, which
+%% is harmless for keyfind-based lookups.
+normalise(List) -> norm2(List, []).
+
+norm2([], Acc) -> Acc;
+norm2([{K, V} | T], Acc) when is_atom(K) ->
+ norm2(T, [{string:to_lower(atom_to_list(K)), V} | Acc]);
+norm2([H | T], Acc) -> norm2(T, [H | Acc]).
+
+%% Fetch a header value by (lower-case) name; [] when absent.
+get_header(Headers, Type) ->
+ case lists:keyfind(Type, 1, Headers) of
+ false -> [];
+ {_K, V} -> V
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% %%%
+%%% Unit Tests %%%
+%%% %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ % taken from Amazon docs
+%% http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAuthentication.html
+%% hash_test1-3 pin sign2/2 against the worked HMAC examples in the
+%% Amazon documentation, so a broken crypto/base64/utf8 stack fails fast.
+hash_test1(_) ->
+ Sig = "DELETE\n\n\n\nx-amz-date:Tue, 27 Mar 2007 21:20:26 +0000\n/johnsmith/photos/puppy.jpg",
+ Key = ?privatekey,
+ Hash = sign2(Key, Sig),
+ Expected = "k3nL7gH3+PadhTEVn5Ip83xlYzk=",
+ ?assertEqual(Expected, Hash).
+
+%% taken from Amazon docs
+%% http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAuthentication.html
+hash_test2(_) ->
+ Sig = "GET\n\n\nTue, 27 Mar 2007 19:44:46 +0000\n/johnsmith/?acl",
+ Key = "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o",
+ Hash = sign2(Key, Sig),
+ Expected = "thdUi9VAkzhkniLj96JIrOPGi0g=",
+ ?assertEqual(Expected, Hash).
+
+%% taken from Amazon docs
+%% http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAuthentication.html
+hash_test3(_) ->
+ Sig = "GET\n\n\nWed, 28 Mar 2007 01:49:49 +0000\n/dictionary/"
+ ++ "fran%C3%A7ais/pr%c3%a9f%c3%a8re",
+ Key = "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o",
+ Hash = sign2(Key, Sig),
+ Expected = "dxhSBHoI6eVSPcXJqEghlUzZMnY=",
+ ?assertEqual(Expected, Hash).
+
+%% signature_test1-9 pin make_signature_string/1: empty headers, single
+%% and duplicate x-amz-* headers, whitespace rectification, query-string
+%% sorting/lower-casing, and x-amz-date overriding the date line.
+signature_test1(_) ->
+ URL = "http://example.com:90/tongs/ya/bas",
+ Method = post,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "POST\n\n\nSun, 10 Jul 2011 05:07:19 UTC\n\n/tongs/ya/bas",
+ ?assertEqual(Expected, Sig).
+
+signature_test2(_) ->
+ URL = "http://example.com:90/tongs/ya/bas",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [{"x-amz-acl", "public-read"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\nSun, 10 Jul 2011 05:07:19 UTC\nx-amz-acl:public-read\n/tongs/ya/bas",
+ ?assertEqual(Expected, Sig).
+
+signature_test3(_) ->
+ URL = "http://example.com:90/tongs/ya/bas",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [{"x-amz-acl", "public-read"},
+ {"yantze", "blast-off"},
+ {"x-amz-doobie", "bongwater"},
+ {"x-amz-acl", "public-write"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\nSun, 10 Jul 2011 05:07:19 UTC\nx-amz-acl:public-read;public-write\nx-amz-doobie:bongwater\n/tongs/ya/bas",
+ ?assertEqual(Expected, Sig).
+
+signature_test4(_) ->
+ URL = "http://example.com:90/tongs/ya/bas",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [{"x-amz-acl", "public-read"},
+ {"yantze", "blast-off"},
+ {"x-amz-doobie oobie \t boobie ", "bongwater"},
+ {"x-amz-acl", "public-write"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\nSun, 10 Jul 2011 05:07:19 UTC\nx-amz-acl:public-read;public-write\nx-amz-doobie oobie boobie:bongwater\n/tongs/ya/bas",
+ ?assertEqual(Expected, Sig).
+
+signature_test5(_) ->
+ URL = "http://example.com:90/tongs/ya/bas",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [{"x-amz-acl", "public-Read"},
+ {"yantze", "Blast-Off"},
+ {"x-amz-doobie Oobie \t boobie ", "bongwater"},
+ {"x-amz-acl", "public-write"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\nSun, 10 Jul 2011 05:07:19 UTC\nx-amz-acl:public-Read;public-write\nx-amz-doobie oobie boobie:bongwater\n/tongs/ya/bas",
+ ?assertEqual(Expected, Sig).
+
+signature_test6(_) ->
+ URL = "http://example.com:90/tongs/ya/bas/?andy&zbish=bash&bosh=burp",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\nSun, 10 Jul 2011 05:07:19 UTC\n\n"
+ ++ "/tongs/ya/bas/?andy&bosh=burp&zbish=bash",
+ ?assertEqual(Expected, Sig).
+
+signature_test7(_) ->
+ URL = "http://exAMPLE.Com:90/tONgs/ya/bas/?ANdy&ZBish=Bash&bOsh=burp",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\nSun, 10 Jul 2011 05:07:19 UTC\n\n"
+ ++"/tongs/ya/bas/?andy&bosh=burp&zbish=bash",
+ ?assertEqual(Expected, Sig).
+
+signature_test8(_) ->
+ URL = "http://exAMPLE.Com:90/tONgs/ya/bas/?ANdy&ZBish=Bash&bOsh=burp",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "",
+ Headers = [{"x-aMz-daTe", "Tue, 27 Mar 2007 21:20:26 +0000"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\n\n"
+ ++"x-amz-date:Tue, 27 Mar 2007 21:20:26 +0000\n"
+ ++"/tongs/ya/bas/?andy&bosh=burp&zbish=bash",
+ ?assertEqual(Expected, Sig).
+
+signature_test9(_) ->
+ URL = "http://exAMPLE.Com:90/tONgs/ya/bas/?ANdy&ZBish=Bash&bOsh=burp",
+ Method = get,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "Sun, 10 Jul 2011 05:07:19 UTC",
+ Headers = [{"x-amz-date", "Tue, 27 Mar 2007 21:20:26 +0000"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = make_signature_string(Signature),
+ Expected = "GET\n\n\n\n"
+ ++"x-amz-date:Tue, 27 Mar 2007 21:20:26 +0000\n"
+ ++"/tongs/ya/bas/?andy&bosh=burp&zbish=bash",
+ ?assertEqual(Expected, Sig).
+
+%% End-to-end: sign_data/2 over a full #hmac_signature must reproduce
+%% the worked DELETE example from the Amazon documentation.
+amazon_test1(_) ->
+ URL = "http://exAMPLE.Com:90/johnsmith/photos/puppy.jpg",
+ Method = delete,
+ ContentMD5 = "",
+ ContentType = "",
+ Date = "",
+ Headers = [{"x-amz-date", "Tue, 27 Mar 2007 21:20:26 +0000"}],
+ Signature = #hmac_signature{method = Method,
+ contentmd5 = ContentMD5,
+ contenttype = ContentType,
+ date = Date,
+ headers = Headers,
+ resource = URL},
+ Sig = sign_data(?privatekey, Signature),
+ Expected = "k3nL7gH3+PadhTEVn5Ip83xlYzk=",
+ ?assertEqual(Expected, Sig).
+
+%% EUnit test generator bundling the three series under one (no-op)
+%% setup/cleanup fixture.
+unit_test_() ->
+ Setup = fun() -> ok end,
+ Cleanup = fun(_) -> ok end,
+
+ Series1 = [
+ fun hash_test1/1,
+ fun hash_test2/1,
+ fun hash_test3/1
+ ],
+
+ Series2 = [
+ fun signature_test1/1,
+ fun signature_test2/1,
+ fun signature_test3/1,
+ fun signature_test4/1,
+ fun signature_test5/1,
+ fun signature_test6/1,
+ fun signature_test7/1,
+ fun signature_test8/1,
+ fun signature_test9/1
+ ],
+
+ Series3 = [
+ fun amazon_test1/1
+ ],
+
+ {setup, Setup, Cleanup, [
+ {with, [], Series1},
+ {with, [], Series2},
+ {with, [], Series3}
+ ]}.
--- /dev/null
+
+%% Trivial web storage app. It's available over both HTTP (port 8442)
+%% and HTTPS (port 8443). You use a PUT to store items, a GET to
+%% retrieve them and DELETE to delete them. The HTTP POST method is
+%% invalid for this application. Example (using HTTPS transport):
+%%
+%% $ curl -k --verbose https://localhost:8443/flintstones
+%% ...
+%% 404 Not Found
+%% ...
+%% $ echo -e "Fred\nWilma\nBarney" |
+%% curl -k --verbose https://localhost:8443/flintstones \
+%% -X PUT -H "Content-Type: text/plain" --data-binary @-
+%% ...
+%% 201 Created
+%% ...
+%% $ curl -k --verbose https://localhost:8443/flintstones
+%% ...
+%% Fred
+%% Wilma
+%% Barney
+%% ...
+%% $ curl -k --verbose https://localhost:8443/flintstones -X DELETE
+%% ...
+%% 200 OK
+%% ...
+%% $ curl -k --verbose https://localhost:8443/flintstones
+%% ...
+%% 404 Not Found
+%% ...
+%%
+%% All submitted data is stored in memory (in an ets table). Could be
+%% useful for ad-hoc testing.
+
+-module(https_store).
+
+%% dispatch/1 is the mochiweb request callback; loop/1 is exported so
+%% the storage process can upgrade itself via ?MODULE:loop/1.
+-export([start/0,
+ stop/0,
+ dispatch/1,
+ loop/1
+ ]).
+
+%% Plain-HTTP listener options (port 8442).
+-define(HTTP_OPTS, [
+ {loop, {?MODULE, dispatch}},
+ {port, 8442},
+ {name, http_8442}
+ ]).
+
+%% HTTPS listener options (port 8443); expects server_cert.pem and
+%% server_key.pem in the working directory.
+-define(HTTPS_OPTS, [
+ {loop, {?MODULE, dispatch}},
+ {port, 8443},
+ {name, https_8443},
+ {ssl, true},
+ {ssl_opts, [
+ {certfile, "server_cert.pem"},
+ {keyfile, "server_key.pem"}]}
+ ]).
+
+%% Handles to the two running mochiweb listeners.
+-record(sd, {http, https}).
+%% A stored item: its content-type and raw body.
+-record(resource, {type, data}).
+
+%% @doc Start both listeners and spawn the linked storage process that
+%% owns the ets table. The process is registered as http_store (note:
+%% deliberately not the module name https_store).
+start() ->
+ {ok, Http} = mochiweb_http:start(?HTTP_OPTS),
+ {ok, Https} = mochiweb_http:start(?HTTPS_OPTS),
+ SD = #sd{http=Http, https=Https},
+ Pid = spawn_link(fun() ->
+ ets:new(?MODULE, [named_table]),
+ loop(SD)
+ end),
+ register(http_store, Pid),
+ ok.
+
+%% @doc Ask the storage process to shut both listeners down and exit.
+stop() ->
+ http_store ! stop,
+ ok.
+
+%% @doc mochiweb request callback: route by HTTP method; anything other
+%% than GET/PUT/DELETE gets a 405 with an Allow header.
+dispatch(Req) ->
+ case Req:get(method) of
+ 'GET' ->
+ get_resource(Req);
+ 'PUT' ->
+ put_resource(Req);
+ 'DELETE' ->
+ delete_resource(Req);
+ _ ->
+ Headers = [{"Allow", "GET,PUT,DELETE"}],
+ Req:respond({405, Headers, "405 Method Not Allowed\r\n"})
+ end.
+
+%% Serve a stored item by request path, or 404 if absent. Reads go
+%% straight to the ets table (no round-trip to the storage process).
+get_resource(Req) ->
+ Path = Req:get(path),
+ case ets:lookup(?MODULE, Path) of
+ [{Path, #resource{type=Type, data=Data}}] ->
+ Req:ok({Type, Data});
+ [] ->
+ Req:respond({404, [], "404 Not Found\r\n"})
+ end.
+
+%% Store the request body under the request path via the storage
+%% process; replies 201 on create, 200 on update.
+%% NOTE(review): whereis/1 is called after the send and the receive has
+%% no `after` clause — if the storage process dies in between, this
+%% handler blocks forever; confirm acceptable for a demo.
+put_resource(Req) ->
+ ContentType = case Req:get_header_value("Content-Type") of
+ undefined ->
+ "application/octet-stream";
+ S ->
+ S
+ end,
+ Resource = #resource{type=ContentType, data=Req:recv_body()},
+ http_store ! {self(), {put, Req:get(path), Resource}},
+ Pid = whereis(http_store),
+ receive
+ {Pid, created} ->
+ Req:respond({201, [], "201 Created\r\n"});
+ {Pid, updated} ->
+ Req:respond({200, [], "200 OK\r\n"})
+ end.
+
+%% Delete the item at the request path (200 whether or not it existed).
+%% Same unbounded-receive caveat as put_resource/1.
+delete_resource(Req) ->
+ http_store ! {self(), {delete, Req:get(path)}},
+ Pid = whereis(http_store),
+ receive
+ {Pid, ok} ->
+ Req:respond({200, [], "200 OK\r\n"})
+ end.
+
+%% Storage-process main loop: owns the ets table, serialises writes,
+%% and tears down both listeners on stop. Tail-calls ?MODULE:loop/1 so
+%% a newly loaded code version is picked up on the next message.
+loop(#sd{http=Http, https=Https} = SD) ->
+ receive
+ stop ->
+ ok = mochiweb_http:stop(Http),
+ ok = mochiweb_http:stop(Https),
+ exit(normal);
+ {From, {put, Key, Val}} ->
+ %% check existence first so the caller can be told 201 vs 200
+ Exists = ets:member(?MODULE, Key),
+ ets:insert(?MODULE, {Key, Val}),
+ case Exists of
+ true ->
+ From ! {self(), updated};
+ false ->
+ From ! {self(), created}
+ end;
+ {From, {delete, Key}} ->
+ ets:delete(?MODULE, Key),
+ From ! {self(), ok};
+ _ ->
+ %% drain unexpected messages so the mailbox cannot grow
+ ignore
+ end,
+ ?MODULE:loop(SD).
+
--- /dev/null
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIJAJLkNZzERPIUMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
+BAMTCWxvY2FsaG9zdDAeFw0xMDAzMTgxOTM5MThaFw0yMDAzMTUxOTM5MThaMBQx
+EjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAJeUCOZxbmtngF4S5lXckjSDLc+8C+XjMBYBPyy5eKdJY20AQ1s9/hhp3ulI
+8pAvl+xVo4wQ+iBSvOzcy248Q+Xi6+zjceF7UNRgoYPgtJjKhdwcHV3mvFFrS/fp
+9ggoAChaJQWDO1OCfUgTWXImhkw+vcDR11OVMAJ/h73dqzJPI9mfq44PTTHfYtgr
+v4LAQAOlhXIAa2B+a6PlF6sqDqJaW5jLTcERjsBwnRhUGi7JevQzkejujX/vdA+N
+jRBjKH/KLU5h3Q7wUchvIez0PXWVTCnZjpA9aR4m7YV05nKQfxtGd71czYDYk+j8
+hd005jetT4ir7JkAWValBybJVksCAwEAAaN1MHMwHQYDVR0OBBYEFJl9s51SnjJt
+V/wgKWqV5Q6jnv1ZMEQGA1UdIwQ9MDuAFJl9s51SnjJtV/wgKWqV5Q6jnv1ZoRik
+FjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCS5DWcxETyFDAMBgNVHRMEBTADAQH/
+MA0GCSqGSIb3DQEBBQUAA4IBAQB2ldLeLCc+lxK5i0EZquLamMBJwDIjGpT0JMP9
+b4XQOK2JABIu54BQIZhwcjk3FDJz/uOW5vm8k1kYni8FCjNZAaRZzCUfiUYTbTKL
+Rq9LuIAODyP2dnTqyKaQOOJHvrx9MRZ3XVecXPS0Tib4aO57vCaAbIkmhtYpTWmw
+e3t8CAIDVtgvjR6Se0a1JA4LktR7hBu22tDImvCSJn1nVAaHpani6iPBPPdMuMsP
+TBoeQfj8VpqBUjCStqJGa8ytjDFX73YaxV2mgrtGwPNme1x3YNRR11yTu7tksyMO
+GrmgxNriqYRchBhNEf72AKF0LR1ByKwfbDB9rIsV00HtCgOp
+-----END CERTIFICATE-----
--- /dev/null
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAl5QI5nFua2eAXhLmVdySNIMtz7wL5eMwFgE/LLl4p0ljbQBD
+Wz3+GGne6UjykC+X7FWjjBD6IFK87NzLbjxD5eLr7ONx4XtQ1GChg+C0mMqF3Bwd
+Xea8UWtL9+n2CCgAKFolBYM7U4J9SBNZciaGTD69wNHXU5UwAn+Hvd2rMk8j2Z+r
+jg9NMd9i2Cu/gsBAA6WFcgBrYH5ro+UXqyoOolpbmMtNwRGOwHCdGFQaLsl69DOR
+6O6Nf+90D42NEGMof8otTmHdDvBRyG8h7PQ9dZVMKdmOkD1pHibthXTmcpB/G0Z3
+vVzNgNiT6PyF3TTmN61PiKvsmQBZVqUHJslWSwIDAQABAoIBACI8Ky5xHDFh9RpK
+Rn/KC7OUlTpADKflgizWJ0Cgu2F9L9mkn5HyFHvLHa+u7CootbWJOiEejH/UcBtH
+WyMQtX0snYCpdkUpJv5wvMoebGu+AjHOn8tfm9T/2O6rhwgckLyMb6QpGbMo28b1
+p9QiY17BJPZx7qJQJcHKsAvwDwSThlb7MFmWf42LYWlzybpeYQvwpd+UY4I0WXLu
+/dqJIS9Npq+5Y5vbo2kAEAssb2hSCvhCfHmwFdKmBzlvgOn4qxgZ1iHQgfKI6Z3Y
+J0573ZgOVTuacn+lewtdg5AaHFcl/zIYEr9SNqRoPNGbPliuv6k6N2EYcufWL5lR
+sCmmmHECgYEAxm+7OpepGr++K3+O1e1MUhD7vSPkKJrCzNtUxbOi2NWj3FFUSPRU
+adWhuxvUnZgTcgM1+KuQ0fB2VmxXe9IDcrSFS7PKFGtd2kMs/5mBw4UgDZkOQh+q
+kDiBEV3HYYJWRq0w3NQ/9Iy1jxxdENHtGmG9aqamHxNtuO608wGW2S8CgYEAw4yG
+ZyAic0Q/U9V2OHI0MLxLCzuQz17C2wRT1+hBywNZuil5YeTuIt2I46jro6mJmWI2
+fH4S/geSZzg2RNOIZ28+aK79ab2jWBmMnvFCvaru+odAuser4N9pfAlHZvY0pT+S
+1zYX3f44ygiio+oosabLC5nWI0zB2gG8pwaJlaUCgYEAgr7poRB+ZlaCCY0RYtjo
+mYYBKD02vp5BzdKSB3V1zeLuBWM84pjB6b3Nw0fyDig+X7fH3uHEGN+USRs3hSj6
+BqD01s1OT6fyfbYXNw5A1r+nP+5h26Wbr0zblcKxdQj4qbbBZC8hOJNhqTqqA0Qe
+MmzF7jiBaiZV/Cyj4x1f9BcCgYEAhjL6SeuTuOctTqs/5pz5lDikh6DpUGcH8qaV
+o6aRAHHcMhYkZzpk8yh1uUdD7516APmVyvn6rrsjjhLVq4ZAJjwB6HWvE9JBN0TR
+bILF+sREHUqU8Zn2Ku0nxyfXCKIOnxlx/J/y4TaGYqBqfXNFWiXNUrjQbIlQv/xR
+K48g/MECgYBZdQlYbMSDmfPCC5cxkdjrkmAl0EgV051PWAi4wR+hLxIMRjHBvAk7
+IweobkFvT4TICulgroLkYcSa5eOZGxB/DHqcQCbWj3reFV0VpzmTDoFKG54sqBRl
+vVntGt0pfA40fF17VoS7riAdHF53ippTtsovHEsg5tq5NrBl5uKm2g==
+-----END RSA PRIVATE KEY-----
--- /dev/null
+-module(keepalive).
+
+%% your web app can push data to clients using a technique called comet long
+%% polling. browsers make a request and your server waits to send a
+%% response until data is available. see wikipedia for a better explanation:
+%% http://en.wikipedia.org/wiki/Comet_(programming)#Ajax_with_long_polling
+%%
+%% since the majority of your http handlers will be idle at any given moment,
+%% you might consider making them hibernate while they wait for more data from
+%% another process. however, since the execution stack is discarded when a
+%% process hibernates, the handler would usually terminate after your response
+%% code runs. this means http keep alives wouldn't work; the handler process
+%% would terminate after each response and close its socket rather than
+%% returning to the big @mochiweb_http@ loop and processing another request.
+%%
+%% however, if mochiweb exposes a continuation that encapsulates the return to
+%% the top of the big loop in @mochiweb_http@, we can call that after the
+%% response. if you do that then control flow returns to the proper place,
+%% and keep alives work like they would if you hadn't hibernated.
+
+-export([ start/1, loop/1
+ ]).
+
+%% internal export (so hibernate can reach it)
+-export([ resume/3
+ ]).
+
+-define(LOOP, {?MODULE, loop}).
+
+%% @doc Start the example's mochiweb HTTP listener. Options must be a
+%% one-element proplist carrying the listening port; the loop callback is
+%% registered via the ?LOOP {Module, Function} tuple.
+start([{port, _Port}] = Options) ->
+    ServerConf = [{name, ?MODULE}, {loop, ?LOOP} | Options],
+    mochiweb_http:start(ServerConf).
+
+%% @doc Per-request handler invoked by mochiweb_http. Requests whose path
+%% starts with "longpoll" park this process in hibernation until resume/3
+%% is woken by a message; every other path is answered immediately.
+%% Req is a mochiweb request object (parameterized-module call style).
+loop(Req) ->
+ Path = Req:get(path),
+ case string:tokens(Path, "/") of
+ ["longpoll" | RestOfPath] ->
+ %% the "reentry" is a continuation -- what @mochiweb_http@
+ %% needs to do to start its loop back at the top
+ Reentry = mochiweb_http:reentry(?LOOP),
+
+ %% here we could send a message to some other process and hope
+ %% to get an interesting message back after a while. for
+ %% simplicity let's just send ourselves a message after a few
+ %% seconds
+ erlang:send_after(2000, self(), "honk honk"),
+
+ %% since we expect to wait for a long time before getting a
+ %% reply, let's hibernate. memory usage will be minimized, so
+ %% we won't be wasting memory just sitting in a @receive@
+ %% NOTE: hibernate discards the stack, so nothing after this
+ %% call executes; execution restarts at resume/3.
+ proc_lib:hibernate(?MODULE, resume, [Req, RestOfPath, Reentry]),
+
+ %% we'll never reach this point, and this function @loop/1@
+ %% won't ever return control to @mochiweb_http@. luckily
+ %% @resume/3@ will take care of that.
+ io:format("not gonna happen~n", []);
+
+ _ ->
+ ok(Req, io_lib:format("some other page: ~p", [Path]))
+ end,
+
+ io:format("restarting loop normally in ~p~n", [Path]),
+ ok.
+
+%% this is the function that's called when a message arrives.
+%% resume/3 is the hibernation re-entry point: the old stack was discarded
+%% by proc_lib:hibernate/3, so we receive here, send the response, and then
+%% hand control back to mochiweb_http through the Reentry continuation.
+resume(Req, RestOfPath, Reentry) ->
+ receive
+ Msg ->
+ Text = io_lib:format("wake up message: ~p~nrest of path: ~p", [Msg, RestOfPath]),
+ ok(Req, Text)
+ end,
+
+ %% if we didn't call @Reentry@ here then the function would finish and the
+ %% process would exit. calling @Reentry@ takes care of returning control
+ %% to @mochiweb_http@
+ io:format("reentering loop via continuation in ~p~n", [Req:get(path)]),
+ Reentry(Req).
+
+%% Reply 200 OK with a text/plain body; the underscored matches only name
+%% the positions of the {ContentType, Headers, Body} response tuple.
+ok(Req, Response) ->
+ Req:ok({_ContentType = "text/plain",
+ _Headers = [],
+ Response}).
--- /dev/null
+
+%% Socket receive buffer size in bytes (8 KiB).
+-define(RECBUF_SIZE, 8192).
+
--- /dev/null
+% -*- mode: erlang -*-
+%% rebar build configuration.
+%% gen_tcp_r15b_workaround is only defined when compiling on an R15 emulator.
+{erl_opts, [debug_info,
+ {platform_define, "R15", 'gen_tcp_r15b_workaround'}]}.
+%% Collect coverage and emit surefire-style XML reports for CI.
+{cover_enabled, true}.
+{eunit_opts, [verbose, {report,{eunit_surefire,[{dir,"."}]}}]}.
+%% Dialyzer warning selection used by the dialyzer rebar task.
+{dialyzer_opts, [{warnings, [no_return,
+ no_unused,
+ no_improper_lists,
+ no_fun_app,
+ no_match,
+ no_opaque,
+ no_fail_call,
+ error_handling,
+ race_conditions,
+ behaviours,
+ unmatched_returns]}]}.
--- /dev/null
+#!/usr/bin/env escript
+%% -*- mode: erlang -*-
+-export([main/1]).
+
+%% @doc Script used to generate mochiweb_charref.erl table.
+
+%% Fetch the W3C HTML5 named-character-reference table over HTTP, extract
+%% every entity name with its code point(s), and print the clauses of an
+%% entity/1 lookup function (source for mochiweb_charref.erl).
+main(_) ->
+ application:start(inets),
+ code:add_patha("ebin"),
+ {ok, {_, _, HTML}} = httpc:request("http://www.w3.org/TR/html5/named-character-references.html"),
+ print(lists:sort(search(mochiweb_html:parse(HTML)))).
+
+%% Emit one "entity(...) -> ...;" clause per charref, then the terminal
+%% catch-all clause that returns undefined.
+print([F | T]) ->
+ io:put_chars([clause(F), ";\n"]),
+ print(T);
+print([]) ->
+ io:put_chars(["entity(_) -> undefined.\n"]),
+ ok.
+
+%% Render one clause body; single code point yields a bare 16#HHHH, several
+%% code points yield a list of them.
+clause({Title, [Codepoint]}) ->
+ ["entity(\"", Title, "\") -> 16#", Codepoint];
+clause({Title, [First | Rest]}) ->
+ ["entity(\"", Title, "\") -> [16#", First,
+ [[", 16#", Codepoint] || Codepoint <- Rest],
+ "]"].
+
+
+%% Walk the mochiweb_html parse tree collecting {EntityName, CodePoints}
+%% pairs from every <tr id="entity-..."> row.
+search(Elem) ->
+ search(Elem, []).
+
+search({<<"tr">>, [{<<"id">>, <<"entity-", _/binary>>} | _], Children}, Acc) ->
+ %% HTML5 charrefs can have more than one code point(!)
+ [{<<"td">>, _, [{<<"code">>, _, [TitleSemi]}]},
+ {<<"td">>, [], [RawCPs]} | _] = Children,
+ %% strip the trailing ";" from the displayed entity name
+ L = byte_size(TitleSemi) - 1,
+ <<Title:L/binary, $;>> = TitleSemi,
+ {match, Matches} = re:run(RawCPs, "(?:\\s*U\\+)([a-fA-F0-9]+)",
+ [{capture, all, binary}, global]),
+ [{Title, [CP || [_, CP] <- Matches]} | Acc];
+search({Tag, Attrs, [H | T]}, Acc) ->
+ search({Tag, Attrs, T}, search(H, Acc));
+search({_Tag, _Attrs, []}, Acc) ->
+ Acc;
+search(<<_/binary>>, Acc) ->
+ Acc.
--- /dev/null
+#!/usr/bin/env escript
+%% -*- mode: erlang -*-
+-export([main/1]).
+
+%% External API
+
+%% This script is a deprecation stub: it always prints the notice below
+%% and exits non-zero.
+main(_) ->
+ usage().
+
+%% Internal API
+
+%% Print the pointer to the rebar-template workflow and halt with status 1
+%% so automated callers notice the script no longer does anything.
+usage() ->
+ io:format(
+ "new_mochiweb.erl has been replaced by a rebar template!\n"
+ "\n"
+ "To create a new mochiweb using project:\n"
+ " make app PROJECT=project_name\n"
+ "\n"
+ "To create a new mochiweb using project in a specific directory:\n"
+ " make app PROJECT=project_name PREFIX=$HOME/projects/\n"
+ "\n"
+ ),
+ halt(1).
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc String Formatting for Erlang, inspired by Python 2.6
+%% (<a href="http://www.python.org/dev/peps/pep-3101/">PEP 3101</a>).
+%%
+-module(mochifmt).
+-author('bob@mochimedia.com').
+-export([format/2, format_field/2, convert_field/2, get_value/2, get_field/2]).
+-export([tokenize/1, format/3, get_field/3, format_field/3]).
+-export([bformat/2, bformat/3]).
+-export([f/2, f/3]).
+
+-record(conversion, {length, precision, ctype, align, fill_char, sign}).
+
+%% @spec tokenize(S::string()) -> tokens()
+%% @doc Tokenize a format string into mochifmt's internal format.
+%% The result is tagged with ?MODULE so format/3 can accept either a raw
+%% string or a pre-tokenized form without re-scanning.
+tokenize(S) ->
+ {?MODULE, tokenize(S, "", [])}.
+
+%% @spec convert_field(Arg, Conversion::conversion()) -> term()
+%% @doc Process Arg according to the given explicit conversion specifier.
+%% "r" behaves like Python's !r (repr), "s" like !s (str); an empty
+%% specifier leaves Arg untouched. Anything else fails with function_clause.
+convert_field(Arg, "") ->
+ Arg;
+convert_field(Arg, "r") ->
+ repr(Arg);
+convert_field(Arg, "s") ->
+ str(Arg).
+
+%% @spec get_value(Key::string(), Args::args()) -> term()
+%% @doc Get the Key from Args. If Args is a tuple then convert Key to
+%% an integer and get element(1 + Key, Args). If Args is a list and Key
+%% can be parsed as an integer then use lists:nth(1 + Key, Args),
+%% otherwise try and look for Key in Args as a proplist, converting
+%% Key to an atom or binary if necessary.
+%% Note: Key is always a string at this point; indices are 0-based in the
+%% format string but element/2 and lists:nth/2 are 1-based, hence the 1 +.
+get_value(Key, Args) when is_tuple(Args) ->
+ element(1 + list_to_integer(Key), Args);
+get_value(Key, Args) when is_list(Args) ->
+ try lists:nth(1 + list_to_integer(Key), Args)
+ catch error:_ ->
+ {_K, V} = proplist_lookup(Key, Args),
+ V
+ end.
+
+%% @spec get_field(Key::string(), Args) -> term()
+%% @doc Consecutively call get_value/2 on parts of Key delimited by ".",
+%% replacing Args with the result of the previous get_value. This
+%% is used to implement formats such as {0.0}.
+get_field(Key, Args) ->
+ get_field(Key, Args, ?MODULE).
+
+%% @spec get_field(Key::string(), Args, Module) -> term()
+%% @doc Consecutively call Module:get_value/2 on parts of Key delimited by ".",
+%% replacing Args with the result of the previous get_value. This
+%% is used to implement formats such as {0.0}.
+%% Falls back to this module's get_value/2 when Module does not export one.
+get_field(Key, Args, Module) ->
+ {Name, Next} = lists:splitwith(fun (C) -> C =/= $. end, Key),
+ Res = try Module:get_value(Name, Args)
+ catch error:undef -> get_value(Name, Args) end,
+ case Next of
+ "" ->
+ Res;
+ "." ++ S1 ->
+ get_field(S1, Res, Module)
+ end.
+
+%% @spec format(Format::string(), Args) -> iolist()
+%% @doc Format Args with Format.
+format(Format, Args) ->
+ format(Format, Args, ?MODULE).
+
+%% @spec format(Format::string(), Args, Module) -> iolist()
+%% @doc Format Args with Format using Module.
+%% Accepts either a raw format string or the {?MODULE, Parts} tagged form
+%% produced by tokenize/1.
+format({?MODULE, Parts}, Args, Module) ->
+ format2(Parts, Args, Module, []);
+format(S, Args, Module) ->
+ format(tokenize(S), Args, Module).
+
+%% @spec format_field(Arg, Format) -> iolist()
+%% @doc Format Arg with Format.
+format_field(Arg, Format) ->
+ format_field(Arg, Format, ?MODULE).
+
+%% @spec format_field(Arg, Format, _Module) -> iolist()
+%% @doc Format Arg with Format.
+%% Pipeline: parse the std conversion spec, pick a default ctype from
+%% Arg's type, convert, apply sign, then pad to the requested width.
+format_field(Arg, Format, _Module) ->
+ F = default_ctype(Arg, parse_std_conversion(Format)),
+ fix_padding(fix_sign(convert2(Arg, F), F), F).
+
+%% @spec f(Format::string(), Args) -> string()
+%% @doc Format Args with Format and return a string().
+f(Format, Args) ->
+ f(Format, Args, ?MODULE).
+
+%% @spec f(Format::string(), Args, Module) -> string()
+%% @doc Format Args with Format using Module and return a string().
+%% Fast path: a format string containing no "{" is returned unchanged.
+f(Format, Args, Module) ->
+ case lists:member(${, Format) of
+ true ->
+ binary_to_list(bformat(Format, Args, Module));
+ false ->
+ Format
+ end.
+
+%% @spec bformat(Format::string(), Args) -> binary()
+%% @doc Format Args with Format and return a binary().
+bformat(Format, Args) ->
+ iolist_to_binary(format(Format, Args)).
+
+%% @spec bformat(Format::string(), Args, Module) -> binary()
+%% @doc Format Args with Format using Module and return a binary().
+bformat(Format, Args, Module) ->
+ iolist_to_binary(format(Format, Args, Module)).
+
+%% Internal API
+
+%% Push the pending literal-text accumulator (held in reverse order by
+%% tokenize/3) onto the token list as a {raw, String} token; an empty
+%% accumulator contributes nothing.
+add_raw(Pending, Tokens) ->
+    case Pending of
+        "" -> Tokens;
+        _  -> [{raw, lists:reverse(Pending)} | Tokens]
+    end.
+
+%% Scanner worker behind tokenize/1. S accumulates the current literal run
+%% in reverse; Acc accumulates finished tokens in reverse. "{{" and "}}"
+%% are escapes for literal braces (note the escape clauses must precede the
+%% single "{" clause), while a lone "{" starts a format field.
+tokenize([], S, Acc) ->
+ lists:reverse(add_raw(S, Acc));
+tokenize("{{" ++ Rest, S, Acc) ->
+ tokenize(Rest, "{" ++ S, Acc);
+tokenize("{" ++ Rest, S, Acc) ->
+ {Format, Rest1} = tokenize_format(Rest),
+ tokenize(Rest1, "", [{format, make_format(Format)} | add_raw(S, Acc)]);
+tokenize("}}" ++ Rest, S, Acc) ->
+ tokenize(Rest, "}" ++ S, Acc);
+tokenize([C | Rest], S, Acc) ->
+ tokenize(Rest, [C | S], Acc).
+
+%% Consume the body of a "{...}" format field, honouring nested braces
+%% (used by specs like "{0:{1}}"). Returns {FieldString, RemainingInput};
+%% the closing brace that balances the opening one is consumed but not
+%% included in FieldString.
+tokenize_format(S) ->
+    tokenize_format(S, 1, []).
+
+tokenize_format([$} | Rest], Depth, RAcc) ->
+    case Depth of
+        1 -> {lists:reverse(RAcc), Rest};
+        _ -> tokenize_format(Rest, Depth - 1, [$} | RAcc])
+    end;
+tokenize_format([${ | Rest], Depth, RAcc) ->
+    tokenize_format(Rest, Depth + 1, [${ | RAcc]);
+tokenize_format([Ch | Rest], Depth, RAcc) ->
+    tokenize_format(Rest, Depth, [Ch | RAcc]).
+
+%% Split a raw field string into {Name, Transform, Spec}, following the
+%% grammar Name ["!" Transform] [":" Spec]. Missing parts become "".
+%% The "!" split is applied only to the part before the ":".
+make_format(Field) ->
+    {NameAndTransform, Spec} =
+        case lists:splitwith(fun (Ch) -> Ch =/= $: end, Field) of
+            {_, ""} -> {Field, ""};
+            {Before, ":" ++ After} -> {Before, After}
+        end,
+    {Name, Transform} =
+        case lists:splitwith(fun (Ch) -> Ch =/= $! end, NameAndTransform) of
+            {_, ""} -> {NameAndTransform, ""};
+            {N, "!" ++ T} -> {N, T}
+        end,
+    {Name, Transform, Spec}.
+
+%% Look up a string key in a proplist whose keys may be strings, atoms or
+%% binaries. The atom/binary forms are precomputed; when a conversion
+%% fails, make_ref() is substituted as a sentinel that can never equal a
+%% proplist key. list_to_existing_atom/1 is used deliberately so format
+%% arguments can never grow the atom table. A missing key crashes with
+%% function_clause on the empty list.
+proplist_lookup(S, P) ->
+ A = try list_to_existing_atom(S)
+ catch error:_ -> make_ref() end,
+ B = try list_to_binary(S)
+ catch error:_ -> make_ref() end,
+ proplist_lookup2({S, A, B}, P).
+
+proplist_lookup2({KS, KA, KB}, [{K, V} | _])
+ when KS =:= K orelse KA =:= K orelse KB =:= K ->
+ {K, V};
+proplist_lookup2(Keys, [_ | Rest]) ->
+ proplist_lookup2(Keys, Rest).
+
+%% Token interpreter for format/3: raw tokens are copied through, format
+%% tokens are resolved (lookup -> explicit conversion -> field formatting).
+%% The spec itself is first run through f/3, which is how nested specs such
+%% as "{0:{1}}" pick up their width from the arguments.
+format2([], _Args, _Module, Acc) ->
+ lists:reverse(Acc);
+format2([{raw, S} | Rest], Args, Module, Acc) ->
+ format2(Rest, Args, Module, [S | Acc]);
+format2([{format, {Key, Convert, Format0}} | Rest], Args, Module, Acc) ->
+ Format = f(Format0, Args, Module),
+ V = case Module of
+ ?MODULE ->
+ V0 = get_field(Key, Args),
+ V1 = convert_field(V0, Convert),
+ format_field(V1, Format);
+ _ ->
+ %% custom formatter Module: prefer its callbacks, falling
+ %% back to the defaults when a callback is not exported
+ V0 = try Module:get_field(Key, Args)
+ catch error:undef -> get_field(Key, Args, Module) end,
+ V1 = try Module:convert_field(V0, Convert)
+ catch error:undef -> convert_field(V0, Convert) end,
+ try Module:format_field(V1, Format)
+ catch error:undef -> format_field(V1, Format, Module) end
+ end,
+ format2(Rest, Args, Module, [V | Acc]).
+
+%% Fill in the conversion type when the spec did not name one explicitly:
+%% integers default to decimal, floats to general, anything else to string.
+default_ctype(_Arg, C=#conversion{ctype=N}) when N =/= undefined ->
+ C;
+default_ctype(Arg, C) when is_integer(Arg) ->
+ C#conversion{ctype=decimal};
+default_ctype(Arg, C) when is_float(Arg) ->
+ C#conversion{ctype=general};
+default_ctype(_Arg, C) ->
+ C#conversion{ctype=string}.
+
+%% Pad the converted iolist out to the requested field width. Defaults:
+%% fill character is a space; strings align left, everything else right.
+%% Content already at or over the width is returned unchanged.
+fix_padding(Arg, #conversion{length=undefined}) ->
+ Arg;
+fix_padding(Arg, F=#conversion{length=Length, fill_char=Fill0, align=Align0,
+ ctype=Type}) ->
+ Padding = Length - iolist_size(Arg),
+ Fill = case Fill0 of
+ undefined ->
+ $\s;
+ _ ->
+ Fill0
+ end,
+ Align = case Align0 of
+ undefined ->
+ case Type of
+ string ->
+ left;
+ _ ->
+ right
+ end;
+ _ ->
+ Align0
+ end,
+ case Padding > 0 of
+ true ->
+ do_padding(Arg, Padding, Fill, Align, F);
+ false ->
+ Arg
+ end.
+
+%% Insert Padding copies of Fill around Arg according to the alignment.
+%% sign_right ("=" alignment, also implied by a "0" fill) keeps a leading
+%% sign character in front of the padding; clause order is significant here
+%% because the [$- | Arg] head match must win before the record-based
+%% fallbacks.
+do_padding(Arg, Padding, Fill, right, _F) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding(Arg, Padding, Fill, center, _F) ->
+ LPadding = lists:duplicate(Padding div 2, Fill),
+ RPadding = case Padding band 1 of
+ 1 ->
+ [Fill | LPadding];
+ _ ->
+ LPadding
+ end,
+ [LPadding, Arg, RPadding];
+do_padding([$- | Arg], Padding, Fill, sign_right, _F) ->
+ [[$- | lists:duplicate(Padding, Fill)], Arg];
+do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=$-}) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding([S | Arg], Padding, Fill, sign_right, #conversion{sign=S}) ->
+ [[S | lists:duplicate(Padding, Fill)], Arg];
+do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=undefined}) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding(Arg, Padding, Fill, left, _F) ->
+ [Arg | lists:duplicate(Padding, Fill)].
+
+%% Prepend an explicit "+" or " " sign when the spec asked for one.
+%% NOTE(review): Arg here is the already-converted iolist, and in Erlang
+%% term order any list compares greater than the integer 0, so these
+%% "Arg >= 0" guards always hold -- the sign is prepended even for values
+%% whose conversion starts with "-". Looks unintended; confirm expected
+%% output for a spec like "{0:+d}" with a negative argument before relying
+%% on sign handling.
+fix_sign(Arg, #conversion{sign=$+}) when Arg >= 0 ->
+ [$+, Arg];
+fix_sign(Arg, #conversion{sign=$\s}) when Arg >= 0 ->
+ [$\s, Arg];
+fix_sign(Arg, _F) ->
+ Arg.
+
+%% Map a conversion-type character from a format spec to its internal
+%% ctype atom (clauses listed alphabetically; all heads are disjoint, so
+%% order is irrelevant). Unknown characters fail with function_clause,
+%% which is the intended "bad format spec" failure mode.
+ctype($b) -> bin;
+ctype($c) -> char;
+ctype($d) -> decimal;
+ctype($e) -> exp;
+ctype($f) -> fixed;
+ctype($g) -> general;
+ctype($o) -> oct;
+ctype($s) -> string;
+ctype($x) -> hex;
+ctype($X) -> upper_hex;
+ctype($\%) -> percent.
+
+%% Map an alignment character to its internal atom; unknown characters
+%% fail with function_clause.
+align($^) -> center;
+align($<) -> left;
+align($>) -> right;
+align($=) -> sign_right.
+
+%% Convert Arg to an iolist according to the resolved ctype. percent
+%% multiplies by 100 and reuses the fixed converter; char emits the raw
+%% byte below 16#80 and otherwise a UTF-8 encoding via xmerl_ucs; general
+%% prefers mochinum:digits/1 (shortest round-trip representation) and falls
+%% back to io_lib "~g" when mochinum is not loaded.
+convert2(Arg, F=#conversion{ctype=percent}) ->
+ [convert2(100.0 * Arg, F#conversion{ctype=fixed}), $\%];
+convert2(Arg, #conversion{ctype=string}) ->
+ str(Arg);
+convert2(Arg, #conversion{ctype=bin}) ->
+ erlang:integer_to_list(Arg, 2);
+convert2(Arg, #conversion{ctype=oct}) ->
+ erlang:integer_to_list(Arg, 8);
+convert2(Arg, #conversion{ctype=upper_hex}) ->
+ erlang:integer_to_list(Arg, 16);
+convert2(Arg, #conversion{ctype=hex}) ->
+ string:to_lower(erlang:integer_to_list(Arg, 16));
+convert2(Arg, #conversion{ctype=char}) when Arg < 16#80 ->
+ [Arg];
+convert2(Arg, #conversion{ctype=char}) ->
+ xmerl_ucs:to_utf8(Arg);
+convert2(Arg, #conversion{ctype=decimal}) ->
+ integer_to_list(Arg);
+convert2(Arg, #conversion{ctype=general, precision=undefined}) ->
+ try mochinum:digits(Arg)
+ catch error:undef -> io_lib:format("~g", [Arg]) end;
+convert2(Arg, #conversion{ctype=fixed, precision=undefined}) ->
+ io_lib:format("~f", [Arg]);
+convert2(Arg, #conversion{ctype=exp, precision=undefined}) ->
+ io_lib:format("~e", [Arg]);
+convert2(Arg, #conversion{ctype=general, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "g", [Arg]);
+convert2(Arg, #conversion{ctype=fixed, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "f", [Arg]);
+convert2(Arg, #conversion{ctype=exp, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "e", [Arg]).
+
+%% Python-style str(): human-readable text for common types. Lists and
+%% binaries pass through unchanged; anything unrecognized falls back to
+%% repr/1.
+str(A) when is_atom(A) ->
+ atom_to_list(A);
+str(I) when is_integer(I) ->
+ integer_to_list(I);
+str(F) when is_float(F) ->
+ try mochinum:digits(F)
+ catch error:undef -> io_lib:format("~g", [F]) end;
+str(L) when is_list(L) ->
+ L;
+str(B) when is_binary(B) ->
+ B;
+str(P) ->
+ repr(P).
+
+%% Python-style repr(): an unambiguous printed form. Floats prefer the
+%% shortest round-trip digits when mochinum is available; everything else
+%% goes through io_lib "~p".
+repr(P) when is_float(P) ->
+ try mochinum:digits(P)
+ catch error:undef -> float_to_list(P) end;
+repr(P) ->
+ io_lib:format("~p", [P]).
+
+%% Parse a PEP 3101 standard conversion spec
+%% ([[fill]align][sign]["0"][width]["." precision][type]) into a
+%% #conversion{} record. Clause order implements the grammar: the
+%% two-character fill+align form must be tried before a lone align
+%% character, "0" implies zero-fill with sign_right alignment unless an
+%% alignment was already chosen, and the final single-character clause
+%% consumes the type.
+parse_std_conversion(S) ->
+ parse_std_conversion(S, #conversion{}).
+
+parse_std_conversion("", Acc) ->
+ Acc;
+parse_std_conversion([Fill, Align | Spec], Acc)
+ when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
+ parse_std_conversion(Spec, Acc#conversion{fill_char=Fill,
+ align=align(Align)});
+parse_std_conversion([Align | Spec], Acc)
+ when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
+ parse_std_conversion(Spec, Acc#conversion{align=align(Align)});
+parse_std_conversion([Sign | Spec], Acc)
+ when Sign =:= $+ orelse Sign =:= $- orelse Sign =:= $\s ->
+ parse_std_conversion(Spec, Acc#conversion{sign=Sign});
+parse_std_conversion("0" ++ Spec, Acc) ->
+ Align = case Acc#conversion.align of
+ undefined ->
+ sign_right;
+ A ->
+ A
+ end,
+ parse_std_conversion(Spec, Acc#conversion{fill_char=$0, align=Align});
+parse_std_conversion(Spec=[D|_], Acc) when D >= $0 andalso D =< $9 ->
+ {W, Spec1} = lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec),
+ parse_std_conversion(Spec1, Acc#conversion{length=list_to_integer(W)});
+parse_std_conversion([$. | Spec], Acc) ->
+ case lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec) of
+ {"", Spec1} ->
+ parse_std_conversion(Spec1, Acc);
+ {P, Spec1} ->
+ parse_std_conversion(Spec1,
+ Acc#conversion{precision=list_to_integer(P)})
+ end;
+parse_std_conversion([Type], Acc) ->
+ parse_std_conversion("", Acc#conversion{ctype=ctype(Type)}).
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Exercise tokenize/1 on raw text, a bare field, and mixed content.
+tokenize_test() ->
+ {?MODULE, [{raw, "ABC"}]} = tokenize("ABC"),
+ {?MODULE, [{format, {"0", "", ""}}]} = tokenize("{0}"),
+ {?MODULE, [{raw, "ABC"}, {format, {"1", "", ""}}, {raw, "DEF"}]} =
+ tokenize("ABC{1}DEF"),
+ ok.
+
+%% End-to-end formatting: widths, nested width specs, number bases,
+%% char/repr conversions, proplist and dotted-path field lookup.
+format_test() ->
+ <<" -4">> = bformat("{0:4}", [-4]),
+ <<" 4">> = bformat("{0:4}", [4]),
+ <<" 4">> = bformat("{0:{0}}", [4]),
+ <<"4 ">> = bformat("{0:4}", ["4"]),
+ <<"4 ">> = bformat("{0:{0}}", ["4"]),
+ <<"1.2yoDEF">> = bformat("{2}{0}{1}{3}", {yo, "DE", 1.2, <<"F">>}),
+ <<"cafebabe">> = bformat("{0:x}", {16#cafebabe}),
+ <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
+ <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
+ <<"755">> = bformat("{0:o}", {8#755}),
+ <<"a">> = bformat("{0:c}", {97}),
+ %% Horizontal ellipsis
+ <<226, 128, 166>> = bformat("{0:c}", {16#2026}),
+ <<"11">> = bformat("{0:b}", {3}),
+ <<"11">> = bformat("{0:b}", [3]),
+ <<"11">> = bformat("{three:b}", [{three, 3}]),
+ <<"11">> = bformat("{three:b}", [{"three", 3}]),
+ <<"11">> = bformat("{three:b}", [{<<"three">>, 3}]),
+ <<"\"foo\"">> = bformat("{0!r}", {"foo"}),
+ <<"2008-5-4">> = bformat("{0.0}-{0.1}-{0.2}", {{2008,5,4}}),
+ <<"2008-05-04">> = bformat("{0.0:04}-{0.1:02}-{0.2:02}", {{2008,5,4}}),
+ <<"foo6bar-6">> = bformat("foo{1}{0}-{1}", {bar, 6}),
+ <<"-'atom test'-">> = bformat("-{arg!r}-", [{arg, 'atom test'}]),
+ <<"2008-05-04">> = bformat("{0.0:0{1.0}}-{0.1:0{1.1}}-{0.2:0{1.2}}",
+ {{2008,5,4}, {4, 2, 2}}),
+ ok.
+
+%% Formatting through the mochifmt_std delegate module.
+std_test() ->
+ M = mochifmt_std:new(),
+ <<"01">> = bformat("{0}{1}", [0, 1], M),
+ ok.
+
+%% Field lookup by record field name through mochifmt_records.
+records_test() ->
+ M = mochifmt_records:new([{conversion, record_info(fields, conversion)}]),
+ R = #conversion{length=long, precision=hard, sign=peace},
+ long = M:get_value("length", R),
+ hard = M:get_value("precision", R),
+ peace = M:get_value("sign", R),
+ <<"long hard">> = bformat("{length} {precision}", R, M),
+ <<"long hard">> = bformat("{0.length} {0.precision}", [R], M),
+ ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc Formatter that understands records.
+%%
+%% Usage:
+%%
+%% 1> M = mochifmt_records:new([{rec, record_info(fields, rec)}]),
+%% M:format("{0.bar}", [#rec{bar=foo}]).
+%% foo
+
+-module(mochifmt_records).
+-author('bob@mochimedia.com').
+-export([new/1, get_value/3]).
+
+%% Build the formatter instance. Recs must be a single
+%% {RecordTag, FieldNames} pair; the head pattern and guard reject any
+%% other shape at construction time.
+new([{_Rec, RecFields}]=Recs) when is_list(RecFields) ->
+ {?MODULE, Recs}.
+
+%% @doc Look up Key in Rec. When Rec is a record whose field list was
+%% registered in new/1, Key is resolved as a field name via
+%% list_to_existing_atom/1 (never creating new atoms) and the matching
+%% tuple element is returned. Any failure on that path -- unknown atom,
+%% unregistered record tag, missing field -- falls back to
+%% mochifmt:get_value/2, as does any non-record argument.
+%% (Guard tests are now comma-separated instead of joined with the
+%% non-short-circuit 'and' operator; behavior is identical, idiom is
+%% standard, and element/2 is no longer evaluated for non-tuples.)
+get_value(Key, Rec, {?MODULE, Recs})
+  when is_tuple(Rec), is_atom(element(1, Rec)) ->
+    try
+        FieldAtom = list_to_existing_atom(Key),
+        {_, Fields} = proplists:lookup(element(1, Rec), Recs),
+        element(get_rec_index(FieldAtom, Fields, 2), Rec)
+    catch
+        error:_ -> mochifmt:get_value(Key, Rec)
+    end;
+get_value(Key, Args, {?MODULE, _Recs}) ->
+    mochifmt:get_value(Key, Args).
+
+%% Return the tuple index of Field within the record field-name list,
+%% where the first list element corresponds to position Base (callers pass
+%% 2 because element 1 of a record tuple is the tag). A missing field
+%% eventually crashes with function_clause on the empty list.
+get_rec_index(Field, [Head | Tail], Base) ->
+    case Head of
+        Field -> Base;
+        _ -> get_rec_index(Field, Tail, Base + 1)
+    end.
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc Template module for a mochifmt formatter.
+
+-module(mochifmt_std).
+-author('bob@mochimedia.com').
+-export([new/0, format/3, get_value/3, format_field/3, get_field/3, convert_field/3]).
+
+%% Construct the stateless formatter instance (parameterized-module style
+%% tuple). Every callback below delegates straight to mochifmt; callbacks
+%% that take THIS pass the instance along so mochifmt can call back into
+%% this module.
+new() ->
+ {?MODULE}.
+
+format(Format, Args, {?MODULE}=THIS) ->
+ mochifmt:format(Format, Args, THIS).
+
+get_field(Key, Args, {?MODULE}=THIS) ->
+ mochifmt:get_field(Key, Args, THIS).
+
+convert_field(Key, Args, {?MODULE}) ->
+ mochifmt:convert_field(Key, Args).
+
+get_value(Key, Args, {?MODULE}) ->
+ mochifmt:get_value(Key, Args).
+
+format_field(Arg, Format, {?MODULE}=THIS) ->
+ mochifmt:format_field(Arg, Format, THIS).
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+
+%% @doc Utilities for working with hexadecimal strings.
+
+-module(mochihex).
+-author('bob@mochimedia.com').
+
+-export([to_hex/1, to_bin/1, to_int/1, dehex/1, hexdigit/1]).
+
+%% @spec to_hex(integer | iolist()) -> string()
+%% @doc Convert an iolist to a hexadecimal string.
+%% Integers must be non-negative: negatives fail the second clause's guard
+%% and then crash in iolist_to_binary/1 in the iolist clause.
+to_hex(0) ->
+ "0";
+to_hex(I) when is_integer(I), I > 0 ->
+ to_hex_int(I, []);
+to_hex(B) ->
+ to_hex(iolist_to_binary(B), []).
+
+%% @spec to_bin(string()) -> binary()
+%% @doc Convert a hexadecimal string to a binary.
+%% Input length must be even (two hex digits per output byte).
+to_bin(L) ->
+ to_bin(L, []).
+
+%% @spec to_int(string()) -> integer()
+%% @doc Convert a hexadecimal string to an integer.
+to_int(L) ->
+ erlang:list_to_integer(L, 16).
+
+%% @spec dehex(char()) -> integer()
+%% @doc Convert a hex digit to its integer value.
+%% The three digit ranges are disjoint, so clause order is irrelevant;
+%% any character outside [0-9a-fA-F] fails with function_clause.
+dehex(Digit) when Digit >= $a, Digit =< $f ->
+    10 + (Digit - $a);
+dehex(Digit) when Digit >= $A, Digit =< $F ->
+    10 + (Digit - $A);
+dehex(Digit) when Digit >= $0, Digit =< $9 ->
+    Digit - $0.
+
+%% @spec hexdigit(integer()) -> char()
+%% @doc Convert an integer less than 16 to a hex digit.
+%% Lower-case output; the second clause is reached for 10..15 since the
+%% first clause claims 0..9 (guards kept exactly as the contract: the
+%% caller supplies an integer less than 16).
+hexdigit(Value) when Value >= 0, Value =< 9 ->
+    $0 + Value;
+hexdigit(Value) when Value =< 15 ->
+    $a + (Value - 10).
+
+%% Internal API
+
+%% Binary worker for to_hex/1: peel one nibble pair per byte, accumulate
+%% hex digits in reverse, reverse once at the end.
+to_hex(<<>>, Acc) ->
+ lists:reverse(Acc);
+to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
+ to_hex(Rest, [hexdigit(C2), hexdigit(C1) | Acc]).
+
+%% Integer worker for to_hex/1: emit the low nibble and shift right until
+%% zero; digits come out most-significant first because each new digit is
+%% consed in front of the accumulator.
+to_hex_int(0, Acc) ->
+ Acc;
+to_hex_int(I, Acc) ->
+ to_hex_int(I bsr 4, [hexdigit(I band 15) | Acc]).
+
+%% Worker for to_bin/1: consume hex digits two at a time; an odd-length
+%% input fails with function_clause, which is the intended error.
+to_bin([], Acc) ->
+ iolist_to_binary(lists:reverse(Acc));
+to_bin([C1, C2 | Rest], Acc) ->
+ to_bin(Rest, [(dehex(C1) bsl 4) bor dehex(C2) | Acc]).
+
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% to_hex/1 accepts both iolists and non-negative integers.
+to_hex_test() ->
+ "ff000ff1" = to_hex([255, 0, 15, 241]),
+ "ff000ff1" = to_hex(16#ff000ff1),
+ "0" = to_hex(16#0),
+ ok.
+
+%% to_bin/1 is case-insensitive on input digits.
+to_bin_test() ->
+ <<255, 0, 15, 241>> = to_bin("ff000ff1"),
+ <<255, 0, 10, 161>> = to_bin("Ff000aA1"),
+ ok.
+
+%% to_int/1 parses mixed-case hex strings.
+to_int_test() ->
+ 16#ff000ff1 = to_int("ff000ff1"),
+ 16#ff000aa1 = to_int("FF000Aa1"),
+ 16#0 = to_int("0"),
+ ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang.
+-module(mochijson).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+-export([binary_encoder/1, binary_encode/1]).
+-export([binary_decoder/1, binary_decode/1]).
+
+% This is a macro to placate syntax highlighters..
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{column=1, line=1+S#decoder.line}).
+
+%% @type json_string() = atom | string() | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = {array, [json_term()]}
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object()
+%% @type encoding() = utf8 | unicode
+%% @type encoder_option() = {input_encoding, encoding()} |
+%% {handler, function()}
+%% @type decoder_option() = {input_encoding, encoding()} |
+%% {object_hook, function()}
+%% @type bjson_string() = binary()
+%% @type bjson_number() = integer() | float()
+%% @type bjson_array() = [bjson_term()]
+%% @type bjson_object() = {struct, [{bjson_string(), bjson_term()}]}
+%% @type bjson_term() = bjson_string() | bjson_number() | bjson_array() |
+%% bjson_object()
+%% @type binary_encoder_option() = {handler, function()}
+%% @type binary_decoder_option() = {object_hook, function()}
+
+-record(encoder, {input_encoding=unicode,
+ handler=null}).
+
+-record(decoder, {input_encoding=utf8,
+ object_hook=null,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+%% The returned fun closes over the parsed option state.
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
+
+%% The binary_* API delegates to mochijson2, which represents strings as
+%% binaries and arrays as plain lists instead of this module's
+%% list-strings and {array, _} tuples.
+%% @spec binary_decoder([binary_decoder_option()]) -> function()
+%% @doc Create a binary_decoder/1 with the given options.
+binary_decoder(Options) ->
+ mochijson2:decoder(Options).
+
+%% @spec binary_encoder([binary_encoder_option()]) -> function()
+%% @doc Create a binary_encoder/1 with the given options.
+binary_encoder(Options) ->
+ mochijson2:encoder(Options).
+
+%% @spec binary_encode(bjson_term()) -> iolist()
+%% @doc Encode the given as JSON to an iolist, using lists for arrays and
+%% binaries for strings.
+binary_encode(Any) ->
+ mochijson2:encode(Any).
+
+%% @spec binary_decode(iolist()) -> bjson_term()
+%% @doc Decode the given iolist to Erlang terms, using lists for arrays and
+%% binaries for strings.
+binary_decode(S) ->
+ mochijson2:decode(S).
+
+%% Internal API
+
+%% Fold the option proplists into the state records. An unrecognized
+%% option fails with function_clause -- deliberate fail-fast on typos.
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{input_encoding=Encoding});
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{input_encoding=Encoding});
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+%% Encode one json_term() to an iolist. Note that bare lists, binaries and
+%% atoms are all treated as strings -- arrays MUST be tagged {array, L} and
+%% objects {struct, Props}. Terms matching no clause go to the user handler
+%% when one was configured, otherwise exit with {json_encode, {bad_term, _}}.
+json_encode(true, _State) ->
+ "true";
+json_encode(false, _State) ->
+ "false";
+json_encode(null, _State) ->
+ "null";
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum:digits(F);
+json_encode(L, State) when is_list(L); is_binary(L); is_atom(L) ->
+ json_encode_string(L, State);
+json_encode({array, Props}, State) when is_list(Props) ->
+ json_encode_array(Props, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+%% Encode a JSON array. The fold builds the output in reverse with a comma
+%% before every element; the leading comma of the reversed result is then
+%% stripped by the [$, | Acc1] match and the closing bracket consed on
+%% before the final reverse.
+json_encode_array([], _State) ->
+ "[]";
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+%% Encode a JSON object from a proplist using the same reversed-fold trick.
+%% Keys may be atoms, integers, lists or binaries; atoms bypass the
+%% encoder's input_encoding and are treated as UTF-8.
+json_encode_proplist([], _State) ->
+ "{}";
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = case K of
+ K when is_atom(K) ->
+ json_encode_string_utf8(atom_to_list(K));
+ K when is_integer(K) ->
+ json_encode_string(integer_to_list(K), State);
+ K when is_list(K); is_binary(K) ->
+ json_encode_string(K, State)
+ end,
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+%% Encode a string value. Atoms and binaries are assumed to hold UTF-8 and
+%% are widened to code-point lists via xmerl_ucs; plain lists are routed by
+%% the encoder's configured input_encoding.
+json_encode_string(A, _State) when is_atom(A) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(atom_to_list(A)));
+json_encode_string(B, _State) when is_binary(B) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B));
+json_encode_string(S, #encoder{input_encoding=utf8}) ->
+ json_encode_string_utf8(S);
+json_encode_string(S, #encoder{input_encoding=unicode}) ->
+ json_encode_string_unicode(S).
+
+json_encode_string_utf8(S) ->
+ [?Q | json_encode_string_utf8_1(S)].
+
+%% ASCII fast path: printable characters pass through, specials get their
+%% two-character escapes, remaining control characters become \uXXXX. On
+%% the first byte >= 16#80 the whole remainder is decoded from UTF-8 and
+%% re-encoded by the unicode path (its opening quote is stripped since ours
+%% was already emitted).
+json_encode_string_utf8_1([C | Cs]) when C >= 0, C =< 16#7f ->
+ NewC = case C of
+ $\\ -> "\\\\";
+ ?Q -> "\\\"";
+ _ when C >= $\s, C < 16#7f -> C;
+ $\t -> "\\t";
+ $\n -> "\\n";
+ $\r -> "\\r";
+ $\f -> "\\f";
+ $\b -> "\\b";
+ _ when C >= 0, C =< 16#7f -> unihex(C);
+ _ -> exit({json_encode, {bad_char, C}})
+ end,
+ [NewC | json_encode_string_utf8_1(Cs)];
+json_encode_string_utf8_1(All=[C | _]) when C >= 16#80, C =< 16#10FFFF ->
+ [?Q | Rest] = json_encode_string_unicode(xmerl_ucs:from_utf8(All)),
+ Rest;
+json_encode_string_utf8_1([]) ->
+ "\"".
+
+%% Encode a list of Unicode code points: printable ASCII passes through,
+%% specials get two-character escapes, and every other code point up to
+%% 16#10FFFF becomes one or two \uXXXX escapes via unihex/1.
+json_encode_string_unicode(S) ->
+ [?Q | json_encode_string_unicode_1(S)].
+
+json_encode_string_unicode_1([C | Cs]) ->
+ NewC = case C of
+ $\\ -> "\\\\";
+ ?Q -> "\\\"";
+ _ when C >= $\s, C < 16#7f -> C;
+ $\t -> "\\t";
+ $\n -> "\\n";
+ $\r -> "\\r";
+ $\f -> "\\f";
+ $\b -> "\\b";
+ _ when C >= 0, C =< 16#10FFFF -> unihex(C);
+ _ -> exit({json_encode, {bad_char, C}})
+ end,
+ [NewC | json_encode_string_unicode_1(Cs)];
+json_encode_string_unicode_1([]) ->
+ "\"".
+
+%% Hex digit -> value. The ranges are disjoint so clause order does not
+%% matter; anything outside [0-9a-fA-F] fails with function_clause.
+dehex(Digit) when Digit >= $a, Digit =< $f ->
+    10 + (Digit - $a);
+dehex(Digit) when Digit >= $A, Digit =< $F ->
+    10 + (Digit - $A);
+dehex(Digit) when Digit >= $0, Digit =< $9 ->
+    Digit - $0.
+
+%% Value (0..15) -> lower-case hex digit; the second clause only sees
+%% 10..15 because the first claims 0..9.
+hexdigit(Value) when Value >= 0, Value =< 9 ->
+    $0 + Value;
+hexdigit(Value) when Value =< 15 ->
+    $a + (Value - 10).
+
+%% Render a code point as JSON \uXXXX escape(s). BMP code points produce a
+%% single escape built from the four nibbles of a 16-bit encoding; code
+%% points above 16#FFFF are split into a UTF-16 surrogate pair (high
+%% surrogate 16#D800.., low surrogate 16#DC00..) and emitted as two
+%% escapes, per RFC 4627.
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+%% Decode entry point: binaries are flattened to a char list, then one
+%% top-level value is parsed and the remainder is checked to contain only
+%% trailing whitespace (the lexer -- tokenize/2, defined below -- must
+%% report eof), so trailing garbage is a badmatch error.
+json_decode(B, S) when is_binary(B) ->
+ json_decode(binary_to_list(B), S);
+json_decode(L, S) ->
+ {Res, L1, S1} = decode1(L, S),
+ {eof, [], _} = tokenize(L1, S1#decoder{state=trim}),
+ Res.
+
+%% Parse exactly one JSON value starting at the next token: a constant, an
+%% array, or an object. Any other token is a syntax error (case_clause).
+decode1(L, S=#decoder{state=null}) ->
+ case tokenize(L, S#decoder{state=any}) of
+ {{const, C}, L1, S1} ->
+ {C, L1, S1};
+ {start_array, L1, S1} ->
+ decode_array(L1, S1#decoder{state=any}, []);
+ {start_object, L1, S1} ->
+ decode_object(L1, S1#decoder{state=key}, [])
+ end.
+
+%% Apply the user's object_hook (if configured) to a finished {struct, _}.
+make_object(V, #decoder{object_hook=null}) ->
+ V;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+decode_object(L, S=#decoder{state=key}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {{const, K}, Rest, S1} when is_list(K) ->
+ {colon, L2, S2} = tokenize(Rest, S1),
+ {V, L3, S3} = decode1(L2, S2#decoder{state=null}),
+ decode_object(L3, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_object(Rest, S1#decoder{state=key}, Acc)
+ end.
+
+%% Decode the elements of a JSON array. In state 'any' we expect the
+%% closing bracket or a value (nested array/object/constant); in state
+%% 'comma' we expect the closing bracket or a comma. Elements accumulate
+%% reversed in Acc and the result is {array, Elements} in input order.
+decode_array(L, S=#decoder{state=any}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
+ {start_array, Rest, S1} ->
+ {Array, Rest1, S2} = decode_array(Rest, S1#decoder{state=any}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, Rest, S1} ->
+ {Array, Rest1, S2} = decode_object(Rest, S1#decoder{state=key}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=any}, Acc)
+ end.
+
+%% Scan the body of a JSON string (opening quote already consumed),
+%% expanding backslash escapes and \uXXXX sequences, until the closing
+%% quote. Returns {String, Rest, State}. If the input is utf8-encoded
+%% and a non-ASCII byte (or embedded list/binary) is seen, the remainder
+%% is first converted to a unicode codepoint list.
+tokenize_string(IoList=[C | _], S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
+ tokenize_string(List, S#decoder{input_encoding=unicode}, Acc);
+tokenize_string("\"" ++ Rest, S, Acc) ->
+ {lists:reverse(Acc), Rest, ?INC_COL(S)};
+tokenize_string("\\\"" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\" | Acc]);
+tokenize_string("\\\\" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
+tokenize_string("\\/" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$/ | Acc]);
+tokenize_string("\\b" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\b | Acc]);
+tokenize_string("\\f" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\f | Acc]);
+tokenize_string("\\n" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\n | Acc]);
+tokenize_string("\\r" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\r | Acc]);
+tokenize_string("\\t" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\t | Acc]);
+tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
+ % coalesce UTF-16 surrogate pair?
+ C = dehex(C0) bor
+ (dehex(C1) bsl 4) bor
+ (dehex(C2) bsl 8) bor
+ (dehex(C3) bsl 12),
+ tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
+%% NOTE(review): this guard uses ';' (OR), so any codepoint below
+%% 16#10FFFF passes — including unescaped control characters that strict
+%% JSON (RFC 8259) forbids. Presumably intentional leniency; confirm
+%% before tightening to ',' (AND).
+tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
+ tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
+
+%% Scan a JSON number using a small state machine; Mode is one of
+%% sign -> int -> int1 -> frac -> frac1 -> esign -> eint -> eint1.
+%% Returns {{int, Digits}, Rest, S} for integers or {{float, Digits},
+%% Rest, S} for reals; Digits is the reversed-accumulated char list.
+%% Note the frac/E case injects "0." so list_to_float/1 accepts "1e5".
+tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
+ tokenize_number(List, Mode, S#decoder{input_encoding=unicode}, Acc);
+tokenize_number([$- | Rest], sign, S, []) ->
+ tokenize_number(Rest, int, ?INC_COL(S), [$-]);
+tokenize_number(Rest, sign, S, []) ->
+ tokenize_number(Rest, int, S, []);
+tokenize_number([$0 | Rest], int, S, Acc) ->
+ tokenize_number(Rest, frac, ?INC_COL(S), [$0 | Acc]);
+tokenize_number([C | Rest], int, S, Acc) when C >= $1, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], int1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, int1, S, Acc) ->
+ tokenize_number(Rest, frac, S, Acc);
+tokenize_number([$., C | Rest], frac, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+tokenize_number([E | Rest], frac, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+tokenize_number(Rest, frac, S, Acc) ->
+ {{int, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], frac1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?INC_COL(S), [C | Acc]);
+tokenize_number([E | Rest], frac1, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e | Acc]);
+tokenize_number(Rest, frac1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], esign, S, Acc) when C == $-; C == $+ ->
+ tokenize_number(Rest, eint, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, esign, S, Acc) ->
+ tokenize_number(Rest, eint, S, Acc);
+tokenize_number([C | Rest], eint, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], eint1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, eint1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S}.
+
+%% Produce the next JSON token from the input, skipping whitespace and
+%% flattening nested iolist structure (embedded lists and utf8 binaries)
+%% on the fly. Returns {Token, Rest, State} where Token is one of:
+%% eof, start_object, end_object, start_array, end_array, comma, colon,
+%% or {const, Value} for null/true/false/strings/numbers.
+tokenize([], S=#decoder{state=trim}) ->
+ {eof, [], S};
+tokenize([L | Rest], S) when is_list(L) ->
+ tokenize(L ++ Rest, S);
+tokenize([B | Rest], S) when is_binary(B) ->
+ tokenize(xmerl_ucs:from_utf8(B) ++ Rest, S);
+tokenize("\r\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize("\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize([C | Rest], S) when C == $\s; C == $\t ->
+ tokenize(Rest, ?INC_COL(S));
+tokenize("{" ++ Rest, S) ->
+ {start_object, Rest, ?INC_COL(S)};
+tokenize("}" ++ Rest, S) ->
+ {end_object, Rest, ?INC_COL(S)};
+tokenize("[" ++ Rest, S) ->
+ {start_array, Rest, ?INC_COL(S)};
+tokenize("]" ++ Rest, S) ->
+ {end_array, Rest, ?INC_COL(S)};
+tokenize("," ++ Rest, S) ->
+ {comma, Rest, ?INC_COL(S)};
+tokenize(":" ++ Rest, S) ->
+ {colon, Rest, ?INC_COL(S)};
+tokenize("null" ++ Rest, S) ->
+ {{const, null}, Rest, ?ADV_COL(S, 4)};
+tokenize("true" ++ Rest, S) ->
+ {{const, true}, Rest, ?ADV_COL(S, 4)};
+tokenize("false" ++ Rest, S) ->
+ {{const, false}, Rest, ?ADV_COL(S, 5)};
+tokenize("\"" ++ Rest, S) ->
+ {String, Rest1, S1} = tokenize_string(Rest, ?INC_COL(S), []),
+ {{const, String}, Rest1, S1};
+tokenize(L=[C | _], S) when C >= $0, C =< $9; C == $- ->
+ case tokenize_number(L, sign, S, []) of
+ {{int, Int}, Rest, S1} ->
+ {{const, list_to_integer(Int)}, Rest, S1};
+ {{float, Float}, Rest, S1} ->
+ {{const, list_to_float(Float)}, Rest, S1}
+ end.
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+obj_new() ->
+ {struct, []}.
+
+%% True iff the term is a {struct, Props} whose keys are all strings
+%% (char lists) — the shape this decoder produces for JSON objects.
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_list(K) ->
+ true;
+ (_) ->
+ false
+ end,
+ lists:all(F, Props).
+
+%% Build a {struct, Props} object, crashing if any key is not a string.
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ case is_obj(Obj) of
+ true -> Obj;
+ false -> exit(json_bad_object)
+ end.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv({array, L1}, {array, L2}) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(S1, S2) when is_list(S1), is_list(S2) -> S1 == S2;
+equiv(true, true) -> true;
+equiv(false, false) -> true;
+equiv(null, null) -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ %% lists:zip/2 crashes if the objects differ in size, which doubles
+ %% as the length assertion here.
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+%% Run every encode/decode vector in e2j_test_vec/1.
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+issue33_test() ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=33
+ Js = {struct, [{"key", [194, 163]}]},
+ Encoder = encoder([{input_encoding, utf8}]),
+ "{\"key\":\"\\u00a3\"}" = lists:flatten(Encoder(Js)).
+
+%% For each {Term, Json} pair, check that Json decodes to Term and that
+%% encode/decode round-trips Term.
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+%% Table of {ErlangTerm, JsonText} round-trip vectors used by
+%% e2j_vec_test/0 via test_one/2.
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, % text representation may truncate, trail zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {"foo", "\"foo\""},
+ {"foo" ++ [5] ++ "bar", "\"foo\\u0005bar\""},
+ {"", "\"\""},
+ {"\"", "\"\\\"\""},
+ {"\n\n\n", "\"\\n\\n\\n\""},
+ {"\\", "\"\\\\\""},
+ {"\" \b\f\r\n\t\"", "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{"foo", "bar"}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{"foo", "bar"}, {"baz", 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {{array, []}, "[]"},
+ {{array, [{array, []}]}, "[[]]"},
+ {{array, [1, "foo"]}, "[1,\"foo\"]"},
+
+ % json array in a json object
+ {obj_from_list([{"foo", {array, [123]}}]),
+ "{\"foo\":[123]}"},
+
+ % json object in a json object
+ {obj_from_list([{"foo", obj_from_list([{"bar", true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ % fold evaluation order
+ {obj_from_list([{"foo", {array, []}},
+ {"bar", obj_from_list([{"baz", true}])},
+ {"alice", "bob"}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ % json object in a json array
+ {{array, [-123, "foo", obj_from_list([{"bar", {array, []}}]), null]},
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+-endif.
--- /dev/null
+%% @copyright Copyright (c) 2010 Mochi Media, Inc.
+%% @author David Reid <dreid@mochimedia.com>
+
+%% @doc Utility functions for dealing with proplists.
+
+-module(mochilists).
+-author("David Reid <dreid@mochimedia.com>").
+-export([get_value/2, get_value/3, is_defined/2, set_default/2, set_defaults/2]).
+
+%% @spec set_default({Key::term(), Value::term()}, Proplist::list()) -> list()
+%%
+%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
+%% The new pair is prepended; existing entries are never replaced.
+set_default({Key, Value}, Proplist) ->
+ case is_defined(Key, Proplist) of
+ true ->
+ Proplist;
+ false ->
+ [{Key, Value} | Proplist]
+ end.
+
+%% @spec set_defaults([{Key::term(), Value::term()}], Proplist::list()) -> list()
+%%
+%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
+%% Applies set_default/2 left-to-right over DefaultProps.
+set_defaults(DefaultProps, Proplist) ->
+ lists:foldl(fun set_default/2, Proplist, DefaultProps).
+
+
+%% @spec is_defined(Key::term(), Proplist::list()) -> bool()
+%%
+%% @doc Returns true if Propist contains at least one entry associated
+%% with Key, otherwise false is returned.
+%% Only 2-tuples with Key in the first position are considered.
+is_defined(Key, Proplist) ->
+ lists:keyfind(Key, 1, Proplist) =/= false.
+
+
+%% @spec get_value(Key::term(), Proplist::list()) -> term() | undefined
+%%
+%% @doc Return the value of <code>Key</code> or undefined
+get_value(Key, Proplist) ->
+ get_value(Key, Proplist, undefined).
+
+%% @spec get_value(Key::term(), Proplist::list(), Default::term()) -> term()
+%%
+%% @doc Return the value of <code>Key</code> or <code>Default</code>
+get_value(_Key, [], Default) ->
+ Default;
+get_value(Key, Proplist, Default) ->
+ %% First match wins; keyfind returns false only when Key is absent.
+ case lists:keyfind(Key, 1, Proplist) of
+ false ->
+ Default;
+ {Key, Value} ->
+ Value
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% EUnit coverage for set_defaults/2, set_default/2 and get_value/2,3.
+set_defaults_test() ->
+ ?assertEqual(
+ [{k, v}],
+ set_defaults([{k, v}], [])),
+ ?assertEqual(
+ [{k, v}],
+ set_defaults([{k, vee}], [{k, v}])),
+ ?assertEqual(
+ lists:sort([{kay, vee}, {k, v}]),
+ lists:sort(set_defaults([{k, vee}, {kay, vee}], [{k, v}]))),
+ ok.
+
+set_default_test() ->
+ ?assertEqual(
+ [{k, v}],
+ set_default({k, v}, [])),
+ ?assertEqual(
+ [{k, v}],
+ set_default({k, vee}, [{k, v}])),
+ ok.
+
+get_value_test() ->
+ ?assertEqual(
+ undefined,
+ get_value(foo, [])),
+ ?assertEqual(
+ undefined,
+ get_value(foo, [{bar, baz}])),
+ ?assertEqual(
+ bar,
+ get_value(foo, [{foo, bar}])),
+ ?assertEqual(
+ default,
+ get_value(foo, [], default)),
+ ?assertEqual(
+ default,
+ get_value(foo, [{bar, baz}], default)),
+ ?assertEqual(
+ bar,
+ get_value(foo, [{foo, bar}], default)),
+ ok.
+
+-endif.
+
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Write newline delimited log files, ensuring that if a truncated
+%% entry is found on log open then it is fixed before writing. Uses
+%% delayed writes and raw files for performance.
+-module(mochilogfile2).
+-author('bob@mochimedia.com').
+
+-export([open/1, write/2, close/1, name/1]).
+
+%% @spec open(Name) -> Handle
+%% @doc Open the log file Name, creating or appending as necessary. All data
+%% at the end of the file will be truncated until a newline is found, to
+%% ensure that all records are complete.
+%% Handle is an opaque {?MODULE, Name, FD} tuple.
+open(Name) ->
+ {ok, FD} = file:open(Name, [raw, read, write, delayed_write, binary]),
+ fix_log(FD),
+ {?MODULE, Name, FD}.
+
+%% @spec name(Handle) -> string()
+%% @doc Return the path of the log file.
+name({?MODULE, Name, _FD}) ->
+ Name.
+
+%% @spec write(Handle, IoData) -> ok
+%% @doc Write IoData to the log file referenced by Handle.
+%% A trailing newline is always appended to terminate the record.
+write({?MODULE, _Name, FD}, IoData) ->
+ ok = file:write(FD, [IoData, $\n]),
+ ok.
+
+%% @spec close(Handle) -> ok
+%% @doc Close the log file referenced by Handle.
+%% Syncs first so delayed writes are flushed to disk before close.
+close({?MODULE, _Name, FD}) ->
+ ok = file:sync(FD),
+ ok = file:close(FD),
+ ok.
+
+%% Truncate the file after the last newline so a partially-written final
+%% record (e.g. from a crash) is discarded before appending new entries.
+fix_log(FD) ->
+ {ok, Location} = file:position(FD, eof),
+ Seek = find_last_newline(FD, Location),
+ {ok, Seek} = file:position(FD, Seek),
+ ok = file:truncate(FD),
+ ok.
+
+%% Seek backwards to the last valid log entry
+%% Returns the byte offset just after the last newline, or 0 if none is
+%% found (the whole file is then considered a truncated record).
+%% NOTE(review): a pread error ({error, _}) would hit a case_clause here
+%% — presumably acceptable crash-on-IO-failure; confirm.
+find_last_newline(_FD, N) when N =< 1 ->
+ 0;
+find_last_newline(FD, Location) ->
+ case file:pread(FD, Location - 1, 1) of
+ {ok, <<$\n>>} ->
+ Location;
+ {ok, _} ->
+ find_last_newline(FD, Location - 1)
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+%% EUnit tests exercising open/close/name/write against a real temp dir.
+name_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "open_close_test.log"),
+ H = open(FileName),
+ ?assertEqual(
+ FileName,
+ name(H)),
+ close(H),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+open_close_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "open_close_test.log"),
+ OpenClose = fun () ->
+ H = open(FileName),
+ ?assertEqual(
+ true,
+ filelib:is_file(FileName)),
+ ok = close(H),
+ ?assertEqual(
+ {ok, <<>>},
+ file:read_file(FileName)),
+ ok
+ end,
+ OpenClose(),
+ OpenClose(),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+write_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "write_test.log"),
+ F = fun () ->
+ H = open(FileName),
+ write(H, "test line"),
+ close(H),
+ ok
+ end,
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\n">>},
+ file:read_file(FileName)),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\ntest line\n">>},
+ file:read_file(FileName)),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+%% Verify that open/1 truncates a trailing partial record (no final
+%% newline) before new writes are appended.
+%% Fix: unlike the sibling tests (name_test, write_test), this test
+%% leaked its temp file and directory — clean them up before returning.
+fix_log_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "write_test.log"),
+ file:write_file(FileName, <<"first line good\nsecond line bad">>),
+ F = fun () ->
+ H = open(FileName),
+ write(H, "test line"),
+ close(H),
+ ok
+ end,
+ F(),
+ ?assertEqual(
+ {ok, <<"first line good\ntest line\n">>},
+ file:read_file(FileName)),
+ file:write_file(FileName, <<"first line bad">>),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\n">>},
+ file:read_file(FileName)),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\ntest line\n">>},
+ file:read_file(FileName)),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Create temporary files and directories.
+
+-module(mochitemp).
+-export([gettempdir/0]).
+-export([mkdtemp/0, mkdtemp/3]).
+-export([rmtempdir/1]).
+%% -export([mkstemp/4]).
+-define(SAFE_CHARS, {$a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m,
+ $n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z,
+ $A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K, $L, $M,
+ $N, $O, $P, $Q, $R, $S, $T, $U, $V, $W, $X, $Y, $Z,
+ $0, $1, $2, $3, $4, $5, $6, $7, $8, $9, $_}).
+-define(TMP_MAX, 10000).
+
+-include_lib("kernel/include/file.hrl").
+
+%% TODO: An ugly wrapper over the mktemp tool with open_port and sadness?
+%% We can't implement this race-free in Erlang without the ability
+%% to issue O_CREAT|O_EXCL. I suppose we could hack something with
+%% mkdtemp, del_dir, open.
+%% mkstemp(Suffix, Prefix, Dir, Options) ->
+%% ok.
+
+%% Remove Dir recursively. file:del_dir/1 returns {error, eexist} for a
+%% non-empty directory, in which case the contents are deleted first and
+%% the removal retried; any other error crashes.
+rmtempdir(Dir) ->
+ case file:del_dir(Dir) of
+ {error, eexist} ->
+ ok = rmtempdirfiles(Dir),
+ ok = file:del_dir(Dir);
+ ok ->
+ ok
+ end.
+
+%% Delete every entry inside Dir, recursing into subdirectories via
+%% rmtempdir/1 and deleting plain files directly.
+rmtempdirfiles(Dir) ->
+ {ok, Files} = file:list_dir(Dir),
+ ok = rmtempdirfiles(Dir, Files).
+
+rmtempdirfiles(_Dir, []) ->
+ ok;
+rmtempdirfiles(Dir, [Basename | Rest]) ->
+ Path = filename:join([Dir, Basename]),
+ case filelib:is_dir(Path) of
+ true ->
+ ok = rmtempdir(Path);
+ false ->
+ ok = file:delete(Path)
+ end,
+ rmtempdirfiles(Dir, Rest).
+
+%% Create a uniquely-named temporary directory and return its path.
+%% mkdtemp/0 uses prefix "tmp", no suffix, under gettempdir/0.
+mkdtemp() ->
+ mkdtemp("", "tmp", gettempdir()).
+
+%% Create a temp dir named Prefix ++ <random> ++ Suffix under Dir,
+%% retrying up to ?TMP_MAX times on name collisions.
+mkdtemp(Suffix, Prefix, Dir) ->
+ mkdtemp_n(rngpath_fun(Suffix, Prefix, Dir), ?TMP_MAX).
+
+
+
+%% Try up to N randomly-generated candidate paths; a collision (eexist)
+%% retries with a fresh name, and the final attempt lets the throw
+%% propagate to the caller.
+mkdtemp_n(RngPath, 1) ->
+ make_dir(RngPath());
+mkdtemp_n(RngPath, N) ->
+ try make_dir(RngPath())
+ catch throw:{error, eexist} ->
+ mkdtemp_n(RngPath, N - 1)
+ end.
+
+%% Create Path, throwing {error, eexist} on collision (so mkdtemp_n can
+%% retry) and tightening permissions to 0700 after creation.
+make_dir(Path) ->
+ case file:make_dir(Path) of
+ ok ->
+ ok;
+ E={error, eexist} ->
+ throw(E)
+ end,
+ %% Small window for a race condition here because dir is created 777
+ ok = file:write_file_info(Path, #file_info{mode=8#0700}),
+ Path.
+
+%% Return a closure generating candidate paths of the form
+%% Dir/Prefix ++ <6 random safe chars> ++ Suffix.
+%% Fix: the sole caller, mkdtemp/3, invokes rngpath_fun(Suffix, Prefix,
+%% Dir), but the head previously bound the first argument as Prefix —
+%% producing swapped "suffixRANDOMprefix" names. Bind the arguments in
+%% the order the caller passes them.
+rngpath_fun(Suffix, Prefix, Dir) ->
+ fun () ->
+ filename:join([Dir, Prefix ++ rngchars(6) ++ Suffix])
+ end.
+
+%% Generate N random characters drawn from ?SAFE_CHARS
+%% (alphanumerics plus underscore — filesystem-safe).
+rngchars(0) ->
+ "";
+rngchars(N) ->
+ [rngchar() | rngchars(N - 1)].
+
+rngchar() ->
+ rngchar(mochiweb_util:rand_uniform(0, tuple_size(?SAFE_CHARS))).
+
+%% Map a 0-based index to the corresponding ?SAFE_CHARS element.
+rngchar(C) ->
+ element(1 + C, ?SAFE_CHARS).
+
+%% @spec gettempdir() -> string()
+%% @doc Get a usable temporary directory using the first of these that is a directory:
+%% $TMPDIR, $TMP, $TEMP, "/tmp", "/var/tmp", "/usr/tmp", ".".
+gettempdir() ->
+ gettempdir(gettempdir_checks(), fun normalize_dir/1).
+
+%% Candidate sources, each a {Lookup, Args} pair tried in order.
+gettempdir_checks() ->
+ [{fun os:getenv/1, ["TMPDIR", "TMP", "TEMP"]},
+ {fun gettempdir_identity/1, ["/tmp", "/var/tmp", "/usr/tmp"]},
+ {fun gettempdir_cwd/1, [cwd]}].
+
+gettempdir_identity(L) ->
+ L.
+
+gettempdir_cwd(cwd) ->
+ {ok, L} = file:get_cwd(),
+ L.
+
+%% Walk the candidate list, returning the first candidate Normalize
+%% accepts. Note: no clause for an empty outer list — the cwd fallback
+%% is assumed to always succeed.
+gettempdir([{_F, []} | RestF], Normalize) ->
+ gettempdir(RestF, Normalize);
+gettempdir([{F, [L | RestL]} | RestF], Normalize) ->
+ case Normalize(F(L)) of
+ false ->
+ gettempdir([{F, RestL} | RestF], Normalize);
+ Dir ->
+ Dir
+ end.
+
+%% Reject unset/empty candidates; otherwise return the absolute path if
+%% it names an existing directory, false if not.
+normalize_dir(False) when False =:= false orelse False =:= "" ->
+ %% Erlang doesn't have an unsetenv, wtf.
+ false;
+normalize_dir(L) ->
+ Dir = filename:absname(L),
+ case filelib:is_dir(Dir) of
+ false ->
+ false;
+ true ->
+ Dir
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% EUnit tests. pushenv/popenv save and restore environment variables
+%% around tests that manipulate $TMPDIR/$TMP/$TEMP.
+pushenv(L) ->
+ [{K, os:getenv(K)} || K <- L].
+popenv(L) ->
+ F = fun ({K, false}) ->
+ %% Erlang doesn't have an unsetenv, wtf.
+ os:putenv(K, "");
+ ({K, V}) ->
+ os:putenv(K, V)
+ end,
+ lists:foreach(F, L).
+
+gettempdir_fallback_test() ->
+ ?assertEqual(
+ "/",
+ gettempdir([{fun gettempdir_identity/1, ["/--not-here--/"]},
+ {fun gettempdir_identity/1, ["/"]}],
+ fun normalize_dir/1)),
+ ?assertEqual(
+ "/",
+ %% simulate a true os:getenv unset env
+ gettempdir([{fun gettempdir_identity/1, [false]},
+ {fun gettempdir_identity/1, ["/"]}],
+ fun normalize_dir/1)),
+ ok.
+
+gettempdir_identity_test() ->
+ ?assertEqual(
+ "/",
+ gettempdir([{fun gettempdir_identity/1, ["/"]}], fun normalize_dir/1)),
+ ok.
+
+gettempdir_cwd_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertEqual(
+ normalize_dir(Cwd),
+ gettempdir([{fun gettempdir_cwd/1, [cwd]}], fun normalize_dir/1)),
+ ok.
+
+rngchars_test() ->
+ ?assertEqual(
+ "",
+ rngchars(0)),
+ ?assertEqual(
+ 10,
+ length(rngchars(10))),
+ ok.
+
+rngchar_test() ->
+ ?assertEqual(
+ $a,
+ rngchar(0)),
+ ?assertEqual(
+ $A,
+ rngchar(26)),
+ ?assertEqual(
+ $_,
+ rngchar(62)),
+ ok.
+
+mkdtemp_n_failonce_test() ->
+ D = mkdtemp(),
+ Path = filename:join([D, "testdir"]),
+ %% Toggle the existence of a dir so that it fails
+ %% the first time and succeeds the second.
+ F = fun () ->
+ case filelib:is_dir(Path) of
+ true ->
+ file:del_dir(Path);
+ false ->
+ file:make_dir(Path)
+ end,
+ Path
+ end,
+ try
+ %% Fails the first time
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(F, 1)),
+ %% Reset state
+ file:del_dir(Path),
+ %% Succeeds the second time
+ ?assertEqual(
+ Path,
+ mkdtemp_n(F, 2))
+ after rmtempdir(D)
+ end,
+ ok.
+
+mkdtemp_n_fail_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(fun () -> Cwd end, 1)),
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(fun () -> Cwd end, 2)),
+ ok.
+
+make_dir_fail_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertThrow(
+ {error, eexist},
+ make_dir(Cwd)),
+ ok.
+
+mkdtemp_test() ->
+ D = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D)),
+ ?assertEqual(
+ ok,
+ file:del_dir(D)),
+ ok.
+
+rmtempdir_test() ->
+ D1 = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D1)),
+ ?assertEqual(
+ ok,
+ rmtempdir(D1)),
+ D2 = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D2)),
+ ok = file:write_file(filename:join([D2, "foo"]), <<"bytes">>),
+ D3 = mkdtemp("suffix", "prefix", D2),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D3)),
+ ok = file:write_file(filename:join([D3, "foo"]), <<"bytes">>),
+ ?assertEqual(
+ ok,
+ rmtempdir(D2)),
+ ?assertEqual(
+ {error, enoent},
+ file:consult(D3)),
+ ?assertEqual(
+ {error, enoent},
+ file:consult(D2)),
+ ok.
+
+gettempdir_env_test() ->
+ Env = pushenv(["TMPDIR", "TEMP", "TMP"]),
+ FalseEnv = [{"TMPDIR", false}, {"TEMP", false}, {"TMP", false}],
+ try
+ popenv(FalseEnv),
+ popenv([{"TMPDIR", "/"}]),
+ ?assertEqual(
+ "/",
+ os:getenv("TMPDIR")),
+ ?assertEqual(
+ "/",
+ gettempdir()),
+ {ok, Cwd} = file:get_cwd(),
+ popenv(FalseEnv),
+ popenv([{"TMP", Cwd}]),
+ ?assertEqual(
+ normalize_dir(Cwd),
+ gettempdir())
+ after popenv(Env)
+ end,
+ ok.
+
+-endif.
--- /dev/null
+%% @copyright 2010 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Algorithm to convert any binary to a valid UTF-8 sequence by ignoring
+%% invalid bytes.
+
+-module(mochiutf8).
+-export([valid_utf8_bytes/1, codepoint_to_bytes/1, codepoints_to_bytes/1]).
+-export([bytes_to_codepoints/1, bytes_foldl/3, codepoint_foldl/3]).
+-export([read_codepoint/1, len/1]).
+
+%% External API
+
+%% -type unichar_low() :: 0..16#d7ff.
+%% -type unichar_high() :: 16#e000..16#10ffff.
+%% -type unichar() :: unichar_low() | unichar_high().
+
+%% -spec codepoint_to_bytes(unichar()) -> binary().
+%% @doc Convert a unicode codepoint to UTF-8 bytes.
+%% Surrogate codepoints (U+D800..U+DFFF) have no clause and crash.
+codepoint_to_bytes(C) when (C >= 16#00 andalso C =< 16#7f) ->
+ %% U+0000 - U+007F - 7 bits
+ <<C>>;
+codepoint_to_bytes(C) when (C >= 16#080 andalso C =< 16#07FF) ->
+ %% U+0080 - U+07FF - 11 bits
+ <<0:5, B1:5, B0:6>> = <<C:16>>,
+ <<2#110:3, B1:5,
+ 2#10:2, B0:6>>;
+codepoint_to_bytes(C) when (C >= 16#0800 andalso C =< 16#FFFF) andalso
+ (C < 16#D800 orelse C > 16#DFFF) ->
+ %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ <<B2:4, B1:6, B0:6>> = <<C:16>>,
+ <<2#1110:4, B2:4,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>;
+codepoint_to_bytes(C) when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ %% U+10000 - U+10FFFF - 21 bits
+ <<0:3, B3:3, B2:6, B1:6, B0:6>> = <<C:24>>,
+ <<2#11110:5, B3:3,
+ 2#10:2, B2:6,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>.
+
+%% -spec codepoints_to_bytes([unichar()]) -> binary().
+%% @doc Convert a list of codepoints to a UTF-8 binary.
+codepoints_to_bytes(L) ->
+ <<<<(codepoint_to_bytes(C))/binary>> || C <- L>>.
+
+%% -spec read_codepoint(binary()) -> {unichar(), binary(), binary()}.
+%% Decode the first UTF-8 sequence of Bin, returning {Codepoint,
+%% BytesConsumed, Rest}. Overlong encodings, surrogates, and out-of-range
+%% values fall through the inner case and crash (case_clause) — this
+%% function assumes already-validated UTF-8 input.
+read_codepoint(Bin = <<2#0:1, C:7, Rest/binary>>) ->
+ %% U+0000 - U+007F - 7 bits
+ <<B:1/binary, _/binary>> = Bin,
+ {C, B, Rest};
+read_codepoint(Bin = <<2#110:3, B1:5,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+0080 - U+07FF - 11 bits
+ case <<B1:5, B0:6>> of
+ <<C:11>> when C >= 16#80 ->
+ <<B:2/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end;
+read_codepoint(Bin = <<2#1110:4, B2:4,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ case <<B2:4, B1:6, B0:6>> of
+ <<C:16>> when (C >= 16#0800 andalso C =< 16#FFFF) andalso
+ (C < 16#D800 orelse C > 16#DFFF) ->
+ <<B:3/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end;
+read_codepoint(Bin = <<2#11110:5, B3:3,
+ 2#10:2, B2:6,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+10000 - U+10FFFF - 21 bits
+ case <<B3:3, B2:6, B1:6, B0:6>> of
+ <<C:21>> when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ <<B:4/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end.
+
+%% -spec codepoint_foldl(fun((unichar(), _) -> _), _, binary()) -> _.
+%% Left-fold F over the codepoints of a UTF-8 binary.
+codepoint_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+codepoint_foldl(F, Acc, Bin) ->
+ {C, _, Rest} = read_codepoint(Bin),
+ codepoint_foldl(F, F(C, Acc), Rest).
+
+%% -spec bytes_foldl(fun((binary(), _) -> _), _, binary()) -> _.
+%% Left-fold F over the per-codepoint byte chunks of a UTF-8 binary.
+bytes_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+bytes_foldl(F, Acc, Bin) ->
+ {_, B, Rest} = read_codepoint(Bin),
+ bytes_foldl(F, F(B, Acc), Rest).
+
+%% -spec bytes_to_codepoints(binary()) -> [unichar()].
+%% Decode a UTF-8 binary to a list of codepoints, in input order.
+bytes_to_codepoints(B) ->
+ lists:reverse(codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], B)).
+
+%% -spec len(binary()) -> non_neg_integer().
+%% Number of codepoints (not bytes) in a UTF-8 binary.
+len(<<>>) ->
+ 0;
+len(B) ->
+ {_, _, Rest} = read_codepoint(B),
+ 1 + len(Rest).
+
+%% -spec valid_utf8_bytes(B::binary()) -> binary().
+%% @doc Return only the bytes in B that represent valid UTF-8. Uses
+%% the following recursive algorithm: skip one byte if B does not
+%% follow UTF-8 syntax (a 1-4 byte encoding of some number),
+%% skip sequence of 2-4 bytes if it represents an overlong encoding
+%% or bad code point (surrogate U+D800 - U+DFFF or > U+10FFFF).
+valid_utf8_bytes(B) when is_binary(B) ->
+ binary_skip_bytes(B, invalid_utf8_indexes(B)).
+
+%% Internal API
+
+%% -spec binary_skip_bytes(binary(), [non_neg_integer()]) -> binary().
+%% @doc Return B, but skipping the 0-based indexes in L.
+%% L must be sorted ascending (as produced by invalid_utf8_indexes/1).
+binary_skip_bytes(B, []) ->
+ B;
+binary_skip_bytes(B, L) ->
+ binary_skip_bytes(B, L, 0, []).
+
+%% @private
+%% -spec binary_skip_bytes(binary(), [non_neg_integer()], non_neg_integer(), iolist()) -> binary().
+%% Once the skip list is exhausted, the untouched tail is appended whole.
+binary_skip_bytes(B, [], _N, Acc) ->
+ iolist_to_binary(lists:reverse([B | Acc]));
+binary_skip_bytes(<<_, RestB/binary>>, [N | RestL], N, Acc) ->
+ binary_skip_bytes(RestB, RestL, 1 + N, Acc);
+binary_skip_bytes(<<C, RestB/binary>>, L, N, Acc) ->
+ binary_skip_bytes(RestB, L, 1 + N, [C | Acc]).
+
+%% -spec invalid_utf8_indexes(binary()) -> [non_neg_integer()].
+%% @doc Return the 0-based indexes in B that are not valid UTF-8.
+%% A structurally-valid multibyte sequence whose decoded value is an
+%% overlong encoding, a surrogate, or out of range contributes ALL of
+%% its byte indexes; any other bad byte contributes just its own index.
+invalid_utf8_indexes(B) ->
+ invalid_utf8_indexes(B, 0, []).
+
+%% @private.
+%% -spec invalid_utf8_indexes(binary(), non_neg_integer(), [non_neg_integer()]) -> [non_neg_integer()].
+invalid_utf8_indexes(<<C, Rest/binary>>, N, Acc) when C < 16#80 ->
+ %% U+0000 - U+007F - 7 bits
+ invalid_utf8_indexes(Rest, 1 + N, Acc);
+invalid_utf8_indexes(<<C1, C2, Rest/binary>>, N, Acc)
+ when C1 band 16#E0 =:= 16#C0,
+ C2 band 16#C0 =:= 16#80 ->
+ %% U+0080 - U+07FF - 11 bits
+ case ((C1 band 16#1F) bsl 6) bor (C2 band 16#3F) of
+ C when C < 16#80 ->
+ %% Overlong encoding.
+ invalid_utf8_indexes(Rest, 2 + N, [1 + N, N | Acc]);
+ _ ->
+ %% Upper bound U+07FF does not need to be checked
+ invalid_utf8_indexes(Rest, 2 + N, Acc)
+ end;
+invalid_utf8_indexes(<<C1, C2, C3, Rest/binary>>, N, Acc)
+ when C1 band 16#F0 =:= 16#E0,
+ C2 band 16#C0 =:= 16#80,
+ C3 band 16#C0 =:= 16#80 ->
+ %% U+0800 - U+FFFF - 16 bits
+ case ((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
+ (C3 band 16#3F) of
+ C when (C < 16#800) orelse (C >= 16#D800 andalso C =< 16#DFFF) ->
+ %% Overlong encoding or surrogate.
+ invalid_utf8_indexes(Rest, 3 + N, [2 + N, 1 + N, N | Acc]);
+ _ ->
+ %% Upper bound U+FFFF does not need to be checked
+ invalid_utf8_indexes(Rest, 3 + N, Acc)
+ end;
+invalid_utf8_indexes(<<C1, C2, C3, C4, Rest/binary>>, N, Acc)
+ when C1 band 16#F8 =:= 16#F0,
+ C2 band 16#C0 =:= 16#80,
+ C3 band 16#C0 =:= 16#80,
+ C4 band 16#C0 =:= 16#80 ->
+ %% U+10000 - U+10FFFF - 21 bits
+ case ((((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
+ (C3 band 16#3F)) bsl 6) bor (C4 band 16#3F) of
+ C when (C < 16#10000) orelse (C > 16#10FFFF) ->
+ %% Overlong encoding or invalid code point.
+ invalid_utf8_indexes(Rest, 4 + N, [3 + N, 2 + N, 1 + N, N | Acc]);
+ _ ->
+ invalid_utf8_indexes(Rest, 4 + N, Acc)
+ end;
+invalid_utf8_indexes(<<_, Rest/binary>>, N, Acc) ->
+ %% Invalid char
+ invalid_utf8_indexes(Rest, 1 + N, [N | Acc]);
+invalid_utf8_indexes(<<>>, _N, Acc) ->
+ lists:reverse(Acc).
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% EUnit coverage for the UTF-8 validation and iteration primitives.
+binary_skip_bytes_test() ->
+ ?assertEqual(<<"foo">>,
+ binary_skip_bytes(<<"foo">>, [])),
+ ?assertEqual(<<"foobar">>,
+ binary_skip_bytes(<<"foo bar">>, [3])),
+ ?assertEqual(<<"foo">>,
+ binary_skip_bytes(<<"foo bar">>, [3, 4, 5, 6])),
+ ?assertEqual(<<"oo bar">>,
+ binary_skip_bytes(<<"foo bar">>, [0])),
+ ok.
+
+invalid_utf8_indexes_test() ->
+ ?assertEqual(
+ [],
+ invalid_utf8_indexes(<<"unicode snowman for you: ", 226, 152, 131>>)),
+ ?assertEqual(
+ [0],
+ invalid_utf8_indexes(<<128>>)),
+ ?assertEqual(
+ [57,59,60,64,66,67],
+ invalid_utf8_indexes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
+ 167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
+ ok.
+
+codepoint_to_bytes_test() ->
+ %% U+0000 - U+007F - 7 bits
+ %% U+0080 - U+07FF - 11 bits
+ %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ %% U+10000 - U+10FFFF - 21 bits
+ ?assertEqual(
+ <<"a">>,
+ codepoint_to_bytes($a)),
+ ?assertEqual(
+ <<16#c2, 16#80>>,
+ codepoint_to_bytes(16#80)),
+ ?assertEqual(
+ <<16#df, 16#bf>>,
+ codepoint_to_bytes(16#07ff)),
+ ?assertEqual(
+ <<16#ef, 16#bf, 16#bf>>,
+ codepoint_to_bytes(16#ffff)),
+ ?assertEqual(
+ <<16#f4, 16#8f, 16#bf, 16#bf>>,
+ codepoint_to_bytes(16#10ffff)),
+ ok.
+
+bytes_foldl_test() ->
+ ?assertEqual(
+ <<"abc">>,
+ bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>, <<"abc">>)),
+ ?assertEqual(
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>,
+ bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>,
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+bytes_to_codepoints_test() ->
+ ?assertEqual(
+ "abc" ++ [16#2603, 16#4e2d, 16#85, 16#10ffff],
+ bytes_to_codepoints(<<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+codepoint_foldl_test() ->
+ ?assertEqual(
+ "cba",
+ codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], <<"abc">>)),
+ ?assertEqual(
+ [16#10ffff, 16#85, 16#4e2d, 16#2603 | "cba"],
+ codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [],
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+len_test() ->
+ ?assertEqual(
+ 29,
+ len(<<"unicode snowman for you: ", 226, 152, 131, 228, 184, 173, 194, 133, 244, 143, 191, 191>>)),
+ ok.
+
+codepoints_to_bytes_test() ->
+ ?assertEqual(
+ iolist_to_binary(lists:map(fun codepoint_to_bytes/1, lists:seq(1, 1000))),
+ codepoints_to_bytes(lists:seq(1, 1000))),
+ ok.
+
+valid_utf8_bytes_test() ->
+ ?assertEqual(
+ <<"invalid U+11ffff: ">>,
+ valid_utf8_bytes(<<"invalid U+11ffff: ", 244, 159, 191, 191>>)),
+ ?assertEqual(
+ <<"U+10ffff: ", 244, 143, 191, 191>>,
+ valid_utf8_bytes(<<"U+10ffff: ", 244, 143, 191, 191>>)),
+ ?assertEqual(
+ <<"overlong 2-byte encoding (a): ">>,
+ valid_utf8_bytes(<<"overlong 2-byte encoding (a): ", 2#11000001, 2#10100001>>)),
+ ?assertEqual(
+ <<"overlong 2-byte encoding (!): ">>,
+ valid_utf8_bytes(<<"overlong 2-byte encoding (!): ", 2#11000000, 2#10100001>>)),
+ ?assertEqual(
+ <<"mu: ", 194, 181>>,
+ valid_utf8_bytes(<<"mu: ", 194, 181>>)),
+ ?assertEqual(
+ <<"bad coding bytes: ">>,
+ valid_utf8_bytes(<<"bad coding bytes: ", 2#10011111, 2#10111111, 2#11111111>>)),
+ ?assertEqual(
+ <<"low surrogate (unpaired): ">>,
+ valid_utf8_bytes(<<"low surrogate (unpaired): ", 237, 176, 128>>)),
+ ?assertEqual(
+ <<"high surrogate (unpaired): ">>,
+ valid_utf8_bytes(<<"high surrogate (unpaired): ", 237, 191, 191>>)),
+ ?assertEqual(
+ <<"unicode snowman for you: ", 226, 152, 131>>,
+ valid_utf8_bytes(<<"unicode snowman for you: ", 226, 152, 131>>)),
+ ?assertEqual(
+ <<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (AISPW))">>,
+ valid_utf8_bytes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
+ 167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
+ ok.
+
+-endif.
--- /dev/null
+%% This is generated from src/mochiweb.app.src
+%% OTP application resource file for the MochiWeb HTTP server.
+{application, mochiweb,
+ [{description, "MochiMedia Web Server"},
+  {vsn, "2.7.0"},
+  %% NOTE(review): modules is empty here; presumably populated by the
+  %% build from the compiled beams -- confirm against the build scripts.
+  {modules, []},
+  %% No registered process names are reserved by this application.
+  {registered, []},
+  {env, []},
+  %% Applications that must be running before this one starts.
+  {applications, [kernel, stdlib, inets, xmerl]}]}.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Start and stop the MochiWeb server.
+
+-module(mochiweb).
+-author('bob@mochimedia.com').
+
+-export([new_request/1, new_response/1]).
+-export([all_loaded/0, all_loaded/1, reload/0]).
+-export([ensure_started/1]).
+
+%% @doc Re-load every module that came from this application's ebin
+%% directory; returns the c:l/1 result for each module.
+reload() ->
+    lists:map(fun c:l/1, all_loaded()).
+
+%% @doc List the modules loaded from the same directory as this module.
+all_loaded() ->
+    EbinDir = filename:dirname(code:which(?MODULE)),
+    all_loaded(EbinDir).
+
+%% @doc Return the modules whose beam files live under directory Base.
+%% An atom Base (e.g. cover_compiled or preloaded from code:which/1)
+%% cannot name a directory, so it yields the empty list.
+all_loaded(Base) when is_atom(Base) ->
+    [];
+all_loaded(Base) ->
+    Prefix = Base ++ "/",
+    FromBase = [Mod || {Mod, Path} <- code:all_loaded(),
+                       not is_atom(Path),
+                       lists:prefix(Prefix, Path)],
+    %% The original foldl accumulated with cons, so preserve its order:
+    %% the reverse of code:all_loaded/0.
+    lists:reverse(FromBase).
+
+
+%% @spec new_request({Socket, Request, Headers}) -> MochiWebRequest
+%% @doc Return a mochiweb_request data structure.
+new_request({Socket, {Method, {abs_path, Path}, Version}, Headers}) ->
+    MochiHeaders = mochiweb_headers:make(Headers),
+    mochiweb_request:new(Socket, Method, Path, Version, MochiHeaders);
+%% The absoluteURI form probably doesn't "exist" in practice.
+new_request({Socket, {Method, {absoluteURI, _Protocol, _Host, _Port, Path},
+                      Version}, Headers}) ->
+    MochiHeaders = mochiweb_headers:make(Headers),
+    mochiweb_request:new(Socket, Method, Path, Version, MochiHeaders);
+%% Request-URI is "*"
+%% From http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+new_request({Socket, {Method, '*' = Path, Version}, Headers}) ->
+    MochiHeaders = mochiweb_headers:make(Headers),
+    mochiweb_request:new(Socket, Method, Path, Version, MochiHeaders).
+
+%% @spec new_response({Request, integer(), Headers}) -> MochiWebResponse
+%% @doc Return a mochiweb_response data structure.
+new_response({Request, Code, Headers}) ->
+    MochiHeaders = mochiweb_headers:make(Headers),
+    mochiweb_response:new(Request, Code, MochiHeaders).
+
+%% @spec ensure_started(App::atom()) -> ok
+%% @doc Start the given App if it has not been started already.
+ensure_started(App) ->
+    %% Any result other than ok / already_started crashes deliberately
+    %% (case_clause) so startup failures are not silently ignored.
+    StartResult = application:start(App),
+    case StartResult of
+        ok ->
+            ok;
+        {error, {already_started, App}} ->
+            ok
+    end.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc MochiWeb acceptor.
+
+-module(mochiweb_acceptor).
+-author('bob@mochimedia.com').
+
+-include("internal.hrl").
+
+-export([start_link/3, init/3]).
+
+%% Spawn a linked acceptor process running init/3 via proc_lib.
+start_link(Server, Listen, Loop) ->
+    AcceptorArgs = [Server, Listen, Loop],
+    proc_lib:spawn_link(?MODULE, init, AcceptorArgs).
+
+%% @doc Accept one connection on Listen, report the accept latency to
+%% Server, and hand the socket to Loop.  Timeouts retry; listen-socket
+%% close and SSL accept failure exit quietly; anything else is logged
+%% and exits abnormally.
+init(Server, Listen, Loop) ->
+    T1 = os:timestamp(),
+    %% Use try/catch instead of old-style `catch` so exceptions raised
+    %% by accept are tagged explicitly rather than being conflated with
+    %% ordinary return values.  The dispatch `case` stays outside the
+    %% try so the retry on timeout remains a tail call.
+    Result = try
+                 mochiweb_socket:accept(Listen)
+             catch
+                 Class:Reason ->
+                     {'EXIT', {Class, Reason}}
+             end,
+    case Result of
+        {ok, Socket} ->
+            gen_server:cast(Server, {accepted, self(), timer:now_diff(os:timestamp(), T1)}),
+            call_loop(Loop, Socket);
+        {error, closed} ->
+            %% The listen socket went away: the server is shutting down.
+            exit(normal);
+        {error, timeout} ->
+            %% Accept timed out; keep waiting for a connection.
+            init(Server, Listen, Loop);
+        {error, esslaccept} ->
+            %% An SSL handshake failure is not fatal to the acceptor.
+            exit(normal);
+        Other ->
+            error_logger:error_report(
+              [{application, mochiweb},
+               "Accept failed error",
+               lists:flatten(io_lib:format("~p", [Other]))]),
+            exit({error, accept_failed})
+    end.
+
+%% Dispatch the accepted Socket to the configured handler: an {M, F}
+%% pair, an {M, F, ExtraArgs} triple (Socket is prepended), or a fun of
+%% arity 1.
+call_loop({Mod, Fun}, Socket) ->
+    erlang:apply(Mod, Fun, [Socket]);
+call_loop({Mod, Fun, ExtraArgs}, Socket) ->
+    erlang:apply(Mod, Fun, [Socket | ExtraArgs]);
+call_loop(LoopFun, Socket) ->
+    LoopFun(Socket).
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+-module(mochiweb_base64url).
+-export([encode/1, decode/1]).
+%% @doc URL and filename safe base64 variant with no padding,
+%% also known as "base64url" per RFC 4648.
+%%
+%% This differs from base64 in the following ways:
+%% '-' is used in place of '+' (62),
+%% '_' is used in place of '/' (63),
+%% padding is implicit rather than explicit ('=').
+
+%% @doc Encode iodata as URL-safe base64 with implicit padding.
+%% The spec uses iodata() (= iolist() | binary()): the original
+%% iolist() spec excluded the bare-binary argument that the first
+%% clause explicitly accepts.
+-spec encode(iodata()) -> binary().
+encode(B) when is_binary(B) ->
+    encode_binary(B);
+encode(L) when is_list(L) ->
+    encode_binary(iolist_to_binary(L)).
+
+%% @doc Decode URL-safe base64 iodata to the original bytes.
+%% The spec uses iodata() (= iolist() | binary()): the original
+%% iolist() spec excluded the bare-binary argument that the first
+%% clause explicitly accepts.
+-spec decode(iodata()) -> binary().
+decode(B) when is_binary(B) ->
+    decode_binary(B);
+decode(L) when is_list(L) ->
+    decode_binary(iolist_to_binary(L)).
+
+%% Implementation, derived from stdlib base64.erl
+
+%% One-based decode map.
+%% A 256-slot tuple indexed by byte value + 1 (element/2 is 1-based).
+%% Each entry is either the 6-bit value of a base64url alphabet
+%% character, 'ws' for skippable ASCII whitespace (tab, LF, CR, space),
+%% or 'bad' for any byte that may not appear in base64url input.
+-define(DECODE_MAP,
+ {bad,bad,bad,bad,bad,bad,bad,bad,ws,ws,bad,bad,ws,bad,bad, %1-15
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad, %16-31
+ ws,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,62,bad,bad, %32-47
+ 52,53,54,55,56,57,58,59,60,61,bad,bad,bad,bad,bad,bad, %48-63
+ bad,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14, %64-79
+ 15,16,17,18,19,20,21,22,23,24,25,bad,bad,bad,bad,63, %80-95
+ bad,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40, %96-111
+ 41,42,43,44,45,46,47,48,49,50,51,bad,bad,bad,bad,bad, %112-127
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,
+ bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad,bad}).
+
+%% Encode a binary as base64url: whole 3-byte groups are encoded as
+%% 6-bit chunks; a 1- or 2-byte remainder is shifted into place and
+%% emitted without '=' padding.
+encode_binary(Bin) ->
+    FullGroups = 3 * (byte_size(Bin) div 3),
+    <<Body:FullGroups/binary, Remainder/binary>> = Bin,
+    Encoded = << <<(b64e(Sextet)):8>> || <<Sextet:6>> <= Body >>,
+    case Remainder of
+        <<>> ->
+            Encoded;
+        <<A:6, B:2>> ->
+            <<Encoded/binary, (b64e(A)):8, (b64e(B bsl 4)):8>>;
+        <<A:6, B:6, C:4>> ->
+            <<Encoded/binary, (b64e(A)):8, (b64e(B)):8, (b64e(C bsl 2)):8>>
+    end.
+
+%% Decode base64url bytes, skipping ASCII whitespace.  Each character
+%% yields 6 bits; any bits left over from the implicit padding (the
+%% non-zero remainder modulo 8) are dropped from the tail.
+decode_binary(Bin) ->
+    Bits = << <<(b64d(C)):6>> || <<C>> <= Bin,
+              not (C =:= $\t orelse C =:= $\s orelse
+                   C =:= $\r orelse C =:= $\n) >>,
+    case bit_size(Bits) rem 8 of
+        0 ->
+            Bits;
+        Leftover ->
+            WholeBytes = byte_size(Bits) - 1,
+            <<Decoded:WholeBytes/bytes, _:Leftover>> = Bits,
+            Decoded
+    end.
+
+%% accessors
+
+%% Map a 6-bit value (0..63) to its base64url alphabet byte.
+%% binary:at/2 is 0-based, so no index adjustment is needed;
+%% out-of-range input fails with badarg just as element/2 did.
+b64e(Index) ->
+    binary:at(<<"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                "abcdefghijklmnopqrstuvwxyz"
+                "0123456789-_">>,
+              Index).
+
+%% Look up the 6-bit value for byte Char.  Whitespace and invalid bytes
+%% map to the atoms 'ws'/'bad', which b64d_ok/1 rejects with a
+%% function_clause error.
+b64d(Char) ->
+    b64d_ok(element(Char, ?DECODE_MAP)).
+
+b64d_ok(Value) when is_integer(Value) -> Value.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Converts HTML 5 charrefs and entities to codepoints (or lists of code points).
+-module(mochiweb_charref).
+-export([charref/1]).
+
+%% External API.
+
+%% @doc Convert a decimal charref, hex charref, or html entity to a unicode
+%% codepoint, or return undefined on failure.
+%% The input should not include an ampersand or semicolon.
+%% charref("#38") = 38, charref("#x26") = 38, charref("amp") = 38.
+%% -spec charref(binary() | string()) -> integer() | [integer()] | undefined.
+charref(Bin) when is_binary(Bin) ->
+    charref(binary_to_list(Bin));
+charref([$#, X | Digits]) when X =:= $x orelse X =:= $X ->
+    numeric_charref(Digits, 16);
+charref([$# | Digits]) ->
+    numeric_charref(Digits, 10);
+charref(Name) ->
+    entity(Name).
+
+%% Parse Digits in the given Base, returning undefined for a malformed
+%% numeric character reference.
+numeric_charref(Digits, Base) ->
+    try
+        erlang:list_to_integer(Digits, Base)
+    catch
+        error:badarg -> undefined
+    end.
+
+%% Internal API.
+
+%% [2011-10-14] Generated from:
+%% http://www.w3.org/TR/html5/named-character-references.html
+
+entity("AElig") -> 16#000C6;
+entity("AMP") -> 16#00026;
+entity("Aacute") -> 16#000C1;
+entity("Abreve") -> 16#00102;
+entity("Acirc") -> 16#000C2;
+entity("Acy") -> 16#00410;
+entity("Afr") -> 16#1D504;
+entity("Agrave") -> 16#000C0;
+entity("Alpha") -> 16#00391;
+entity("Amacr") -> 16#00100;
+entity("And") -> 16#02A53;
+entity("Aogon") -> 16#00104;
+entity("Aopf") -> 16#1D538;
+entity("ApplyFunction") -> 16#02061;
+entity("Aring") -> 16#000C5;
+entity("Ascr") -> 16#1D49C;
+entity("Assign") -> 16#02254;
+entity("Atilde") -> 16#000C3;
+entity("Auml") -> 16#000C4;
+entity("Backslash") -> 16#02216;
+entity("Barv") -> 16#02AE7;
+entity("Barwed") -> 16#02306;
+entity("Bcy") -> 16#00411;
+entity("Because") -> 16#02235;
+entity("Bernoullis") -> 16#0212C;
+entity("Beta") -> 16#00392;
+entity("Bfr") -> 16#1D505;
+entity("Bopf") -> 16#1D539;
+entity("Breve") -> 16#002D8;
+entity("Bscr") -> 16#0212C;
+entity("Bumpeq") -> 16#0224E;
+entity("CHcy") -> 16#00427;
+entity("COPY") -> 16#000A9;
+entity("Cacute") -> 16#00106;
+entity("Cap") -> 16#022D2;
+entity("CapitalDifferentialD") -> 16#02145;
+entity("Cayleys") -> 16#0212D;
+entity("Ccaron") -> 16#0010C;
+entity("Ccedil") -> 16#000C7;
+entity("Ccirc") -> 16#00108;
+entity("Cconint") -> 16#02230;
+entity("Cdot") -> 16#0010A;
+entity("Cedilla") -> 16#000B8;
+entity("CenterDot") -> 16#000B7;
+entity("Cfr") -> 16#0212D;
+entity("Chi") -> 16#003A7;
+entity("CircleDot") -> 16#02299;
+entity("CircleMinus") -> 16#02296;
+entity("CirclePlus") -> 16#02295;
+entity("CircleTimes") -> 16#02297;
+entity("ClockwiseContourIntegral") -> 16#02232;
+entity("CloseCurlyDoubleQuote") -> 16#0201D;
+entity("CloseCurlyQuote") -> 16#02019;
+entity("Colon") -> 16#02237;
+entity("Colone") -> 16#02A74;
+entity("Congruent") -> 16#02261;
+entity("Conint") -> 16#0222F;
+entity("ContourIntegral") -> 16#0222E;
+entity("Copf") -> 16#02102;
+entity("Coproduct") -> 16#02210;
+entity("CounterClockwiseContourIntegral") -> 16#02233;
+entity("Cross") -> 16#02A2F;
+entity("Cscr") -> 16#1D49E;
+entity("Cup") -> 16#022D3;
+entity("CupCap") -> 16#0224D;
+entity("DD") -> 16#02145;
+entity("DDotrahd") -> 16#02911;
+entity("DJcy") -> 16#00402;
+entity("DScy") -> 16#00405;
+entity("DZcy") -> 16#0040F;
+entity("Dagger") -> 16#02021;
+entity("Darr") -> 16#021A1;
+entity("Dashv") -> 16#02AE4;
+entity("Dcaron") -> 16#0010E;
+entity("Dcy") -> 16#00414;
+entity("Del") -> 16#02207;
+entity("Delta") -> 16#00394;
+entity("Dfr") -> 16#1D507;
+entity("DiacriticalAcute") -> 16#000B4;
+entity("DiacriticalDot") -> 16#002D9;
+entity("DiacriticalDoubleAcute") -> 16#002DD;
+entity("DiacriticalGrave") -> 16#00060;
+entity("DiacriticalTilde") -> 16#002DC;
+entity("Diamond") -> 16#022C4;
+entity("DifferentialD") -> 16#02146;
+entity("Dopf") -> 16#1D53B;
+entity("Dot") -> 16#000A8;
+entity("DotDot") -> 16#020DC;
+entity("DotEqual") -> 16#02250;
+entity("DoubleContourIntegral") -> 16#0222F;
+entity("DoubleDot") -> 16#000A8;
+entity("DoubleDownArrow") -> 16#021D3;
+entity("DoubleLeftArrow") -> 16#021D0;
+entity("DoubleLeftRightArrow") -> 16#021D4;
+entity("DoubleLeftTee") -> 16#02AE4;
+entity("DoubleLongLeftArrow") -> 16#027F8;
+entity("DoubleLongLeftRightArrow") -> 16#027FA;
+entity("DoubleLongRightArrow") -> 16#027F9;
+entity("DoubleRightArrow") -> 16#021D2;
+entity("DoubleRightTee") -> 16#022A8;
+entity("DoubleUpArrow") -> 16#021D1;
+entity("DoubleUpDownArrow") -> 16#021D5;
+entity("DoubleVerticalBar") -> 16#02225;
+entity("DownArrow") -> 16#02193;
+entity("DownArrowBar") -> 16#02913;
+entity("DownArrowUpArrow") -> 16#021F5;
+entity("DownBreve") -> 16#00311;
+entity("DownLeftRightVector") -> 16#02950;
+entity("DownLeftTeeVector") -> 16#0295E;
+entity("DownLeftVector") -> 16#021BD;
+entity("DownLeftVectorBar") -> 16#02956;
+entity("DownRightTeeVector") -> 16#0295F;
+entity("DownRightVector") -> 16#021C1;
+entity("DownRightVectorBar") -> 16#02957;
+entity("DownTee") -> 16#022A4;
+entity("DownTeeArrow") -> 16#021A7;
+entity("Downarrow") -> 16#021D3;
+entity("Dscr") -> 16#1D49F;
+entity("Dstrok") -> 16#00110;
+entity("ENG") -> 16#0014A;
+entity("ETH") -> 16#000D0;
+entity("Eacute") -> 16#000C9;
+entity("Ecaron") -> 16#0011A;
+entity("Ecirc") -> 16#000CA;
+entity("Ecy") -> 16#0042D;
+entity("Edot") -> 16#00116;
+entity("Efr") -> 16#1D508;
+entity("Egrave") -> 16#000C8;
+entity("Element") -> 16#02208;
+entity("Emacr") -> 16#00112;
+entity("EmptySmallSquare") -> 16#025FB;
+entity("EmptyVerySmallSquare") -> 16#025AB;
+entity("Eogon") -> 16#00118;
+entity("Eopf") -> 16#1D53C;
+entity("Epsilon") -> 16#00395;
+entity("Equal") -> 16#02A75;
+entity("EqualTilde") -> 16#02242;
+entity("Equilibrium") -> 16#021CC;
+entity("Escr") -> 16#02130;
+entity("Esim") -> 16#02A73;
+entity("Eta") -> 16#00397;
+entity("Euml") -> 16#000CB;
+entity("Exists") -> 16#02203;
+entity("ExponentialE") -> 16#02147;
+entity("Fcy") -> 16#00424;
+entity("Ffr") -> 16#1D509;
+entity("FilledSmallSquare") -> 16#025FC;
+entity("FilledVerySmallSquare") -> 16#025AA;
+entity("Fopf") -> 16#1D53D;
+entity("ForAll") -> 16#02200;
+entity("Fouriertrf") -> 16#02131;
+entity("Fscr") -> 16#02131;
+entity("GJcy") -> 16#00403;
+entity("GT") -> 16#0003E;
+entity("Gamma") -> 16#00393;
+entity("Gammad") -> 16#003DC;
+entity("Gbreve") -> 16#0011E;
+entity("Gcedil") -> 16#00122;
+entity("Gcirc") -> 16#0011C;
+entity("Gcy") -> 16#00413;
+entity("Gdot") -> 16#00120;
+entity("Gfr") -> 16#1D50A;
+entity("Gg") -> 16#022D9;
+entity("Gopf") -> 16#1D53E;
+entity("GreaterEqual") -> 16#02265;
+entity("GreaterEqualLess") -> 16#022DB;
+entity("GreaterFullEqual") -> 16#02267;
+entity("GreaterGreater") -> 16#02AA2;
+entity("GreaterLess") -> 16#02277;
+entity("GreaterSlantEqual") -> 16#02A7E;
+entity("GreaterTilde") -> 16#02273;
+entity("Gscr") -> 16#1D4A2;
+entity("Gt") -> 16#0226B;
+entity("HARDcy") -> 16#0042A;
+entity("Hacek") -> 16#002C7;
+entity("Hat") -> 16#0005E;
+entity("Hcirc") -> 16#00124;
+entity("Hfr") -> 16#0210C;
+entity("HilbertSpace") -> 16#0210B;
+entity("Hopf") -> 16#0210D;
+entity("HorizontalLine") -> 16#02500;
+entity("Hscr") -> 16#0210B;
+entity("Hstrok") -> 16#00126;
+entity("HumpDownHump") -> 16#0224E;
+entity("HumpEqual") -> 16#0224F;
+entity("IEcy") -> 16#00415;
+entity("IJlig") -> 16#00132;
+entity("IOcy") -> 16#00401;
+entity("Iacute") -> 16#000CD;
+entity("Icirc") -> 16#000CE;
+entity("Icy") -> 16#00418;
+entity("Idot") -> 16#00130;
+entity("Ifr") -> 16#02111;
+entity("Igrave") -> 16#000CC;
+entity("Im") -> 16#02111;
+entity("Imacr") -> 16#0012A;
+entity("ImaginaryI") -> 16#02148;
+entity("Implies") -> 16#021D2;
+entity("Int") -> 16#0222C;
+entity("Integral") -> 16#0222B;
+entity("Intersection") -> 16#022C2;
+entity("InvisibleComma") -> 16#02063;
+entity("InvisibleTimes") -> 16#02062;
+entity("Iogon") -> 16#0012E;
+entity("Iopf") -> 16#1D540;
+entity("Iota") -> 16#00399;
+entity("Iscr") -> 16#02110;
+entity("Itilde") -> 16#00128;
+entity("Iukcy") -> 16#00406;
+entity("Iuml") -> 16#000CF;
+entity("Jcirc") -> 16#00134;
+entity("Jcy") -> 16#00419;
+entity("Jfr") -> 16#1D50D;
+entity("Jopf") -> 16#1D541;
+entity("Jscr") -> 16#1D4A5;
+entity("Jsercy") -> 16#00408;
+entity("Jukcy") -> 16#00404;
+entity("KHcy") -> 16#00425;
+entity("KJcy") -> 16#0040C;
+entity("Kappa") -> 16#0039A;
+entity("Kcedil") -> 16#00136;
+entity("Kcy") -> 16#0041A;
+entity("Kfr") -> 16#1D50E;
+entity("Kopf") -> 16#1D542;
+entity("Kscr") -> 16#1D4A6;
+entity("LJcy") -> 16#00409;
+entity("LT") -> 16#0003C;
+entity("Lacute") -> 16#00139;
+entity("Lambda") -> 16#0039B;
+entity("Lang") -> 16#027EA;
+entity("Laplacetrf") -> 16#02112;
+entity("Larr") -> 16#0219E;
+entity("Lcaron") -> 16#0013D;
+entity("Lcedil") -> 16#0013B;
+entity("Lcy") -> 16#0041B;
+entity("LeftAngleBracket") -> 16#027E8;
+entity("LeftArrow") -> 16#02190;
+entity("LeftArrowBar") -> 16#021E4;
+entity("LeftArrowRightArrow") -> 16#021C6;
+entity("LeftCeiling") -> 16#02308;
+entity("LeftDoubleBracket") -> 16#027E6;
+entity("LeftDownTeeVector") -> 16#02961;
+entity("LeftDownVector") -> 16#021C3;
+entity("LeftDownVectorBar") -> 16#02959;
+entity("LeftFloor") -> 16#0230A;
+entity("LeftRightArrow") -> 16#02194;
+entity("LeftRightVector") -> 16#0294E;
+entity("LeftTee") -> 16#022A3;
+entity("LeftTeeArrow") -> 16#021A4;
+entity("LeftTeeVector") -> 16#0295A;
+entity("LeftTriangle") -> 16#022B2;
+entity("LeftTriangleBar") -> 16#029CF;
+entity("LeftTriangleEqual") -> 16#022B4;
+entity("LeftUpDownVector") -> 16#02951;
+entity("LeftUpTeeVector") -> 16#02960;
+entity("LeftUpVector") -> 16#021BF;
+entity("LeftUpVectorBar") -> 16#02958;
+entity("LeftVector") -> 16#021BC;
+entity("LeftVectorBar") -> 16#02952;
+entity("Leftarrow") -> 16#021D0;
+entity("Leftrightarrow") -> 16#021D4;
+entity("LessEqualGreater") -> 16#022DA;
+entity("LessFullEqual") -> 16#02266;
+entity("LessGreater") -> 16#02276;
+entity("LessLess") -> 16#02AA1;
+entity("LessSlantEqual") -> 16#02A7D;
+entity("LessTilde") -> 16#02272;
+entity("Lfr") -> 16#1D50F;
+entity("Ll") -> 16#022D8;
+entity("Lleftarrow") -> 16#021DA;
+entity("Lmidot") -> 16#0013F;
+entity("LongLeftArrow") -> 16#027F5;
+entity("LongLeftRightArrow") -> 16#027F7;
+entity("LongRightArrow") -> 16#027F6;
+entity("Longleftarrow") -> 16#027F8;
+entity("Longleftrightarrow") -> 16#027FA;
+entity("Longrightarrow") -> 16#027F9;
+entity("Lopf") -> 16#1D543;
+entity("LowerLeftArrow") -> 16#02199;
+entity("LowerRightArrow") -> 16#02198;
+entity("Lscr") -> 16#02112;
+entity("Lsh") -> 16#021B0;
+entity("Lstrok") -> 16#00141;
+entity("Lt") -> 16#0226A;
+entity("Map") -> 16#02905;
+entity("Mcy") -> 16#0041C;
+entity("MediumSpace") -> 16#0205F;
+entity("Mellintrf") -> 16#02133;
+entity("Mfr") -> 16#1D510;
+entity("MinusPlus") -> 16#02213;
+entity("Mopf") -> 16#1D544;
+entity("Mscr") -> 16#02133;
+entity("Mu") -> 16#0039C;
+entity("NJcy") -> 16#0040A;
+entity("Nacute") -> 16#00143;
+entity("Ncaron") -> 16#00147;
+entity("Ncedil") -> 16#00145;
+entity("Ncy") -> 16#0041D;
+entity("NegativeMediumSpace") -> 16#0200B;
+entity("NegativeThickSpace") -> 16#0200B;
+entity("NegativeThinSpace") -> 16#0200B;
+entity("NegativeVeryThinSpace") -> 16#0200B;
+entity("NestedGreaterGreater") -> 16#0226B;
+entity("NestedLessLess") -> 16#0226A;
+entity("NewLine") -> 16#0000A;
+entity("Nfr") -> 16#1D511;
+entity("NoBreak") -> 16#02060;
+entity("NonBreakingSpace") -> 16#000A0;
+entity("Nopf") -> 16#02115;
+entity("Not") -> 16#02AEC;
+entity("NotCongruent") -> 16#02262;
+entity("NotCupCap") -> 16#0226D;
+entity("NotDoubleVerticalBar") -> 16#02226;
+entity("NotElement") -> 16#02209;
+entity("NotEqual") -> 16#02260;
+entity("NotEqualTilde") -> [16#02242, 16#00338];
+entity("NotExists") -> 16#02204;
+entity("NotGreater") -> 16#0226F;
+entity("NotGreaterEqual") -> 16#02271;
+entity("NotGreaterFullEqual") -> [16#02267, 16#00338];
+entity("NotGreaterGreater") -> [16#0226B, 16#00338];
+entity("NotGreaterLess") -> 16#02279;
+entity("NotGreaterSlantEqual") -> [16#02A7E, 16#00338];
+entity("NotGreaterTilde") -> 16#02275;
+entity("NotHumpDownHump") -> [16#0224E, 16#00338];
+entity("NotHumpEqual") -> [16#0224F, 16#00338];
+entity("NotLeftTriangle") -> 16#022EA;
+entity("NotLeftTriangleBar") -> [16#029CF, 16#00338];
+entity("NotLeftTriangleEqual") -> 16#022EC;
+entity("NotLess") -> 16#0226E;
+entity("NotLessEqual") -> 16#02270;
+entity("NotLessGreater") -> 16#02278;
+entity("NotLessLess") -> [16#0226A, 16#00338];
+entity("NotLessSlantEqual") -> [16#02A7D, 16#00338];
+entity("NotLessTilde") -> 16#02274;
+entity("NotNestedGreaterGreater") -> [16#02AA2, 16#00338];
+entity("NotNestedLessLess") -> [16#02AA1, 16#00338];
+entity("NotPrecedes") -> 16#02280;
+entity("NotPrecedesEqual") -> [16#02AAF, 16#00338];
+entity("NotPrecedesSlantEqual") -> 16#022E0;
+entity("NotReverseElement") -> 16#0220C;
+entity("NotRightTriangle") -> 16#022EB;
+entity("NotRightTriangleBar") -> [16#029D0, 16#00338];
+entity("NotRightTriangleEqual") -> 16#022ED;
+entity("NotSquareSubset") -> [16#0228F, 16#00338];
+entity("NotSquareSubsetEqual") -> 16#022E2;
+entity("NotSquareSuperset") -> [16#02290, 16#00338];
+entity("NotSquareSupersetEqual") -> 16#022E3;
+entity("NotSubset") -> [16#02282, 16#020D2];
+entity("NotSubsetEqual") -> 16#02288;
+entity("NotSucceeds") -> 16#02281;
+entity("NotSucceedsEqual") -> [16#02AB0, 16#00338];
+entity("NotSucceedsSlantEqual") -> 16#022E1;
+entity("NotSucceedsTilde") -> [16#0227F, 16#00338];
+entity("NotSuperset") -> [16#02283, 16#020D2];
+entity("NotSupersetEqual") -> 16#02289;
+entity("NotTilde") -> 16#02241;
+entity("NotTildeEqual") -> 16#02244;
+entity("NotTildeFullEqual") -> 16#02247;
+entity("NotTildeTilde") -> 16#02249;
+entity("NotVerticalBar") -> 16#02224;
+entity("Nscr") -> 16#1D4A9;
+entity("Ntilde") -> 16#000D1;
+entity("Nu") -> 16#0039D;
+entity("OElig") -> 16#00152;
+entity("Oacute") -> 16#000D3;
+entity("Ocirc") -> 16#000D4;
+entity("Ocy") -> 16#0041E;
+entity("Odblac") -> 16#00150;
+entity("Ofr") -> 16#1D512;
+entity("Ograve") -> 16#000D2;
+entity("Omacr") -> 16#0014C;
+entity("Omega") -> 16#003A9;
+entity("Omicron") -> 16#0039F;
+entity("Oopf") -> 16#1D546;
+entity("OpenCurlyDoubleQuote") -> 16#0201C;
+entity("OpenCurlyQuote") -> 16#02018;
+entity("Or") -> 16#02A54;
+entity("Oscr") -> 16#1D4AA;
+entity("Oslash") -> 16#000D8;
+entity("Otilde") -> 16#000D5;
+entity("Otimes") -> 16#02A37;
+entity("Ouml") -> 16#000D6;
+entity("OverBar") -> 16#0203E;
+entity("OverBrace") -> 16#023DE;
+entity("OverBracket") -> 16#023B4;
+entity("OverParenthesis") -> 16#023DC;
+entity("PartialD") -> 16#02202;
+entity("Pcy") -> 16#0041F;
+entity("Pfr") -> 16#1D513;
+entity("Phi") -> 16#003A6;
+entity("Pi") -> 16#003A0;
+entity("PlusMinus") -> 16#000B1;
+entity("Poincareplane") -> 16#0210C;
+entity("Popf") -> 16#02119;
+entity("Pr") -> 16#02ABB;
+entity("Precedes") -> 16#0227A;
+entity("PrecedesEqual") -> 16#02AAF;
+entity("PrecedesSlantEqual") -> 16#0227C;
+entity("PrecedesTilde") -> 16#0227E;
+entity("Prime") -> 16#02033;
+entity("Product") -> 16#0220F;
+entity("Proportion") -> 16#02237;
+entity("Proportional") -> 16#0221D;
+entity("Pscr") -> 16#1D4AB;
+entity("Psi") -> 16#003A8;
+entity("QUOT") -> 16#00022;
+entity("Qfr") -> 16#1D514;
+entity("Qopf") -> 16#0211A;
+entity("Qscr") -> 16#1D4AC;
+entity("RBarr") -> 16#02910;
+entity("REG") -> 16#000AE;
+entity("Racute") -> 16#00154;
+entity("Rang") -> 16#027EB;
+entity("Rarr") -> 16#021A0;
+entity("Rarrtl") -> 16#02916;
+entity("Rcaron") -> 16#00158;
+entity("Rcedil") -> 16#00156;
+entity("Rcy") -> 16#00420;
+entity("Re") -> 16#0211C;
+entity("ReverseElement") -> 16#0220B;
+entity("ReverseEquilibrium") -> 16#021CB;
+entity("ReverseUpEquilibrium") -> 16#0296F;
+entity("Rfr") -> 16#0211C;
+entity("Rho") -> 16#003A1;
+entity("RightAngleBracket") -> 16#027E9;
+entity("RightArrow") -> 16#02192;
+entity("RightArrowBar") -> 16#021E5;
+entity("RightArrowLeftArrow") -> 16#021C4;
+entity("RightCeiling") -> 16#02309;
+entity("RightDoubleBracket") -> 16#027E7;
+entity("RightDownTeeVector") -> 16#0295D;
+entity("RightDownVector") -> 16#021C2;
+entity("RightDownVectorBar") -> 16#02955;
+entity("RightFloor") -> 16#0230B;
+entity("RightTee") -> 16#022A2;
+entity("RightTeeArrow") -> 16#021A6;
+entity("RightTeeVector") -> 16#0295B;
+entity("RightTriangle") -> 16#022B3;
+entity("RightTriangleBar") -> 16#029D0;
+entity("RightTriangleEqual") -> 16#022B5;
+entity("RightUpDownVector") -> 16#0294F;
+entity("RightUpTeeVector") -> 16#0295C;
+entity("RightUpVector") -> 16#021BE;
+entity("RightUpVectorBar") -> 16#02954;
+entity("RightVector") -> 16#021C0;
+entity("RightVectorBar") -> 16#02953;
+entity("Rightarrow") -> 16#021D2;
+entity("Ropf") -> 16#0211D;
+entity("RoundImplies") -> 16#02970;
+entity("Rrightarrow") -> 16#021DB;
+entity("Rscr") -> 16#0211B;
+entity("Rsh") -> 16#021B1;
+entity("RuleDelayed") -> 16#029F4;
+entity("SHCHcy") -> 16#00429;
+entity("SHcy") -> 16#00428;
+entity("SOFTcy") -> 16#0042C;
+entity("Sacute") -> 16#0015A;
+entity("Sc") -> 16#02ABC;
+entity("Scaron") -> 16#00160;
+entity("Scedil") -> 16#0015E;
+entity("Scirc") -> 16#0015C;
+entity("Scy") -> 16#00421;
+entity("Sfr") -> 16#1D516;
+entity("ShortDownArrow") -> 16#02193;
+entity("ShortLeftArrow") -> 16#02190;
+entity("ShortRightArrow") -> 16#02192;
+entity("ShortUpArrow") -> 16#02191;
+entity("Sigma") -> 16#003A3;
+entity("SmallCircle") -> 16#02218;
+entity("Sopf") -> 16#1D54A;
+entity("Sqrt") -> 16#0221A;
+entity("Square") -> 16#025A1;
+entity("SquareIntersection") -> 16#02293;
+entity("SquareSubset") -> 16#0228F;
+entity("SquareSubsetEqual") -> 16#02291;
+entity("SquareSuperset") -> 16#02290;
+entity("SquareSupersetEqual") -> 16#02292;
+entity("SquareUnion") -> 16#02294;
+entity("Sscr") -> 16#1D4AE;
+entity("Star") -> 16#022C6;
+entity("Sub") -> 16#022D0;
+entity("Subset") -> 16#022D0;
+entity("SubsetEqual") -> 16#02286;
+entity("Succeeds") -> 16#0227B;
+entity("SucceedsEqual") -> 16#02AB0;
+entity("SucceedsSlantEqual") -> 16#0227D;
+entity("SucceedsTilde") -> 16#0227F;
+entity("SuchThat") -> 16#0220B;
+entity("Sum") -> 16#02211;
+entity("Sup") -> 16#022D1;
+entity("Superset") -> 16#02283;
+entity("SupersetEqual") -> 16#02287;
+entity("Supset") -> 16#022D1;
+entity("THORN") -> 16#000DE;
+entity("TRADE") -> 16#02122;
+entity("TSHcy") -> 16#0040B;
+entity("TScy") -> 16#00426;
+entity("Tab") -> 16#00009;
+entity("Tau") -> 16#003A4;
+entity("Tcaron") -> 16#00164;
+entity("Tcedil") -> 16#00162;
+entity("Tcy") -> 16#00422;
+entity("Tfr") -> 16#1D517;
+entity("Therefore") -> 16#02234;
+entity("Theta") -> 16#00398;
+entity("ThickSpace") -> [16#0205F, 16#0200A];
+entity("ThinSpace") -> 16#02009;
+entity("Tilde") -> 16#0223C;
+entity("TildeEqual") -> 16#02243;
+entity("TildeFullEqual") -> 16#02245;
+entity("TildeTilde") -> 16#02248;
+entity("Topf") -> 16#1D54B;
+entity("TripleDot") -> 16#020DB;
+entity("Tscr") -> 16#1D4AF;
+entity("Tstrok") -> 16#00166;
+entity("Uacute") -> 16#000DA;
+entity("Uarr") -> 16#0219F;
+entity("Uarrocir") -> 16#02949;
+entity("Ubrcy") -> 16#0040E;
+entity("Ubreve") -> 16#0016C;
+entity("Ucirc") -> 16#000DB;
+entity("Ucy") -> 16#00423;
+entity("Udblac") -> 16#00170;
+entity("Ufr") -> 16#1D518;
+entity("Ugrave") -> 16#000D9;
+entity("Umacr") -> 16#0016A;
+entity("UnderBar") -> 16#0005F;
+entity("UnderBrace") -> 16#023DF;
+entity("UnderBracket") -> 16#023B5;
+entity("UnderParenthesis") -> 16#023DD;
+entity("Union") -> 16#022C3;
+entity("UnionPlus") -> 16#0228E;
+entity("Uogon") -> 16#00172;
+entity("Uopf") -> 16#1D54C;
+entity("UpArrow") -> 16#02191;
+entity("UpArrowBar") -> 16#02912;
+entity("UpArrowDownArrow") -> 16#021C5;
+entity("UpDownArrow") -> 16#02195;
+entity("UpEquilibrium") -> 16#0296E;
+entity("UpTee") -> 16#022A5;
+entity("UpTeeArrow") -> 16#021A5;
+entity("Uparrow") -> 16#021D1;
+entity("Updownarrow") -> 16#021D5;
+entity("UpperLeftArrow") -> 16#02196;
+entity("UpperRightArrow") -> 16#02197;
+entity("Upsi") -> 16#003D2;
+entity("Upsilon") -> 16#003A5;
+entity("Uring") -> 16#0016E;
+entity("Uscr") -> 16#1D4B0;
+entity("Utilde") -> 16#00168;
+entity("Uuml") -> 16#000DC;
+entity("VDash") -> 16#022AB;
+entity("Vbar") -> 16#02AEB;
+entity("Vcy") -> 16#00412;
+entity("Vdash") -> 16#022A9;
+entity("Vdashl") -> 16#02AE6;
+entity("Vee") -> 16#022C1;
+entity("Verbar") -> 16#02016;
+entity("Vert") -> 16#02016;
+entity("VerticalBar") -> 16#02223;
+entity("VerticalLine") -> 16#0007C;
+entity("VerticalSeparator") -> 16#02758;
+entity("VerticalTilde") -> 16#02240;
+entity("VeryThinSpace") -> 16#0200A;
+entity("Vfr") -> 16#1D519;
+entity("Vopf") -> 16#1D54D;
+entity("Vscr") -> 16#1D4B1;
+entity("Vvdash") -> 16#022AA;
+entity("Wcirc") -> 16#00174;
+entity("Wedge") -> 16#022C0;
+entity("Wfr") -> 16#1D51A;
+entity("Wopf") -> 16#1D54E;
+entity("Wscr") -> 16#1D4B2;
+entity("Xfr") -> 16#1D51B;
+entity("Xi") -> 16#0039E;
+entity("Xopf") -> 16#1D54F;
+entity("Xscr") -> 16#1D4B3;
+entity("YAcy") -> 16#0042F;
+entity("YIcy") -> 16#00407;
+entity("YUcy") -> 16#0042E;
+entity("Yacute") -> 16#000DD;
+entity("Ycirc") -> 16#00176;
+entity("Ycy") -> 16#0042B;
+entity("Yfr") -> 16#1D51C;
+entity("Yopf") -> 16#1D550;
+entity("Yscr") -> 16#1D4B4;
+entity("Yuml") -> 16#00178;
+entity("ZHcy") -> 16#00416;
+entity("Zacute") -> 16#00179;
+entity("Zcaron") -> 16#0017D;
+entity("Zcy") -> 16#00417;
+entity("Zdot") -> 16#0017B;
+entity("ZeroWidthSpace") -> 16#0200B;
+entity("Zeta") -> 16#00396;
+entity("Zfr") -> 16#02128;
+entity("Zopf") -> 16#02124;
+entity("Zscr") -> 16#1D4B5;
+entity("aacute") -> 16#000E1;
+entity("abreve") -> 16#00103;
+entity("ac") -> 16#0223E;
+entity("acE") -> [16#0223E, 16#00333];
+entity("acd") -> 16#0223F;
+entity("acirc") -> 16#000E2;
+entity("acute") -> 16#000B4;
+entity("acy") -> 16#00430;
+entity("aelig") -> 16#000E6;
+entity("af") -> 16#02061;
+entity("afr") -> 16#1D51E;
+entity("agrave") -> 16#000E0;
+entity("alefsym") -> 16#02135;
+entity("aleph") -> 16#02135;
+entity("alpha") -> 16#003B1;
+entity("amacr") -> 16#00101;
+entity("amalg") -> 16#02A3F;
+entity("amp") -> 16#00026;
+entity("and") -> 16#02227;
+entity("andand") -> 16#02A55;
+entity("andd") -> 16#02A5C;
+entity("andslope") -> 16#02A58;
+entity("andv") -> 16#02A5A;
+entity("ang") -> 16#02220;
+entity("ange") -> 16#029A4;
+entity("angle") -> 16#02220;
+entity("angmsd") -> 16#02221;
+entity("angmsdaa") -> 16#029A8;
+entity("angmsdab") -> 16#029A9;
+entity("angmsdac") -> 16#029AA;
+entity("angmsdad") -> 16#029AB;
+entity("angmsdae") -> 16#029AC;
+entity("angmsdaf") -> 16#029AD;
+entity("angmsdag") -> 16#029AE;
+entity("angmsdah") -> 16#029AF;
+entity("angrt") -> 16#0221F;
+entity("angrtvb") -> 16#022BE;
+entity("angrtvbd") -> 16#0299D;
+entity("angsph") -> 16#02222;
+entity("angst") -> 16#000C5;
+entity("angzarr") -> 16#0237C;
+entity("aogon") -> 16#00105;
+entity("aopf") -> 16#1D552;
+entity("ap") -> 16#02248;
+entity("apE") -> 16#02A70;
+entity("apacir") -> 16#02A6F;
+entity("ape") -> 16#0224A;
+entity("apid") -> 16#0224B;
+entity("apos") -> 16#00027;
+entity("approx") -> 16#02248;
+entity("approxeq") -> 16#0224A;
+entity("aring") -> 16#000E5;
+entity("ascr") -> 16#1D4B6;
+entity("ast") -> 16#0002A;
+entity("asymp") -> 16#02248;
+entity("asympeq") -> 16#0224D;
+entity("atilde") -> 16#000E3;
+entity("auml") -> 16#000E4;
+entity("awconint") -> 16#02233;
+entity("awint") -> 16#02A11;
+entity("bNot") -> 16#02AED;
+entity("backcong") -> 16#0224C;
+entity("backepsilon") -> 16#003F6;
+entity("backprime") -> 16#02035;
+entity("backsim") -> 16#0223D;
+entity("backsimeq") -> 16#022CD;
+entity("barvee") -> 16#022BD;
+entity("barwed") -> 16#02305;
+entity("barwedge") -> 16#02305;
+entity("bbrk") -> 16#023B5;
+entity("bbrktbrk") -> 16#023B6;
+entity("bcong") -> 16#0224C;
+entity("bcy") -> 16#00431;
+entity("bdquo") -> 16#0201E;
+entity("becaus") -> 16#02235;
+entity("because") -> 16#02235;
+entity("bemptyv") -> 16#029B0;
+entity("bepsi") -> 16#003F6;
+entity("bernou") -> 16#0212C;
+entity("beta") -> 16#003B2;
+entity("beth") -> 16#02136;
+entity("between") -> 16#0226C;
+entity("bfr") -> 16#1D51F;
+entity("bigcap") -> 16#022C2;
+entity("bigcirc") -> 16#025EF;
+entity("bigcup") -> 16#022C3;
+entity("bigodot") -> 16#02A00;
+entity("bigoplus") -> 16#02A01;
+entity("bigotimes") -> 16#02A02;
+entity("bigsqcup") -> 16#02A06;
+entity("bigstar") -> 16#02605;
+entity("bigtriangledown") -> 16#025BD;
+entity("bigtriangleup") -> 16#025B3;
+entity("biguplus") -> 16#02A04;
+entity("bigvee") -> 16#022C1;
+entity("bigwedge") -> 16#022C0;
+entity("bkarow") -> 16#0290D;
+entity("blacklozenge") -> 16#029EB;
+entity("blacksquare") -> 16#025AA;
+entity("blacktriangle") -> 16#025B4;
+entity("blacktriangledown") -> 16#025BE;
+entity("blacktriangleleft") -> 16#025C2;
+entity("blacktriangleright") -> 16#025B8;
+entity("blank") -> 16#02423;
+entity("blk12") -> 16#02592;
+entity("blk14") -> 16#02591;
+entity("blk34") -> 16#02593;
+entity("block") -> 16#02588;
+entity("bne") -> [16#0003D, 16#020E5];
+entity("bnequiv") -> [16#02261, 16#020E5];
+entity("bnot") -> 16#02310;
+entity("bopf") -> 16#1D553;
+entity("bot") -> 16#022A5;
+entity("bottom") -> 16#022A5;
+entity("bowtie") -> 16#022C8;
+entity("boxDL") -> 16#02557;
+entity("boxDR") -> 16#02554;
+entity("boxDl") -> 16#02556;
+entity("boxDr") -> 16#02553;
+entity("boxH") -> 16#02550;
+entity("boxHD") -> 16#02566;
+entity("boxHU") -> 16#02569;
+entity("boxHd") -> 16#02564;
+entity("boxHu") -> 16#02567;
+entity("boxUL") -> 16#0255D;
+entity("boxUR") -> 16#0255A;
+entity("boxUl") -> 16#0255C;
+entity("boxUr") -> 16#02559;
+entity("boxV") -> 16#02551;
+entity("boxVH") -> 16#0256C;
+entity("boxVL") -> 16#02563;
+entity("boxVR") -> 16#02560;
+entity("boxVh") -> 16#0256B;
+entity("boxVl") -> 16#02562;
+entity("boxVr") -> 16#0255F;
+entity("boxbox") -> 16#029C9;
+entity("boxdL") -> 16#02555;
+entity("boxdR") -> 16#02552;
+entity("boxdl") -> 16#02510;
+entity("boxdr") -> 16#0250C;
+entity("boxh") -> 16#02500;
+entity("boxhD") -> 16#02565;
+entity("boxhU") -> 16#02568;
+entity("boxhd") -> 16#0252C;
+entity("boxhu") -> 16#02534;
+entity("boxminus") -> 16#0229F;
+entity("boxplus") -> 16#0229E;
+entity("boxtimes") -> 16#022A0;
+entity("boxuL") -> 16#0255B;
+entity("boxuR") -> 16#02558;
+entity("boxul") -> 16#02518;
+entity("boxur") -> 16#02514;
+entity("boxv") -> 16#02502;
+entity("boxvH") -> 16#0256A;
+entity("boxvL") -> 16#02561;
+entity("boxvR") -> 16#0255E;
+entity("boxvh") -> 16#0253C;
+entity("boxvl") -> 16#02524;
+entity("boxvr") -> 16#0251C;
+entity("bprime") -> 16#02035;
+entity("breve") -> 16#002D8;
+entity("brvbar") -> 16#000A6;
+entity("bscr") -> 16#1D4B7;
+entity("bsemi") -> 16#0204F;
+entity("bsim") -> 16#0223D;
+entity("bsime") -> 16#022CD;
+entity("bsol") -> 16#0005C;
+entity("bsolb") -> 16#029C5;
+entity("bsolhsub") -> 16#027C8;
+entity("bull") -> 16#02022;
+entity("bullet") -> 16#02022;
+entity("bump") -> 16#0224E;
+entity("bumpE") -> 16#02AAE;
+entity("bumpe") -> 16#0224F;
+entity("bumpeq") -> 16#0224F;
+entity("cacute") -> 16#00107;
+entity("cap") -> 16#02229;
+entity("capand") -> 16#02A44;
+entity("capbrcup") -> 16#02A49;
+entity("capcap") -> 16#02A4B;
+entity("capcup") -> 16#02A47;
+entity("capdot") -> 16#02A40;
+entity("caps") -> [16#02229, 16#0FE00];
+entity("caret") -> 16#02041;
+entity("caron") -> 16#002C7;
+entity("ccaps") -> 16#02A4D;
+entity("ccaron") -> 16#0010D;
+entity("ccedil") -> 16#000E7;
+entity("ccirc") -> 16#00109;
+entity("ccups") -> 16#02A4C;
+entity("ccupssm") -> 16#02A50;
+entity("cdot") -> 16#0010B;
+entity("cedil") -> 16#000B8;
+entity("cemptyv") -> 16#029B2;
+entity("cent") -> 16#000A2;
+entity("centerdot") -> 16#000B7;
+entity("cfr") -> 16#1D520;
+entity("chcy") -> 16#00447;
+entity("check") -> 16#02713;
+entity("checkmark") -> 16#02713;
+entity("chi") -> 16#003C7;
+entity("cir") -> 16#025CB;
+entity("cirE") -> 16#029C3;
+entity("circ") -> 16#002C6;
+entity("circeq") -> 16#02257;
+entity("circlearrowleft") -> 16#021BA;
+entity("circlearrowright") -> 16#021BB;
+entity("circledR") -> 16#000AE;
+entity("circledS") -> 16#024C8;
+entity("circledast") -> 16#0229B;
+entity("circledcirc") -> 16#0229A;
+entity("circleddash") -> 16#0229D;
+entity("cire") -> 16#02257;
+entity("cirfnint") -> 16#02A10;
+entity("cirmid") -> 16#02AEF;
+entity("cirscir") -> 16#029C2;
+entity("clubs") -> 16#02663;
+entity("clubsuit") -> 16#02663;
+entity("colon") -> 16#0003A;
+entity("colone") -> 16#02254;
+entity("coloneq") -> 16#02254;
+entity("comma") -> 16#0002C;
+entity("commat") -> 16#00040;
+entity("comp") -> 16#02201;
+entity("compfn") -> 16#02218;
+entity("complement") -> 16#02201;
+entity("complexes") -> 16#02102;
+entity("cong") -> 16#02245;
+entity("congdot") -> 16#02A6D;
+entity("conint") -> 16#0222E;
+entity("copf") -> 16#1D554;
+entity("coprod") -> 16#02210;
+entity("copy") -> 16#000A9;
+entity("copysr") -> 16#02117;
+entity("crarr") -> 16#021B5;
+entity("cross") -> 16#02717;
+entity("cscr") -> 16#1D4B8;
+entity("csub") -> 16#02ACF;
+entity("csube") -> 16#02AD1;
+entity("csup") -> 16#02AD0;
+entity("csupe") -> 16#02AD2;
+entity("ctdot") -> 16#022EF;
+entity("cudarrl") -> 16#02938;
+entity("cudarrr") -> 16#02935;
+entity("cuepr") -> 16#022DE;
+entity("cuesc") -> 16#022DF;
+entity("cularr") -> 16#021B6;
+entity("cularrp") -> 16#0293D;
+entity("cup") -> 16#0222A;
+entity("cupbrcap") -> 16#02A48;
+entity("cupcap") -> 16#02A46;
+entity("cupcup") -> 16#02A4A;
+entity("cupdot") -> 16#0228D;
+entity("cupor") -> 16#02A45;
+entity("cups") -> [16#0222A, 16#0FE00];
+entity("curarr") -> 16#021B7;
+entity("curarrm") -> 16#0293C;
+entity("curlyeqprec") -> 16#022DE;
+entity("curlyeqsucc") -> 16#022DF;
+entity("curlyvee") -> 16#022CE;
+entity("curlywedge") -> 16#022CF;
+entity("curren") -> 16#000A4;
+entity("curvearrowleft") -> 16#021B6;
+entity("curvearrowright") -> 16#021B7;
+entity("cuvee") -> 16#022CE;
+entity("cuwed") -> 16#022CF;
+entity("cwconint") -> 16#02232;
+entity("cwint") -> 16#02231;
+entity("cylcty") -> 16#0232D;
+entity("dArr") -> 16#021D3;
+entity("dHar") -> 16#02965;
+entity("dagger") -> 16#02020;
+entity("daleth") -> 16#02138;
+entity("darr") -> 16#02193;
+entity("dash") -> 16#02010;
+entity("dashv") -> 16#022A3;
+entity("dbkarow") -> 16#0290F;
+entity("dblac") -> 16#002DD;
+entity("dcaron") -> 16#0010F;
+entity("dcy") -> 16#00434;
+entity("dd") -> 16#02146;
+entity("ddagger") -> 16#02021;
+entity("ddarr") -> 16#021CA;
+entity("ddotseq") -> 16#02A77;
+entity("deg") -> 16#000B0;
+entity("delta") -> 16#003B4;
+entity("demptyv") -> 16#029B1;
+entity("dfisht") -> 16#0297F;
+entity("dfr") -> 16#1D521;
+entity("dharl") -> 16#021C3;
+entity("dharr") -> 16#021C2;
+entity("diam") -> 16#022C4;
+entity("diamond") -> 16#022C4;
+entity("diamondsuit") -> 16#02666;
+entity("diams") -> 16#02666;
+entity("die") -> 16#000A8;
+entity("digamma") -> 16#003DD;
+entity("disin") -> 16#022F2;
+entity("div") -> 16#000F7;
+entity("divide") -> 16#000F7;
+entity("divideontimes") -> 16#022C7;
+entity("divonx") -> 16#022C7;
+entity("djcy") -> 16#00452;
+entity("dlcorn") -> 16#0231E;
+entity("dlcrop") -> 16#0230D;
+entity("dollar") -> 16#00024;
+entity("dopf") -> 16#1D555;
+entity("dot") -> 16#002D9;
+entity("doteq") -> 16#02250;
+entity("doteqdot") -> 16#02251;
+entity("dotminus") -> 16#02238;
+entity("dotplus") -> 16#02214;
+entity("dotsquare") -> 16#022A1;
+entity("doublebarwedge") -> 16#02306;
+entity("downarrow") -> 16#02193;
+entity("downdownarrows") -> 16#021CA;
+entity("downharpoonleft") -> 16#021C3;
+entity("downharpoonright") -> 16#021C2;
+entity("drbkarow") -> 16#02910;
+entity("drcorn") -> 16#0231F;
+entity("drcrop") -> 16#0230C;
+entity("dscr") -> 16#1D4B9;
+entity("dscy") -> 16#00455;
+entity("dsol") -> 16#029F6;
+entity("dstrok") -> 16#00111;
+entity("dtdot") -> 16#022F1;
+entity("dtri") -> 16#025BF;
+entity("dtrif") -> 16#025BE;
+entity("duarr") -> 16#021F5;
+entity("duhar") -> 16#0296F;
+entity("dwangle") -> 16#029A6;
+entity("dzcy") -> 16#0045F;
+entity("dzigrarr") -> 16#027FF;
+entity("eDDot") -> 16#02A77;
+entity("eDot") -> 16#02251;
+entity("eacute") -> 16#000E9;
+entity("easter") -> 16#02A6E;
+entity("ecaron") -> 16#0011B;
+entity("ecir") -> 16#02256;
+entity("ecirc") -> 16#000EA;
+entity("ecolon") -> 16#02255;
+entity("ecy") -> 16#0044D;
+entity("edot") -> 16#00117;
+entity("ee") -> 16#02147;
+entity("efDot") -> 16#02252;
+entity("efr") -> 16#1D522;
+entity("eg") -> 16#02A9A;
+entity("egrave") -> 16#000E8;
+entity("egs") -> 16#02A96;
+entity("egsdot") -> 16#02A98;
+entity("el") -> 16#02A99;
+entity("elinters") -> 16#023E7;
+entity("ell") -> 16#02113;
+entity("els") -> 16#02A95;
+entity("elsdot") -> 16#02A97;
+entity("emacr") -> 16#00113;
+entity("empty") -> 16#02205;
+entity("emptyset") -> 16#02205;
+entity("emptyv") -> 16#02205;
+entity("emsp") -> 16#02003;
+entity("emsp13") -> 16#02004;
+entity("emsp14") -> 16#02005;
+entity("eng") -> 16#0014B;
+entity("ensp") -> 16#02002;
+entity("eogon") -> 16#00119;
+entity("eopf") -> 16#1D556;
+entity("epar") -> 16#022D5;
+entity("eparsl") -> 16#029E3;
+entity("eplus") -> 16#02A71;
+entity("epsi") -> 16#003B5;
+entity("epsilon") -> 16#003B5;
+entity("epsiv") -> 16#003F5;
+entity("eqcirc") -> 16#02256;
+entity("eqcolon") -> 16#02255;
+entity("eqsim") -> 16#02242;
+entity("eqslantgtr") -> 16#02A96;
+entity("eqslantless") -> 16#02A95;
+entity("equals") -> 16#0003D;
+entity("equest") -> 16#0225F;
+entity("equiv") -> 16#02261;
+entity("equivDD") -> 16#02A78;
+entity("eqvparsl") -> 16#029E5;
+entity("erDot") -> 16#02253;
+entity("erarr") -> 16#02971;
+entity("escr") -> 16#0212F;
+entity("esdot") -> 16#02250;
+entity("esim") -> 16#02242;
+entity("eta") -> 16#003B7;
+entity("eth") -> 16#000F0;
+entity("euml") -> 16#000EB;
+entity("euro") -> 16#020AC;
+entity("excl") -> 16#00021;
+entity("exist") -> 16#02203;
+entity("expectation") -> 16#02130;
+entity("exponentiale") -> 16#02147;
+entity("fallingdotseq") -> 16#02252;
+entity("fcy") -> 16#00444;
+entity("female") -> 16#02640;
+entity("ffilig") -> 16#0FB03;
+entity("fflig") -> 16#0FB00;
+entity("ffllig") -> 16#0FB04;
+entity("ffr") -> 16#1D523;
+entity("filig") -> 16#0FB01;
+entity("fjlig") -> [16#00066, 16#0006A];
+entity("flat") -> 16#0266D;
+entity("fllig") -> 16#0FB02;
+entity("fltns") -> 16#025B1;
+entity("fnof") -> 16#00192;
+entity("fopf") -> 16#1D557;
+entity("forall") -> 16#02200;
+entity("fork") -> 16#022D4;
+entity("forkv") -> 16#02AD9;
+entity("fpartint") -> 16#02A0D;
+entity("frac12") -> 16#000BD;
+entity("frac13") -> 16#02153;
+entity("frac14") -> 16#000BC;
+entity("frac15") -> 16#02155;
+entity("frac16") -> 16#02159;
+entity("frac18") -> 16#0215B;
+entity("frac23") -> 16#02154;
+entity("frac25") -> 16#02156;
+entity("frac34") -> 16#000BE;
+entity("frac35") -> 16#02157;
+entity("frac38") -> 16#0215C;
+entity("frac45") -> 16#02158;
+entity("frac56") -> 16#0215A;
+entity("frac58") -> 16#0215D;
+entity("frac78") -> 16#0215E;
+entity("frasl") -> 16#02044;
+entity("frown") -> 16#02322;
+entity("fscr") -> 16#1D4BB;
+entity("gE") -> 16#02267;
+entity("gEl") -> 16#02A8C;
+entity("gacute") -> 16#001F5;
+entity("gamma") -> 16#003B3;
+entity("gammad") -> 16#003DD;
+entity("gap") -> 16#02A86;
+entity("gbreve") -> 16#0011F;
+entity("gcirc") -> 16#0011D;
+entity("gcy") -> 16#00433;
+entity("gdot") -> 16#00121;
+entity("ge") -> 16#02265;
+entity("gel") -> 16#022DB;
+entity("geq") -> 16#02265;
+entity("geqq") -> 16#02267;
+entity("geqslant") -> 16#02A7E;
+entity("ges") -> 16#02A7E;
+entity("gescc") -> 16#02AA9;
+entity("gesdot") -> 16#02A80;
+entity("gesdoto") -> 16#02A82;
+entity("gesdotol") -> 16#02A84;
+entity("gesl") -> [16#022DB, 16#0FE00];
+entity("gesles") -> 16#02A94;
+entity("gfr") -> 16#1D524;
+entity("gg") -> 16#0226B;
+entity("ggg") -> 16#022D9;
+entity("gimel") -> 16#02137;
+entity("gjcy") -> 16#00453;
+entity("gl") -> 16#02277;
+entity("glE") -> 16#02A92;
+entity("gla") -> 16#02AA5;
+entity("glj") -> 16#02AA4;
+entity("gnE") -> 16#02269;
+entity("gnap") -> 16#02A8A;
+entity("gnapprox") -> 16#02A8A;
+entity("gne") -> 16#02A88;
+entity("gneq") -> 16#02A88;
+entity("gneqq") -> 16#02269;
+entity("gnsim") -> 16#022E7;
+entity("gopf") -> 16#1D558;
+entity("grave") -> 16#00060;
+entity("gscr") -> 16#0210A;
+entity("gsim") -> 16#02273;
+entity("gsime") -> 16#02A8E;
+entity("gsiml") -> 16#02A90;
+entity("gt") -> 16#0003E;
+entity("gtcc") -> 16#02AA7;
+entity("gtcir") -> 16#02A7A;
+entity("gtdot") -> 16#022D7;
+entity("gtlPar") -> 16#02995;
+entity("gtquest") -> 16#02A7C;
+entity("gtrapprox") -> 16#02A86;
+entity("gtrarr") -> 16#02978;
+entity("gtrdot") -> 16#022D7;
+entity("gtreqless") -> 16#022DB;
+entity("gtreqqless") -> 16#02A8C;
+entity("gtrless") -> 16#02277;
+entity("gtrsim") -> 16#02273;
+entity("gvertneqq") -> [16#02269, 16#0FE00];
+entity("gvnE") -> [16#02269, 16#0FE00];
+entity("hArr") -> 16#021D4;
+entity("hairsp") -> 16#0200A;
+entity("half") -> 16#000BD;
+entity("hamilt") -> 16#0210B;
+entity("hardcy") -> 16#0044A;
+entity("harr") -> 16#02194;
+entity("harrcir") -> 16#02948;
+entity("harrw") -> 16#021AD;
+entity("hbar") -> 16#0210F;
+entity("hcirc") -> 16#00125;
+entity("hearts") -> 16#02665;
+entity("heartsuit") -> 16#02665;
+entity("hellip") -> 16#02026;
+entity("hercon") -> 16#022B9;
+entity("hfr") -> 16#1D525;
+entity("hksearow") -> 16#02925;
+entity("hkswarow") -> 16#02926;
+entity("hoarr") -> 16#021FF;
+entity("homtht") -> 16#0223B;
+entity("hookleftarrow") -> 16#021A9;
+entity("hookrightarrow") -> 16#021AA;
+entity("hopf") -> 16#1D559;
+entity("horbar") -> 16#02015;
+entity("hscr") -> 16#1D4BD;
+entity("hslash") -> 16#0210F;
+entity("hstrok") -> 16#00127;
+entity("hybull") -> 16#02043;
+entity("hyphen") -> 16#02010;
+entity("iacute") -> 16#000ED;
+entity("ic") -> 16#02063;
+entity("icirc") -> 16#000EE;
+entity("icy") -> 16#00438;
+entity("iecy") -> 16#00435;
+entity("iexcl") -> 16#000A1;
+entity("iff") -> 16#021D4;
+entity("ifr") -> 16#1D526;
+entity("igrave") -> 16#000EC;
+entity("ii") -> 16#02148;
+entity("iiiint") -> 16#02A0C;
+entity("iiint") -> 16#0222D;
+entity("iinfin") -> 16#029DC;
+entity("iiota") -> 16#02129;
+entity("ijlig") -> 16#00133;
+entity("imacr") -> 16#0012B;
+entity("image") -> 16#02111;
+entity("imagline") -> 16#02110;
+entity("imagpart") -> 16#02111;
+entity("imath") -> 16#00131;
+entity("imof") -> 16#022B7;
+entity("imped") -> 16#001B5;
+entity("in") -> 16#02208;
+entity("incare") -> 16#02105;
+entity("infin") -> 16#0221E;
+entity("infintie") -> 16#029DD;
+entity("inodot") -> 16#00131;
+entity("int") -> 16#0222B;
+entity("intcal") -> 16#022BA;
+entity("integers") -> 16#02124;
+entity("intercal") -> 16#022BA;
+entity("intlarhk") -> 16#02A17;
+entity("intprod") -> 16#02A3C;
+entity("iocy") -> 16#00451;
+entity("iogon") -> 16#0012F;
+entity("iopf") -> 16#1D55A;
+entity("iota") -> 16#003B9;
+entity("iprod") -> 16#02A3C;
+entity("iquest") -> 16#000BF;
+entity("iscr") -> 16#1D4BE;
+entity("isin") -> 16#02208;
+entity("isinE") -> 16#022F9;
+entity("isindot") -> 16#022F5;
+entity("isins") -> 16#022F4;
+entity("isinsv") -> 16#022F3;
+entity("isinv") -> 16#02208;
+entity("it") -> 16#02062;
+entity("itilde") -> 16#00129;
+entity("iukcy") -> 16#00456;
+entity("iuml") -> 16#000EF;
+entity("jcirc") -> 16#00135;
+entity("jcy") -> 16#00439;
+entity("jfr") -> 16#1D527;
+entity("jmath") -> 16#00237;
+entity("jopf") -> 16#1D55B;
+entity("jscr") -> 16#1D4BF;
+entity("jsercy") -> 16#00458;
+entity("jukcy") -> 16#00454;
+entity("kappa") -> 16#003BA;
+entity("kappav") -> 16#003F0;
+entity("kcedil") -> 16#00137;
+entity("kcy") -> 16#0043A;
+entity("kfr") -> 16#1D528;
+entity("kgreen") -> 16#00138;
+entity("khcy") -> 16#00445;
+entity("kjcy") -> 16#0045C;
+entity("kopf") -> 16#1D55C;
+entity("kscr") -> 16#1D4C0;
+entity("lAarr") -> 16#021DA;
+entity("lArr") -> 16#021D0;
+entity("lAtail") -> 16#0291B;
+entity("lBarr") -> 16#0290E;
+entity("lE") -> 16#02266;
+entity("lEg") -> 16#02A8B;
+entity("lHar") -> 16#02962;
+entity("lacute") -> 16#0013A;
+entity("laemptyv") -> 16#029B4;
+entity("lagran") -> 16#02112;
+entity("lambda") -> 16#003BB;
+entity("lang") -> 16#027E8;
+entity("langd") -> 16#02991;
+entity("langle") -> 16#027E8;
+entity("lap") -> 16#02A85;
+entity("laquo") -> 16#000AB;
+entity("larr") -> 16#02190;
+entity("larrb") -> 16#021E4;
+entity("larrbfs") -> 16#0291F;
+entity("larrfs") -> 16#0291D;
+entity("larrhk") -> 16#021A9;
+entity("larrlp") -> 16#021AB;
+entity("larrpl") -> 16#02939;
+entity("larrsim") -> 16#02973;
+entity("larrtl") -> 16#021A2;
+entity("lat") -> 16#02AAB;
+entity("latail") -> 16#02919;
+entity("late") -> 16#02AAD;
+entity("lates") -> [16#02AAD, 16#0FE00];
+entity("lbarr") -> 16#0290C;
+entity("lbbrk") -> 16#02772;
+entity("lbrace") -> 16#0007B;
+entity("lbrack") -> 16#0005B;
+entity("lbrke") -> 16#0298B;
+entity("lbrksld") -> 16#0298F;
+entity("lbrkslu") -> 16#0298D;
+entity("lcaron") -> 16#0013E;
+entity("lcedil") -> 16#0013C;
+entity("lceil") -> 16#02308;
+entity("lcub") -> 16#0007B;
+entity("lcy") -> 16#0043B;
+entity("ldca") -> 16#02936;
+entity("ldquo") -> 16#0201C;
+entity("ldquor") -> 16#0201E;
+entity("ldrdhar") -> 16#02967;
+entity("ldrushar") -> 16#0294B;
+entity("ldsh") -> 16#021B2;
+entity("le") -> 16#02264;
+entity("leftarrow") -> 16#02190;
+entity("leftarrowtail") -> 16#021A2;
+entity("leftharpoondown") -> 16#021BD;
+entity("leftharpoonup") -> 16#021BC;
+entity("leftleftarrows") -> 16#021C7;
+entity("leftrightarrow") -> 16#02194;
+entity("leftrightarrows") -> 16#021C6;
+entity("leftrightharpoons") -> 16#021CB;
+entity("leftrightsquigarrow") -> 16#021AD;
+entity("leftthreetimes") -> 16#022CB;
+entity("leg") -> 16#022DA;
+entity("leq") -> 16#02264;
+entity("leqq") -> 16#02266;
+entity("leqslant") -> 16#02A7D;
+entity("les") -> 16#02A7D;
+entity("lescc") -> 16#02AA8;
+entity("lesdot") -> 16#02A7F;
+entity("lesdoto") -> 16#02A81;
+entity("lesdotor") -> 16#02A83;
+entity("lesg") -> [16#022DA, 16#0FE00];
+entity("lesges") -> 16#02A93;
+entity("lessapprox") -> 16#02A85;
+entity("lessdot") -> 16#022D6;
+entity("lesseqgtr") -> 16#022DA;
+entity("lesseqqgtr") -> 16#02A8B;
+entity("lessgtr") -> 16#02276;
+entity("lesssim") -> 16#02272;
+entity("lfisht") -> 16#0297C;
+entity("lfloor") -> 16#0230A;
+entity("lfr") -> 16#1D529;
+entity("lg") -> 16#02276;
+entity("lgE") -> 16#02A91;
+entity("lhard") -> 16#021BD;
+entity("lharu") -> 16#021BC;
+entity("lharul") -> 16#0296A;
+entity("lhblk") -> 16#02584;
+entity("ljcy") -> 16#00459;
+entity("ll") -> 16#0226A;
+entity("llarr") -> 16#021C7;
+entity("llcorner") -> 16#0231E;
+entity("llhard") -> 16#0296B;
+entity("lltri") -> 16#025FA;
+entity("lmidot") -> 16#00140;
+entity("lmoust") -> 16#023B0;
+entity("lmoustache") -> 16#023B0;
+entity("lnE") -> 16#02268;
+entity("lnap") -> 16#02A89;
+entity("lnapprox") -> 16#02A89;
+entity("lne") -> 16#02A87;
+entity("lneq") -> 16#02A87;
+entity("lneqq") -> 16#02268;
+entity("lnsim") -> 16#022E6;
+entity("loang") -> 16#027EC;
+entity("loarr") -> 16#021FD;
+entity("lobrk") -> 16#027E6;
+entity("longleftarrow") -> 16#027F5;
+entity("longleftrightarrow") -> 16#027F7;
+entity("longmapsto") -> 16#027FC;
+entity("longrightarrow") -> 16#027F6;
+entity("looparrowleft") -> 16#021AB;
+entity("looparrowright") -> 16#021AC;
+entity("lopar") -> 16#02985;
+entity("lopf") -> 16#1D55D;
+entity("loplus") -> 16#02A2D;
+entity("lotimes") -> 16#02A34;
+entity("lowast") -> 16#02217;
+entity("lowbar") -> 16#0005F;
+entity("loz") -> 16#025CA;
+entity("lozenge") -> 16#025CA;
+entity("lozf") -> 16#029EB;
+entity("lpar") -> 16#00028;
+entity("lparlt") -> 16#02993;
+entity("lrarr") -> 16#021C6;
+entity("lrcorner") -> 16#0231F;
+entity("lrhar") -> 16#021CB;
+entity("lrhard") -> 16#0296D;
+entity("lrm") -> 16#0200E;
+entity("lrtri") -> 16#022BF;
+entity("lsaquo") -> 16#02039;
+entity("lscr") -> 16#1D4C1;
+entity("lsh") -> 16#021B0;
+entity("lsim") -> 16#02272;
+entity("lsime") -> 16#02A8D;
+entity("lsimg") -> 16#02A8F;
+entity("lsqb") -> 16#0005B;
+entity("lsquo") -> 16#02018;
+entity("lsquor") -> 16#0201A;
+entity("lstrok") -> 16#00142;
+entity("lt") -> 16#0003C;
+entity("ltcc") -> 16#02AA6;
+entity("ltcir") -> 16#02A79;
+entity("ltdot") -> 16#022D6;
+entity("lthree") -> 16#022CB;
+entity("ltimes") -> 16#022C9;
+entity("ltlarr") -> 16#02976;
+entity("ltquest") -> 16#02A7B;
+entity("ltrPar") -> 16#02996;
+entity("ltri") -> 16#025C3;
+entity("ltrie") -> 16#022B4;
+entity("ltrif") -> 16#025C2;
+entity("lurdshar") -> 16#0294A;
+entity("luruhar") -> 16#02966;
+entity("lvertneqq") -> [16#02268, 16#0FE00];
+entity("lvnE") -> [16#02268, 16#0FE00];
+entity("mDDot") -> 16#0223A;
+entity("macr") -> 16#000AF;
+entity("male") -> 16#02642;
+entity("malt") -> 16#02720;
+entity("maltese") -> 16#02720;
+entity("map") -> 16#021A6;
+entity("mapsto") -> 16#021A6;
+entity("mapstodown") -> 16#021A7;
+entity("mapstoleft") -> 16#021A4;
+entity("mapstoup") -> 16#021A5;
+entity("marker") -> 16#025AE;
+entity("mcomma") -> 16#02A29;
+entity("mcy") -> 16#0043C;
+entity("mdash") -> 16#02014;
+entity("measuredangle") -> 16#02221;
+entity("mfr") -> 16#1D52A;
+entity("mho") -> 16#02127;
+entity("micro") -> 16#000B5;
+entity("mid") -> 16#02223;
+entity("midast") -> 16#0002A;
+entity("midcir") -> 16#02AF0;
+entity("middot") -> 16#000B7;
+entity("minus") -> 16#02212;
+entity("minusb") -> 16#0229F;
+entity("minusd") -> 16#02238;
+entity("minusdu") -> 16#02A2A;
+entity("mlcp") -> 16#02ADB;
+entity("mldr") -> 16#02026;
+entity("mnplus") -> 16#02213;
+entity("models") -> 16#022A7;
+entity("mopf") -> 16#1D55E;
+entity("mp") -> 16#02213;
+entity("mscr") -> 16#1D4C2;
+entity("mstpos") -> 16#0223E;
+entity("mu") -> 16#003BC;
+entity("multimap") -> 16#022B8;
+entity("mumap") -> 16#022B8;
+entity("nGg") -> [16#022D9, 16#00338];
+entity("nGt") -> [16#0226B, 16#020D2];
+entity("nGtv") -> [16#0226B, 16#00338];
+entity("nLeftarrow") -> 16#021CD;
+entity("nLeftrightarrow") -> 16#021CE;
+entity("nLl") -> [16#022D8, 16#00338];
+entity("nLt") -> [16#0226A, 16#020D2];
+entity("nLtv") -> [16#0226A, 16#00338];
+entity("nRightarrow") -> 16#021CF;
+entity("nVDash") -> 16#022AF;
+entity("nVdash") -> 16#022AE;
+entity("nabla") -> 16#02207;
+entity("nacute") -> 16#00144;
+entity("nang") -> [16#02220, 16#020D2];
+entity("nap") -> 16#02249;
+entity("napE") -> [16#02A70, 16#00338];
+entity("napid") -> [16#0224B, 16#00338];
+entity("napos") -> 16#00149;
+entity("napprox") -> 16#02249;
+entity("natur") -> 16#0266E;
+entity("natural") -> 16#0266E;
+entity("naturals") -> 16#02115;
+entity("nbsp") -> 16#000A0;
+entity("nbump") -> [16#0224E, 16#00338];
+entity("nbumpe") -> [16#0224F, 16#00338];
+entity("ncap") -> 16#02A43;
+entity("ncaron") -> 16#00148;
+entity("ncedil") -> 16#00146;
+entity("ncong") -> 16#02247;
+entity("ncongdot") -> [16#02A6D, 16#00338];
+entity("ncup") -> 16#02A42;
+entity("ncy") -> 16#0043D;
+entity("ndash") -> 16#02013;
+entity("ne") -> 16#02260;
+entity("neArr") -> 16#021D7;
+entity("nearhk") -> 16#02924;
+entity("nearr") -> 16#02197;
+entity("nearrow") -> 16#02197;
+entity("nedot") -> [16#02250, 16#00338];
+entity("nequiv") -> 16#02262;
+entity("nesear") -> 16#02928;
+entity("nesim") -> [16#02242, 16#00338];
+entity("nexist") -> 16#02204;
+entity("nexists") -> 16#02204;
+entity("nfr") -> 16#1D52B;
+entity("ngE") -> [16#02267, 16#00338];
+entity("nge") -> 16#02271;
+entity("ngeq") -> 16#02271;
+entity("ngeqq") -> [16#02267, 16#00338];
+entity("ngeqslant") -> [16#02A7E, 16#00338];
+entity("nges") -> [16#02A7E, 16#00338];
+entity("ngsim") -> 16#02275;
+entity("ngt") -> 16#0226F;
+entity("ngtr") -> 16#0226F;
+entity("nhArr") -> 16#021CE;
+entity("nharr") -> 16#021AE;
+entity("nhpar") -> 16#02AF2;
+entity("ni") -> 16#0220B;
+entity("nis") -> 16#022FC;
+entity("nisd") -> 16#022FA;
+entity("niv") -> 16#0220B;
+entity("njcy") -> 16#0045A;
+entity("nlArr") -> 16#021CD;
+entity("nlE") -> [16#02266, 16#00338];
+entity("nlarr") -> 16#0219A;
+entity("nldr") -> 16#02025;
+entity("nle") -> 16#02270;
+entity("nleftarrow") -> 16#0219A;
+entity("nleftrightarrow") -> 16#021AE;
+entity("nleq") -> 16#02270;
+entity("nleqq") -> [16#02266, 16#00338];
+entity("nleqslant") -> [16#02A7D, 16#00338];
+entity("nles") -> [16#02A7D, 16#00338];
+entity("nless") -> 16#0226E;
+entity("nlsim") -> 16#02274;
+entity("nlt") -> 16#0226E;
+entity("nltri") -> 16#022EA;
+entity("nltrie") -> 16#022EC;
+entity("nmid") -> 16#02224;
+entity("nopf") -> 16#1D55F;
+entity("not") -> 16#000AC;
+entity("notin") -> 16#02209;
+entity("notinE") -> [16#022F9, 16#00338];
+entity("notindot") -> [16#022F5, 16#00338];
+entity("notinva") -> 16#02209;
+entity("notinvb") -> 16#022F7;
+entity("notinvc") -> 16#022F6;
+entity("notni") -> 16#0220C;
+entity("notniva") -> 16#0220C;
+entity("notnivb") -> 16#022FE;
+entity("notnivc") -> 16#022FD;
+entity("npar") -> 16#02226;
+entity("nparallel") -> 16#02226;
+entity("nparsl") -> [16#02AFD, 16#020E5];
+entity("npart") -> [16#02202, 16#00338];
+entity("npolint") -> 16#02A14;
+entity("npr") -> 16#02280;
+entity("nprcue") -> 16#022E0;
+entity("npre") -> [16#02AAF, 16#00338];
+entity("nprec") -> 16#02280;
+entity("npreceq") -> [16#02AAF, 16#00338];
+entity("nrArr") -> 16#021CF;
+entity("nrarr") -> 16#0219B;
+entity("nrarrc") -> [16#02933, 16#00338];
+entity("nrarrw") -> [16#0219D, 16#00338];
+entity("nrightarrow") -> 16#0219B;
+entity("nrtri") -> 16#022EB;
+entity("nrtrie") -> 16#022ED;
+entity("nsc") -> 16#02281;
+entity("nsccue") -> 16#022E1;
+entity("nsce") -> [16#02AB0, 16#00338];
+entity("nscr") -> 16#1D4C3;
+entity("nshortmid") -> 16#02224;
+entity("nshortparallel") -> 16#02226;
+entity("nsim") -> 16#02241;
+entity("nsime") -> 16#02244;
+entity("nsimeq") -> 16#02244;
+entity("nsmid") -> 16#02224;
+entity("nspar") -> 16#02226;
+entity("nsqsube") -> 16#022E2;
+entity("nsqsupe") -> 16#022E3;
+entity("nsub") -> 16#02284;
+entity("nsubE") -> [16#02AC5, 16#00338];
+entity("nsube") -> 16#02288;
+entity("nsubset") -> [16#02282, 16#020D2];
+entity("nsubseteq") -> 16#02288;
+entity("nsubseteqq") -> [16#02AC5, 16#00338];
+entity("nsucc") -> 16#02281;
+entity("nsucceq") -> [16#02AB0, 16#00338];
+entity("nsup") -> 16#02285;
+entity("nsupE") -> [16#02AC6, 16#00338];
+entity("nsupe") -> 16#02289;
+entity("nsupset") -> [16#02283, 16#020D2];
+entity("nsupseteq") -> 16#02289;
+entity("nsupseteqq") -> [16#02AC6, 16#00338];
+entity("ntgl") -> 16#02279;
+entity("ntilde") -> 16#000F1;
+entity("ntlg") -> 16#02278;
+entity("ntriangleleft") -> 16#022EA;
+entity("ntrianglelefteq") -> 16#022EC;
+entity("ntriangleright") -> 16#022EB;
+entity("ntrianglerighteq") -> 16#022ED;
+entity("nu") -> 16#003BD;
+entity("num") -> 16#00023;
+entity("numero") -> 16#02116;
+entity("numsp") -> 16#02007;
+entity("nvDash") -> 16#022AD;
+entity("nvHarr") -> 16#02904;
+entity("nvap") -> [16#0224D, 16#020D2];
+entity("nvdash") -> 16#022AC;
+entity("nvge") -> [16#02265, 16#020D2];
+entity("nvgt") -> [16#0003E, 16#020D2];
+entity("nvinfin") -> 16#029DE;
+entity("nvlArr") -> 16#02902;
+entity("nvle") -> [16#02264, 16#020D2];
+entity("nvlt") -> [16#0003C, 16#020D2];
+entity("nvltrie") -> [16#022B4, 16#020D2];
+entity("nvrArr") -> 16#02903;
+entity("nvrtrie") -> [16#022B5, 16#020D2];
+entity("nvsim") -> [16#0223C, 16#020D2];
+entity("nwArr") -> 16#021D6;
+entity("nwarhk") -> 16#02923;
+entity("nwarr") -> 16#02196;
+entity("nwarrow") -> 16#02196;
+entity("nwnear") -> 16#02927;
+entity("oS") -> 16#024C8;
+entity("oacute") -> 16#000F3;
+entity("oast") -> 16#0229B;
+entity("ocir") -> 16#0229A;
+entity("ocirc") -> 16#000F4;
+entity("ocy") -> 16#0043E;
+entity("odash") -> 16#0229D;
+entity("odblac") -> 16#00151;
+entity("odiv") -> 16#02A38;
+entity("odot") -> 16#02299;
+entity("odsold") -> 16#029BC;
+entity("oelig") -> 16#00153;
+entity("ofcir") -> 16#029BF;
+entity("ofr") -> 16#1D52C;
+entity("ogon") -> 16#002DB;
+entity("ograve") -> 16#000F2;
+entity("ogt") -> 16#029C1;
+entity("ohbar") -> 16#029B5;
+entity("ohm") -> 16#003A9;
+entity("oint") -> 16#0222E;
+entity("olarr") -> 16#021BA;
+entity("olcir") -> 16#029BE;
+entity("olcross") -> 16#029BB;
+entity("oline") -> 16#0203E;
+entity("olt") -> 16#029C0;
+entity("omacr") -> 16#0014D;
+entity("omega") -> 16#003C9;
+entity("omicron") -> 16#003BF;
+entity("omid") -> 16#029B6;
+entity("ominus") -> 16#02296;
+entity("oopf") -> 16#1D560;
+entity("opar") -> 16#029B7;
+entity("operp") -> 16#029B9;
+entity("oplus") -> 16#02295;
+entity("or") -> 16#02228;
+entity("orarr") -> 16#021BB;
+entity("ord") -> 16#02A5D;
+entity("order") -> 16#02134;
+entity("orderof") -> 16#02134;
+entity("ordf") -> 16#000AA;
+entity("ordm") -> 16#000BA;
+entity("origof") -> 16#022B6;
+entity("oror") -> 16#02A56;
+entity("orslope") -> 16#02A57;
+entity("orv") -> 16#02A5B;
+entity("oscr") -> 16#02134;
+entity("oslash") -> 16#000F8;
+entity("osol") -> 16#02298;
+entity("otilde") -> 16#000F5;
+entity("otimes") -> 16#02297;
+entity("otimesas") -> 16#02A36;
+entity("ouml") -> 16#000F6;
+entity("ovbar") -> 16#0233D;
+entity("par") -> 16#02225;
+entity("para") -> 16#000B6;
+entity("parallel") -> 16#02225;
+entity("parsim") -> 16#02AF3;
+entity("parsl") -> 16#02AFD;
+entity("part") -> 16#02202;
+entity("pcy") -> 16#0043F;
+entity("percnt") -> 16#00025;
+entity("period") -> 16#0002E;
+entity("permil") -> 16#02030;
+entity("perp") -> 16#022A5;
+entity("pertenk") -> 16#02031;
+entity("pfr") -> 16#1D52D;
+entity("phi") -> 16#003C6;
+entity("phiv") -> 16#003D5;
+entity("phmmat") -> 16#02133;
+entity("phone") -> 16#0260E;
+entity("pi") -> 16#003C0;
+entity("pitchfork") -> 16#022D4;
+entity("piv") -> 16#003D6;
+entity("planck") -> 16#0210F;
+entity("planckh") -> 16#0210E;
+entity("plankv") -> 16#0210F;
+entity("plus") -> 16#0002B;
+entity("plusacir") -> 16#02A23;
+entity("plusb") -> 16#0229E;
+entity("pluscir") -> 16#02A22;
+entity("plusdo") -> 16#02214;
+entity("plusdu") -> 16#02A25;
+entity("pluse") -> 16#02A72;
+entity("plusmn") -> 16#000B1;
+entity("plussim") -> 16#02A26;
+entity("plustwo") -> 16#02A27;
+entity("pm") -> 16#000B1;
+entity("pointint") -> 16#02A15;
+entity("popf") -> 16#1D561;
+entity("pound") -> 16#000A3;
+entity("pr") -> 16#0227A;
+entity("prE") -> 16#02AB3;
+entity("prap") -> 16#02AB7;
+entity("prcue") -> 16#0227C;
+entity("pre") -> 16#02AAF;
+entity("prec") -> 16#0227A;
+entity("precapprox") -> 16#02AB7;
+entity("preccurlyeq") -> 16#0227C;
+entity("preceq") -> 16#02AAF;
+entity("precnapprox") -> 16#02AB9;
+entity("precneqq") -> 16#02AB5;
+entity("precnsim") -> 16#022E8;
+entity("precsim") -> 16#0227E;
+entity("prime") -> 16#02032;
+entity("primes") -> 16#02119;
+entity("prnE") -> 16#02AB5;
+entity("prnap") -> 16#02AB9;
+entity("prnsim") -> 16#022E8;
+entity("prod") -> 16#0220F;
+entity("profalar") -> 16#0232E;
+entity("profline") -> 16#02312;
+entity("profsurf") -> 16#02313;
+entity("prop") -> 16#0221D;
+entity("propto") -> 16#0221D;
+entity("prsim") -> 16#0227E;
+entity("prurel") -> 16#022B0;
+entity("pscr") -> 16#1D4C5;
+entity("psi") -> 16#003C8;
+entity("puncsp") -> 16#02008;
+entity("qfr") -> 16#1D52E;
+entity("qint") -> 16#02A0C;
+entity("qopf") -> 16#1D562;
+entity("qprime") -> 16#02057;
+entity("qscr") -> 16#1D4C6;
+entity("quaternions") -> 16#0210D;
+entity("quatint") -> 16#02A16;
+entity("quest") -> 16#0003F;
+entity("questeq") -> 16#0225F;
+entity("quot") -> 16#00022;
+entity("rAarr") -> 16#021DB;
+entity("rArr") -> 16#021D2;
+entity("rAtail") -> 16#0291C;
+entity("rBarr") -> 16#0290F;
+entity("rHar") -> 16#02964;
+entity("race") -> [16#0223D, 16#00331];
+entity("racute") -> 16#00155;
+entity("radic") -> 16#0221A;
+entity("raemptyv") -> 16#029B3;
+entity("rang") -> 16#027E9;
+entity("rangd") -> 16#02992;
+entity("range") -> 16#029A5;
+entity("rangle") -> 16#027E9;
+entity("raquo") -> 16#000BB;
+entity("rarr") -> 16#02192;
+entity("rarrap") -> 16#02975;
+entity("rarrb") -> 16#021E5;
+entity("rarrbfs") -> 16#02920;
+entity("rarrc") -> 16#02933;
+entity("rarrfs") -> 16#0291E;
+entity("rarrhk") -> 16#021AA;
+entity("rarrlp") -> 16#021AC;
+entity("rarrpl") -> 16#02945;
+entity("rarrsim") -> 16#02974;
+entity("rarrtl") -> 16#021A3;
+entity("rarrw") -> 16#0219D;
+entity("ratail") -> 16#0291A;
+entity("ratio") -> 16#02236;
+entity("rationals") -> 16#0211A;
+entity("rbarr") -> 16#0290D;
+entity("rbbrk") -> 16#02773;
+entity("rbrace") -> 16#0007D;
+entity("rbrack") -> 16#0005D;
+entity("rbrke") -> 16#0298C;
+entity("rbrksld") -> 16#0298E;
+entity("rbrkslu") -> 16#02990;
+entity("rcaron") -> 16#00159;
+entity("rcedil") -> 16#00157;
+entity("rceil") -> 16#02309;
+entity("rcub") -> 16#0007D;
+entity("rcy") -> 16#00440;
+entity("rdca") -> 16#02937;
+entity("rdldhar") -> 16#02969;
+entity("rdquo") -> 16#0201D;
+entity("rdquor") -> 16#0201D;
+entity("rdsh") -> 16#021B3;
+entity("real") -> 16#0211C;
+entity("realine") -> 16#0211B;
+entity("realpart") -> 16#0211C;
+entity("reals") -> 16#0211D;
+entity("rect") -> 16#025AD;
+entity("reg") -> 16#000AE;
+entity("rfisht") -> 16#0297D;
+entity("rfloor") -> 16#0230B;
+entity("rfr") -> 16#1D52F;
+entity("rhard") -> 16#021C1;
+entity("rharu") -> 16#021C0;
+entity("rharul") -> 16#0296C;
+entity("rho") -> 16#003C1;
+entity("rhov") -> 16#003F1;
+entity("rightarrow") -> 16#02192;
+entity("rightarrowtail") -> 16#021A3;
+entity("rightharpoondown") -> 16#021C1;
+entity("rightharpoonup") -> 16#021C0;
+entity("rightleftarrows") -> 16#021C4;
+entity("rightleftharpoons") -> 16#021CC;
+entity("rightrightarrows") -> 16#021C9;
+entity("rightsquigarrow") -> 16#0219D;
+entity("rightthreetimes") -> 16#022CC;
+entity("ring") -> 16#002DA;
+entity("risingdotseq") -> 16#02253;
+entity("rlarr") -> 16#021C4;
+entity("rlhar") -> 16#021CC;
+entity("rlm") -> 16#0200F;
+entity("rmoust") -> 16#023B1;
+entity("rmoustache") -> 16#023B1;
+entity("rnmid") -> 16#02AEE;
+entity("roang") -> 16#027ED;
+entity("roarr") -> 16#021FE;
+entity("robrk") -> 16#027E7;
+entity("ropar") -> 16#02986;
+entity("ropf") -> 16#1D563;
+entity("roplus") -> 16#02A2E;
+entity("rotimes") -> 16#02A35;
+entity("rpar") -> 16#00029;
+entity("rpargt") -> 16#02994;
+entity("rppolint") -> 16#02A12;
+entity("rrarr") -> 16#021C9;
+entity("rsaquo") -> 16#0203A;
+entity("rscr") -> 16#1D4C7;
+entity("rsh") -> 16#021B1;
+entity("rsqb") -> 16#0005D;
+entity("rsquo") -> 16#02019;
+entity("rsquor") -> 16#02019;
+entity("rthree") -> 16#022CC;
+entity("rtimes") -> 16#022CA;
+entity("rtri") -> 16#025B9;
+entity("rtrie") -> 16#022B5;
+entity("rtrif") -> 16#025B8;
+entity("rtriltri") -> 16#029CE;
+entity("ruluhar") -> 16#02968;
+entity("rx") -> 16#0211E;
+entity("sacute") -> 16#0015B;
+entity("sbquo") -> 16#0201A;
+entity("sc") -> 16#0227B;
+entity("scE") -> 16#02AB4;
+entity("scap") -> 16#02AB8;
+entity("scaron") -> 16#00161;
+entity("sccue") -> 16#0227D;
+entity("sce") -> 16#02AB0;
+entity("scedil") -> 16#0015F;
+entity("scirc") -> 16#0015D;
+entity("scnE") -> 16#02AB6;
+entity("scnap") -> 16#02ABA;
+entity("scnsim") -> 16#022E9;
+entity("scpolint") -> 16#02A13;
+entity("scsim") -> 16#0227F;
+entity("scy") -> 16#00441;
+entity("sdot") -> 16#022C5;
+entity("sdotb") -> 16#022A1;
+entity("sdote") -> 16#02A66;
+entity("seArr") -> 16#021D8;
+entity("searhk") -> 16#02925;
+entity("searr") -> 16#02198;
+entity("searrow") -> 16#02198;
+entity("sect") -> 16#000A7;
+entity("semi") -> 16#0003B;
+entity("seswar") -> 16#02929;
+entity("setminus") -> 16#02216;
+entity("setmn") -> 16#02216;
+entity("sext") -> 16#02736;
+entity("sfr") -> 16#1D530;
+entity("sfrown") -> 16#02322;
+entity("sharp") -> 16#0266F;
+entity("shchcy") -> 16#00449;
+entity("shcy") -> 16#00448;
+entity("shortmid") -> 16#02223;
+entity("shortparallel") -> 16#02225;
+entity("shy") -> 16#000AD;
+entity("sigma") -> 16#003C3;
+entity("sigmaf") -> 16#003C2;
+entity("sigmav") -> 16#003C2;
+entity("sim") -> 16#0223C;
+entity("simdot") -> 16#02A6A;
+entity("sime") -> 16#02243;
+entity("simeq") -> 16#02243;
+entity("simg") -> 16#02A9E;
+entity("simgE") -> 16#02AA0;
+entity("siml") -> 16#02A9D;
+entity("simlE") -> 16#02A9F;
+entity("simne") -> 16#02246;
+entity("simplus") -> 16#02A24;
+entity("simrarr") -> 16#02972;
+entity("slarr") -> 16#02190;
+entity("smallsetminus") -> 16#02216;
+entity("smashp") -> 16#02A33;
+entity("smeparsl") -> 16#029E4;
+entity("smid") -> 16#02223;
+entity("smile") -> 16#02323;
+entity("smt") -> 16#02AAA;
+entity("smte") -> 16#02AAC;
+entity("smtes") -> [16#02AAC, 16#0FE00];
+entity("softcy") -> 16#0044C;
+entity("sol") -> 16#0002F;
+entity("solb") -> 16#029C4;
+entity("solbar") -> 16#0233F;
+entity("sopf") -> 16#1D564;
+entity("spades") -> 16#02660;
+entity("spadesuit") -> 16#02660;
+entity("spar") -> 16#02225;
+entity("sqcap") -> 16#02293;
+entity("sqcaps") -> [16#02293, 16#0FE00];
+entity("sqcup") -> 16#02294;
+entity("sqcups") -> [16#02294, 16#0FE00];
+entity("sqsub") -> 16#0228F;
+entity("sqsube") -> 16#02291;
+entity("sqsubset") -> 16#0228F;
+entity("sqsubseteq") -> 16#02291;
+entity("sqsup") -> 16#02290;
+entity("sqsupe") -> 16#02292;
+entity("sqsupset") -> 16#02290;
+entity("sqsupseteq") -> 16#02292;
+entity("squ") -> 16#025A1;
+entity("square") -> 16#025A1;
+entity("squarf") -> 16#025AA;
+entity("squf") -> 16#025AA;
+entity("srarr") -> 16#02192;
+entity("sscr") -> 16#1D4C8;
+entity("ssetmn") -> 16#02216;
+entity("ssmile") -> 16#02323;
+entity("sstarf") -> 16#022C6;
+entity("star") -> 16#02606;
+entity("starf") -> 16#02605;
+entity("straightepsilon") -> 16#003F5;
+entity("straightphi") -> 16#003D5;
+entity("strns") -> 16#000AF;
+entity("sub") -> 16#02282;
+entity("subE") -> 16#02AC5;
+entity("subdot") -> 16#02ABD;
+entity("sube") -> 16#02286;
+entity("subedot") -> 16#02AC3;
+entity("submult") -> 16#02AC1;
+entity("subnE") -> 16#02ACB;
+entity("subne") -> 16#0228A;
+entity("subplus") -> 16#02ABF;
+entity("subrarr") -> 16#02979;
+entity("subset") -> 16#02282;
+entity("subseteq") -> 16#02286;
+entity("subseteqq") -> 16#02AC5;
+entity("subsetneq") -> 16#0228A;
+entity("subsetneqq") -> 16#02ACB;
+entity("subsim") -> 16#02AC7;
+entity("subsub") -> 16#02AD5;
+entity("subsup") -> 16#02AD3;
+entity("succ") -> 16#0227B;
+entity("succapprox") -> 16#02AB8;
+entity("succcurlyeq") -> 16#0227D;
+entity("succeq") -> 16#02AB0;
+entity("succnapprox") -> 16#02ABA;
+entity("succneqq") -> 16#02AB6;
+entity("succnsim") -> 16#022E9;
+entity("succsim") -> 16#0227F;
+entity("sum") -> 16#02211;
+entity("sung") -> 16#0266A;
+entity("sup") -> 16#02283;
+entity("sup1") -> 16#000B9;
+entity("sup2") -> 16#000B2;
+entity("sup3") -> 16#000B3;
+entity("supE") -> 16#02AC6;
+entity("supdot") -> 16#02ABE;
+entity("supdsub") -> 16#02AD8;
+entity("supe") -> 16#02287;
+entity("supedot") -> 16#02AC4;
+entity("suphsol") -> 16#027C9;
+entity("suphsub") -> 16#02AD7;
+entity("suplarr") -> 16#0297B;
+entity("supmult") -> 16#02AC2;
+entity("supnE") -> 16#02ACC;
+entity("supne") -> 16#0228B;
+entity("supplus") -> 16#02AC0;
+entity("supset") -> 16#02283;
+entity("supseteq") -> 16#02287;
+entity("supseteqq") -> 16#02AC6;
+entity("supsetneq") -> 16#0228B;
+entity("supsetneqq") -> 16#02ACC;
+entity("supsim") -> 16#02AC8;
+entity("supsub") -> 16#02AD4;
+entity("supsup") -> 16#02AD6;
+entity("swArr") -> 16#021D9;
+entity("swarhk") -> 16#02926;
+entity("swarr") -> 16#02199;
+entity("swarrow") -> 16#02199;
+entity("swnwar") -> 16#0292A;
+entity("szlig") -> 16#000DF;
+entity("target") -> 16#02316;
+entity("tau") -> 16#003C4;
+entity("tbrk") -> 16#023B4;
+entity("tcaron") -> 16#00165;
+entity("tcedil") -> 16#00163;
+entity("tcy") -> 16#00442;
+entity("tdot") -> 16#020DB;
+entity("telrec") -> 16#02315;
+entity("tfr") -> 16#1D531;
+entity("there4") -> 16#02234;
+entity("therefore") -> 16#02234;
+entity("theta") -> 16#003B8;
+entity("thetasym") -> 16#003D1;
+entity("thetav") -> 16#003D1;
+entity("thickapprox") -> 16#02248;
+entity("thicksim") -> 16#0223C;
+entity("thinsp") -> 16#02009;
+entity("thkap") -> 16#02248;
+entity("thksim") -> 16#0223C;
+entity("thorn") -> 16#000FE;
+entity("tilde") -> 16#002DC;
+entity("times") -> 16#000D7;
+entity("timesb") -> 16#022A0;
+entity("timesbar") -> 16#02A31;
+entity("timesd") -> 16#02A30;
+entity("tint") -> 16#0222D;
+entity("toea") -> 16#02928;
+entity("top") -> 16#022A4;
+entity("topbot") -> 16#02336;
+entity("topcir") -> 16#02AF1;
+entity("topf") -> 16#1D565;
+entity("topfork") -> 16#02ADA;
+entity("tosa") -> 16#02929;
+entity("tprime") -> 16#02034;
+entity("trade") -> 16#02122;
+entity("triangle") -> 16#025B5;
+entity("triangledown") -> 16#025BF;
+entity("triangleleft") -> 16#025C3;
+entity("trianglelefteq") -> 16#022B4;
+entity("triangleq") -> 16#0225C;
+entity("triangleright") -> 16#025B9;
+entity("trianglerighteq") -> 16#022B5;
+entity("tridot") -> 16#025EC;
+entity("trie") -> 16#0225C;
+entity("triminus") -> 16#02A3A;
+entity("triplus") -> 16#02A39;
+entity("trisb") -> 16#029CD;
+entity("tritime") -> 16#02A3B;
+entity("trpezium") -> 16#023E2;
+entity("tscr") -> 16#1D4C9;
+entity("tscy") -> 16#00446;
+entity("tshcy") -> 16#0045B;
+entity("tstrok") -> 16#00167;
+entity("twixt") -> 16#0226C;
+entity("twoheadleftarrow") -> 16#0219E;
+entity("twoheadrightarrow") -> 16#021A0;
+entity("uArr") -> 16#021D1;
+entity("uHar") -> 16#02963;
+entity("uacute") -> 16#000FA;
+entity("uarr") -> 16#02191;
+entity("ubrcy") -> 16#0045E;
+entity("ubreve") -> 16#0016D;
+entity("ucirc") -> 16#000FB;
+entity("ucy") -> 16#00443;
+entity("udarr") -> 16#021C5;
+entity("udblac") -> 16#00171;
+entity("udhar") -> 16#0296E;
+entity("ufisht") -> 16#0297E;
+entity("ufr") -> 16#1D532;
+entity("ugrave") -> 16#000F9;
+entity("uharl") -> 16#021BF;
+entity("uharr") -> 16#021BE;
+entity("uhblk") -> 16#02580;
+entity("ulcorn") -> 16#0231C;
+entity("ulcorner") -> 16#0231C;
+entity("ulcrop") -> 16#0230F;
+entity("ultri") -> 16#025F8;
+entity("umacr") -> 16#0016B;
+entity("uml") -> 16#000A8;
+entity("uogon") -> 16#00173;
+entity("uopf") -> 16#1D566;
+entity("uparrow") -> 16#02191;
+entity("updownarrow") -> 16#02195;
+entity("upharpoonleft") -> 16#021BF;
+entity("upharpoonright") -> 16#021BE;
+entity("uplus") -> 16#0228E;
+entity("upsi") -> 16#003C5;
+entity("upsih") -> 16#003D2;
+entity("upsilon") -> 16#003C5;
+entity("upuparrows") -> 16#021C8;
+entity("urcorn") -> 16#0231D;
+entity("urcorner") -> 16#0231D;
+entity("urcrop") -> 16#0230E;
+entity("uring") -> 16#0016F;
+entity("urtri") -> 16#025F9;
+entity("uscr") -> 16#1D4CA;
+entity("utdot") -> 16#022F0;
+entity("utilde") -> 16#00169;
+entity("utri") -> 16#025B5;
+entity("utrif") -> 16#025B4;
+entity("uuarr") -> 16#021C8;
+entity("uuml") -> 16#000FC;
+entity("uwangle") -> 16#029A7;
+entity("vArr") -> 16#021D5;
+entity("vBar") -> 16#02AE8;
+entity("vBarv") -> 16#02AE9;
+entity("vDash") -> 16#022A8;
+entity("vangrt") -> 16#0299C;
+entity("varepsilon") -> 16#003F5;
+entity("varkappa") -> 16#003F0;
+entity("varnothing") -> 16#02205;
+entity("varphi") -> 16#003D5;
+entity("varpi") -> 16#003D6;
+entity("varpropto") -> 16#0221D;
+entity("varr") -> 16#02195;
+entity("varrho") -> 16#003F1;
+entity("varsigma") -> 16#003C2;
+entity("varsubsetneq") -> [16#0228A, 16#0FE00];
+entity("varsubsetneqq") -> [16#02ACB, 16#0FE00];
+entity("varsupsetneq") -> [16#0228B, 16#0FE00];
+entity("varsupsetneqq") -> [16#02ACC, 16#0FE00];
+entity("vartheta") -> 16#003D1;
+entity("vartriangleleft") -> 16#022B2;
+entity("vartriangleright") -> 16#022B3;
+entity("vcy") -> 16#00432;
+entity("vdash") -> 16#022A2;
+entity("vee") -> 16#02228;
+entity("veebar") -> 16#022BB;
+entity("veeeq") -> 16#0225A;
+entity("vellip") -> 16#022EE;
+entity("verbar") -> 16#0007C;
+entity("vert") -> 16#0007C;
+entity("vfr") -> 16#1D533;
+entity("vltri") -> 16#022B2;
+entity("vnsub") -> [16#02282, 16#020D2];
+entity("vnsup") -> [16#02283, 16#020D2];
+entity("vopf") -> 16#1D567;
+entity("vprop") -> 16#0221D;
+entity("vrtri") -> 16#022B3;
+entity("vscr") -> 16#1D4CB;
+entity("vsubnE") -> [16#02ACB, 16#0FE00];
+entity("vsubne") -> [16#0228A, 16#0FE00];
+entity("vsupnE") -> [16#02ACC, 16#0FE00];
+entity("vsupne") -> [16#0228B, 16#0FE00];
+entity("vzigzag") -> 16#0299A;
+entity("wcirc") -> 16#00175;
+entity("wedbar") -> 16#02A5F;
+entity("wedge") -> 16#02227;
+entity("wedgeq") -> 16#02259;
+entity("weierp") -> 16#02118;
+entity("wfr") -> 16#1D534;
+entity("wopf") -> 16#1D568;
+entity("wp") -> 16#02118;
+entity("wr") -> 16#02240;
+entity("wreath") -> 16#02240;
+entity("wscr") -> 16#1D4CC;
+entity("xcap") -> 16#022C2;
+entity("xcirc") -> 16#025EF;
+entity("xcup") -> 16#022C3;
+entity("xdtri") -> 16#025BD;
+entity("xfr") -> 16#1D535;
+entity("xhArr") -> 16#027FA;
+entity("xharr") -> 16#027F7;
+entity("xi") -> 16#003BE;
+entity("xlArr") -> 16#027F8;
+entity("xlarr") -> 16#027F5;
+entity("xmap") -> 16#027FC;
+entity("xnis") -> 16#022FB;
+entity("xodot") -> 16#02A00;
+entity("xopf") -> 16#1D569;
+entity("xoplus") -> 16#02A01;
+entity("xotime") -> 16#02A02;
+entity("xrArr") -> 16#027F9;
+entity("xrarr") -> 16#027F6;
+entity("xscr") -> 16#1D4CD;
+entity("xsqcup") -> 16#02A06;
+entity("xuplus") -> 16#02A04;
+entity("xutri") -> 16#025B3;
+entity("xvee") -> 16#022C1;
+entity("xwedge") -> 16#022C0;
+entity("yacute") -> 16#000FD;
+entity("yacy") -> 16#0044F;
+entity("ycirc") -> 16#00177;
+entity("ycy") -> 16#0044B;
+entity("yen") -> 16#000A5;
+entity("yfr") -> 16#1D536;
+entity("yicy") -> 16#00457;
+entity("yopf") -> 16#1D56A;
+entity("yscr") -> 16#1D4CE;
+entity("yucy") -> 16#0044E;
+entity("yuml") -> 16#000FF;
+entity("zacute") -> 16#0017A;
+entity("zcaron") -> 16#0017E;
+entity("zcy") -> 16#00437;
+entity("zdot") -> 16#0017C;
+entity("zeetrf") -> 16#02128;
+entity("zeta") -> 16#003B6;
+entity("zfr") -> 16#1D537;
+entity("zhcy") -> 16#00436;
+entity("zigrarr") -> 16#021DD;
+entity("zopf") -> 16#1D56B;
+entity("zscr") -> 16#1D4CF;
+entity("zwj") -> 16#0200D;
+entity("zwnj") -> 16#0200C;
+entity(_) -> undefined.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Verify every literal clause of entity/1 against the lookup table
+%% recovered from the module's abstract code.
+exhaustive_entity_test() ->
+    Table = mochiweb_cover:clause_lookup_table(?MODULE, entity),
+    lists:foreach(fun ({Name, Codepoint}) ->
+                          ?assertEqual(Codepoint, entity(Name))
+                  end, Table).
+
+%% charref/1 resolves decimal ("#1234") and hexadecimal ("#xfF" /
+%% "#XFf") numeric character references as well as named entities,
+%% for both string and binary input, returning `undefined' for
+%% anything it does not recognize.
+charref_test() ->
+    1234 = charref("#1234"),
+    255 = charref("#xfF"),
+    %% binary input, mixed-case hex marker
+    255 = charref(<<"#XFf">>),
+    38 = charref("amp"),
+    38 = charref(<<"amp">>),
+    undefined = charref("not_an_entity"),
+    undefined = charref("#not_an_entity"),
+    undefined = charref("#xnot_an_entity"),
+    ok.
+
+-endif.
--- /dev/null
+%% @author Emad El-Haraty <emad@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc HTTP Cookie parsing and generating (RFC 2109, RFC 2965).
+
+-module(mochiweb_cookies).
+-export([parse_cookie/1, cookie/3, cookie/2]).
+
+-define(QUOTE, $\").
+
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% RFC 2616 separators (called tspecials in RFC 2068)
+-define(IS_SEPARATOR(C),
+ (C < 32 orelse
+ C =:= $\s orelse C =:= $\t orelse
+ C =:= $( orelse C =:= $) orelse C =:= $< orelse C =:= $> orelse
+ C =:= $@ orelse C =:= $, orelse C =:= $; orelse C =:= $: orelse
+ C =:= $\\ orelse C =:= $\" orelse C =:= $/ orelse
+ C =:= $[ orelse C =:= $] orelse C =:= $? orelse C =:= $= orelse
+ C =:= ${ orelse C =:= $})).
+
+%% @type proplist() = [{Key::string(), Value::string()}].
+%% @type header() = {Name::string(), Value::string()}.
+%% @type int_seconds() = integer().
+
+%% @spec cookie(Key::string(), Value::string()) -> header()
+%% @doc Short-hand for <code>cookie(Key, Value, [])</code>.
+cookie(Key, Value) ->
+    %% Delegate to cookie/3 with an empty option list.
+    cookie(Key, Value, []).
+
+%% @spec cookie(Key::string(), Value::string(), Options::[Option]) -> header()
+%% where Option = {max_age, int_seconds()} | {local_time, {date(), time()}}
+%% | {domain, string()} | {path, string()}
+%% | {secure, true | false} | {http_only, true | false}
+%%
+%% @doc Generate a Set-Cookie header field tuple.
+%% @doc Generate a Set-Cookie header field tuple. The attribute order
+%% in the emitted string is fixed: Version, Expires/Max-Age, Secure,
+%% Domain, Path, HttpOnly.
+cookie(Key, Value, Options) ->
+    %% Base key=value pair; Version=1 per RFC 2109.
+    Base = [any_to_list(Key), "=", quote(Value), "; Version=1"],
+    %% Render "; Name=QuotedValue" when option Opt is present, else "".
+    Attr = fun (Opt, Name) ->
+                   case proplists:get_value(Opt, Options) of
+                       undefined -> "";
+                       V -> ["; ", Name, "=", quote(V)]
+                   end
+           end,
+    %% Render Text when option Opt is exactly `true', else "".
+    Flag = fun (Opt, Text) ->
+                   case proplists:get_value(Opt, Options) of
+                       true -> Text;
+                       _ -> ""
+                   end
+           end,
+    Expires =
+        case proplists:get_value(max_age, Options) of
+            undefined ->
+                "";
+            RawAge ->
+                %% Base the Expires date on a caller-supplied local time
+                %% when given (deterministic for tests), else on now.
+                When = case proplists:get_value(local_time, Options) of
+                           undefined -> calendar:local_time();
+                           LocalTime -> LocalTime
+                       end,
+                %% Clamp negative ages to 0 (expire immediately).
+                Age = if RawAge < 0 -> 0;
+                         true -> RawAge
+                      end,
+                ["; Expires=", age_to_cookie_date(Age, When),
+                 "; Max-Age=", quote(Age)]
+        end,
+    {"Set-Cookie",
+     lists:flatten([Base, Expires,
+                    Flag(secure, "; Secure"),
+                    Attr(domain, "Domain"),
+                    Attr(path, "Path"),
+                    Flag(http_only, "; HttpOnly")])}.
+
+
+%% Every major browser incorrectly handles quoted strings in a
+%% different and (worse) incompatible manner. Instead of wasting time
+%% writing redundant code for each browser, we restrict cookies to
+%% only contain characters that browsers handle compatibly.
+%%
+%% By replacing the definition of quote with this, we generate
+%% RFC-compliant cookies:
+%%
+%% quote(V) ->
+%% Fun = fun(?QUOTE, Acc) -> [$\\, ?QUOTE | Acc];
+%% (Ch, Acc) -> [Ch | Acc]
+%% end,
+%% [?QUOTE | lists:foldr(Fun, [?QUOTE], V)].
+
+%% Convert to a string and raise an error if quoting is required.
+%% Convert V0 to a string; raise {cookie_quoting_required, V} if it
+%% contains any character that would need quoting. We deliberately
+%% refuse to emit quoted strings (see the comment above) because
+%% browsers do not parse them compatibly.
+quote(V0) ->
+    V = any_to_list(V0),
+    Acceptable = fun (Ch) -> Ch =:= $/ orelse not ?IS_SEPARATOR(Ch) end,
+    case lists:all(Acceptable, V) of
+        true -> V;
+        false -> erlang:error({cookie_quoting_required, V})
+    end.
+
+
+%% Return a date in the form of: Wdy, DD-Mon-YYYY HH:MM:SS GMT
+%% See also: rfc2109: 10.1.2
+%% Return a date in the form of: Wdy, DD-Mon-YYYY HH:MM:SS GMT
+%% See also: rfc2109: 10.1.2
+rfc2109_cookie_expires_date(LocalTime) ->
+    {{YYYY,MM,DD},{Hour,Min,Sec}} =
+        case calendar:local_time_to_universal_time_dst(LocalTime) of
+            [] ->
+                %% LocalTime does not exist (it falls in the hour
+                %% skipped by a DST spring-forward); retry one hour
+                %% later, which must exist.
+                %% NOTE(review): Hour1 + 1 can yield 24 for a 23:xx
+                %% local time -- presumably unreachable for real DST
+                %% switch times; confirm before relying on it.
+                {Date, {Hour1, Min1, Sec1}} = LocalTime,
+                LocalTime2 = {Date, {Hour1 + 1, Min1, Sec1}},
+                case calendar:local_time_to_universal_time_dst(LocalTime2) of
+                    [Gmt] -> Gmt;
+                    [_,Gmt] -> Gmt
+                end;
+            %% Ambiguous local times (DST fall-back) yield two UTC
+            %% candidates; pick the later one.
+            [Gmt] -> Gmt;
+            [_,Gmt] -> Gmt
+        end,
+    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+    lists:flatten(
+      io_lib:format("~s, ~2.2.0w-~3.s-~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+                    [httpd_util:day(DayNumber),DD,httpd_util:month(MM),YYYY,Hour,Min,Sec])).
+
+%% Advance a calendar datetime by Secs seconds (Secs may be negative).
+add_seconds(Secs, LocalTime) ->
+    calendar:gregorian_seconds_to_datetime(
+      calendar:datetime_to_gregorian_seconds(LocalTime) + Secs).
+
+%% Format the expiry timestamp that lies Age seconds after LocalTime.
+age_to_cookie_date(Age, LocalTime) ->
+    rfc2109_cookie_expires_date(add_seconds(Age, LocalTime)).
+
+%% @spec parse_cookie(string()) -> [{K::string(), V::string()}]
+%% @doc Parse the contents of a Cookie header field, ignoring cookie
+%% attributes, and return a simple property list.
+parse_cookie("") ->
+    [];
+parse_cookie(Cookie) ->
+    parse_cookie(Cookie, []).
+
+%% Internal API
+
+%% Accumulate {Token, Value} pairs in order, skipping empty tokens and
+%% the RFC 2109 "$"-prefixed attributes ($Version, $Path, $Domain, ...).
+parse_cookie([], Parsed) ->
+    lists:reverse(Parsed);
+parse_cookie(String, Parsed) ->
+    {{Token, Value}, Rest} = read_pair(String),
+    case Token of
+        "" ->
+            parse_cookie(Rest, Parsed);
+        [$$ | _] ->
+            parse_cookie(Rest, Parsed);
+        _ ->
+            parse_cookie(Rest, [{Token, Value} | Parsed])
+    end.
+
+%% Read one cookie-pair (a token, optionally followed by "=" and a
+%% value) and skip past the "," or ";" separating it from the next.
+read_pair(String) ->
+    {Name, AfterName} = read_token(skip_whitespace(String)),
+    {Value, AfterValue} = read_value(skip_whitespace(AfterName)),
+    {{Name, Value}, skip_past_separator(AfterValue)}.
+
+%% A value is optional; with no leading "=" the value is "".
+read_value([$= | Value]) ->
+    case skip_whitespace(Value) of
+        [?QUOTE | _] = Quoted ->
+            read_quoted(Quoted);
+        Bare ->
+            read_token(Bare)
+    end;
+read_value(String) ->
+    {"", String}.
+
+%% Read a double-quoted value, honoring backslash escapes.
+read_quoted([?QUOTE | String]) ->
+    read_quoted(String, []).
+
+read_quoted([], Seen) ->
+    {lists:reverse(Seen), []};
+read_quoted([?QUOTE | Rest], Seen) ->
+    {lists:reverse(Seen), Rest};
+read_quoted([$\\, Escaped | Rest], Seen) ->
+    read_quoted(Rest, [Escaped | Seen]);
+read_quoted([Ch | Rest], Seen) ->
+    read_quoted(Rest, [Ch | Seen]).
+
+%% Drop leading linear whitespace.
+skip_whitespace([Ch | Rest]) when ?IS_WHITESPACE(Ch) ->
+    skip_whitespace(Rest);
+skip_whitespace(String) ->
+    String.
+
+%% Split off the longest prefix of non-separator characters.
+read_token(String) ->
+    lists:splitwith(fun (Ch) -> not ?IS_SEPARATOR(Ch) end, String).
+
+%% Advance past the next "," or ";", or to the end of the string.
+skip_past_separator([]) ->
+    [];
+skip_past_separator([Ch | Rest]) when Ch =:= $; orelse Ch =:= $, ->
+    Rest;
+skip_past_separator([_ | Rest]) ->
+    skip_past_separator(Rest).
+
+%% Coerce a list, atom, binary or integer to a string; any other type
+%% fails with function_clause.
+any_to_list(V) when is_atom(V) ->
+    atom_to_list(V);
+any_to_list(V) when is_binary(V) ->
+    binary_to_list(V);
+any_to_list(V) when is_integer(V) ->
+    integer_to_list(V);
+any_to_list(V) when is_list(V) ->
+    V.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% quote/1 must reject values containing separator characters and
+%% stringify atoms.
+quote_test() ->
+    %% ?assertError eunit macro is not compatible with coverage module
+    try quote(":wq")
+    catch error:{cookie_quoting_required, ":wq"} -> ok
+    end,
+    ?assertEqual(
+       "foo",
+       quote(foo)),
+    ok.
+
+parse_cookie_test() ->
+    %% RFC example
+    C1 = "$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
+    Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\";
+    Shipping=\"FedEx\"; $Path=\"/acme\"",
+    %% "$"-prefixed attributes are dropped; only real pairs remain.
+    ?assertEqual(
+       [{"Customer","WILE_E_COYOTE"},
+        {"Part_Number","Rocket_Launcher_0001"},
+        {"Shipping","FedEx"}],
+       parse_cookie(C1)),
+    %% Potential edge cases
+    ?assertEqual(
+       [{"foo", "x"}],
+       parse_cookie("foo=\"\\x\"")),
+    ?assertEqual(
+       [],
+       parse_cookie("=")),
+    ?assertEqual(
+       [{"foo", ""}, {"bar", ""}],
+       parse_cookie(" foo ; bar ")),
+    ?assertEqual(
+       [{"foo", ""}, {"bar", ""}],
+       parse_cookie("foo=;bar=")),
+    ?assertEqual(
+       [{"foo", "\";"}, {"bar", ""}],
+       parse_cookie("foo = \"\\\";\";bar ")),
+    ?assertEqual(
+       [{"foo", "\";bar"}],
+       parse_cookie("foo=\"\\\";bar")),
+    ?assertEqual(
+       [],
+       parse_cookie([])),
+    ?assertEqual(
+       [{"foo", "bar"}, {"baz", "wibble"}],
+       parse_cookie("foo=bar , baz=wibble ")),
+    ok.
+
+%% Attribute order in the emitted header is fixed by cookie/3, not by
+%% the order of the Options list.
+domain_test() ->
+    ?assertEqual(
+       {"Set-Cookie",
+        "Customer=WILE_E_COYOTE; "
+        "Version=1; "
+        "Domain=acme.com; "
+        "HttpOnly"},
+       cookie("Customer", "WILE_E_COYOTE",
+              [{http_only, true}, {domain, "acme.com"}])),
+    ok.
+
+local_time_test() ->
+    {"Set-Cookie", S} = cookie("Customer", "WILE_E_COYOTE",
+                               [{max_age, 111}, {secure, true}]),
+    %% The Expires value depends on the wall clock, so only its prefix
+    %% is matched here.
+    ?assertMatch(
+       ["Customer=WILE_E_COYOTE",
+        " Version=1",
+        " Expires=" ++ _,
+        " Max-Age=111",
+        " Secure"],
+       string:tokens(S, ";")),
+    ok.
+
+cookie_test() ->
+    C1 = {"Set-Cookie",
+          "Customer=WILE_E_COYOTE; "
+          "Version=1; "
+          "Path=/acme"},
+    C1 = cookie("Customer", "WILE_E_COYOTE", [{path, "/acme"}]),
+    %% unknown options are ignored
+    C1 = cookie("Customer", "WILE_E_COYOTE",
+                [{path, "/acme"}, {badoption, "negatory"}]),
+    %% keys, values and option values may be atoms or binaries
+    C1 = cookie('Customer', 'WILE_E_COYOTE', [{path, '/acme'}]),
+    C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>, [{path, <<"/acme">>}]),
+
+    {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey", []),
+    {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey"),
+    LocalTime = calendar:universal_time_to_local_time({{2007, 5, 15}, {13, 45, 33}}),
+    %% negative max_age is clamped to 0, i.e. immediate expiry
+    C2 = {"Set-Cookie",
+          "Customer=WILE_E_COYOTE; "
+          "Version=1; "
+          "Expires=Tue, 15-May-2007 13:45:33 GMT; "
+          "Max-Age=0"},
+    C2 = cookie("Customer", "WILE_E_COYOTE",
+                [{max_age, -111}, {local_time, LocalTime}]),
+    C3 = {"Set-Cookie",
+          "Customer=WILE_E_COYOTE; "
+          "Version=1; "
+          "Expires=Wed, 16-May-2007 13:45:50 GMT; "
+          "Max-Age=86417"},
+    C3 = cookie("Customer", "WILE_E_COYOTE",
+                [{max_age, 86417}, {local_time, LocalTime}]),
+    ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Workarounds for various cover deficiencies.
+-module(mochiweb_cover).
+-export([get_beam/1, get_abstract_code/1,
+ get_clauses/2, clause_lookup_table/1]).
+-export([clause_lookup_table/2]).
+
+%% Internal
+
+%% Fetch the compiled object code for Module from the code path,
+%% asserting that the returned module name matches.
+get_beam(Module) ->
+    {Module, BeamBin, _Path} = code:get_object_code(Module),
+    BeamBin.
+
+%% Extract the raw abstract format (forms) from a beam binary; the
+%% module must have been compiled with debug_info.
+get_abstract_code(Beam) ->
+    {ok, {_Module, [{abstract_code, {raw_abstract_v1, Forms}}]}} =
+        beam_lib:chunks(Beam, [abstract_code]),
+    Forms.
+
+%% Return the clause list of the (single) named function in the forms.
+get_clauses(Function, Code) ->
+    [Clauses] = [Cs || {function, _Line, Name, _Arity, Cs} <- Code,
+                       Name =:= Function],
+    Clauses.
+
+clause_lookup_table(Module, Function) ->
+    Beam = get_beam(Module),
+    Forms = get_abstract_code(Beam),
+    clause_lookup_table(get_clauses(Function, Forms)).
+
+%% Fold each single-pattern, guardless, single-expression clause into
+%% a {Input, Output} pair; clauses of any other shape are skipped.
+clause_lookup_table(Clauses) ->
+    lists:foldr(fun clause_fold/2, [], Clauses).
+
+clause_fold({clause, _,
+             [InTerm],
+             _Guards=[],
+             [OutTerm]},
+            Acc) ->
+    %% erl_parse:normalise/1 raises for non-literal terms (e.g. a
+    %% variable pattern); such clauses are silently dropped.
+    try [{erl_parse:normalise(InTerm), erl_parse:normalise(OutTerm)} | Acc]
+    catch error:_ -> Acc
+    end;
+clause_fold(_, Acc) ->
+    Acc.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+%% Fixture exercising the clause shapes that clause_lookup_table/1 can
+%% and cannot normalise.
+foo_table(a) -> b;
+foo_table("a") -> <<"b">>;
+foo_table(123) -> {4, 3, 2};
+foo_table([list]) -> [];
+foo_table([list1, list2]) -> [list1, list2, list3];
+%% multi-expression body (comma-separated; returns the last value) --
+%% not a single-expression clause, so absent from the lookup table
+foo_table(ignored) -> some, code, ignored;
+%% variable pattern -- normalise raises, so also absent from the table
+foo_table(Var) -> Var.
+
+foo_table_test() ->
+    T = clause_lookup_table(?MODULE, foo_table),
+    [?assertEqual(V, foo_table(K)) || {K, V} <- T].
+
+clause_lookup_table_test() ->
+    ?assertEqual(b, foo_table(a)),
+    ?assertEqual(ignored, foo_table(ignored)),
+    ?assertEqual('Var', foo_table('Var')),
+    %% only the five literal single-expression clauses appear
+    ?assertEqual(
+       [{a, b},
+        {"a", <<"b">>},
+        {123, {4, 3, 2}},
+        {[list], []},
+        {[list1, list2], [list1, list2, list3]}],
+       clause_lookup_table(?MODULE, foo_table)).
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Simple and stupid echo server to demo mochiweb_socket_server.
+
+-module(mochiweb_echo).
+-author('bob@mochimedia.com').
+-export([start/0, stop/0, loop/1]).
+
+%% Stop the echo server via its registered socket-server name.
+stop() ->
+    mochiweb_socket_server:stop(?MODULE).
+
+%% Start an unlinked echo server with the options below.
+start() ->
+    mochiweb_socket_server:start([{link, false} | options()]).
+
+%% Socket-server configuration: listen on 127.0.0.1:6789 with at most
+%% one concurrent connection, calling ?MODULE:loop/1 per connection.
+options() ->
+    [{name, ?MODULE},
+     {port, 6789},
+     {ip, "127.0.0.1"},
+     {max, 1},
+     {loop, {?MODULE, loop}}].
+
+%% Echo received data back to the peer until recv (30s timeout) or
+%% send fails, then terminate normally.
+loop(Socket) ->
+    case mochiweb_socket:recv(Socket, 0, 30000) of
+        {ok, Packet} ->
+            ok =:= mochiweb_socket:send(Socket, Packet)
+                orelse exit(normal),
+            loop(Socket);
+        _ClosedOrTimeout ->
+            exit(normal)
+    end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Case preserving (but case insensitive) HTTP Header dictionary.
+
+-module(mochiweb_headers).
+-author('bob@mochimedia.com').
+-export([empty/0, from_list/1, insert/3, enter/3, get_value/2, lookup/2]).
+-export([delete_any/2, get_primary_value/2, get_combined_value/2]).
+-export([default/3, enter_from_list/2, default_from_list/2]).
+-export([to_list/1, make/1]).
+-export([from_binary/1]).
+
+%% @type headers().
+%% @type key() = atom() | binary() | string().
+%% @type value() = atom() | binary() | string() | integer().
+
+%% @spec empty() -> headers()
+%% @doc Create an empty headers structure.
+empty() ->
+    gb_trees:empty().
+
+%% @spec make(headers() | [{key(), value()}]) -> headers()
+%% @doc Construct a headers() from the given list.
+make(L) when is_list(L) ->
+    from_list(L);
+%% assume a non-list is already a mochiweb_headers structure
+make(T) ->
+    T.
+
+%% @spec from_binary(iolist()) -> headers()
+%% @doc Transforms a raw HTTP header into a mochiweb headers structure.
+%%
+%% The given raw HTTP header can be one of the following:
+%%
+%% 1) A string or a binary representing a full HTTP header ending with
+%% double CRLF.
+%% Examples:
+%% ```
+%% "Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n"
+%% <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>'''
+%%
+%% 2) A list of binaries or strings where each element represents a raw
+%% HTTP header line ending with a single CRLF.
+%% Examples:
+%% ```
+%% [<<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">>]
+%% ["Content-Length: 47\r\n", "Content-Type: text/plain\r\n"]
+%% ["Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">>]'''
+%%
+%% Accept either a full raw header block (binary) or a list of raw
+%% header lines; the latter is flattened into one binary terminated by
+%% the blank line that ends an HTTP header section.
+from_binary(RawHttpHeader) when is_binary(RawHttpHeader) ->
+    from_binary(RawHttpHeader, []);
+from_binary(RawHttpHeaderList) ->
+    from_binary(list_to_binary([RawHttpHeaderList, "\r\n"])).
+
+%% Decode header lines one at a time; anything that is not a header
+%% (end of section, parse error) terminates the loop.
+from_binary(Raw, Parsed) ->
+    case erlang:decode_packet(httph, Raw, []) of
+        {ok, {http_header, _, Name, _, Value}, Rest} ->
+            from_binary(Rest, [{Name, Value} | Parsed]);
+        _ ->
+            make(Parsed)
+    end.
+
+%% @spec from_list([{key(), value()}]) -> headers()
+%% @doc Construct a headers() from the given list.
+from_list(Pairs) ->
+    lists:foldl(fun ({K, V}, Headers) -> insert(K, V, Headers) end,
+                empty(), Pairs).
+
+%% @spec enter_from_list([{key(), value()}], headers()) -> headers()
+%% @doc Insert pairs into the headers, replace any values for existing keys.
+enter_from_list(Pairs, Headers0) ->
+    lists:foldl(fun ({K, V}, Headers) -> enter(K, V, Headers) end,
+                Headers0, Pairs).
+
+%% @spec default_from_list([{key(), value()}], headers()) -> headers()
+%% @doc Insert pairs into the headers for keys that do not already exist.
+default_from_list(Pairs, Headers0) ->
+    lists:foldl(fun ({K, V}, Headers) -> default(K, V, Headers) end,
+                Headers0, Pairs).
+
+%% @spec to_list(headers()) -> [{key(), string()}]
+%% @doc Return the contents of the headers. The keys will be the exact key
+%% that was first inserted (e.g. may be an atom or binary, case is
+%% preserved).
+%% Expand each stored value into one pair per header occurrence:
+%% multi-valued entries are kept internally as {array, ReversedVals},
+%% so they are un-reversed here to restore insertion order.
+to_list(T) ->
+    Expand = fun ({K, {array, L}}) ->
+                     [{K, V} || V <- lists:reverse(L)];
+                 (Pair) ->
+                     [Pair]
+             end,
+    lists:flatmap(Expand, gb_trees:values(T)).
+
+%% @spec get_value(key(), headers()) -> string() | undefined
+%% @doc Return the value of the given header using a case insensitive search.
+%% undefined will be returned for keys that are not present.
+get_value(K, T) ->
+    case lookup(K, T) of
+        none ->
+            undefined;
+        {value, {_K0, V}} ->
+            expand(V)
+    end.
+
+%% @spec get_primary_value(key(), headers()) -> string() | undefined
+%% @doc Return the value of the given header up to the first semicolon
+%%      using a case insensitive search. undefined will be returned
+%%      for keys that are not present.
+get_primary_value(K, T) ->
+    case get_value(K, T) of
+        undefined ->
+            undefined;
+        Value ->
+            %% keep everything before the first ";" (parameters follow it)
+            lists:takewhile(fun ($;) -> false; (_Ch) -> true end, Value)
+    end.
+
+%% @spec get_combined_value(key(), headers()) -> string() | undefined
+%% @doc Return the value from the given header using a case insensitive search.
+%% If the value of the header is a comma-separated list whose values
+%% are all identical, that single value will be returned.
+%% undefined will be returned for keys that are not present or the
+%% values in the list are not the same.
+%%
+%% NOTE: The process isn't designed for a general purpose. If you need
+%% to access all values in the combined header, please refer to
+%% `tokenize_header_value/1'.
+%%
+%% Section 4.2 of the RFC 2616 (HTTP 1.1) describes multiple message-header
+%% fields with the same field-name may be present in a message if and only
+%% if the entire field-value for that header field is defined as a
+%% comma-separated list [i.e., #(values)].
+%% Tokenize the header value and deduplicate the tokens; the value is
+%% "combined" only when exactly one distinct token remains.
+get_combined_value(K, T) ->
+    case get_value(K, T) of
+        undefined ->
+            undefined;
+        V ->
+            case lists:usort(tokenize_header_value(V)) of
+                [Single] ->
+                    Single;
+                _ ->
+                    undefined
+            end
+    end.
+
+%% @spec lookup(key(), headers()) -> {value, {key(), string()}} | none
+%% @doc Return the case preserved key and value for the given header using
+%% a case insensitive search. none will be returned for keys that are
+%% not present.
+%% Case-insensitive lookup; the returned key keeps its original case,
+%% and multi-valued entries are expanded to a comma-joined string.
+lookup(K, T) ->
+    case gb_trees:lookup(normalize(K), T) of
+        none ->
+            none;
+        {value, {OrigKey, V}} ->
+            {value, {OrigKey, expand(V)}}
+    end.
+
+%% @spec default(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers if it does not already exist.
+default(K, V, T) ->
+    K1 = normalize(K),
+    V1 = any_to_list(V),
+    %% explicit membership check instead of insert-and-catch: an
+    %% existing entry (under the normalized key) is left untouched
+    case gb_trees:is_defined(K1, T) of
+        true ->
+            T;
+        false ->
+            gb_trees:insert(K1, {K, V1}, T)
+    end.
+
+%% @spec enter(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers, replacing any pre-existing key.
+enter(K, V, T) ->
+    gb_trees:enter(normalize(K), {K, any_to_list(V)}, T).
+
+%% @spec insert(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers, merging with any pre-existing key.
+%% A merge is done with Value = V0 ++ ", " ++ V1.
+insert(K, V, T) ->
+    K1 = normalize(K),
+    V1 = any_to_list(V),
+    case gb_trees:lookup(K1, T) of
+        none ->
+            gb_trees:insert(K1, {K, V1}, T);
+        {value, {K0, V0}} ->
+            %% merge with the existing value, keeping the original-case
+            %% key of the first insertion (see merge/3 for the rules)
+            gb_trees:update(K1, {K0, merge(K1, V1, V0)}, T)
+    end.
+
+%% @spec delete_any(key(), headers()) -> headers()
+%% @doc Delete the header corresponding to key if it is present.
+delete_any(K, T) ->
+    %% case-insensitive delete; a miss leaves the tree unchanged
+    gb_trees:delete_any(normalize(K), T).
+
+%% Internal API
+
+%% Split a header value into comma/whitespace separated tokens,
+%% honoring quoted strings. Propagates `undefined' input, and the
+%% tokenizer itself yields `undefined' on malformed quoting.
+tokenize_header_value(undefined) ->
+    undefined;
+tokenize_header_value(V) ->
+    %% The tokenizer walks the string in REVERSE; trim-and-reverse
+    %% first so per-token accumulators come out in original order.
+    reversed_tokens(trim_and_reverse(V, false), [], []).
+
+%% Trim whitespace from both ends and return the REVERSED string:
+%% the first clause drops leading whitespace, the string is reversed
+%% once (false -> true), and the first clause then drops what used to
+%% be the trailing whitespace.
+trim_and_reverse([S | Rest], Reversed) when S=:=$ ; S=:=$\n; S=:=$\t ->
+    trim_and_reverse(Rest, Reversed);
+trim_and_reverse(V, false) ->
+    trim_and_reverse(lists:reverse(V), true);
+trim_and_reverse(V, true) ->
+    V.
+
+%% Collect tokens from the reversed header string produced by
+%% trim_and_reverse/2. Because the input is reversed, each token is
+%% re-reversed as it is built, so both the tokens and the resulting
+%% list come out in original order. Returns `undefined' when quoting
+%% is malformed (unterminated quote, or a quote inside a bare token).
+reversed_tokens([], [], Acc) ->
+    Acc;
+reversed_tokens([], Token, Acc) ->
+    [Token | Acc];
+reversed_tokens("\"" ++ Rest, [], Acc) ->
+    case extract_quoted_string(Rest, []) of
+        {String, NewRest} ->
+            reversed_tokens(NewRest, [], [String | Acc]);
+        undefined ->
+            undefined
+    end;
+reversed_tokens("\"" ++ _Rest, _Token, _Acc) ->
+    %% a quote in the middle of an unquoted token is malformed
+    undefined;
+reversed_tokens([C | Rest], [], Acc) when C=:=$ ;C=:=$\n;C=:=$\t;C=:=$, ->
+    reversed_tokens(Rest, [], Acc);
+reversed_tokens([C | Rest], Token, Acc) when C=:=$ ;C=:=$\n;C=:=$\t;C=:=$, ->
+    reversed_tokens(Rest, [], [Token | Acc]);
+reversed_tokens([C | Rest], Token, Acc) ->
+    reversed_tokens(Rest, [C | Token], Acc);
+reversed_tokens(_, _, _) ->
+    %% Catch-all for non-list input. BUG FIX: this previously returned
+    %% the misspelled atom 'undefeined', which callers checking for
+    %% 'undefined' would never match.
+    undefined.
+
+%% Scan a quoted string out of REVERSED text (the caller has already
+%% consumed the reversed closing quote). Returns {String, Remainder}
+%% with String in original (un-reversed) order, or `undefined' when
+%% the matching quote is missing.
+extract_quoted_string([], _Acc) ->
+    undefined;
+%% in reversed text an escaped quote \" arrives as `"\' -- keep the quote
+extract_quoted_string("\"\\" ++ Rest, Acc) ->
+    extract_quoted_string(Rest, "\"" ++ Acc);
+extract_quoted_string("\"" ++ Rest, Acc) ->
+    {Acc, Rest};
+extract_quoted_string([C | Rest], Acc) ->
+    extract_quoted_string(Rest, [C | Acc]).
+
+%% Collapse a set-cookie {array, ReversedValues} accumulator (built by
+%% merge/3) back into a single comma-joined string; any other value is
+%% passed through untouched.
+expand(Value) ->
+ case Value of
+ {array, Reversed} ->
+ mochiweb_util:join(lists:reverse(Reversed), ", ");
+ _ ->
+ Value
+ end.
+
+%% Merge a new value V1 into the existing value V0 stored under the same
+%% normalized key. set-cookie values must remain separate headers, so
+%% they are collected (reversed) in an {array, ...} marker that expand/1
+%% and to_list/1 undo; everything else is comma-joined.
+merge("set-cookie", V1, {array, L}) ->
+ {array, [V1 | L]};
+merge("set-cookie", V1, V0) ->
+ {array, [V1, V0]};
+merge(_, V1, V0) ->
+ V0 ++ ", " ++ V1.
+
+%% Canonicalize a header key — given as atom, binary or string — to a
+%% lower-case string for case-insensitive tree lookups.
+normalize(Key) when is_atom(Key) ->
+ normalize(atom_to_list(Key));
+normalize(Key) when is_binary(Key) ->
+ normalize(binary_to_list(Key));
+normalize(Key) when is_list(Key) ->
+ string:to_lower(Key).
+
+%% Coerce a header value to a string. Strings pass through; atoms,
+%% binaries and integers are converted; any other type raises
+%% function_clause by design.
+any_to_list(V) when is_atom(V) ->
+ atom_to_list(V);
+any_to_list(V) when is_binary(V) ->
+ binary_to_list(V);
+any_to_list(V) when is_integer(V) ->
+ integer_to_list(V);
+any_to_list(V) when is_list(V) ->
+ V.
+
+%%
+%% Tests.
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% make/1 must be idempotent: feeding an existing headers() structure
+%% back through make/1 returns it unchanged.
+make_test() ->
+ Identity = make([{hdr, foo}]),
+ ?assertEqual(
+ Identity,
+ make(Identity)).
+
+%% enter_from_list/2 replaces an existing key outright (no merging).
+enter_from_list_test() ->
+ H = make([{hdr, foo}]),
+ ?assertEqual(
+ [{baz, "wibble"}, {hdr, "foo"}],
+ to_list(enter_from_list([{baz, wibble}], H))),
+ ?assertEqual(
+ [{hdr, "bar"}],
+ to_list(enter_from_list([{hdr, bar}], H))),
+ ok.
+
+%% default_from_list/2 only fills in keys that are missing; an existing
+%% key keeps its original value.
+default_from_list_test() ->
+ H = make([{hdr, foo}]),
+ ?assertEqual(
+ [{baz, "wibble"}, {hdr, "foo"}],
+ to_list(default_from_list([{baz, wibble}], H))),
+ ?assertEqual(
+ [{hdr, "foo"}],
+ to_list(default_from_list([{hdr, bar}], H))),
+ ok.
+
+%% get_primary_value/2 returns the value up to (excluding) the first
+%% semicolon, or undefined for a missing key.
+get_primary_value_test() ->
+ H = make([{hdr, foo}, {baz, <<"wibble;taco">>}]),
+ ?assertEqual(
+ "foo",
+ get_primary_value(hdr, H)),
+ ?assertEqual(
+ undefined,
+ get_primary_value(bar, H)),
+ ?assertEqual(
+ "wibble",
+ get_primary_value(<<"baz">>, H)),
+ ok.
+
+%% get_combined_value/2 collapses a comma-separated value to its single
+%% distinct token ("123, 123" -> "123") and returns undefined when the
+%% tokens disagree (or the value cannot be tokenized).
+get_combined_value_test() ->
+ H = make([{hdr, foo}, {baz, <<"wibble,taco">>}, {content_length, "123, 123"},
+ {test, " 123, 123, 123 , 123,123 "},
+ {test2, "456, 123, 123 , 123"},
+ {test3, "123"}, {test4, " 123, "}]),
+ ?assertEqual(
+ "foo",
+ get_combined_value(hdr, H)),
+ ?assertEqual(
+ undefined,
+ get_combined_value(bar, H)),
+ ?assertEqual(
+ undefined,
+ get_combined_value(<<"baz">>, H)),
+ ?assertEqual(
+ "123",
+ get_combined_value(<<"content_length">>, H)),
+ ?assertEqual(
+ "123",
+ get_combined_value(<<"test">>, H)),
+ ?assertEqual(
+ undefined,
+ get_combined_value(<<"test2">>, H)),
+ ?assertEqual(
+ "123",
+ get_combined_value(<<"test3">>, H)),
+ ?assertEqual(
+ "123",
+ get_combined_value(<<"test4">>, H)),
+ ok.
+
+%% set-cookie headers are never comma-folded: three inserts survive as
+%% three separate pairs in to_list/1 output.
+set_cookie_test() ->
+ H = make([{"set-cookie", foo}, {"set-cookie", bar}, {"set-cookie", baz}]),
+ ?assertEqual(
+ [{"set-cookie", "foo"}, {"set-cookie", "bar"}, {"set-cookie", "baz"}],
+ to_list(H)),
+ ok.
+
+%% End-to-end exercise of make/insert/get_value/lookup/get_primary_value/
+%% delete_any and from_binary (binary, string, and iolist-of-lines input,
+%% including empty and terminator-only inputs).
+headers_test() ->
+ H = ?MODULE:make([{hdr, foo}, {"Hdr", "bar"}, {'Hdr', 2}]),
+ [{hdr, "foo, bar, 2"}] = ?MODULE:to_list(H),
+ H1 = ?MODULE:insert(taco, grande, H),
+ [{hdr, "foo, bar, 2"}, {taco, "grande"}] = ?MODULE:to_list(H1),
+ H2 = ?MODULE:make([{"Set-Cookie", "foo"}]),
+ [{"Set-Cookie", "foo"}] = ?MODULE:to_list(H2),
+ H3 = ?MODULE:insert("Set-Cookie", "bar", H2),
+ [{"Set-Cookie", "foo"}, {"Set-Cookie", "bar"}] = ?MODULE:to_list(H3),
+ "foo, bar" = ?MODULE:get_value("set-cookie", H3),
+ {value, {"Set-Cookie", "foo, bar"}} = ?MODULE:lookup("set-cookie", H3),
+ undefined = ?MODULE:get_value("shibby", H3),
+ none = ?MODULE:lookup("shibby", H3),
+ H4 = ?MODULE:insert("content-type",
+ "application/x-www-form-urlencoded; charset=utf8",
+ H3),
+ "application/x-www-form-urlencoded" = ?MODULE:get_primary_value(
+ "content-type", H4),
+ H4 = ?MODULE:delete_any("nonexistent-header", H4),
+ H3 = ?MODULE:delete_any("content-type", H4),
+ HB = <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>,
+ H_HB = ?MODULE:from_binary(HB),
+ H_HB = ?MODULE:from_binary(binary_to_list(HB)),
+ "47" = ?MODULE:get_value("Content-Length", H_HB),
+ "text/plain" = ?MODULE:get_value("Content-Type", H_HB),
+ L_H_HB = ?MODULE:to_list(H_HB),
+ 2 = length(L_H_HB),
+ true = lists:member({'Content-Length', "47"}, L_H_HB),
+ true = lists:member({'Content-Type', "text/plain"}, L_H_HB),
+ HL = [ <<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">> ],
+ HL2 = [ "Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">> ],
+ HL3 = [ <<"Content-Length: 47\r\n">>, "Content-Type: text/plain\r\n" ],
+ H_HL = ?MODULE:from_binary(HL),
+ H_HL = ?MODULE:from_binary(HL2),
+ H_HL = ?MODULE:from_binary(HL3),
+ "47" = ?MODULE:get_value("Content-Length", H_HL),
+ "text/plain" = ?MODULE:get_value("Content-Type", H_HL),
+ L_H_HL = ?MODULE:to_list(H_HL),
+ 2 = length(L_H_HL),
+ true = lists:member({'Content-Length', "47"}, L_H_HL),
+ true = lists:member({'Content-Type', "text/plain"}, L_H_HL),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<>>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n\r\n">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary("")),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<>>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"">>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n">>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n\r\n">>])),
+ ok.
+
+%% Token splitting: commas and whitespace separate; quoted strings keep
+%% their content verbatim; unbalanced quotes yield undefined.
+tokenize_header_value_test() ->
+ ?assertEqual(["a quote in a \"quote\"."],
+ tokenize_header_value("\"a quote in a \\\"quote\\\".\"")),
+ ?assertEqual(["abc"], tokenize_header_value("abc")),
+ ?assertEqual(["abc", "def"], tokenize_header_value("abc def")),
+ ?assertEqual(["abc", "def"], tokenize_header_value("abc , def")),
+ ?assertEqual(["abc", "def"], tokenize_header_value(",abc ,, def,,")),
+ ?assertEqual(["abc def"], tokenize_header_value("\"abc def\" ")),
+ ?assertEqual(["abc, def"], tokenize_header_value("\"abc, def\"")),
+ ?assertEqual(["\\a\\$"], tokenize_header_value("\"\\a\\$\"")),
+ ?assertEqual(["abc def", "foo, bar", "12345", ""],
+ tokenize_header_value("\"abc def\" \"foo, bar\" , 12345, \"\"")),
+ ?assertEqual(undefined,
+ tokenize_header_value(undefined)),
+ ?assertEqual(undefined,
+ tokenize_header_value("umatched quote\"")),
+ ?assertEqual(undefined,
+ tokenize_header_value("\"unmatched quote")).
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Loosely tokenizes and generates parse trees for HTML 4.
+-module(mochiweb_html).
+-export([tokens/1, parse/1, parse_tokens/1, to_tokens/1, escape/1,
+ escape_attr/1, to_html/1]).
+-compile([export_all]).
+-ifdef(TEST).
+-export([destack/1, destack/2, is_singleton/1]).
+-endif.
+
+%% This is a macro to placate syntax highlighters..
+-define(QUOTE, $\"). %% $\"
+-define(SQUOTE, $\'). %% $\'
+-define(ADV_COL(S, N),
+ S#decoder{column=N+S#decoder.column,
+ offset=N+S#decoder.offset}).
+-define(INC_COL(S),
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}).
+-define(INC_LINE(S),
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+-define(IS_LITERAL_SAFE(C),
+ ((C >= $A andalso C =< $Z) orelse (C >= $a andalso C =< $z)
+ orelse (C >= $0 andalso C =< $9))).
+-define(PROBABLE_CLOSE(C),
+ (C =:= $> orelse ?IS_WHITESPACE(C))).
+
+-record(decoder, {line=1,
+ column=1,
+ offset=0}).
+
+%% @type html_node() = {string(), [html_attr()], [html_node() | string()]}
+%% @type html_attr() = {string(), string()}
+%% @type html_token() = html_data() | start_tag() | end_tag() | inline_html() | html_comment() | html_doctype()
+%% @type html_data() = {data, string(), Whitespace::boolean()}
+%% @type start_tag() = {start_tag, Name, [html_attr()], Singleton::boolean()}
+%% @type end_tag() = {end_tag, Name}
+%% @type html_comment() = {comment, Comment}
+%% @type html_doctype() = {doctype, [Doctype]}
+%% @type inline_html() = {'=', iolist()}
+
+%% External API.
+
+%% @spec parse(string() | binary()) -> html_node()
+%% @doc Tokenize the input and transform the token stream into a HTML
+%% tree in one step.
+parse(Input) ->
+ Tokens = tokens(Input),
+ parse_tokens(Tokens).
+
+%% @spec parse_tokens([html_token()]) -> html_node()
+%% @doc Transform the output of tokens(Doc) into a HTML tree.
+parse_tokens(Tokens) when is_list(Tokens) ->
+ %% Skip over doctype, processing instructions
+ %% (fails with badmatch if the stream contains no start tag at all)
+ [{start_tag, Tag, Attrs, false} | Rest] = find_document(Tokens, normal),
+ {Tree, _} = tree(Rest, [norm({Tag, Attrs})]),
+ Tree.
+
+%% Drop leading tokens until the first non-singleton start tag. Seeing a
+%% bare <<"html">> doctype switches Mode to html5 so a missing <html>
+%% root element can be synthesized (see maybe_add_html_tag/2).
+find_document(Tokens=[{start_tag, _Tag, _Attrs, false} | _Rest], Mode) ->
+ maybe_add_html_tag(Tokens, Mode);
+find_document([{doctype, [<<"html">>]} | Rest], _Mode) ->
+ find_document(Rest, html5);
+find_document([_T | Rest], Mode) ->
+ find_document(Rest, Mode);
+find_document([], _Mode) ->
+ [].
+
+%% In html5 mode, prepend a synthetic <html> start tag when the first
+%% element of the document is not already <html>.
+maybe_add_html_tag(Tokens=[{start_tag, Tag, _Attrs, false} | _], html5)
+ when Tag =/= <<"html">> ->
+ [{start_tag, <<"html">>, [], false} | Tokens];
+maybe_add_html_tag(Tokens, _Mode) ->
+ Tokens.
+
+%% @spec tokens(StringOrBinary) -> [html_token()]
+%% @doc Transform the input UTF-8 HTML into a token stream.
+%% The input is flattened to a single binary before scanning.
+tokens(Input) ->
+ tokens(iolist_to_binary(Input), #decoder{}, []).
+
+%% @spec to_tokens(html_node()) -> [html_token()]
+%% @doc Convert a html_node() tree to a list of tokens.
+%% Accepts the shorthand forms {Tag} and {Tag, Children}, passes the
+%% '='/doctype/comment tokens through unchanged, and expands a full
+%% {Tag, Attrs, Children} node via the to_tokens/2 worker.
+to_tokens({Tag0}) ->
+ to_tokens({Tag0, [], []});
+to_tokens(T={'=', _}) ->
+ [T];
+to_tokens(T={doctype, _}) ->
+ [T];
+to_tokens(T={comment, _}) ->
+ [T];
+to_tokens({Tag0, Acc}) ->
+ %% This is only allowed in sub-tags: {p, [{"class", "foo"}]}
+ to_tokens({Tag0, [], Acc});
+to_tokens({Tag0, Attrs, Acc}) ->
+ Tag = to_tag(Tag0),
+ %% singleton (void) elements emit no end tag and ignore children
+ case is_singleton(Tag) of
+ true ->
+ to_tokens([], [{start_tag, Tag, Attrs, true}]);
+ false ->
+ to_tokens([{Tag, Acc}], [{start_tag, Tag, Attrs, false}])
+ end.
+
+%% @spec to_html([html_token()] | html_node()) -> iolist()
+%% @doc Convert a list of html_token() to a HTML document.
+%% A tuple argument is treated as an html_node() tree and tokenized
+%% first.
+to_html(Node) when is_tuple(Node) ->
+ to_html(to_tokens(Node));
+to_html(Tokens) when is_list(Tokens) ->
+ to_html(Tokens, []).
+
+%% @spec escape(string() | atom() | binary()) -> binary()
+%% @doc Escape a value so it is safe for HTML character data; atoms and
+%% binaries are converted to strings before escaping.
+escape(S) when is_list(S) ->
+ escape(S, []);
+escape(A) when is_atom(A) ->
+ escape(atom_to_list(A), []);
+escape(B) when is_binary(B) ->
+ escape(binary_to_list(B), []).
+
+%% @spec escape_attr(string() | binary() | atom() | integer() | float()) -> binary()
+%% @doc Escape a value so it is safe inside a double-quoted HTML
+%% attribute; non-string types are stringified first.
+escape_attr(S) when is_list(S) ->
+ escape_attr(S, []);
+escape_attr(B) when is_binary(B) ->
+ escape_attr(binary_to_list(B), []);
+escape_attr(A) when is_atom(A) ->
+ escape_attr(atom_to_list(A), []);
+escape_attr(I) when is_integer(I) ->
+ escape_attr(integer_to_list(I), []);
+escape_attr(F) when is_float(F) ->
+ escape_attr(mochinum:digits(F), []).
+
+%% Accumulator worker for to_html/1: render each token to an iolist
+%% fragment, collected reversed in Acc and flipped at the end.
+to_html([], Acc) ->
+ lists:reverse(Acc);
+to_html([{'=', Content} | Rest], Acc) ->
+ to_html(Rest, [Content | Acc]);
+to_html([{pi, Bin} | Rest], Acc) ->
+ Open = [<<"<?">>,
+ Bin,
+ <<"?>">>],
+ to_html(Rest, [Open | Acc]);
+to_html([{pi, Tag, Attrs} | Rest], Acc) ->
+ Open = [<<"<?">>,
+ Tag,
+ attrs_to_html(Attrs, []),
+ <<"?>">>],
+ to_html(Rest, [Open | Acc]);
+to_html([{comment, Comment} | Rest], Acc) ->
+ to_html(Rest, [[<<"<!--">>, Comment, <<"-->">>] | Acc]);
+to_html([{doctype, Parts} | Rest], Acc) ->
+ %% BUG FIX: previously doctype_to_html(Parts, Acc) seeded the helper
+ %% with the OUTER accumulator, so doctype_to_html's final
+ %% lists:reverse/1 spliced all previously rendered output inside the
+ %% <!DOCTYPE ...> whenever the doctype was not the first token.
+ %% Seed with a fresh accumulator instead.
+ Inside = doctype_to_html(Parts, []),
+ to_html(Rest, [[<<"<!DOCTYPE">>, Inside, <<">">>] | Acc]);
+to_html([{data, Data, _Whitespace} | Rest], Acc) ->
+ to_html(Rest, [escape(Data) | Acc]);
+to_html([{start_tag, Tag, Attrs, Singleton} | Rest], Acc) ->
+ Open = [<<"<">>,
+ Tag,
+ attrs_to_html(Attrs, []),
+ case Singleton of
+ true -> <<" />">>;
+ false -> <<">">>
+ end],
+ to_html(Rest, [Open | Acc]);
+to_html([{end_tag, Tag} | Rest], Acc) ->
+ to_html(Rest, [[<<"</">>, Tag, <<">">>] | Acc]).
+
+%% Render doctype words: a word made only of literal-safe characters is
+%% emitted bare; anything else is double-quoted and attribute-escaped.
+doctype_to_html([], Acc) ->
+ lists:reverse(Acc);
+doctype_to_html([Word | Rest], Acc) ->
+ case lists:all(fun (C) -> ?IS_LITERAL_SAFE(C) end,
+ binary_to_list(iolist_to_binary(Word))) of
+ true ->
+ doctype_to_html(Rest, [[<<" ">>, Word] | Acc]);
+ false ->
+ doctype_to_html(Rest, [[<<" \"">>, escape_attr(Word), ?QUOTE] | Acc])
+ end.
+
+%% Render attribute pairs as iolist fragments of the form
+%% ` key="escaped value"`. Equivalent to the original accumulator
+%% recursion: reverse(Acc) is prepended to the in-order rendering.
+attrs_to_html(Attrs, Acc) ->
+ Rendered = [[<<" ">>, escape(Name), <<"=\"">>,
+ escape_attr(Val), <<"\"">>] || {Name, Val} <- Attrs],
+ lists:reverse(Acc, Rendered).
+
+%% Accumulator worker for escape/1: walk the string replacing the three
+%% HTML-special characters with their entities; Acc is built reversed
+%% and flipped into a binary at the end.
+%% BUG FIX: the entity strings had been HTML-entity-decoded somewhere in
+%% this file's history ("&lt;" had become "<", etc.), which turned the
+%% escaper into an identity function. Restore the entities.
+escape([], Acc) ->
+ list_to_binary(lists:reverse(Acc));
+escape("<" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&lt;", Acc));
+escape(">" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&gt;", Acc));
+escape("&" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&amp;", Acc));
+escape([C | Rest], Acc) ->
+ escape(Rest, [C | Acc]).
+
+%% Accumulator worker for escape_attr/1: like escape/2 but additionally
+%% escapes the double quote so the result is safe inside a double-quoted
+%% attribute.
+%% BUG FIX: the entity strings had been HTML-entity-decoded ("&quot;"
+%% had collapsed to a bare quote, leaving the unparsable token """ and
+%% making the other clauses identity rewrites). Restore the entities.
+escape_attr([], Acc) ->
+ list_to_binary(lists:reverse(Acc));
+escape_attr("<" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&lt;", Acc));
+escape_attr(">" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&gt;", Acc));
+escape_attr("&" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&amp;", Acc));
+escape_attr([?QUOTE | Rest], Acc) ->
+ escape_attr(Rest, lists:reverse("&quot;", Acc));
+escape_attr([C | Rest], Acc) ->
+ escape_attr(Rest, [C | Acc]).
+
+%% Normalize a tag given as atom, string or binary to the canonical
+%% lower-case binary form via norm/1.
+to_tag(A) when is_atom(A) ->
+ norm(atom_to_list(A));
+to_tag(L) ->
+ norm(L).
+
+%% Worker for to_tokens/1: the first argument is a stack of
+%% {Tag, RemainingChildren} frames; each clause normalizes one shorthand
+%% child form before emitting tokens, so that only the fully-expanded
+%% {T, Attrs, Children} case actually produces start/end tags.
+to_tokens([], Acc) ->
+ lists:reverse(Acc);
+to_tokens([{Tag, []} | Rest], Acc) ->
+ to_tokens(Rest, [{end_tag, to_tag(Tag)} | Acc]);
+to_tokens([{Tag0, [{T0} | R1]} | Rest], Acc) ->
+ %% Allow {br}
+ to_tokens([{Tag0, [{T0, [], []} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [T0={'=', _C0} | R1]} | Rest], Acc) ->
+ %% Allow {'=', iolist()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={comment, _C0} | R1]} | Rest], Acc) ->
+ %% Allow {comment, iolist()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={pi, _S0} | R1]} | Rest], Acc) ->
+ %% Allow {pi, binary()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={pi, _S0, _A0} | R1]} | Rest], Acc) ->
+ %% Allow {pi, binary(), list()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [{T0, A0=[{_, _} | _]} | R1]} | Rest], Acc) ->
+ %% Allow {p, [{"class", "foo"}]}
+ to_tokens([{Tag0, [{T0, A0, []} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, C0} | R1]} | Rest], Acc) ->
+ %% Allow {p, "content"} and {p, <<"content">>}
+ to_tokens([{Tag0, [{T0, [], C0} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C0} | R1]} | Rest], Acc) when is_binary(C0) ->
+ %% Allow {"p", [{"class", "foo"}], <<"content">>}
+ to_tokens([{Tag0, [{T0, A1, binary_to_list(C0)} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C0=[C | _]} | R1]} | Rest], Acc)
+ when is_integer(C) ->
+ %% Allow {"p", [{"class", "foo"}], "content"}
+ to_tokens([{Tag0, [{T0, A1, [C0]} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C1} | R1]} | Rest], Acc) ->
+ %% Native {"p", [{"class", "foo"}], ["content"]}
+ Tag = to_tag(Tag0),
+ T1 = to_tag(T0),
+ case is_singleton(norm(T1)) of
+ true ->
+ to_tokens([{Tag, R1} | Rest], [{start_tag, T1, A1, true} | Acc]);
+ false ->
+ to_tokens([{T1, C1}, {Tag, R1} | Rest],
+ [{start_tag, T1, A1, false} | Acc])
+ end;
+to_tokens([{Tag0, [L | R1]} | Rest], Acc) when is_list(L) ->
+ %% List text
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, R1} | Rest], [{data, iolist_to_binary(L), false} | Acc]);
+to_tokens([{Tag0, [B | R1]} | Rest], Acc) when is_binary(B) ->
+ %% Binary text
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, R1} | Rest], [{data, B, false} | Acc]).
+
+%% Main tokenizer loop: emit tokens until the decoder offset reaches the
+%% end of the binary. script/textarea start tags switch to dedicated
+%% raw-text scanners, since their content must not be parsed as markup.
+tokens(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ lists:reverse(Acc);
+ _ ->
+ {Tag, S1} = tokenize(B, S),
+ case parse_flag(Tag) of
+ script ->
+ {Tag2, S2} = tokenize_script(B, S1),
+ tokens(B, S2, [Tag2, Tag | Acc]);
+ textarea ->
+ {Tag2, S2} = tokenize_textarea(B, S1),
+ tokens(B, S2, [Tag2, Tag | Acc]);
+ none ->
+ tokens(B, S1, [Tag | Acc])
+ end
+ end.
+
+%% Decide whether a token starts a raw-text element whose body needs a
+%% dedicated scanner: 'script', 'textarea', or 'none' for everything
+%% else (the tag name is matched case-insensitively).
+parse_flag({start_tag, Name, _Attrs, false}) ->
+ flag_for(string:to_lower(binary_to_list(Name)));
+parse_flag(_Other) ->
+ none.
+
+%% Map a lower-cased tag name to its raw-text scanner flag.
+flag_for("script") -> script;
+flag_for("textarea") -> textarea;
+flag_for(_) -> none.
+
+%% Dispatch on the next few bytes at the current offset to the
+%% appropriate sub-tokenizer (comment, doctype, CDATA, processing
+%% instruction, charref, end tag, start tag, or plain data).
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, "<!--", _/binary>> ->
+ tokenize_comment(B, ?ADV_COL(S, 4));
+ <<_:O/binary, "<!doctype", _/binary>> ->
+ %% NOTE(review): "<!doctype" is 9 chars but we advance 10, also
+ %% consuming the byte that follows (normally the space before
+ %% the doctype words) — same as the "<!DOCTYPE" branch below.
+ tokenize_doctype(B, ?ADV_COL(S, 10));
+ <<_:O/binary, "<!DOCTYPE", _/binary>> ->
+ tokenize_doctype(B, ?ADV_COL(S, 10));
+ <<_:O/binary, "<![CDATA[", _/binary>> ->
+ tokenize_cdata(B, ?ADV_COL(S, 9));
+ <<_:O/binary, "<?php", _/binary>> ->
+ %% advance only past "<?" so the raw body keeps the "php" prefix
+ {Body, S1} = raw_qgt(B, ?ADV_COL(S, 2)),
+ {{pi, Body}, S1};
+ <<_:O/binary, "<?", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
+ {Attrs, S2} = tokenize_attributes(B, S1),
+ S3 = find_qgt(B, S2),
+ {{pi, Tag, Attrs}, S3};
+ <<_:O/binary, "&", _/binary>> ->
+ tokenize_charref(B, ?INC_COL(S));
+ <<_:O/binary, "</", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
+ {S2, _} = find_gt(B, S1),
+ {{end_tag, Tag}, S2};
+ <<_:O/binary, "<", C, _/binary>>
+ when ?IS_WHITESPACE(C); not ?IS_LITERAL_SAFE(C) ->
+ %% This isn't really strict HTML
+ %% (a lone '<' followed by a non-name char is treated as data)
+ {{data, Data, _Whitespace}, S1} = tokenize_data(B, ?INC_COL(S)),
+ {{data, <<$<, Data/binary>>, false}, S1};
+ <<_:O/binary, "<", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?INC_COL(S)),
+ {Attrs, S2} = tokenize_attributes(B, S1),
+ {S3, HasSlash} = find_gt(B, S2),
+ Singleton = HasSlash orelse is_singleton(Tag),
+ {{start_tag, Tag, Attrs, Singleton}, S3};
+ _ ->
+ tokenize_data(B, S)
+ end.
+
+%% Coalesce a leading run of data tokens into one binary. Returns
+%% {Binary, AllWhitespace, RemainingTokens}, where AllWhitespace is true
+%% only if every merged chunk was flagged as whitespace.
+tree_data([{data, Bin, IsWs} | Rest], OnlyWs, Chunks) ->
+ tree_data(Rest, OnlyWs andalso IsWs, [Bin | Chunks]);
+tree_data(Tokens, OnlyWs, Chunks) ->
+ {iolist_to_binary(lists:reverse(Chunks)), OnlyWs, Tokens}.
+
+%% Fold the token stream into a tree using a stack of open elements.
+%% Returns {Tree, RemainingTokens}; an end tag that closes the root
+%% short-circuits with the finished tree. All-whitespace data runs are
+%% dropped; doctypes inside the body are ignored.
+tree([], Stack) ->
+ {destack(Stack), []};
+tree([{end_tag, Tag} | Rest], Stack) ->
+ case destack(norm(Tag), Stack) of
+ S when is_list(S) ->
+ tree(Rest, S);
+ Result ->
+ {Result, []}
+ end;
+tree([{start_tag, Tag, Attrs, true} | Rest], S) ->
+ tree(Rest, append_stack_child(norm({Tag, Attrs}), S));
+tree([{start_tag, Tag, Attrs, false} | Rest], S) ->
+ tree(Rest, stack(norm({Tag, Attrs}), S));
+tree([T={pi, _Raw} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree([T={pi, _Tag, _Attrs} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree([T={comment, _Comment} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree(L=[{data, _Data, _Whitespace} | _], S) ->
+ case tree_data(L, true, []) of
+ {_, true, Rest} ->
+ tree(Rest, S);
+ {Data, false, Rest} ->
+ tree(Rest, append_stack_child(Data, S))
+ end;
+tree([{doctype, _} | Rest], Stack) ->
+ tree(Rest, Stack).
+
+%% Normalize a tag (or a {Tag, Attrs} pair) to the canonical internal
+%% form: binaries pass through, strings are lower-cased into binaries;
+%% for a pair, attribute values become binaries and an empty child list
+%% is attached.
+norm(Tag) when is_binary(Tag) ->
+ Tag;
+norm({Tag, Attrs}) ->
+ {norm(Tag), [{norm(Name), iolist_to_binary(Val)} || {Name, Val} <- Attrs], []};
+norm(Tag) ->
+ list_to_binary(string:to_lower(Tag)).
+
+%% Push a newly opened element on the stack. A new <li>/<option> beneath
+%% an open sibling of the same name — or a new <dd>/<dt> beneath an open
+%% <dd>/<dt> — implicitly closes the previous one first (HTML's optional
+%% end tags).
+stack(T1={TN, _, _}, Stack=[{TN, _, _} | _Rest])
+ when TN =:= <<"li">> orelse TN =:= <<"option">> ->
+ [T1 | destack(TN, Stack)];
+stack(T1={TN0, _, _}, Stack=[{TN1, _, _} | _Rest])
+ when (TN0 =:= <<"dd">> orelse TN0 =:= <<"dt">>) andalso
+ (TN1 =:= <<"dd">> orelse TN1 =:= <<"dt">>) ->
+ [T1 | destack(TN1, Stack)];
+stack(T1, Stack) ->
+ [T1 | Stack].
+
+%% Attach a completed child node to the element on top of the open-
+%% element stack (children accumulate reversed; destack/1 flips them).
+append_stack_child(StartTag, [{Name, Attrs, Acc} | Stack]) ->
+ [{Name, Attrs, [StartTag | Acc]} | Stack].
+
+%% Close the open element named TagName: unfurl the stack down to the
+%% matching frame, folding the intervening (implicitly closed) elements
+%% into it. Returns the new stack, or a finished tree when the match is
+%% the root frame.
+destack(<<"br">>, Stack) ->
+ %% This is an ugly hack to make dumb_br_test() pass,
+ %% this makes it such that br can never have children.
+ Stack;
+destack(TagName, Stack) when is_list(Stack) ->
+ %% F is true for frames that are NOT the tag being closed
+ F = fun (X) ->
+ case X of
+ {TagName, _, _} ->
+ false;
+ _ ->
+ true
+ end
+ end,
+ case lists:splitwith(F, Stack) of
+ {_, []} ->
+ %% If we're parsing something like XML we might find
+ %% a <link>tag</link> that is normally a singleton
+ %% in HTML but isn't here
+ case {is_singleton(TagName), Stack} of
+ {true, [{T0, A0, Acc0} | Post0]} ->
+ case lists:splitwith(F, Acc0) of
+ {_, []} ->
+ %% Actually was a singleton
+ Stack;
+ {Pre, [{T1, A1, Acc1} | Post1]} ->
+ %% reparent the children that followed the
+ %% would-be singleton into it
+ [{T0, A0, [{T1, A1, Acc1 ++ lists:reverse(Pre)} | Post1]}
+ | Post0]
+ end;
+ _ ->
+ %% No match, no state change
+ Stack
+ end;
+ {_Pre, [_T]} ->
+ %% Unfurl the whole stack, we're done
+ destack(Stack);
+ {Pre, [T, {T0, A0, Acc0} | Post]} ->
+ %% Unfurl up to the tag, then accumulate it
+ [{T0, A0, [destack(Pre ++ [T]) | Acc0]} | Post]
+ end.
+
+%% Fold the whole open-element stack into a single tree node, reversing
+%% each child accumulator back into document order on the way down.
+destack([{Tag, Attrs, Acc}]) ->
+ {Tag, Attrs, lists:reverse(Acc)};
+destack([{T1, A1, Acc1}, {T0, A0, Acc0} | Rest]) ->
+ destack([{T0, A0, [{T1, A1, lists:reverse(Acc1)} | Acc0]} | Rest]).
+
+%% True for the HTML void (singleton) elements, which never take
+%% children and need no end tag.
+is_singleton(Tag) ->
+ lists:member(Tag, [<<"area">>, <<"base">>, <<"br">>, <<"col">>,
+ <<"hr">>, <<"img">>, <<"input">>, <<"link">>,
+ <<"meta">>, <<"param">>]).
+
+%% Consume a run of character data up to (excluding) the next '<' or
+%% '&', tracking whether the whole run was whitespace.
+tokenize_data(B, S=#decoder{offset=O}) ->
+ tokenize_data(B, S, O, true).
+
+tokenize_data(B, S=#decoder{offset=O}, Start, Whitespace) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when (C =/= $< andalso C =/= $&) ->
+ tokenize_data(B, ?INC_CHAR(S, C), Start,
+ (Whitespace andalso ?IS_WHITESPACE(C)));
+ _ ->
+ Len = O - Start,
+ <<_:Start/binary, Data:Len/binary, _/binary>> = B,
+ {{data, Data, Whitespace}, S}
+ end.
+
+%% Collect {Name, Value} attribute pairs until '>', '/', "?>", or end of
+%% input; intervening whitespace is skipped.
+tokenize_attributes(B, S) ->
+ tokenize_attributes(B, S, []).
+
+tokenize_attributes(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, C, _/binary>> when (C =:= $> orelse C =:= $/) ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, "?>", _/binary>> ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize_attributes(B, ?INC_CHAR(S, C), Acc);
+ _ ->
+ {Attr, S1} = tokenize_literal(B, S),
+ {Value, S2} = tokenize_attr_value(Attr, B, S1),
+ tokenize_attributes(B, S2, [{Attr, Value} | Acc])
+ end.
+
+%% After an attribute name: an '=' (with optional surrounding
+%% whitespace) introduces a value; otherwise the attribute is boolean-
+%% style and its own name doubles as the value.
+tokenize_attr_value(Attr, B, S) ->
+ S1 = skip_whitespace(B, S),
+ O = S1#decoder.offset,
+ case B of
+ <<_:O/binary, "=", _/binary>> ->
+ S2 = skip_whitespace(B, ?INC_COL(S1)),
+ tokenize_quoted_or_unquoted_attr_value(B, S2);
+ _ ->
+ {Attr, S1}
+ end.
+
+%% Dispatch on the first character of an attribute value: a single or
+%% double quote starts a quoted value, anything else an unquoted one;
+%% end of input yields the empty value.
+tokenize_quoted_or_unquoted_attr_value(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary>> ->
+ { [], S };
+ <<_:O/binary, Q, _/binary>> when Q =:= ?QUOTE orelse
+ Q =:= ?SQUOTE ->
+ tokenize_quoted_attr_value(B, ?INC_COL(S), [], Q);
+ <<_:O/binary, _/binary>> ->
+ tokenize_unquoted_attr_value(B, S, [])
+ end.
+
+%% Read a quoted attribute value up to the matching quote Q, expanding
+%% character references on the way; at end of input, return what was
+%% collected so far.
+tokenize_quoted_attr_value(B, S=#decoder{offset=O}, Acc, Q) ->
+ case B of
+ <<_:O/binary>> ->
+ { iolist_to_binary(lists:reverse(Acc)), S };
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(B, ?INC_COL(S)),
+ tokenize_quoted_attr_value(B, S1, [Data|Acc], Q);
+ <<_:O/binary, Q, _/binary>> ->
+ { iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S) };
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_quoted_attr_value(B, ?INC_COL(S), [C|Acc], Q)
+ end.
+
+%% Read an unquoted attribute value: stops at "/>", '>' or whitespace
+%% (?PROBABLE_CLOSE) without consuming the terminator; character
+%% references are expanded.
+tokenize_unquoted_attr_value(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ { iolist_to_binary(lists:reverse(Acc)), S };
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(B, ?INC_COL(S)),
+ tokenize_unquoted_attr_value(B, S1, [Data|Acc]);
+ <<_:O/binary, $/, $>, _/binary>> ->
+ { iolist_to_binary(lists:reverse(Acc)), S };
+ <<_:O/binary, C, _/binary>> when ?PROBABLE_CLOSE(C) ->
+ { iolist_to_binary(lists:reverse(Acc)), S };
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_unquoted_attr_value(B, ?INC_COL(S), [C|Acc])
+ end.
+
+%% Advance the decoder past any run of whitespace, keeping line/column
+%% bookkeeping via ?INC_CHAR.
+skip_whitespace(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ skip_whitespace(B, ?INC_CHAR(S, C));
+ _ ->
+ S
+ end.
+
+%% Read a tag or attribute name: lower-cased, ends at whitespace, '>',
+%% '/' or '='; character references inside the name are expanded.
+tokenize_literal(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, C, _/binary>> when C =:= $>
+ orelse C =:= $/
+ orelse C =:= $= ->
+ %% Handle case where tokenize_literal would consume
+ %% 0 chars. http://github.com/mochi/mochiweb/pull/13
+ {[C], ?INC_COL(S)};
+ _ ->
+ tokenize_literal(Bin, S, [])
+ end.
+
+tokenize_literal(Bin, S=#decoder{offset=O}, Acc) ->
+ case Bin of
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
+ tokenize_literal(Bin, S1, [Data | Acc]);
+ <<_:O/binary, C, _/binary>> when not (?IS_WHITESPACE(C)
+ orelse C =:= $>
+ orelse C =:= $/
+ orelse C =:= $=) ->
+ tokenize_literal(Bin, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {iolist_to_binary(string:to_lower(lists:reverse(Acc))), S}
+ end.
+
+%% Capture raw text up to (excluding) the next "?>" — used for <?php
+%% blocks. At end of input without a terminator, the remainder of the
+%% binary is returned.
+raw_qgt(Bin, S=#decoder{offset=O}) ->
+ raw_qgt(Bin, S, O).
+
+raw_qgt(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "?>", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {Raw, ?ADV_COL(S, 2)};
+ <<_:O/binary, C, _/binary>> ->
+ raw_qgt(Bin, ?INC_CHAR(S, C), Start);
+ <<_:O/binary>> ->
+ <<_:Start/binary, Raw/binary>> = Bin,
+ {Raw, S}
+ end.
+
+%% Skip a processing-instruction terminator: "?>", ">" or "/>" (or end
+%% of input). Any other character here would raise case_clause, but
+%% tokenize_attributes has already consumed up to one of these — see the
+%% commented-out clause below.
+find_qgt(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, "?>", _/binary>> ->
+ ?ADV_COL(S, 2);
+ <<_:O/binary, ">", _/binary>> ->
+ ?ADV_COL(S, 1);
+ <<_:O/binary, "/>", _/binary>> ->
+ ?ADV_COL(S, 2);
+ %% tokenize_attributes takes care of this state:
+ %% <<_:O/binary, C, _/binary>> ->
+ %% find_qgt(Bin, ?INC_CHAR(S, C));
+ <<_:O/binary>> ->
+ S
+ end.
+
+%% Scan forward to just past the next '>', reporting whether a '/' was
+%% seen on the way (marks a self-closing tag).
+find_gt(Bin, S) ->
+ find_gt(Bin, S, false).
+
+find_gt(Bin, S=#decoder{offset=O}, HasSlash) ->
+ case Bin of
+ <<_:O/binary, $/, _/binary>> ->
+ find_gt(Bin, ?INC_COL(S), true);
+ <<_:O/binary, $>, _/binary>> ->
+ {?INC_COL(S), HasSlash};
+ <<_:O/binary, C, _/binary>> ->
+ find_gt(Bin, ?INC_CHAR(S, C), HasSlash);
+ _ ->
+ {S, HasSlash}
+ end.
+
+%% Parse a character reference after '&': on any malformed or unknown
+%% reference the throw is caught and a literal "&" data token is emitted
+%% without consuming further input.
+tokenize_charref(Bin, S=#decoder{offset=O}) ->
+ try
+ tokenize_charref(Bin, S, O)
+ catch
+ throw:invalid_charref ->
+ {{data, <<"&">>, false}, S}
+ end.
+
+tokenize_charref(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary>> ->
+ throw(invalid_charref);
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C)
+ orelse C =:= ?SQUOTE
+ orelse C =:= ?QUOTE
+ orelse C =:= $/
+ orelse C =:= $> ->
+ throw(invalid_charref);
+ <<_:O/binary, $;, _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ Data = case mochiweb_charref:charref(Raw) of
+ undefined ->
+ throw(invalid_charref);
+ Unichar when is_integer(Unichar) ->
+ mochiutf8:codepoint_to_bytes(Unichar);
+ Unichars when is_list(Unichars) ->
+ unicode:characters_to_binary(Unichars)
+ end,
+ {{data, Data, false}, ?INC_COL(S)};
+ _ ->
+ tokenize_charref(Bin, ?INC_COL(S), Start)
+ end.
+
+%% Collect the whitespace-separated words (bare or quoted) of a doctype
+%% declaration up to '>' or end of input.
+tokenize_doctype(Bin, S) ->
+ tokenize_doctype(Bin, S, []).
+
+tokenize_doctype(Bin, S=#decoder{offset=O}, Acc) ->
+ case Bin of
+ <<_:O/binary>> ->
+ {{doctype, lists:reverse(Acc)}, S};
+ <<_:O/binary, $>, _/binary>> ->
+ {{doctype, lists:reverse(Acc)}, ?INC_COL(S)};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize_doctype(Bin, ?INC_CHAR(S, C), Acc);
+ _ ->
+ {Word, S1} = tokenize_word_or_literal(Bin, S),
+ tokenize_doctype(Bin, S1, [Word | Acc])
+ end.
+
+%% Read one doctype word: a quote starts a quoted word, any other non-
+%% whitespace character a bare literal. Whitespace here has no matching
+%% clause and would crash — callers skip it first.
+tokenize_word_or_literal(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, C, _/binary>> when C =:= ?QUOTE orelse C =:= ?SQUOTE ->
+ tokenize_word(Bin, ?INC_COL(S), C);
+ <<_:O/binary, C, _/binary>> when not ?IS_WHITESPACE(C) ->
+ %% Sanity check for whitespace
+ tokenize_literal(Bin, S)
+ end.
+
+%% Read a quoted word up to the matching Quote character, expanding
+%% character references; at end of input, return what was collected.
+tokenize_word(Bin, S, Quote) ->
+ tokenize_word(Bin, S, Quote, []).
+
+tokenize_word(Bin, S=#decoder{offset=O}, Quote, Acc) ->
+ case Bin of
+ <<_:O/binary>> ->
+ {iolist_to_binary(lists:reverse(Acc)), S};
+ <<_:O/binary, Quote, _/binary>> ->
+ {iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S)};
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
+ tokenize_word(Bin, S1, Quote, [Data | Acc]);
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_word(Bin, ?INC_CHAR(S, C), Quote, [C | Acc])
+ end.
+
+%% Capture CDATA content verbatim up to "]]>"; at end of input without a
+%% terminator, the remainder of the binary becomes the data.
+tokenize_cdata(Bin, S=#decoder{offset=O}) ->
+ tokenize_cdata(Bin, S, O).
+
+tokenize_cdata(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "]]>", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, ?ADV_COL(S, 3)};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_cdata(Bin, ?INC_CHAR(S, C), Start);
+ _ ->
+ <<_:O/binary, Raw/binary>> = Bin,
+ {{data, Raw, false}, S}
+ end.
+
+%% Capture comment content verbatim up to "-->". The last clause is only
+%% reached at end of input (the middle clause matches any remaining
+%% character) and takes the rest of the binary as the comment.
+tokenize_comment(Bin, S=#decoder{offset=O}) ->
+ tokenize_comment(Bin, S, O).
+
+tokenize_comment(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "-->", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{comment, Raw}, ?ADV_COL(S, 3)};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_comment(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{comment, Raw}, S}
+ end.
+
+%% Capture the raw body of a <script> element: everything up to (but not
+%% including) a case-insensitive "</script" followed by a probable-close
+%% character; the end tag itself is tokenized separately by the caller.
+tokenize_script(Bin, S=#decoder{offset=O}) ->
+ tokenize_script(Bin, S, O).
+
+tokenize_script(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ %% Just a look-ahead, we want the end_tag separately
+ <<_:O/binary, $<, $/, SS, CC, RR, II, PP, TT, ZZ, _/binary>>
+ when (SS =:= $s orelse SS =:= $S) andalso
+ (CC =:= $c orelse CC =:= $C) andalso
+ (RR =:= $r orelse RR =:= $R) andalso
+ (II =:= $i orelse II =:= $I) andalso
+ (PP =:= $p orelse PP =:= $P) andalso
+ (TT=:= $t orelse TT =:= $T) andalso
+ ?PROBABLE_CLOSE(ZZ) ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_script(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{data, Raw, false}, S}
+ end.
+
+%% Capture the raw body of a <textarea> element: everything up to (but
+%% not including) a case-insensitive "</textarea" followed by a
+%% probable-close character; the end tag is tokenized separately.
+tokenize_textarea(Bin, S=#decoder{offset=O}) ->
+ tokenize_textarea(Bin, S, O).
+
+tokenize_textarea(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ %% Just a look-ahead, we want the end_tag separately
+ <<_:O/binary, $<, $/, TT, EE, XX, TT2, AA, RR, EE2, AA2, ZZ, _/binary>>
+ when (TT =:= $t orelse TT =:= $T) andalso
+ (EE =:= $e orelse EE =:= $E) andalso
+ (XX =:= $x orelse XX =:= $X) andalso
+ (TT2 =:= $t orelse TT2 =:= $T) andalso
+ (AA =:= $a orelse AA =:= $A) andalso
+ (RR =:= $r orelse RR =:= $R) andalso
+ (EE2 =:= $e orelse EE2 =:= $E) andalso
+ (AA2 =:= $a orelse AA2 =:= $A) andalso
+ ?PROBABLE_CLOSE(ZZ) ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_textarea(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{data, Raw, false}, S}
+ end.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc HTTP server.
+
+-module(mochiweb_http).
+-author('bob@mochimedia.com').
+-export([start/1, start_link/1, stop/0, stop/1]).
+-export([loop/2]).
+-export([after_response/2, reentry/1]).
+-export([parse_range_request/1, range_skip_length/2]).
+
+-define(REQUEST_RECV_TIMEOUT, 300000). %% timeout waiting for request line
+-define(HEADERS_RECV_TIMEOUT, 30000). %% timeout waiting for headers
+
+-define(MAX_HEADERS, 1000).
+-define(DEFAULTS, [{name, ?MODULE},
+ {port, 8888}]).
+
+%% Normalize user-supplied server options: wrap the required loop fun in
+%% an {M, F, A} so upgrades take effect, then fill in ?DEFAULTS
+%% (name/port) for anything the caller left out.
+parse_options(Options) ->
+ {loop, HttpLoop} = proplists:lookup(loop, Options),
+ Loop = {?MODULE, loop, [HttpLoop]},
+ Options1 = [{loop, Loop} | proplists:delete(loop, Options)],
+ mochilists:set_defaults(?DEFAULTS, Options1).
+
+%% Stop the default server (registered under ?MODULE) or a named one.
+stop() ->
+ mochiweb_socket_server:stop(?MODULE).
+
+stop(Name) ->
+ mochiweb_socket_server:stop(Name).
+
+%% @spec start(Options) -> ServerRet
+%% Options = [option()]
+%% Option = {name, atom()} | {ip, string() | tuple()} | {backlog, integer()}
+%% | {nodelay, boolean()} | {acceptor_pool_size, integer()}
+%% | {ssl, boolean()} | {profile_fun, undefined | (Props) -> ok}
+%% | {link, false}
+%% @doc Start a mochiweb server.
+%% profile_fun is used to profile accept timing.
+%% After each accept, if defined, profile_fun is called with a proplist of a subset of the mochiweb_socket_server state and timing information.
+%% The proplist is as follows: [{name, Name}, {port, Port}, {active_sockets, ActiveSockets}, {timing, Timing}].
+%% @end
+start(Options) ->
+ mochiweb_socket_server:start(parse_options(Options)).
+
+%% Same as start/1, but the server is linked to the calling process.
+start_link(Options) ->
+ mochiweb_socket_server:start_link(parse_options(Options)).
+
+%% @doc Per-connection entry point, invoked by mochiweb_socket_server via
+%% the {?MODULE, loop, [HttpLoop]} MFA built in parse_options/1. Switches
+%% the socket into HTTP request-line packet mode and reads the request.
+loop(Socket, Body) ->
+ ok = mochiweb_socket:setopts(Socket, [{packet, http}]),
+ request(Socket, Body).
+
+%% ?R15B_GEN_TCP_FIX expands to an extra receive clause (or to nothing).
+%% It is spliced into the receive blocks of request/2 and headers/5, where
+%% the variable `Socket' is bound at the expansion site.
+-ifdef(gen_tcp_r15b_workaround).
+-define(R15B_GEN_TCP_FIX, {tcp_error,_,emsgsize} ->
+ % R15B02 returns this then closes the socket, so close and exit
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ ).
+-else.
+-define(R15B_GEN_TCP_FIX,).
+-endif.
+
+%% @doc Wait (up to ?REQUEST_RECV_TIMEOUT) for the decoded HTTP request
+%% line, then switch to header packet mode. The Protocol guard accepts
+%% both plain (http) and TLS (ssl) sockets.
+request(Socket, Body) ->
+ ok = mochiweb_socket:setopts(Socket, [{active, once}]),
+ receive
+ {Protocol, _, {http_request, Method, Path, Version}} when Protocol == http orelse Protocol == ssl ->
+ ok = mochiweb_socket:setopts(Socket, [{packet, httph}]),
+ headers(Socket, {Method, Path, Version}, [], Body, 0);
+ %% Tolerate stray blank lines before the request line.
+ {Protocol, _, {http_error, "\r\n"}} when Protocol == http orelse Protocol == ssl ->
+ request(Socket, Body);
+ {Protocol, _, {http_error, "\n"}} when Protocol == http orelse Protocol == ssl ->
+ request(Socket, Body);
+ {tcp_closed, _} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ {ssl_closed, _} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ %% Expands to an emsgsize clause on R15B builds, otherwise nothing.
+ ?R15B_GEN_TCP_FIX
+ _Other ->
+ handle_invalid_request(Socket)
+ after ?REQUEST_RECV_TIMEOUT ->
+ mochiweb_socket:close(Socket),
+ exit(normal)
+ end.
+
+%% @doc Return a fun closing over Body that re-enters the pipeline via
+%% ?MODULE:after_response/2 (fully qualified, so upgraded code is used).
+reentry(Body) ->
+ fun (Req) ->
+ ?MODULE:after_response(Body, Req)
+ end.
+
+%% @doc Collect request headers one packet at a time (up to ?MAX_HEADERS),
+%% accumulating them in reverse order; new_request/3 re-reverses them.
+headers(Socket, Request, Headers, _Body, ?MAX_HEADERS) ->
+ %% Too many headers sent, bad request.
+ ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ handle_invalid_request(Socket, Request, Headers);
+headers(Socket, Request, Headers, Body, HeaderCount) ->
+ ok = mochiweb_socket:setopts(Socket, [{active, once}]),
+ receive
+ %% End of headers: build the request, run the handler, then decide
+ %% whether to keep the connection alive in after_response/2.
+ {Protocol, _, http_eoh} when Protocol == http orelse Protocol == ssl ->
+ Req = new_request(Socket, Request, Headers),
+ call_body(Body, Req),
+ ?MODULE:after_response(Body, Req);
+ {Protocol, _, {http_header, _, Name, _, Value}} when Protocol == http orelse Protocol == ssl ->
+ headers(Socket, Request, [{Name, Value} | Headers], Body,
+ 1 + HeaderCount);
+ {tcp_closed, _} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ %% Expands to an emsgsize clause on R15B builds, otherwise nothing.
+ ?R15B_GEN_TCP_FIX
+ _Other ->
+ handle_invalid_request(Socket, Request, Headers)
+ after ?HEADERS_RECV_TIMEOUT ->
+ mochiweb_socket:close(Socket),
+ exit(normal)
+ end.
+
+%% @doc Invoke the request handler `Body', which may be an {M, F} pair,
+%% an {M, F, ExtraArgs} triple, or a fun/1; Req is always the first
+%% argument passed to the handler.
+call_body({Mod, Fun}, Req) ->
+    Mod:Fun(Req);
+call_body({Mod, Fun, ExtraArgs}, Req) ->
+    apply(Mod, Fun, [Req | ExtraArgs]);
+call_body(BodyFun, Req) ->
+    BodyFun(Req).
+
+%% -spec handle_invalid_request(term()) -> no_return().
+%% Reject a request whose request line never parsed: fabricate a minimal
+%% HTTP/0.9 GET so a 400 can be written, then exit the connection process.
+handle_invalid_request(Socket) ->
+ handle_invalid_request(Socket, {'GET', {abs_path, "/"}, {0,9}}, []),
+ exit(normal).
+
+%% -spec handle_invalid_request(term(), term(), term()) -> no_return().
+%% Respond 400 Bad Request with an empty body, close, and exit.
+handle_invalid_request(Socket, Request, RevHeaders) ->
+ Req = new_request(Socket, Request, RevHeaders),
+ Req:respond({400, [], []}),
+ mochiweb_socket:close(Socket),
+ exit(normal).
+
+%% Switch the socket back to raw packet mode and build the request object.
+%% Headers were accumulated in reverse order, so re-reverse them here.
+new_request(Socket, Request, RevHeaders) ->
+ ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ mochiweb:new_request({Socket, Request, lists:reverse(RevHeaders)}).
+
+%% @doc Finish a request/response cycle: close the socket if the request
+%% demands it, otherwise clean up request state, garbage-collect the
+%% process heap, and loop for the next keep-alive request. ?MODULE:loop/2
+%% is fully qualified so a code upgrade takes effect between requests.
+after_response(Body, Req) ->
+ Socket = Req:get(socket),
+ case Req:should_close() of
+ true ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ false ->
+ Req:cleanup(),
+ erlang:garbage_collect(),
+ ?MODULE:loop(Socket, Body)
+ end.
+
+%% @doc Parse an HTTP Range header value ("bytes=..."). Returns undefined
+%% for the trivial whole-body range "bytes=0-", a list of
+%% {Start, End} | {Start, none} | {none, SuffixLen} specs on success,
+%% and the atom fail on any malformed input.
+parse_range_request("bytes=0-") ->
+    undefined;
+parse_range_request(RawRange) when is_list(RawRange) ->
+    try
+        "bytes=" ++ Spec = RawRange,
+        ParseOne =
+            fun ("-" ++ Suffix) ->
+                    %% "-N": last N bytes of the entity.
+                    {none, list_to_integer(Suffix)};
+                (Piece) ->
+                    case string:tokens(Piece, "-") of
+                        [Lo, Hi] ->
+                            {list_to_integer(Lo), list_to_integer(Hi)};
+                        [Lo] ->
+                            %% "N-": from byte N to the end.
+                            {list_to_integer(Lo), none}
+                    end
+            end,
+        [ParseOne(Piece) || Piece <- string:tokens(Spec, ",")]
+    catch
+        %% Any parse failure (bad prefix, non-numeric bounds, ...) => fail.
+        _:_ ->
+            fail
+    end.
+
+%% @doc Convert one parsed range spec into {Skip, Length} relative to a
+%% body of Size bytes, or the atom invalid_range. Clause order mirrors the
+%% original case analysis exactly: an out-of-range suffix ("-N" with N
+%% negative or larger than the body) degrades to the whole body, while an
+%% out-of-range start position is invalid.
+range_skip_length({none, Suffix}, Size) when Suffix =< Size, Suffix >= 0 ->
+    {Size - Suffix, Suffix};
+range_skip_length({none, _OutOfRange}, Size) ->
+    {0, Size};
+range_skip_length({Start, none}, Size) when Start >= 0, Start < Size ->
+    {Start, Size - Start};
+range_skip_length({_OutOfRange, none}, _Size) ->
+    invalid_range;
+range_skip_length({Start, End}, Size) when 0 =< Start, Start =< End, End < Size ->
+    {Start, End - Start + 1};
+range_skip_length({Start, End}, Size) when 0 =< Start, Start =< End, End >= Size ->
+    %% End runs past the body: truncate to the available bytes.
+    {Start, Size - Start};
+range_skip_length({_OutOfRange, _End}, _Size) ->
+    invalid_range.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Unit tests for parse_range_request/1: single, trivial, malformed,
+%% multiple, and empty range headers.
+range_test() ->
+ %% valid, single ranges
+ ?assertEqual([{20, 30}], parse_range_request("bytes=20-30")),
+ ?assertEqual([{20, none}], parse_range_request("bytes=20-")),
+ ?assertEqual([{none, 20}], parse_range_request("bytes=-20")),
+
+ %% trivial single range
+ ?assertEqual(undefined, parse_range_request("bytes=0-")),
+
+ %% invalid, single ranges
+ ?assertEqual(fail, parse_range_request("")),
+ ?assertEqual(fail, parse_range_request("garbage")),
+ ?assertEqual(fail, parse_range_request("bytes=-20-30")),
+
+ %% valid, multiple range
+ ?assertEqual(
+ [{20, 30}, {50, 100}, {110, 200}],
+ parse_range_request("bytes=20-30,50-100,110-200")),
+ ?assertEqual(
+ [{20, none}, {50, 100}, {none, 200}],
+ parse_range_request("bytes=20-,50-100,-200")),
+
+ %% no ranges
+ ?assertEqual([], parse_range_request("bytes=")),
+ ok.
+
+%% Unit tests for range_skip_length/2 against a fixed 60-byte body,
+%% covering valid ranges, edge positions, out-of-range suffixes (whole
+%% body fallback) and invalid specs.
+range_skip_length_test() ->
+ Body = <<"012345678901234567890123456789012345678901234567890123456789">>,
+ BodySize = byte_size(Body), %% 60
+ BodySize = 60,
+
+ %% these values assume BodySize =:= 60
+ ?assertEqual({1,9}, range_skip_length({1,9}, BodySize)), %% 1-9
+ ?assertEqual({10,10}, range_skip_length({10,19}, BodySize)), %% 10-19
+ ?assertEqual({40, 20}, range_skip_length({none, 20}, BodySize)), %% -20
+ ?assertEqual({30, 30}, range_skip_length({30, none}, BodySize)), %% 30-
+
+ %% valid edge cases for range_skip_length
+ ?assertEqual({BodySize, 0}, range_skip_length({none, 0}, BodySize)),
+ ?assertEqual({0, BodySize}, range_skip_length({none, BodySize}, BodySize)),
+ ?assertEqual({0, BodySize}, range_skip_length({0, none}, BodySize)),
+ BodySizeLess1 = BodySize - 1,
+ ?assertEqual({BodySizeLess1, 1},
+ range_skip_length({BodySize - 1, none}, BodySize)),
+ ?assertEqual({BodySizeLess1, 1},
+ range_skip_length({BodySize - 1, BodySize+5}, BodySize)),
+ ?assertEqual({BodySizeLess1, 1},
+ range_skip_length({BodySize - 1, BodySize}, BodySize)),
+
+ %% out of range, return whole thing
+ ?assertEqual({0, BodySize},
+ range_skip_length({none, BodySize + 1}, BodySize)),
+ ?assertEqual({0, BodySize},
+ range_skip_length({none, -1}, BodySize)),
+ ?assertEqual({0, BodySize},
+ range_skip_length({0, BodySize + 1}, BodySize)),
+
+ %% invalid ranges
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, 30}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, BodySize + 1}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({BodySize, 40}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, none}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({BodySize, none}, BodySize)),
+ ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for dealing with IO devices (open files).
+
+-module(mochiweb_io).
+-author('bob@mochimedia.com').
+
+-export([iodevice_stream/3, iodevice_stream/2]).
+-export([iodevice_foldl/4, iodevice_foldl/3]).
+-export([iodevice_size/1]).
+-define(READ_SIZE, 8192).
+
+%% @doc Fold F over IoDevice using the default ?READ_SIZE buffer.
+iodevice_foldl(F, Acc, IoDevice) ->
+ iodevice_foldl(F, Acc, IoDevice, ?READ_SIZE).
+
+%% @doc Fold Fun over IoDevice, reading at most BufferSize bytes per call
+%% to file:read/2, until eof; returns the final accumulator. Any
+%% {error, _} result from file:read/2 is a case_clause crash, as before.
+iodevice_foldl(Fun, Acc0, IoDevice, BufferSize) ->
+    case file:read(IoDevice, BufferSize) of
+        {ok, Chunk} ->
+            iodevice_foldl(Fun, Fun(Chunk, Acc0), IoDevice, BufferSize);
+        eof ->
+            Acc0
+    end.
+
+%% @doc Stream IoDevice through Callback using the default ?READ_SIZE.
+iodevice_stream(Callback, IoDevice) ->
+ iodevice_stream(Callback, IoDevice, ?READ_SIZE).
+
+%% @doc Call Callback(Chunk) for each chunk read from IoDevice. The fold
+%% accumulator is pinned to ok, so Callback must return ok for the next
+%% chunk to be accepted (a function_clause otherwise, as before).
+iodevice_stream(Callback, IoDevice, BufferSize) ->
+    Step = fun (Chunk, ok) -> Callback(Chunk) end,
+    ok = iodevice_foldl(Step, ok, IoDevice, BufferSize).
+
+%% @doc Return the size in bytes of an open IO device by seeking to eof.
+%% Note: the position is then reset to bof (the start of the file), not
+%% to wherever it was before the call.
+iodevice_size(IoDevice) ->
+ {ok, Size} = file:position(IoDevice, eof),
+ {ok, 0} = file:position(IoDevice, bof),
+ Size.
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Gives a good MIME type guess based on file extension.
+
+-module(mochiweb_mime).
+-author('bob@mochimedia.com').
+-export([from_extension/1]).
+
+%% @spec from_extension(S::string()) -> string() | undefined
+%% @doc Given a filename extension (e.g. ".html") return a guess for the MIME
+%% type such as "text/html". Will return the atom undefined if no good
+%% guess is available.
+
+%% Fix: ".webm" previously mapped to "video/web" and ".webp" to
+%% "image/web"; neither is a registered media type. The IANA-registered
+%% types are video/webm and image/webp. All other mappings are unchanged.
+from_extension(".stl") ->
+    "application/SLA";
+from_extension(".stp") ->
+    "application/STEP";
+from_extension(".step") ->
+    "application/STEP";
+from_extension(".dwg") ->
+    "application/acad";
+from_extension(".ez") ->
+    "application/andrew-inset";
+from_extension(".ccad") ->
+    "application/clariscad";
+from_extension(".drw") ->
+    "application/drafting";
+from_extension(".tsp") ->
+    "application/dsptype";
+from_extension(".dxf") ->
+    "application/dxf";
+from_extension(".xls") ->
+    "application/excel";
+from_extension(".unv") ->
+    "application/i-deas";
+from_extension(".jar") ->
+    "application/java-archive";
+from_extension(".hqx") ->
+    "application/mac-binhex40";
+from_extension(".cpt") ->
+    "application/mac-compactpro";
+from_extension(".pot") ->
+    "application/vnd.ms-powerpoint";
+from_extension(".ppt") ->
+    "application/vnd.ms-powerpoint";
+from_extension(".dms") ->
+    "application/octet-stream";
+from_extension(".lha") ->
+    "application/octet-stream";
+from_extension(".lzh") ->
+    "application/octet-stream";
+from_extension(".oda") ->
+    "application/oda";
+from_extension(".ogg") ->
+    "application/ogg";
+from_extension(".ogm") ->
+    "application/ogg";
+from_extension(".pdf") ->
+    "application/pdf";
+from_extension(".pgp") ->
+    "application/pgp";
+from_extension(".ai") ->
+    "application/postscript";
+from_extension(".eps") ->
+    "application/postscript";
+from_extension(".ps") ->
+    "application/postscript";
+from_extension(".prt") ->
+    "application/pro_eng";
+from_extension(".rtf") ->
+    "application/rtf";
+from_extension(".smi") ->
+    "application/smil";
+from_extension(".smil") ->
+    "application/smil";
+from_extension(".sol") ->
+    "application/solids";
+from_extension(".vda") ->
+    "application/vda";
+from_extension(".xlm") ->
+    "application/vnd.ms-excel";
+from_extension(".cod") ->
+    "application/vnd.rim.cod";
+from_extension(".pgn") ->
+    "application/x-chess-pgn";
+from_extension(".cpio") ->
+    "application/x-cpio";
+from_extension(".csh") ->
+    "application/x-csh";
+from_extension(".deb") ->
+    "application/x-debian-package";
+from_extension(".dcr") ->
+    "application/x-director";
+from_extension(".dir") ->
+    "application/x-director";
+from_extension(".dxr") ->
+    "application/x-director";
+from_extension(".gz") ->
+    "application/x-gzip";
+from_extension(".hdf") ->
+    "application/x-hdf";
+from_extension(".ipx") ->
+    "application/x-ipix";
+from_extension(".ips") ->
+    "application/x-ipscript";
+from_extension(".js") ->
+    "application/x-javascript";
+from_extension(".skd") ->
+    "application/x-koan";
+from_extension(".skm") ->
+    "application/x-koan";
+from_extension(".skp") ->
+    "application/x-koan";
+from_extension(".skt") ->
+    "application/x-koan";
+from_extension(".latex") ->
+    "application/x-latex";
+from_extension(".lsp") ->
+    "application/x-lisp";
+from_extension(".scm") ->
+    "application/x-lotusscreencam";
+from_extension(".mif") ->
+    "application/x-mif";
+from_extension(".com") ->
+    "application/x-msdos-program";
+from_extension(".exe") ->
+    "application/octet-stream";
+from_extension(".cdf") ->
+    "application/x-netcdf";
+from_extension(".nc") ->
+    "application/x-netcdf";
+from_extension(".pl") ->
+    "application/x-perl";
+from_extension(".pm") ->
+    "application/x-perl";
+from_extension(".rar") ->
+    "application/x-rar-compressed";
+from_extension(".sh") ->
+    "application/x-sh";
+from_extension(".shar") ->
+    "application/x-shar";
+from_extension(".swf") ->
+    "application/x-shockwave-flash";
+from_extension(".sit") ->
+    "application/x-stuffit";
+from_extension(".sv4cpio") ->
+    "application/x-sv4cpio";
+from_extension(".sv4crc") ->
+    "application/x-sv4crc";
+from_extension(".tar.gz") ->
+    "application/x-tar-gz";
+from_extension(".tgz") ->
+    "application/x-tar-gz";
+from_extension(".tar") ->
+    "application/x-tar";
+from_extension(".tcl") ->
+    "application/x-tcl";
+from_extension(".texi") ->
+    "application/x-texinfo";
+from_extension(".texinfo") ->
+    "application/x-texinfo";
+from_extension(".man") ->
+    "application/x-troff-man";
+from_extension(".me") ->
+    "application/x-troff-me";
+from_extension(".ms") ->
+    "application/x-troff-ms";
+from_extension(".roff") ->
+    "application/x-troff";
+from_extension(".t") ->
+    "application/x-troff";
+from_extension(".tr") ->
+    "application/x-troff";
+from_extension(".ustar") ->
+    "application/x-ustar";
+from_extension(".src") ->
+    "application/x-wais-source";
+from_extension(".zip") ->
+    "application/zip";
+from_extension(".tsi") ->
+    "audio/TSP-audio";
+from_extension(".au") ->
+    "audio/basic";
+from_extension(".snd") ->
+    "audio/basic";
+from_extension(".kar") ->
+    "audio/midi";
+from_extension(".mid") ->
+    "audio/midi";
+from_extension(".midi") ->
+    "audio/midi";
+from_extension(".mp2") ->
+    "audio/mpeg";
+from_extension(".mp3") ->
+    "audio/mpeg";
+from_extension(".mpga") ->
+    "audio/mpeg";
+from_extension(".aif") ->
+    "audio/x-aiff";
+from_extension(".aifc") ->
+    "audio/x-aiff";
+from_extension(".aiff") ->
+    "audio/x-aiff";
+from_extension(".m3u") ->
+    "audio/x-mpegurl";
+from_extension(".wax") ->
+    "audio/x-ms-wax";
+from_extension(".wma") ->
+    "audio/x-ms-wma";
+from_extension(".rpm") ->
+    "audio/x-pn-realaudio-plugin";
+from_extension(".ram") ->
+    "audio/x-pn-realaudio";
+from_extension(".rm") ->
+    "audio/x-pn-realaudio";
+from_extension(".ra") ->
+    "audio/x-realaudio";
+from_extension(".wav") ->
+    "audio/x-wav";
+from_extension(".pdb") ->
+    "chemical/x-pdb";
+from_extension(".ras") ->
+    "image/cmu-raster";
+from_extension(".gif") ->
+    "image/gif";
+from_extension(".ief") ->
+    "image/ief";
+from_extension(".jpe") ->
+    "image/jpeg";
+from_extension(".jpeg") ->
+    "image/jpeg";
+from_extension(".jpg") ->
+    "image/jpeg";
+from_extension(".jp2") ->
+    "image/jp2";
+from_extension(".png") ->
+    "image/png";
+from_extension(".tif") ->
+    "image/tiff";
+from_extension(".tiff") ->
+    "image/tiff";
+from_extension(".pnm") ->
+    "image/x-portable-anymap";
+from_extension(".pbm") ->
+    "image/x-portable-bitmap";
+from_extension(".pgm") ->
+    "image/x-portable-graymap";
+from_extension(".ppm") ->
+    "image/x-portable-pixmap";
+from_extension(".rgb") ->
+    "image/x-rgb";
+from_extension(".xbm") ->
+    "image/x-xbitmap";
+from_extension(".xwd") ->
+    "image/x-xwindowdump";
+from_extension(".iges") ->
+    "model/iges";
+from_extension(".igs") ->
+    "model/iges";
+from_extension(".mesh") ->
+    "model/mesh";
+%% NOTE(review): a bare "." maps to the empty string rather than
+%% undefined; kept as-is for backward compatibility.
+from_extension(".") ->
+    "";
+from_extension(".msh") ->
+    "model/mesh";
+from_extension(".silo") ->
+    "model/mesh";
+from_extension(".vrml") ->
+    "model/vrml";
+from_extension(".wrl") ->
+    "model/vrml";
+from_extension(".css") ->
+    "text/css";
+from_extension(".htm") ->
+    "text/html";
+from_extension(".html") ->
+    "text/html";
+from_extension(".asc") ->
+    "text/plain";
+from_extension(".c") ->
+    "text/plain";
+from_extension(".cc") ->
+    "text/plain";
+from_extension(".f90") ->
+    "text/plain";
+from_extension(".f") ->
+    "text/plain";
+from_extension(".hh") ->
+    "text/plain";
+from_extension(".m") ->
+    "text/plain";
+from_extension(".txt") ->
+    "text/plain";
+from_extension(".rtx") ->
+    "text/richtext";
+from_extension(".sgm") ->
+    "text/sgml";
+from_extension(".sgml") ->
+    "text/sgml";
+from_extension(".tsv") ->
+    "text/tab-separated-values";
+from_extension(".jad") ->
+    "text/vnd.sun.j2me.app-descriptor";
+from_extension(".etx") ->
+    "text/x-setext";
+from_extension(".xml") ->
+    "application/xml";
+from_extension(".dl") ->
+    "video/dl";
+from_extension(".fli") ->
+    "video/fli";
+from_extension(".flv") ->
+    "video/x-flv";
+from_extension(".gl") ->
+    "video/gl";
+from_extension(".mp4") ->
+    "video/mp4";
+from_extension(".mpe") ->
+    "video/mpeg";
+from_extension(".mpeg") ->
+    "video/mpeg";
+from_extension(".mpg") ->
+    "video/mpeg";
+from_extension(".mov") ->
+    "video/quicktime";
+from_extension(".qt") ->
+    "video/quicktime";
+from_extension(".viv") ->
+    "video/vnd.vivo";
+from_extension(".vivo") ->
+    "video/vnd.vivo";
+from_extension(".asf") ->
+    "video/x-ms-asf";
+from_extension(".asx") ->
+    "video/x-ms-asx";
+from_extension(".wmv") ->
+    "video/x-ms-wmv";
+from_extension(".wmx") ->
+    "video/x-ms-wmx";
+from_extension(".wvx") ->
+    "video/x-ms-wvx";
+from_extension(".avi") ->
+    "video/x-msvideo";
+from_extension(".movie") ->
+    "video/x-sgi-movie";
+from_extension(".mime") ->
+    "www/mime";
+from_extension(".ice") ->
+    "x-conference/x-cooltalk";
+from_extension(".vrm") ->
+    "x-world/x-vrml";
+from_extension(".spx") ->
+    "audio/ogg";
+from_extension(".xhtml") ->
+    "application/xhtml+xml";
+from_extension(".bz2") ->
+    "application/x-bzip2";
+from_extension(".doc") ->
+    "application/msword";
+from_extension(".z") ->
+    "application/x-compress";
+from_extension(".ico") ->
+    "image/x-icon";
+from_extension(".bmp") ->
+    "image/bmp";
+from_extension(".m4a") ->
+    "audio/mpeg";
+from_extension(".csv") ->
+    "text/csv";
+from_extension(".eot") ->
+    "application/vnd.ms-fontobject";
+from_extension(".m4v") ->
+    "video/mp4";
+from_extension(".svg") ->
+    "image/svg+xml";
+from_extension(".svgz") ->
+    "image/svg+xml";
+from_extension(".ttc") ->
+    "application/x-font-ttf";
+from_extension(".ttf") ->
+    "application/x-font-ttf";
+from_extension(".vcf") ->
+    "text/x-vcard";
+from_extension(".webm") ->
+    %% was "video/web": not a registered type; WebM is video/webm (IANA)
+    "video/webm";
+from_extension(".webp") ->
+    %% was "image/web": not a registered type; WebP is image/webp (IANA)
+    "image/webp";
+from_extension(".woff") ->
+    "application/x-font-woff";
+from_extension(".otf") ->
+    "font/opentype";
+from_extension(_) ->
+    undefined.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Round-trips every clause of from_extension/1 via the abstract-code
+%% lookup table built by mochiweb_cover (a self-consistency check).
+exhaustive_from_extension_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, from_extension),
+ [?assertEqual(V, from_extension(K)) || {K, V} <- T].
+
+%% Spot checks, including the undefined fallback for unknown/empty input.
+from_extension_test() ->
+ ?assertEqual("text/html",
+ from_extension(".html")),
+ ?assertEqual(undefined,
+ from_extension("")),
+ ?assertEqual(undefined,
+ from_extension(".wtf")),
+ ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing multipart/form-data.
+
+-module(mochiweb_multipart).
+-author('bob@mochimedia.com').
+
+-export([parse_form/1, parse_form/2]).
+-export([parse_multipart_request/2]).
+-export([parts_to_body/3, parts_to_multipart_body/4]).
+-export([default_file_handler/2]).
+
+-define(CHUNKSIZE, 4096).
+
+-record(mp, {state, boundary, length, buffer, callback, req}).
+
+%% TODO: DOCUMENT THIS MODULE.
+%% @type key() = atom() | string() | binary().
+%% @type value() = atom() | iolist() | integer().
+%% @type header() = {key(), value()}.
+%% @type bodypart() = {Start::integer(), End::integer(), Body::iolist()}.
+%% @type formfile() = {Name::string(), ContentType::string(), Content::binary()}.
+%% @type request().
+%% @type file_handler() = (Filename::string(), ContentType::string()) -> file_handler_callback().
+%% @type file_handler_callback() = (binary() | eof) -> file_handler_callback() | term().
+
+%% @spec parts_to_body([bodypart()], ContentType::string(),
+%% Size::integer()) -> {[header()], iolist()}
+%% @doc Return {[header()], iolist()} representing the body for the given
+%% parts, may be a single part or multipart.
+parts_to_body([{Start, End, Body}], ContentType, Size) ->
+    %% Exactly one part: plain body with a Content-Range header.
+    Range = ["bytes ",
+             mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
+             "/", mochiweb_util:make_io(Size)],
+    Headers = [{"Content-Type", ContentType}, {"Content-Range", Range}],
+    {Headers, Body};
+parts_to_body(BodyList, ContentType, Size) when is_list(BodyList) ->
+    %% Several parts: emit a multipart/byteranges body with a random
+    %% 8-byte hex boundary.
+    Boundary = mochihex:to_hex(mochiweb_util:rand_bytes(8)),
+    parts_to_multipart_body(BodyList, ContentType, Size, Boundary).
+
+%% @spec parts_to_multipart_body([bodypart()], ContentType::string(),
+%% Size::integer(), Boundary::string()) ->
+%% {[header()], iolist()}
+%% @doc Return {[header()], iolist()} representing the body for the given
+%% parts, always a multipart response.
+parts_to_multipart_body(BodyList, ContentType, Size, Boundary) ->
+    %% The multipart boundary is advertised in the Content-Type header and
+    %% repeated before every part in the body.
+    Headers = [{"Content-Type",
+                ["multipart/byteranges; ",
+                 "boundary=", Boundary]}],
+    Body = multipart_body(BodyList, ContentType, Boundary, Size),
+    {Headers, Body}.
+
+%% @spec multipart_body([bodypart()], ContentType::string(),
+%% Boundary::string(), Size::integer()) -> iolist()
+%% @doc Return the representation of a multipart body for the given [bodypart()].
+%% Base case: emit only the closing boundary marker.
+multipart_body([], _ContentType, Boundary, _Size) ->
+ ["--", Boundary, "--\r\n"];
+%% Each part: boundary line, part headers (Content-Type, Content-Range),
+%% blank line, the part body, then CRLF before the next part.
+multipart_body([{Start, End, Body} | BodyList], ContentType, Boundary, Size) ->
+ ["--", Boundary, "\r\n",
+ "Content-Type: ", ContentType, "\r\n",
+ "Content-Range: ",
+ "bytes ", mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
+ "/", mochiweb_util:make_io(Size), "\r\n\r\n",
+ Body, "\r\n"
+ | multipart_body(BodyList, ContentType, Boundary, Size)].
+
+%% @spec parse_form(request()) -> [{string(), string() | formfile()}]
+%% @doc Parse a multipart form from the given request using the in-memory
+%% default_file_handler/2.
+%% Accumulates uploaded files in memory via default_file_handler/2.
+parse_form(Req) ->
+ parse_form(Req, fun default_file_handler/2).
+
+%% @spec parse_form(request(), F::file_handler()) -> [{string(), string() | term()}]
+%% @doc Parse a multipart form from the given request using the given file_handler().
+parse_form(Req, FileHandler) ->
+    %% Drive the streaming multipart parser with a callback that folds
+    %% the part events into a proplist; only the accumulated result from
+    %% the {RemainingLength, RestBuffer, Result} triple is returned.
+    Callback = fun (Next) -> parse_form_outer(Next, FileHandler, []) end,
+    {_Length, _Rest, Result} = parse_multipart_request(Req, Callback),
+    Result.
+
+%% Continuation-passing state machine driven by parse_multipart_request/2.
+%% On eof, return the accumulated {Name, Value} pairs in original order.
+parse_form_outer(eof, _, Acc) ->
+ lists:reverse(Acc);
+%% On part headers, dispatch: parts without a filename are plain form
+%% values; parts with a filename are streamed through FileHandler.
+parse_form_outer({headers, H}, FileHandler, State) ->
+ {"form-data", H1} = proplists:get_value("content-disposition", H),
+ Name = proplists:get_value("name", H1),
+ Filename = proplists:get_value("filename", H1),
+ case Filename of
+ undefined ->
+ fun (Next) ->
+ parse_form_value(Next, {Name, []}, FileHandler, State)
+ end;
+ _ ->
+ ContentType = proplists:get_value("content-type", H),
+ Handler = FileHandler(Filename, ContentType),
+ fun (Next) ->
+ parse_form_file(Next, {Name, Handler}, FileHandler, State)
+ end
+ end.
+
+%% Accumulate body chunks for a plain value; at body_end, flatten the
+%% reversed chunks into a string and record {Name, Value}.
+parse_form_value(body_end, {Name, Acc}, FileHandler, State) ->
+ Value = binary_to_list(iolist_to_binary(lists:reverse(Acc))),
+ State1 = [{Name, Value} | State],
+ fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
+parse_form_value({body, Data}, {Name, Acc}, FileHandler, State) ->
+ Acc1 = [Data | Acc],
+ fun (Next) -> parse_form_value(Next, {Name, Acc1}, FileHandler, State) end.
+
+%% Stream file chunks through the handler; at body_end, Handler(eof)
+%% yields the final value recorded under Name.
+parse_form_file(body_end, {Name, Handler}, FileHandler, State) ->
+ Value = Handler(eof),
+ State1 = [{Name, Value} | State],
+ fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
+parse_form_file({body, Data}, {Name, Handler}, FileHandler, State) ->
+ H1 = Handler(Data),
+ fun (Next) -> parse_form_file(Next, {Name, H1}, FileHandler, State) end.
+
+%% In-memory file handler: accumulates chunks and, on eof, returns
+%% {Filename, ContentType, ContentBinary}.
+default_file_handler(Filename, ContentType) ->
+ default_file_handler_1(Filename, ContentType, []).
+
+%% Each returned fun either finalizes (eof) or closes over the chunk
+%% list extended with the next piece of data.
+default_file_handler_1(Filename, ContentType, Acc) ->
+ fun(eof) ->
+ Value = iolist_to_binary(lists:reverse(Acc)),
+ {Filename, ContentType, Value};
+ (Next) ->
+ default_file_handler_1(Filename, ContentType, [Next | Acc])
+ end.
+
+%% Entry point of the streaming multipart parser. A Content-Length header
+%% is required (chunked bodies unsupported, per the TODO). The first chunk
+%% must begin with "--Boundary\r\n" or the match below aborts parsing.
+%% Prefix carries a leading CRLF so later boundaries can be located
+%% mid-stream by find_boundary/2.
+parse_multipart_request(Req, Callback) ->
+ %% TODO: Support chunked?
+ Length = list_to_integer(Req:get_combined_header_value("content-length")),
+ Boundary = iolist_to_binary(
+ get_boundary(Req:get_header_value("content-type"))),
+ Prefix = <<"\r\n--", Boundary/binary>>,
+ BS = byte_size(Boundary),
+ Chunk = read_chunk(Req, Length),
+ Length1 = Length - byte_size(Chunk),
+ <<"--", Boundary:BS/binary, "\r\n", Rest/binary>> = Chunk,
+ feed_mp(headers, flash_multipart_hack(#mp{boundary=Prefix,
+ length=Length1,
+ buffer=Rest,
+ callback=Callback,
+ req=Req})).
+
+%% Parse a CRLF-separated block of part headers into a proplist.
+parse_headers(<<>>) ->
+ [];
+parse_headers(Binary) ->
+ parse_headers(Binary, []).
+
+%% Split on "\r\n"; the final (unterminated) line is also a header.
+parse_headers(Binary, Acc) ->
+ case find_in_binary(<<"\r\n">>, Binary) of
+ {exact, N} ->
+ <<Line:N/binary, "\r\n", Rest/binary>> = Binary,
+ parse_headers(Rest, [split_header(Line) | Acc]);
+ not_found ->
+ lists:reverse([split_header(Binary) | Acc])
+ end.
+
+%% Split a header line at the first colon; the name is lower-cased and
+%% stripped, the value (with parameters) is parsed by mochiweb_util.
+split_header(Line) ->
+ {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
+ binary_to_list(Line)),
+ {string:to_lower(string:strip(Name)),
+ mochiweb_util:parse_header(Value)}.
+
+%% Read the next piece of the request body: at most ?CHUNKSIZE bytes, or
+%% exactly the remaining Length when less than one chunk is left. A
+%% non-positive Length is a function_clause error, as before.
+read_chunk(Req, Length) when Length > 0, Length < ?CHUNKSIZE ->
+    Req:recv(Length);
+read_chunk(Req, Length) when Length >= ?CHUNKSIZE ->
+    Req:recv(?CHUNKSIZE).
+
+%% Append the next body chunk to the parse buffer, decrement the number
+%% of bytes still expected, and re-apply the Flash termination fixup.
+read_more(State=#mp{length=Length, buffer=Buffer, req=Req}) ->
+ Data = read_chunk(Req, Length),
+ Buffer1 = <<Buffer/binary, Data/binary>>,
+ flash_multipart_hack(State#mp{length=Length - byte_size(Data),
+ buffer=Buffer1}).
+
+%% Only applies once the whole body has been read (length=0): if the
+%% buffer ends in the closing boundary "Prefix--" without the trailing
+%% CRLF (as sent by Flash), append "\r\n" so find_boundary/2 recognizes
+%% the end marker.
+flash_multipart_hack(State=#mp{length=0, buffer=Buffer, boundary=Prefix}) ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=22
+ %% Flash doesn't terminate multipart with \r\n properly so we fix it up here
+ PrefixSize = size(Prefix),
+ case size(Buffer) - (2 + PrefixSize) of
+ Seek when Seek >= 0 ->
+ case Buffer of
+ <<_:Seek/binary, Prefix:PrefixSize/binary, "--">> ->
+ Buffer1 = <<Buffer/binary, "\r\n">>,
+ State#mp{buffer=Buffer1};
+ _ ->
+ State
+ end;
+ _ ->
+ State
+ end;
+flash_multipart_hack(State) ->
+ State.
+
+%% Parser driver. In the `headers' state, locate the blank line ending the
+%% part headers, reading more data at most once (headers are assumed to
+%% fit within one ?CHUNKSIZE read, see inline comment) and hand the parsed
+%% headers to the callback.
+feed_mp(headers, State=#mp{buffer=Buffer, callback=Callback}) ->
+ {State1, P} = case find_in_binary(<<"\r\n\r\n">>, Buffer) of
+ {exact, N} ->
+ {State, N};
+ _ ->
+ S1 = read_more(State),
+ %% Assume headers must be less than ?CHUNKSIZE
+ {exact, N} = find_in_binary(<<"\r\n\r\n">>,
+ S1#mp.buffer),
+ {S1, N}
+ end,
+ <<Headers:P/binary, "\r\n\r\n", Rest/binary>> = State1#mp.buffer,
+ NextCallback = Callback({headers, parse_headers(Headers)}),
+ feed_mp(body, State1#mp{buffer=Rest,
+ callback=NextCallback});
+%% In the `body' state, dispatch on find_boundary/2: end_boundary finishes
+%% the whole request (eof), next_boundary starts the next part's headers,
+%% `maybe' means a possibly-split boundary at the buffer's tail, and
+%% not_found flushes the whole buffer as body data before reading more.
+feed_mp(body, State=#mp{boundary=Prefix, buffer=Buffer, callback=Callback}) ->
+ Boundary = find_boundary(Prefix, Buffer),
+ case Boundary of
+ {end_boundary, Start, Skip} ->
+ <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
+ C1 = Callback({body, Data}),
+ C2 = C1(body_end),
+ {State#mp.length, Rest, C2(eof)};
+ {next_boundary, Start, Skip} ->
+ <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
+ C1 = Callback({body, Data}),
+ feed_mp(headers, State#mp{callback=C1(body_end),
+ buffer=Rest});
+ {maybe, Start} ->
+ <<Data:Start/binary, Rest/binary>> = Buffer,
+ feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
+ buffer=Rest}));
+ not_found ->
+ {Data, Rest} = {Buffer, <<>>},
+ feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
+ buffer=Rest}))
+ end.
+
+%% Extract the boundary parameter from a multipart/form-data content-type.
+%% Any other media type fails the tuple match (badmatch), and a missing
+%% boundary parameter fails the case (case_clause), aborting the parse.
+get_boundary(ContentType) ->
+ {"multipart/form-data", Opts} = mochiweb_util:parse_header(ContentType),
+ case proplists:get_value("boundary", Opts) of
+ S when is_list(S) ->
+ S
+ end.
+
+%% @spec find_in_binary(Pattern::binary(), Data::binary()) ->
+%% {exact, N} | {partial, N, K} | not_found
+%% @doc Searches for the given pattern in the given binary.
+find_in_binary(P, Data) when size(P) > 0 ->
+ PS = size(P),
+ DS = size(Data),
+ case DS - PS of
+ Last when Last < 0 ->
+ %% Data is shorter than the pattern: only a partial match
+ %% (pattern prefix at the end of Data) is possible.
+ partial_find(P, Data, 0, DS),
+ partial_find(P, Data, 0, DS);
+ Last ->
+ %% Full match via binary:match/2; otherwise check whether a
+ %% prefix of P sits at the very end of Data (split boundary).
+ case binary:match(Data, P) of
+ {Pos, _} -> {exact, Pos};
+ nomatch -> partial_find(P, Data, Last+1, PS-1)
+ end
+ end.
+
+%% Check whether the first K bytes of B appear at offset N of D (i.e. a
+%% pattern prefix at the tail of D), shrinking K until 0 => not_found.
+%% Used to detect a boundary split across read chunks.
+partial_find(_B, _D, _N, 0) ->
+ not_found;
+partial_find(B, D, N, K) ->
+ <<B1:K/binary, _/binary>> = B,
+ case D of
+ <<_Skip:N/binary, B1:K/binary>> ->
+ {partial, N, K};
+ _ ->
+ partial_find(B, D, 1 + N, K - 1)
+ end.
+
+%% Classify the next boundary occurrence in Data. An exact Prefix match is
+%% a real boundary only if followed by "\r\n" (next part) or "--\r\n" (end
+%% of multipart); too few trailing bytes to decide => {maybe, Skip}, and
+%% anything else is a false positive. A partial match reaching the end of
+%% Data also yields `maybe' so the caller reads more before deciding.
+find_boundary(Prefix, Data) ->
+ case find_in_binary(Prefix, Data) of
+ {exact, Skip} ->
+ PrefixSkip = Skip + size(Prefix),
+ case Data of
+ <<_:PrefixSkip/binary, "\r\n", _/binary>> ->
+ {next_boundary, Skip, size(Prefix) + 2};
+ <<_:PrefixSkip/binary, "--\r\n", _/binary>> ->
+ {end_boundary, Skip, size(Prefix) + 4};
+ _ when size(Data) < PrefixSkip + 4 ->
+ %% Underflow
+ {maybe, Skip};
+ _ ->
+ %% False positive
+ not_found
+ end;
+ {partial, Skip, Length} when (Skip + Length) =:= size(Data) ->
+ %% Underflow
+ {maybe, Skip};
+ _ ->
+ not_found
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Test helper: certfile/keyfile options pointing at the test certificates
+%% shipped under support/test-materials relative to this module's ebin.
+ssl_cert_opts() ->
+ EbinDir = filename:dirname(code:which(?MODULE)),
+ CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
+ CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
+ KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
+ [{certfile, CertFile}, {keyfile, KeyFile}].
+
+%% Test helper: start a mochiweb_socket_server (plain or ssl) on an
+%% ephemeral localhost port running ServerFun, connect a matching client
+%% socket, run ClientFun on it, stop the server and return ClientFun's
+%% result (exceptions are caught and returned as the catch value).
+with_socket_server(Transport, ServerFun, ClientFun) ->
+ ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
+ ServerOpts = case Transport of
+ plain ->
+ ServerOpts0;
+ ssl ->
+ ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
+ end,
+ {ok, Server} = mochiweb_socket_server:start_link(ServerOpts),
+ Port = mochiweb_socket_server:get(Server, port),
+ ClientOpts = [binary, {active, false}],
+ {ok, Client} = case Transport of
+ plain ->
+ gen_tcp:connect("127.0.0.1", Port, ClientOpts);
+ ssl ->
+ ClientOpts1 = [{ssl_imp, new} | ClientOpts],
+ {ok, SslSocket} = ssl:connect("127.0.0.1", Port, ClientOpts1),
+ {ok, {ssl, SslSocket}}
+ end,
+ Res = (catch ClientFun(Client)),
+ mochiweb_socket_server:stop(Server),
+ Res.
+
+%% Test helper: build a POST /multipart request object over Socket with
+%% the given content-type and content-length headers.
+fake_request(Socket, ContentType, Length) ->
+ mochiweb_request:new(Socket,
+ 'POST',
+ "/multipart",
+ {1,1},
+ mochiweb_headers:make(
+ [{"content-type", ContentType},
+ {"content-length", Length}])).
+
+%% Expectation-checking parser callback: matches each event against the
+%% head of the Expect list, tolerating an empty chunk before body_end and
+%% body data split across multiple {body, _} events.
+test_callback({body, <<>>}, Rest=[body_end | _]) ->
+ %% When expecting the body_end we might get an empty binary
+ fun (Next) -> test_callback(Next, Rest) end;
+test_callback({body, Got}, [{body, Expect} | Rest]) when Got =/= Expect ->
+ %% Partial response
+ GotSize = size(Got),
+ <<Got:GotSize/binary, Expect1/binary>> = Expect,
+ fun (Next) -> test_callback(Next, [{body, Expect1} | Rest]) end;
+test_callback(Got, [Expect | Rest]) ->
+ ?assertEqual(Got, Expect),
+ case Rest of
+ [] ->
+ ok;
+ _ ->
+ fun (Next) -> test_callback(Next, Rest) end
+ end.
+
+%% Run the parse3 scenario over plain TCP and over TLS.
+parse3_http_test() ->
+ parse3(plain).
+
+parse3_https_test() ->
+ parse3(ssl).
+
+%% Scenario: a hidden form value plus a multi-line text file upload,
+%% checked event-by-event via test_callback/2.
+parse3(Transport) ->
+ ContentType = "multipart/form-data; boundary=---------------------------7386909285754635891697677882",
+ BinContent = <<"-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"file\"; filename=\"test_file.txt\"\r\nContent-Type: text/plain\r\n\r\nWoo multiline text file\n\nLa la la\r\n-----------------------------7386909285754635891697677882--\r\n">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "hidden"}]}}]},
+ {body, <<"multipart message">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "test_file.txt"}]}},
+ {"content-type", {"text/plain", []}}]},
+ {body, <<"Woo multiline text file\n\nLa la la">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Run the parse2 scenario over plain TCP and over TLS.
+parse2_http_test() ->
+ parse2(plain).
+
+parse2_https_test() ->
+ parse2(ssl).
+
+%% Scenario: a hidden form value plus an empty file upload (empty
+%% filename and empty body), checked event-by-event via test_callback/2.
+parse2(Transport) ->
+ ContentType = "multipart/form-data; boundary=---------------------------6072231407570234361599764024",
+ BinContent = <<"-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"file\"; filename=\"\"\r\nContent-Type: application/octet-stream\r\n\r\n\r\n-----------------------------6072231407570234361599764024--\r\n">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "hidden"}]}}]},
+ {body, <<"multipart message">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", ""}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, <<>>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Plain-TCP and SSL entry points for the parse_form case.
+parse_form_http_test() ->
+ do_parse_form(plain).
+
+parse_form_https_test() ->
+ do_parse_form(ssl).
+
+%% Exercises the higher-level parse_form/1 API (rather than the raw callback
+%% interface): a text field comes back as a string, a file part comes back as
+%% {Filename, {ContentType, Params}, BodyBinary}.
+do_parse_form(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_form(Req),
+ [{"submit-name", "Larry"},
+ {"files", {"file1.txt", {"text/plain",[]},
+ <<"... contents of file1.txt ...">>}
+ }] = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Plain-TCP and SSL entry points for the basic do_parse case.
+parse_http_test() ->
+ do_parse(plain).
+
+parse_https_test() ->
+ do_parse(ssl).
+
+%% Baseline multipart parse: the canonical RFC 2388 "AaB03x" example with
+%% one text field and one file part, checked against the exact callback
+%% event stream.
+do_parse(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}}]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Plain-TCP and SSL entry points for the partial-boundary case.
+parse_partial_body_boundary_http_test() ->
+ parse_partial_body_boundary(plain).
+
+parse_partial_body_boundary_https_test() ->
+ parse_partial_body_boundary(ssl).
+
+%% Uses a 2048-byte boundary so the boundary cannot fit in a single read,
+%% forcing the parser through its partial-boundary ("maybe") code path.
+parse_partial_body_boundary(Transport) ->
+ Boundary = string:copies("$", 2048),
+ ContentType = "multipart/form-data; boundary=" ++ Boundary,
+ ?assertEqual(Boundary, get_boundary(ContentType)),
+ Content = mochiweb_util:join(
+ ["--" ++ Boundary,
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--" ++ Boundary,
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--" ++ Boundary ++ "--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}}
+ ]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Plain-TCP and SSL entry points for the large-header case.
+parse_large_header_http_test() ->
+ parse_large_header(plain).
+
+parse_large_header_https_test() ->
+ parse_large_header(ssl).
+
+%% Adds a 4096-byte part header (x-large-header) so a single part's header
+%% block spans more than one read, checking header reassembly.
+parse_large_header(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "x-large-header: " ++ string:copies("%", 4096),
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}},
+ {"x-large-header", {string:copies("%", 4096), []}}
+ ]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Unit test for find_boundary/2: exact boundary hits, end boundaries
+%% ("--" suffix), misses, and partial matches truncated at the end of the
+%% buffer. The partial-match atom is written as 'maybe' (quoted) because
+%% maybe is a reserved word once the maybe_expr feature is enabled, which
+%% is the default from OTP 27 — unquoted it would fail to compile there.
+find_boundary_test() ->
+ B = <<"\r\n--X">>,
+ {next_boundary, 0, 7} = find_boundary(B, <<"\r\n--X\r\nRest">>),
+ {next_boundary, 1, 7} = find_boundary(B, <<"!\r\n--X\r\nRest">>),
+ {end_boundary, 0, 9} = find_boundary(B, <<"\r\n--X--\r\nRest">>),
+ {end_boundary, 1, 9} = find_boundary(B, <<"!\r\n--X--\r\nRest">>),
+ not_found = find_boundary(B, <<"--X\r\nRest">>),
+ {'maybe', 0} = find_boundary(B, <<"\r\n--X\r">>),
+ {'maybe', 1} = find_boundary(B, <<"!\r\n--X\r">>),
+ %% Regression fixture: a GIF trailer followed by the first bytes of a
+ %% boundary — the partial match starts at offset 30.
+ P = <<"\r\n-----------------------------16037454351082272548568224146">>,
+ B0 = <<55,212,131,77,206,23,216,198,35,87,252,118,252,8,25,211,132,229,
+ 182,42,29,188,62,175,247,243,4,4,0,59, 13,10,45,45,45,45,45,45,45,
+ 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,
+ 49,54,48,51,55,52,53,52,51,53,49>>,
+ {'maybe', 30} = find_boundary(P, B0),
+ not_found = find_boundary(B, <<"\r\n--XJOPKE">>),
+ ok.
+
+%% Unit test for find_in_binary/2: {exact, Offset} for a full match,
+%% {partial, Offset, Matched} when the needle runs off the end of the
+%% haystack, not_found otherwise.
+find_in_binary_test() ->
+ {exact, 0} = find_in_binary(<<"foo">>, <<"foobarbaz">>),
+ {exact, 1} = find_in_binary(<<"oo">>, <<"foobarbaz">>),
+ {exact, 8} = find_in_binary(<<"z">>, <<"foobarbaz">>),
+ not_found = find_in_binary(<<"q">>, <<"foobarbaz">>),
+ {partial, 7, 2} = find_in_binary(<<"azul">>, <<"foobarbaz">>),
+ {exact, 0} = find_in_binary(<<"foobarbaz">>, <<"foobarbaz">>),
+ {partial, 0, 3} = find_in_binary(<<"foobar">>, <<"foo">>),
+ {partial, 1, 3} = find_in_binary(<<"foobar">>, <<"afoo">>),
+ ok.
+
+%% Plain-TCP and SSL entry points for the Flash-uploader case.
+flash_parse_http_test() ->
+ flash_parse(plain).
+
+flash_parse_https_test() ->
+ flash_parse(ssl).
+
+%% Body as produced by the Adobe Flash uploader: note the body uses the
+%% declared boundary prefixed with two extra dashes, and there is no
+%% trailing CRLF after the final boundary.
+flash_parse(Transport) ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\nhello\n\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, <<"hello\n">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Plain-TCP and SSL entry points for the large Flash-uploader case.
+flash_parse2_http_test() ->
+ flash_parse2(plain).
+
+flash_parse2_https_test() ->
+ flash_parse2(ssl).
+
+%% Same Flash-style body as flash_parse/1 but with a 4096-byte file part,
+%% so the file body spans multiple reads.
+flash_parse2(Transport) ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
+ Chunk = iolist_to_binary(string:copies("%", 4096)),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\n", Chunk/binary, "\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, Chunk},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% parse_headers/1 on an empty binary yields no headers.
+parse_headers_test() ->
+ ?assertEqual([], parse_headers(<<>>)).
+
+%% flash_multipart_hack/1 must be a no-op when the buffer only holds a
+%% strict prefix of the boundary plus a dash (i.e. not yet decidable).
+flash_multipart_hack_test() ->
+ Buffer = <<"prefix-">>,
+ Prefix = <<"prefix">>,
+ State = #mp{length=0, buffer=Buffer, boundary=Prefix},
+ ?assertEqual(State,
+ flash_multipart_hack(State)).
+
+%% A single byte-range part produces plain Content-Range/Content-Type
+%% headers and the raw part body (no multipart wrapping).
+parts_to_body_single_test() ->
+ {HL, B} = parts_to_body([{0, 5, <<"01234">>}],
+ "text/plain",
+ 10),
+ [{"Content-Range", Range},
+ {"Content-Type", Type}] = lists:sort(HL),
+ ?assertEqual(
+ <<"bytes 0-5/10">>,
+ iolist_to_binary(Range)),
+ ?assertEqual(
+ <<"text/plain">>,
+ iolist_to_binary(Type)),
+ ?assertEqual(
+ <<"01234">>,
+ iolist_to_binary(B)),
+ ok.
+
+%% Two or more range parts switch the response to multipart/byteranges
+%% with a generated boundary in the Content-Type header.
+parts_to_body_multi_test() ->
+ {[{"Content-Type", Type}],
+ _B} = parts_to_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ 10),
+ ?assertMatch(
+ <<"multipart/byteranges; boundary=", _/binary>>,
+ iolist_to_binary(Type)),
+ ok.
+
+%% parts_to_multipart_body/4 with an explicit boundary must agree with
+%% multipart_body/4 on the generated body and advertise the boundary in
+%% the Content-Type header.
+parts_to_multipart_body_test() ->
+ {[{"Content-Type", V}], B} = parts_to_multipart_body(
+ [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ 10,
+ "BOUNDARY"),
+ MB = multipart_body(
+ [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ "BOUNDARY",
+ 10),
+ ?assertEqual(
+ <<"multipart/byteranges; boundary=BOUNDARY">>,
+ iolist_to_binary(V)),
+ ?assertEqual(
+ iolist_to_binary(MB),
+ iolist_to_binary(B)),
+ ok.
+
+%% multipart_body/4: an empty part list yields only the closing boundary;
+%% otherwise each part gets Content-Type/Content-Range headers and the
+%% whole body is terminated with "--BOUNDARY--".
+multipart_body_test() ->
+ ?assertEqual(
+ <<"--BOUNDARY--\r\n">>,
+ iolist_to_binary(multipart_body([], "text/plain", "BOUNDARY", 0))),
+ ?assertEqual(
+ <<"--BOUNDARY\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Range: bytes 0-5/10\r\n\r\n"
+ "01234\r\n"
+ "--BOUNDARY\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Range: bytes 5-10/10\r\n\r\n"
+ "56789\r\n"
+ "--BOUNDARY--\r\n">>,
+ iolist_to_binary(multipart_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ "BOUNDARY",
+ 10))),
+ ok.
+
+%% @todo Move somewhere more appropriate than in the test suite
+
+%% Runs the multipart parsing benchmark once as part of the suite; bump
+%% the argument to iterate more when measuring locally.
+multipart_parsing_benchmark_test() ->
+ run_multipart_parsing_benchmark(1).
+
+%% Repeat the benchmark N times (N >= 0).
+run_multipart_parsing_benchmark(0) -> ok;
+run_multipart_parsing_benchmark(N) ->
+ multipart_parsing_benchmark(),
+ run_multipart_parsing_benchmark(N-1).
+
+%% Same shape as flash_parse2/1 but with a ~5.6MB file part (a 56-byte
+%% pattern repeated 102400 times), always over plain TCP.
+multipart_parsing_benchmark() ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ Chunk = binary:copy(<<"This Is_%Some=Quite0Long4String2Used9For7BenchmarKing.5">>, 102400),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\n", Chunk/binary, "\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, Chunk},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(plain, ServerFun, ClientFun),
+ ok.
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb HTTP Request abstraction.
+
+-module(mochiweb_request).
+-author('bob@mochimedia.com').
+
+-include_lib("kernel/include/file.hrl").
+-include("internal.hrl").
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([new/5]).
+-export([get_header_value/2, get_primary_header_value/2, get_combined_header_value/2, get/2, dump/1]).
+-export([send/2, recv/2, recv/3, recv_body/1, recv_body/2, stream_body/4]).
+-export([start_response/2, start_response_length/2, start_raw_response/2]).
+-export([respond/2, ok/2]).
+-export([not_found/1, not_found/2]).
+-export([parse_post/1, parse_qs/1]).
+-export([should_close/1, cleanup/1]).
+-export([parse_cookie/1, get_cookie_value/2]).
+-export([serve_file/3, serve_file/4]).
+-export([accepted_encodings/2]).
+-export([accepts_content_type/2, accepted_content_types/2]).
+
+-define(SAVE_QS, mochiweb_request_qs).
+-define(SAVE_PATH, mochiweb_request_path).
+-define(SAVE_RECV, mochiweb_request_recv).
+-define(SAVE_BODY, mochiweb_request_body).
+-define(SAVE_BODY_LENGTH, mochiweb_request_body_length).
+-define(SAVE_POST, mochiweb_request_post).
+-define(SAVE_COOKIE, mochiweb_request_cookie).
+-define(SAVE_FORCE_CLOSE, mochiweb_request_force_close).
+
+%% @type key() = atom() | string() | binary()
+%% @type value() = atom() | string() | binary() | integer()
+%% @type headers(). A mochiweb_headers structure.
+%% @type request(). A mochiweb_request parameterized module instance.
+%% @type response(). A mochiweb_response parameterized module instance.
+%% @type ioheaders() = headers() | [{key(), value()}].
+
+% 5 minute default idle timeout
+-define(IDLE_TIMEOUT, 300000).
+
+% Maximum recv_body() length of 100MB (104857600 bytes)
+-define(MAX_RECV_BODY, 104857600).
+
+%% @spec new(Socket, Method, RawPath, Version, headers()) -> request()
+%% @doc Create a new request instance: a parameterized-module style tuple
+%% {mochiweb_request, [Socket, Method, RawPath, Version, Headers]} that all
+%% other functions in this module pattern-match on.
+new(Socket, Method, RawPath, Version, Headers) ->
+ {?MODULE, [Socket, Method, RawPath, Version, Headers]}.
+
+%% @spec get_header_value(K, request()) -> undefined | Value
+%% @doc Get the value of a given request header.
+get_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
+ mochiweb_headers:get_value(K, Headers).
+
+%% @doc Get only the value before the first ";" of a given request header
+%% (e.g. the media type of a Content-Type header, without parameters).
+get_primary_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
+ mochiweb_headers:get_primary_value(K, Headers).
+
+%% @doc Get a given request header, combining duplicate occurrences as
+%% mochiweb_headers:get_combined_value/2 does.
+get_combined_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
+ mochiweb_headers:get_combined_value(K, Headers).
+
+%% @type field() = socket | scheme | method | raw_path | version | headers | peer | path | body_length | range
+
+%% @spec get(field(), request()) -> term()
+%% @doc Return the internal representation of the given field. If
+%% <code>socket</code> is requested on a HTTPS connection, then
+%% an ssl socket will be returned as <code>{ssl, SslSocket}</code>.
+%% You can use <code>SslSocket</code> with the <code>ssl</code>
+%% application, eg: <code>ssl:peercert(SslSocket)</code>.
+get(socket, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ Socket;
+%% scheme is derived from the socket type: plain -> http, ssl -> https.
+get(scheme, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ case mochiweb_socket:type(Socket) of
+ plain ->
+ http;
+ ssl ->
+ https
+ end;
+get(method, {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}) ->
+ Method;
+get(raw_path, {?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
+ RawPath;
+get(version, {?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}) ->
+ Version;
+get(headers, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
+ Headers;
+%% peer: when the TCP peer is loopback or in 10.0.0.0/8 (i.e. presumably a
+%% local proxy), prefer the last hop of X-Forwarded-For; note this trusts
+%% that header from such peers. A disconnected socket exits normally.
+get(peer, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case mochiweb_socket:peername(Socket) of
+ {ok, {Addr={10, _, _, _}, _Port}} ->
+ case get_header_value("x-forwarded-for", THIS) of
+ undefined ->
+ inet_parse:ntoa(Addr);
+ Hosts ->
+ string:strip(lists:last(string:tokens(Hosts, ",")))
+ end;
+ {ok, {{127, 0, 0, 1}, _Port}} ->
+ case get_header_value("x-forwarded-for", THIS) of
+ undefined ->
+ "127.0.0.1";
+ Hosts ->
+ string:strip(lists:last(string:tokens(Hosts, ",")))
+ end;
+ {ok, {Addr, _Port}} ->
+ inet_parse:ntoa(Addr);
+ {error, enotconn} ->
+ exit(normal)
+ end;
+%% path: unquoted path component of the raw path, memoized in the process
+%% dictionary (one request is handled per process).
+get(path, {?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
+ case erlang:get(?SAVE_PATH) of
+ undefined ->
+ {Path0, _, _} = mochiweb_util:urlsplit_path(RawPath),
+ Path = mochiweb_util:unquote(Path0),
+ put(?SAVE_PATH, Path),
+ Path;
+ Cached ->
+ Cached
+ end;
+%% body_length: memoized result of body_length/1; wrapped in {cached, _}
+%% so that a legitimate 'undefined' result is also cached.
+get(body_length, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case erlang:get(?SAVE_BODY_LENGTH) of
+ undefined ->
+ BodyLength = body_length(THIS),
+ put(?SAVE_BODY_LENGTH, {cached, BodyLength}),
+ BodyLength;
+ {cached, Cached} ->
+ Cached
+ end;
+%% range: parsed Range header, or undefined when absent.
+get(range, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case get_header_value(range, THIS) of
+ undefined ->
+ undefined;
+ RawRange ->
+ mochiweb_http:parse_range_request(RawRange)
+ end.
+
+%% @spec dump(request()) -> {mochiweb_request, [{atom(), term()}]}
+%% @doc Dump the internal representation to a "human readable" set of terms
+%% for debugging/inspection purposes. The socket is intentionally omitted.
+dump({?MODULE, [_Socket, Method, RawPath, Version, Headers]}) ->
+ {?MODULE, [{method, Method},
+ {version, Version},
+ {raw_path, RawPath},
+ {headers, mochiweb_headers:to_list(Headers)}]}.
+
+%% @spec send(iodata(), request()) -> ok
+%% @doc Send data over the socket. Any send failure terminates the request
+%% process with exit(normal) — the client is gone, nothing to report.
+send(Data, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ case mochiweb_socket:send(Socket, Data) of
+ ok ->
+ ok;
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec recv(integer(), request()) -> binary()
+%% @doc Receive Length bytes from the client as a binary, with the default
+%% idle timeout.
+recv(Length, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ recv(Length, ?IDLE_TIMEOUT, THIS).
+
+%% @spec recv(integer(), integer(), request()) -> binary()
+%% @doc Receive Length bytes from the client as a binary, with the given
+%% Timeout in msec. Records that the body was (partially) read via
+%% ?SAVE_RECV, which should_close/1 consults; failure or timeout
+%% terminates the request process with exit(normal).
+recv(Length, Timeout, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ case mochiweb_socket:recv(Socket, Length, Timeout) of
+ {ok, Data} ->
+ put(?SAVE_RECV, true),
+ Data;
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec body_length(request()) -> undefined | chunked | unknown_transfer_encoding | integer()
+%% @doc Infer body length from transfer-encoding and content-length headers.
+%% Transfer-Encoding takes precedence; Content-Length is only consulted
+%% when no Transfer-Encoding header is present.
+body_length({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case get_header_value("transfer-encoding", THIS) of
+ undefined ->
+ case get_combined_header_value("content-length", THIS) of
+ undefined ->
+ undefined;
+ Length ->
+ list_to_integer(Length)
+ end;
+ "chunked" ->
+ chunked;
+ Unknown ->
+ {unknown_transfer_encoding, Unknown}
+ end.
+
+
+%% @spec recv_body(request()) -> binary()
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will only receive up to the default max-body length of 100MB
+%% (?MAX_RECV_BODY = 104857600 bytes).
+recv_body({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ recv_body(?MAX_RECV_BODY, THIS).
+
+%% @spec recv_body(integer(), request()) -> binary()
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will receive up to MaxBody bytes. The result is memoized in the
+%% process dictionary (?SAVE_BODY) so repeated calls are cheap.
+recv_body(MaxBody, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case erlang:get(?SAVE_BODY) of
+ undefined ->
+ % we could use a sane constant for max chunk size
+ Body = stream_body(?MAX_RECV_BODY, fun
+ % final chunk (length 0): assemble accumulated chunks
+ ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
+ iolist_to_binary(lists:reverse(BinAcc));
+ % accumulate, enforcing MaxBody for chunked bodies too
+ ({Length, Bin}, {LengthAcc, BinAcc}) ->
+ NewLength = Length + LengthAcc,
+ if NewLength > MaxBody ->
+ exit({body_too_large, chunked});
+ true ->
+ {NewLength, [Bin | BinAcc]}
+ end
+ end, {0, []}, MaxBody, THIS),
+ put(?SAVE_BODY, Body),
+ Body;
+ Cached -> Cached
+ end.
+
+%% @doc Stream the request body through ChunkFun/2 with no MaxBodyLength
+%% limit; see stream_body/5.
+stream_body(MaxChunkSize, ChunkFun, FunState, {?MODULE,[_Socket,_Method,_RawPath,_Version,_Headers]}=THIS) ->
+ stream_body(MaxChunkSize, ChunkFun, FunState, undefined, THIS).
+
+%% @doc Stream the request body through ChunkFun({Length, Bin}, State),
+%% honoring "Expect: 100-continue" by sending a 100 response first.
+%% For chunked transfer-encoding, MaxChunkSize caps how much of a
+%% single chunk is handed to ChunkFun at once; for identity bodies,
+%% MaxBodyLength (when an integer) bounds the declared Content-Length.
+stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength,
+ {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ Expect = case get_header_value("expect", THIS) of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ _ = start_raw_response({100, gb_trees:empty()}, THIS),
+ ok;
+ _Else ->
+ ok
+ end,
+ case body_length(THIS) of
+ undefined ->
+ undefined;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ % In this case the MaxBody is actually used to
+ % determine the maximum allowed size of a single
+ % chunk.
+ stream_chunked_body(MaxChunkSize, ChunkFun, FunState, THIS);
+ 0 ->
+ <<>>;
+ Length when is_integer(Length) ->
+ case MaxBodyLength of
+ MaxBodyLength when is_integer(MaxBodyLength), MaxBodyLength < Length ->
+ exit({body_too_large, content_length});
+ _ ->
+ stream_unchunked_body(Length, ChunkFun, FunState, THIS)
+ end
+ end.
+
+
+%% @spec start_response({integer(), ioheaders()}, request()) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders. The server will set header defaults such as Server
+%% and Date if not present in ResponseHeaders.
+start_response({Code, ResponseHeaders}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = mochiweb_headers:default_from_list(server_headers(),
+ HResponse),
+ start_raw_response({Code, HResponse1}, THIS).
+
+%% @spec start_raw_response({integer(), headers()}, request()) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders exactly as given (no defaults added). Writes the
+%% status line followed by "Header: Value\r\n" pairs and a blank line.
+start_raw_response({Code, ResponseHeaders}, {?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}=THIS) ->
+ F = fun ({K, V}, Acc) ->
+ [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
+ end,
+ End = lists:foldl(F, [<<"\r\n">>],
+ mochiweb_headers:to_list(ResponseHeaders)),
+ send([make_version(Version), make_code(Code), <<"\r\n">> | End], THIS),
+ mochiweb:new_response({THIS, Code, ResponseHeaders}).
+
+
+%% @spec start_response_length({integer(), ioheaders(), integer()}, request()) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders including a Content-Length of Length. The server
+%% will set header defaults such as Server
+%% and Date if not present in ResponseHeaders.
+start_response_length({Code, ResponseHeaders, Length},
+ {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = mochiweb_headers:enter("Content-Length", Length, HResponse),
+ start_response({Code, HResponse1}, THIS).
+
+%% @spec respond({integer(), ioheaders(), iodata() | chunked | {file, IoDevice}}, request()) -> response()
+%% @doc Start the HTTP response with start_response, and send Body to the
+%% client (if the get(method) /= 'HEAD'). The Content-Length header
+%% will be set by the Body length, and the server will insert header
+%% defaults.
+%% File bodies: Content-Length is the file size; the file is streamed
+%% to the socket (skipped entirely for HEAD).
+respond({Code, ResponseHeaders, {file, IoDevice}},
+ {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}=THIS) ->
+ Length = mochiweb_io:iodevice_size(IoDevice),
+ Response = start_response_length({Code, ResponseHeaders, Length}, THIS),
+ case Method of
+ 'HEAD' ->
+ ok;
+ _ ->
+ mochiweb_io:iodevice_stream(
+ fun (Body) -> send(Body, THIS) end,
+ IoDevice)
+ end,
+ Response;
+%% chunked bodies: only headers are sent here; the caller emits chunks.
+respond({Code, ResponseHeaders, chunked}, {?MODULE, [_Socket, Method, _RawPath, Version, _Headers]}=THIS) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = case Method of
+ 'HEAD' ->
+ %% This is what Google does, http://www.google.com/
+ %% is chunked but HEAD gets Content-Length: 0.
+ %% The RFC is ambiguous so emulating Google is smart.
+ mochiweb_headers:enter("Content-Length", "0",
+ HResponse);
+ _ when Version >= {1, 1} ->
+ %% Only use chunked encoding for HTTP/1.1
+ mochiweb_headers:enter("Transfer-Encoding", "chunked",
+ HResponse);
+ _ ->
+ %% For pre-1.1 clients we send the data as-is
+ %% without a Content-Length header and without
+ %% chunk delimiters. Since the end of the document
+ %% is now ambiguous we must force a close.
+ put(?SAVE_FORCE_CLOSE, true),
+ HResponse
+ end,
+ start_response({Code, HResponse1}, THIS);
+%% iodata bodies: Content-Length from iolist_size, body skipped for HEAD.
+respond({Code, ResponseHeaders, Body}, {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}=THIS) ->
+ Response = start_response_length({Code, ResponseHeaders, iolist_size(Body)}, THIS),
+ case Method of
+ 'HEAD' ->
+ ok;
+ _ ->
+ send(Body, THIS)
+ end,
+ Response.
+
+%% @spec not_found(request()) -> response()
+%% @doc Alias for <code>not_found([])</code>.
+not_found({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ not_found([], THIS).
+
+%% @spec not_found(ExtraHeaders, request()) -> response()
+%% @doc Alias for <code>respond({404, [{"Content-Type", "text/plain"}
+%% | ExtraHeaders], <<"Not found.">>})</code>.
+not_found(ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ respond({404, [{"Content-Type", "text/plain"} | ExtraHeaders],
+ <<"Not found.">>}, THIS).
+
+%% @spec ok({value(), iodata()} | {value(), ioheaders(), iodata() | {file, IoDevice}}, request()) ->
+%% response()
+%% @doc respond({200, [{"Content-Type", ContentType} | Headers], Body}).
+%% Honors a Range request header with a 206 partial response when the
+%% body is not chunked and the ranges parse and are satisfiable.
+ok({ContentType, Body}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ ok({ContentType, [], Body}, THIS);
+ok({ContentType, ResponseHeaders, Body}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ case THIS:get(range) of
+ X when (X =:= undefined orelse X =:= fail) orelse Body =:= chunked ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=54
+ %% Range header not supported when chunked, return 200 and provide
+ %% full response.
+ HResponse1 = mochiweb_headers:enter("Content-Type", ContentType,
+ HResponse),
+ respond({200, HResponse1, Body}, THIS);
+ Ranges ->
+ {PartList, Size} = range_parts(Body, Ranges),
+ case PartList of
+ [] -> %% no valid ranges
+ HResponse1 = mochiweb_headers:enter("Content-Type",
+ ContentType,
+ HResponse),
+ %% could be 416, for now we'll just return 200
+ respond({200, HResponse1, Body}, THIS);
+ PartList ->
+ {RangeHeaders, RangeBody} =
+ mochiweb_multipart:parts_to_body(PartList, ContentType, Size),
+ HResponse1 = mochiweb_headers:enter_from_list(
+ [{"Accept-Ranges", "bytes"} |
+ RangeHeaders],
+ HResponse),
+ respond({206, HResponse1, RangeBody}, THIS)
+ end
+ end.
+
+%% @spec should_close(request()) -> bool()
+%% @doc Return true if the connection must be closed. If false, using
+%% Keep-Alive should be safe. Closes when: a close was forced
+%% (?SAVE_FORCE_CLOSE), the protocol predates HTTP/1.0, the client
+%% sent Connection: close, an HTTP/1.0 client did not opt in with
+%% Connection: Keep-Alive, or a declared body was never read (so
+%% unread bytes would corrupt the next request on this socket).
+should_close({?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}=THIS) ->
+ ForceClose = erlang:get(?SAVE_FORCE_CLOSE) =/= undefined,
+ DidNotRecv = erlang:get(?SAVE_RECV) =:= undefined,
+ ForceClose orelse Version < {1, 0}
+ %% Connection: close
+ orelse is_close(get_header_value("connection", THIS))
+ %% HTTP 1.0 requires Connection: Keep-Alive
+ orelse (Version =:= {1, 0}
+ andalso get_header_value("connection", THIS) =/= "Keep-Alive")
+ %% unread data left on the socket, can't safely continue
+ orelse (DidNotRecv
+ andalso get_combined_header_value("content-length", THIS) =/= undefined
+ andalso list_to_integer(get_combined_header_value("content-length", THIS)) > 0)
+ orelse (DidNotRecv
+ andalso get_header_value("transfer-encoding", THIS) =:= "chunked").
+
+%% @spec is_close(term()) -> bool()
+%% @doc True when a Connection header value requests closing the
+%% connection: any five-character string equal to "close"
+%% case-insensitively. Everything else (including undefined) is false.
+is_close([_, _, _, _, _] = Header) ->
+ string:to_lower(Header) =:= "close";
+is_close(_) ->
+ false.
+
+%% @spec cleanup(request()) -> ok
+%% @doc Clean up any junk in the process dictionary, required before continuing
+%% a Keep-Alive request. Erases every ?SAVE_* key this module memoizes.
+cleanup({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ L = [?SAVE_QS, ?SAVE_PATH, ?SAVE_RECV, ?SAVE_BODY, ?SAVE_BODY_LENGTH,
+ ?SAVE_POST, ?SAVE_COOKIE, ?SAVE_FORCE_CLOSE],
+ lists:foreach(fun(K) ->
+ erase(K)
+ end, L),
+ ok.
+
+%% @spec parse_qs(request()) -> [{Key::string(), Value::string()}]
+%% @doc Parse the query string of the URL. Memoized in the process
+%% dictionary under ?SAVE_QS.
+parse_qs({?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
+ case erlang:get(?SAVE_QS) of
+ undefined ->
+ {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
+ Parsed = mochiweb_util:parse_qs(QueryString),
+ put(?SAVE_QS, Parsed),
+ Parsed;
+ Cached ->
+ Cached
+ end.
+
%% @spec get_cookie_value(Key::string, request()) -> string() | undefined
%% @doc Get the value of the given cookie, or undefined when absent.
get_cookie_value(Key, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    Cookies = parse_cookie(THIS),
    proplists:get_value(Key, Cookies).
+
%% @spec parse_cookie(request()) -> [{Key::string(), Value::string()}]
%% @doc Parse the cookie header. The result is memoized in the process
%%      dictionary (?SAVE_COOKIE); a missing Cookie header yields [].
parse_cookie({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case erlang:get(?SAVE_COOKIE) of
        undefined ->
            Cookies = case get_header_value("cookie", THIS) of
                          undefined ->
                              [];
                          Value ->
                              mochiweb_cookies:parse_cookie(Value)
                      end,
            put(?SAVE_COOKIE, Cookies),
            Cookies;
        Cached ->
            Cached
    end.
+
%% @spec parse_post(request()) -> [{Key::string(), Value::string()}]
%% @doc Parse an application/x-www-form-urlencoded form POST. This
%%      has the side-effect of calling recv_body(). The result is
%%      memoized (?SAVE_POST); any other content type, or an absent
%%      body, yields [].
parse_post({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case erlang:get(?SAVE_POST) of
        undefined ->
            Parsed = case recv_body(THIS) of
                         undefined ->
                             [];
                         Binary ->
                             %% Primary value is used so parameters such as
                             %% "; charset=..." do not defeat the prefix match.
                             case get_primary_header_value("content-type",THIS) of
                                 "application/x-www-form-urlencoded" ++ _ ->
                                     mochiweb_util:parse_qs(Binary);
                                 _ ->
                                     []
                             end
                     end,
            put(?SAVE_POST, Parsed),
            Parsed;
        Cached ->
            Cached
    end.
+
%% @spec stream_chunked_body(integer(), fun(), term(), request()) -> term()
%% @doc The function is called for each chunk.
%%      Used internally by read_chunked_body.
stream_chunked_body(MaxChunkSize, Fun, FunState,
                    {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case read_chunk_length(THIS) of
        0 ->
            %% Final (zero-length) chunk: read_chunk(0, ...) consumes any
            %% trailers and Fun sees them as {0, Footers}.
            Fun({0, read_chunk(0, THIS)}, FunState);
        Length when Length > MaxChunkSize ->
            %% Oversized chunk: deliver it to Fun in MaxChunkSize slices.
            NewState = read_sub_chunks(Length, MaxChunkSize, Fun, FunState, THIS),
            stream_chunked_body(MaxChunkSize, Fun, NewState, THIS);
        Length ->
            NewState = Fun({Length, read_chunk(Length, THIS)}, FunState),
            stream_chunked_body(MaxChunkSize, Fun, NewState, THIS)
    end.
+
%% Stream a Content-Length delimited body to Fun in packets of at most
%% ?RECBUF_SIZE bytes; Fun is finally called with {0, <<>>}.
stream_unchunked_body(0, Fun, FunState, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}) ->
    Fun({0, <<>>}, FunState);
stream_unchunked_body(Length, Fun, FunState,
                      {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) when Length > 0 ->
    %% Never ask the socket for more than the receive buffer size.
    PktSize = erlang:min(Length, ?RECBUF_SIZE),
    Bin = recv(PktSize, THIS),
    NewState = Fun({PktSize, Bin}, FunState),
    stream_unchunked_body(Length - PktSize, Fun, NewState, THIS).
+
%% @spec read_chunk_length(request()) -> integer()
%% @doc Read the length of the next HTTP chunk from its size line.
read_chunk_length({?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    %% Switch to line-oriented packets just long enough to read the
    %% "<hex-size>CRLF" header line, then back to raw for the chunk data.
    ok = mochiweb_socket:setopts(Socket, [{packet, line}]),
    case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
        {ok, Header} ->
            ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
            %% Keep leading characters up to the first CR, LF or space.
            %% NOTE(review): chunk extensions ("size;name=val") are not
            %% stripped here and would make mochihex:to_int/1 fail --
            %% presumed absent in practice; confirm against clients.
            NotTerminator = fun (C) ->
                                    C =/= $\r andalso C =/= $\n andalso C =/= $\s
                            end,
            {Hex, _Rest} = lists:splitwith(NotTerminator, binary_to_list(Header)),
            mochihex:to_int(Hex);
        _ ->
            %% Socket closed or timed out: terminate quietly.
            exit(normal)
    end.
+
%% @spec read_chunk(integer(), request()) -> Chunk::binary() | [Footer::binary()]
%% @doc Read in a HTTP chunk of the given length. If Length is 0, then read the
%%      HTTP footers (as a list of binaries, since they're nominal).
read_chunk(0, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    %% Trailer section: read header lines until the terminating CRLF.
    ok = mochiweb_socket:setopts(Socket, [{packet, line}]),
    F = fun (F1, Acc) ->
                case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
                    {ok, <<"\r\n">>} ->
                        Acc;
                    {ok, Footer} ->
                        F1(F1, [Footer | Acc]);
                    _ ->
                        exit(normal)
                end
        end,
    Footers = F(F, []),
    ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
    %% The body is now fully consumed; mark the request as read so
    %% should_close/1 knows the socket can safely be kept alive.
    put(?SAVE_RECV, true),
    Footers;
read_chunk(Length, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    %% Read the chunk data plus its trailing CRLF in a single recv.
    case mochiweb_socket:recv(Socket, 2 + Length, ?IDLE_TIMEOUT) of
        {ok, <<Chunk:Length/binary, "\r\n">>} ->
            Chunk;
        _ ->
            exit(normal)
    end.
+
%% Deliver a chunk whose size exceeds MaxChunkSize to Fun in slices of
%% MaxChunkSize bytes; the final slice is read with read_chunk/2 so the
%% chunk's trailing CRLF is consumed as well.
read_sub_chunks(Length, MaxChunkSize, Fun, FunState,
                {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) when Length > MaxChunkSize ->
    Bin = recv(MaxChunkSize, THIS),
    NewState = Fun({size(Bin), Bin}, FunState),
    read_sub_chunks(Length - MaxChunkSize, MaxChunkSize, Fun, NewState, THIS);

read_sub_chunks(Length, _MaxChunkSize, Fun, FunState,
                {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    Fun({Length, read_chunk(Length, THIS)}, FunState).
+
%% @spec serve_file(Path, DocRoot, request()) -> Response
%% @doc Serve a file relative to DocRoot, with no extra response headers.
serve_file(Path, DocRoot, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=Req) ->
    serve_file(Path, DocRoot, [], Req).
+
%% @spec serve_file(Path, DocRoot, ExtraHeaders, request()) -> Response
%% @doc Serve a file relative to DocRoot. Paths that escape DocRoot
%%      (e.g. via "..") are answered with a 404; directories are handled
%%      by maybe_redirect/4.
serve_file(Path, DocRoot, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case mochiweb_util:safe_relative_path(Path) of
        undefined ->
            %% Unsafe path (escapes the document root): pretend it is absent.
            not_found(ExtraHeaders, THIS);
        RelPath ->
            FullPath = filename:join([DocRoot, RelPath]),
            case filelib:is_dir(FullPath) of
                true ->
                    maybe_redirect(RelPath, FullPath, ExtraHeaders, THIS);
                false ->
                    maybe_serve_file(FullPath, ExtraHeaders, THIS)
            end
    end.
+
+%% Internal API
+
%% This has the same effect as the DirectoryIndex directive in httpd:
%% the index document served for a directory request.
directory_index(FullPath) ->
    filename:join(FullPath, "index.html").
+
%% Serve the directory index for the document root itself, or redirect a
%% directory request lacking a trailing "/" to the canonical URL (301).
maybe_redirect([], FullPath, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    maybe_serve_file(directory_index(FullPath), ExtraHeaders, THIS);

maybe_redirect(RelPath, FullPath, ExtraHeaders,
               {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}=THIS) ->
    case string:right(RelPath, 1) of
        "/" ->
            %% Trailing slash already present: serve the directory index.
            maybe_serve_file(directory_index(FullPath), ExtraHeaders, THIS);
        _ ->
            %% NOTE(review): assumes a Host header is present; with none,
            %% Host is 'undefined' and the ++ below would crash -- confirm
            %% callers guarantee a Host header.
            Host = mochiweb_headers:get_value("host", Headers),
            Location = "http://" ++ Host ++ "/" ++ RelPath ++ "/",
            LocationBin = list_to_binary(Location),
            MoreHeaders = [{"Location", Location},
                           {"Content-Type", "text/html"} | ExtraHeaders],
            Top = <<"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">"
                    "<html><head>"
                    "<title>301 Moved Permanently</title>"
                    "</head><body>"
                    "<h1>Moved Permanently</h1>"
                    "<p>The document has moved <a href=\"">>,
            Bottom = <<">here</a>.</p></body></html>\n">>,
            Body = <<Top/binary, LocationBin/binary, Bottom/binary>>,
            respond({301, MoreHeaders, Body}, THIS)
    end.
+
%% Serve File if it exists, honouring If-Modified-Since (304 on an exact
%% RFC 1123 date string match); any error yields a 404.
maybe_serve_file(File, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case file:read_file_info(File) of
        {ok, FileInfo} ->
            LastModified = httpd_util:rfc1123_date(FileInfo#file_info.mtime),
            case get_header_value("if-modified-since", THIS) of
                LastModified ->
                    %% Byte-identical date string: client copy is current.
                    respond({304, ExtraHeaders, ""}, THIS);
                _ ->
                    case file:open(File, [raw, binary]) of
                        {ok, IoDevice} ->
                            ContentType = mochiweb_util:guess_mime(File),
                            Res = ok({ContentType,
                                      [{"last-modified", LastModified}
                                       | ExtraHeaders],
                                      {file, IoDevice}}, THIS),
                            %% The body has been fully streamed by ok/2,
                            %% so the device can be closed here.
                            ok = file:close(IoDevice),
                            Res;
                        _ ->
                            not_found(ExtraHeaders, THIS)
                    end
            end;
        {error, _} ->
            not_found(ExtraHeaders, THIS)
    end.
+
%% Default response headers: Server identification and the current date
%% in RFC 1123 format; used to fill in headers the handler omitted.
server_headers() ->
    [{"Server", "MochiWeb/1.0 (" ++ ?QUIP ++ ")"},
     {"Date", httpd_util:rfc1123_date()}].
+
%% Render an HTTP status for the response line: an integer code is
%% expanded to "Code Reason-Phrase" iodata; iodata is passed through.
make_code(Code) when is_integer(Code) ->
    [integer_to_list(Code), [" " | httpd_util:reason_phrase(Code)]];
make_code(Code) when is_list(Code); is_binary(Code) ->
    Code.
+
%% Status-line prefix for the response: HTTP/1.0 clients get an
%% HTTP/1.0 response line, everything else gets HTTP/1.1.
make_version(Version) ->
    case Version of
        {1, 0} -> <<"HTTP/1.0 ">>;
        _ -> <<"HTTP/1.1 ">>
    end.
+
%% Split a response body into the parts requested by a Range header.
%% Returns {[{Skip, Skip + Length - 1, PartialBody}], Size}; invalid
%% range specs are silently dropped.
range_parts({file, IoDevice}, Ranges) ->
    Size = mochiweb_io:iodevice_size(IoDevice),
    F = fun (Spec, Acc) ->
                case mochiweb_http:range_skip_length(Spec, Size) of
                    invalid_range ->
                        Acc;
                    V ->
                        [V | Acc]
                end
        end,
    LocNums = lists:foldr(F, [], Ranges),
    %% One positional read per valid range spec.
    {ok, Data} = file:pread(IoDevice, LocNums),
    Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
                                   {Skip, Skip + Length - 1, PartialBody}
                           end,
                           LocNums, Data),
    {Bodies, Size};
range_parts(Body0, Ranges) ->
    %% In-memory body: slice it with binary pattern matching.
    Body = iolist_to_binary(Body0),
    Size = size(Body),
    F = fun(Spec, Acc) ->
                case mochiweb_http:range_skip_length(Spec, Size) of
                    invalid_range ->
                        Acc;
                    {Skip, Length} ->
                        <<_:Skip/binary, PartialBody:Length/binary, _/binary>> = Body,
                        [{Skip, Skip + Length - 1, PartialBody} | Acc]
                end
        end,
    {lists:foldr(F, [], Ranges), Size}.
+
%% @spec accepted_encodings([encoding()], request()) -> [encoding()] | bad_accept_encoding_value
%% @type encoding() = string().
%%
%% @doc Returns a list of encodings accepted by a request. Encodings that are
%%      not supported by the server will not be included in the return list.
%%      This list is computed from the "Accept-Encoding" header and
%%      its elements are ordered, descendingly, according to their Q values.
%%
%%      Section 14.3 of the RFC 2616 (HTTP 1.1) describes the "Accept-Encoding"
%%      header and the process of determining which server supported encodings
%%      can be used for encoding the body for the request's response.
%%
%%      Examples
%%
%%      1) For a missing "Accept-Encoding" header:
%%         accepted_encodings(["gzip", "identity"]) -> ["identity"]
%%
%%      2) For an "Accept-Encoding" header with value "gzip, deflate":
%%         accepted_encodings(["gzip", "identity"]) -> ["gzip", "identity"]
%%
%%      3) For an "Accept-Encoding" header with value "gzip;q=0.5, deflate":
%%         accepted_encodings(["gzip", "deflate", "identity"]) ->
%%            ["deflate", "gzip", "identity"]
%%
accepted_encodings(SupportedEncodings, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% A missing header is treated as the empty string, for which only
    %% the fallback "identity" encoding can be picked.
    AcceptEncodingHeader = case get_header_value("Accept-Encoding", THIS) of
                               undefined ->
                                   "";
                               Value ->
                                   Value
                           end,
    case mochiweb_util:parse_qvalues(AcceptEncodingHeader) of
        invalid_qvalue_string ->
            bad_accept_encoding_value;
        QList ->
            mochiweb_util:pick_accepted_encodings(
              QList, SupportedEncodings, "identity"
             )
    end.
+
%% @spec accepts_content_type(string() | binary(), request()) -> boolean() | bad_accept_header
%%
%% @doc Determines whether a request accepts a given media type by analyzing its
%%      "Accept" header.
%%
%%      Examples
%%
%%      1) For a missing "Accept" header:
%%         accepts_content_type("application/json") -> true
%%
%%      2) For an "Accept" header with value "text/plain, application/*":
%%         accepts_content_type("application/json") -> true
%%
%%      3) For an "Accept" header with value "text/plain, */*; q=0.0":
%%         accepts_content_type("application/json") -> false
%%
%%      4) For an "Accept" header with value "text/plain; q=0.5, */*; q=0.1":
%%         accepts_content_type("application/json") -> true
%%
%%      5) For an "Accept" header with value "text/*; q=0.0, */*":
%%         accepts_content_type("text/plain") -> false
%%
accepts_content_type(ContentType1, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% Normalize by stripping all whitespace from the media type.
    ContentType = re:replace(ContentType1, "\\s", "", [global, {return, list}]),
    AcceptHeader = accept_header(THIS),
    case mochiweb_util:parse_qvalues(AcceptHeader) of
        invalid_qvalue_string ->
            bad_accept_header;
        QList ->
            [MainType, _SubType] = string:tokens(ContentType, "/"),
            SuperType = MainType ++ "/*",
            %% Accepted when some matching entry has Q > 0 AND neither the
            %% exact type nor its "type/*" super-type is explicitly Q=0.
            lists:any(
              fun({"*/*", Q}) when Q > 0.0 ->
                      true;
                 ({Type, Q}) when Q > 0.0 ->
                      Type =:= ContentType orelse Type =:= SuperType;
                 (_) ->
                      false
              end,
              QList
             ) andalso
                (not lists:member({ContentType, 0.0}, QList)) andalso
                (not lists:member({SuperType, 0.0}, QList))
    end.
+
%% @spec accepted_content_types([string() | binary()], request()) -> [string()] | bad_accept_header
%%
%% @doc Filters which of the given media types this request accepts. This filtering
%%      is performed by analyzing the "Accept" header. The returned list is sorted
%%      according to the preferences specified in the "Accept" header (higher Q values
%%      first). If two or more types have the same preference (Q value), their order
%%      in the returned list is the same as their order in the input list.
%%
%%      Examples
%%
%%      1) For a missing "Accept" header:
%%         accepted_content_types(["text/html", "application/json"]) ->
%%            ["text/html", "application/json"]
%%
%%      2) For an "Accept" header with value "text/html, application/*":
%%         accepted_content_types(["application/json", "text/html"]) ->
%%            ["application/json", "text/html"]
%%
%%      3) For an "Accept" header with value "text/html, */*; q=0.0":
%%         accepted_content_types(["text/html", "application/json"]) ->
%%            ["text/html"]
%%
%%      4) For an "Accept" header with value "text/html; q=0.5, */*; q=0.1":
%%         accepted_content_types(["application/json", "text/html"]) ->
%%            ["text/html", "application/json"]
%%
accepted_content_types(Types1, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% Normalize by stripping all whitespace from each candidate type.
    Types = lists:map(
              fun(T) -> re:replace(T, "\\s", "", [global, {return, list}]) end,
              Types1),
    AcceptHeader = accept_header(THIS),
    case mochiweb_util:parse_qvalues(AcceptHeader) of
        invalid_qvalue_string ->
            bad_accept_header;
        QList ->
            %% Pair each type with its Q value, preferring an exact match,
            %% then "type/*", then "*/*"; drop types whose Q is not > 0.
            TypesQ = lists:foldr(
                       fun(T, Acc) ->
                               case proplists:get_value(T, QList) of
                                   undefined ->
                                       [MainType, _SubType] = string:tokens(T, "/"),
                                       case proplists:get_value(MainType ++ "/*", QList) of
                                           undefined ->
                                               case proplists:get_value("*/*", QList) of
                                                   Q when is_float(Q), Q > 0.0 ->
                                                       [{Q, T} | Acc];
                                                   _ ->
                                                       Acc
                                               end;
                                           Q when Q > 0.0 ->
                                               [{Q, T} | Acc];
                                           _ ->
                                               Acc
                                       end;
                                   Q when Q > 0.0 ->
                                       [{Q, T} | Acc];
                                   _ ->
                                       Acc
                               end
                       end,
                       [], Types),
            % Note: Stable sort. If 2 types have the same Q value we leave them in the
            % same order as in the input list.
            SortFun = fun({Q1, _}, {Q2, _}) -> Q1 >= Q2 end,
            [Type || {_Q, Type} <- lists:sort(SortFun, TypesQ)]
    end.
+
%% The request's Accept header value, defaulting to "*/*" (accept
%% anything) when the client sent no Accept header.
accept_header({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case get_header_value("Accept", THIS) of
        undefined -> "*/*";
        Header -> Header
    end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb HTTP Request abstraction.
+
+-module(mochiweb_request).
+-author('bob@mochimedia.com').
+
+-include_lib("kernel/include/file.hrl").
+-include("internal.hrl").
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([new/5]).
+-export([get_header_value/2, get_primary_header_value/2, get_combined_header_value/2, get/2, dump/1]).
+-export([send/2, recv/2, recv/3, recv_body/1, recv_body/2, stream_body/4]).
+-export([start_response/2, start_response_length/2, start_raw_response/2]).
+-export([respond/2, ok/2]).
+-export([not_found/1, not_found/2]).
+-export([parse_post/1, parse_qs/1]).
+-export([should_close/1, cleanup/1]).
+-export([parse_cookie/1, get_cookie_value/2]).
+-export([serve_file/3, serve_file/4]).
+-export([accepted_encodings/2]).
+-export([accepts_content_type/2, accepted_content_types/2]).
+
+-define(SAVE_QS, mochiweb_request_qs).
+-define(SAVE_PATH, mochiweb_request_path).
+-define(SAVE_RECV, mochiweb_request_recv).
+-define(SAVE_BODY, mochiweb_request_body).
+-define(SAVE_BODY_LENGTH, mochiweb_request_body_length).
+-define(SAVE_POST, mochiweb_request_post).
+-define(SAVE_COOKIE, mochiweb_request_cookie).
+-define(SAVE_FORCE_CLOSE, mochiweb_request_force_close).
+
+%% @type key() = atom() | string() | binary()
+%% @type value() = atom() | string() | binary() | integer()
+%% @type headers(). A mochiweb_headers structure.
+%% @type request(). A mochiweb_request parameterized module instance.
+%% @type response(). A mochiweb_response parameterized module instance.
+%% @type ioheaders() = headers() | [{key(), value()}].
+
+% 5 minute default idle timeout
+-define(IDLE_TIMEOUT, 300000).
+
+% Maximum recv_body() length of 1MB
+-define(MAX_RECV_BODY, (1024*1024)).
+
%% @spec new(Socket, Method, RawPath, Version, headers()) -> request()
%% @doc Create a new request instance (a parameterized-module tuple
%%      carrying the socket, method, raw path, version and headers).
new(Socket, Method, RawPath, Version, Headers) ->
    State = [Socket, Method, RawPath, Version, Headers],
    {?MODULE, State}.
+
%% @spec get_header_value(K, request()) -> undefined | Value
%% @doc Get the value of a given request header; lookup is delegated to
%%      mochiweb_headers:get_value/2.
get_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
    mochiweb_headers:get_value(K, Headers).
+
%% @spec get_primary_header_value(K, request()) -> undefined | Value
%% @doc Get the "primary" part of a header value via
%%      mochiweb_headers:get_primary_value/2 -- presumably the portion
%%      before any ";" parameters (callers match it against media-type
%%      prefixes); see that module for the exact semantics.
get_primary_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
    mochiweb_headers:get_primary_value(K, Headers).
+
%% @spec get_combined_header_value(K, request()) -> undefined | Value
%% @doc Get a header value via mochiweb_headers:get_combined_value/2 --
%%      used for headers such as Content-Length that may appear more
%%      than once; see that module for the combining semantics.
get_combined_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
    mochiweb_headers:get_combined_value(K, Headers).
+
%% @type field() = socket | scheme | method | raw_path | version | headers | peer | path | body_length | range

%% @spec get(field(), request()) -> term()
%% @doc Return the internal representation of the given field. If
%%      <code>socket</code> is requested on a HTTPS connection, then
%%      an ssl socket will be returned as <code>{ssl, SslSocket}</code>.
%%      You can use <code>SslSocket</code> with the <code>ssl</code>
%%      application, eg: <code>ssl:peercert(SslSocket)</code>.
get(socket, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    Socket;
get(scheme, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    case mochiweb_socket:type(Socket) of
        plain ->
            http;
        ssl ->
            https
    end;
get(method, {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}) ->
    Method;
get(raw_path, {?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
    RawPath;
get(version, {?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}) ->
    Version;
get(headers, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
    Headers;
get(peer, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% X-Forwarded-For is only consulted when the direct peer is a
    %% private 10.0.0.0/8 address or localhost (i.e. a trusted proxy).
    case mochiweb_socket:peername(Socket) of
        {ok, {Addr={10, _, _, _}, _Port}} ->
            case get_header_value("x-forwarded-for", THIS) of
                undefined ->
                    inet_parse:ntoa(Addr);
                Hosts ->
                    %% The last entry was appended by the closest proxy.
                    string:strip(lists:last(string:tokens(Hosts, ",")))
            end;
        {ok, {{127, 0, 0, 1}, _Port}} ->
            case get_header_value("x-forwarded-for", THIS) of
                undefined ->
                    "127.0.0.1";
                Hosts ->
                    string:strip(lists:last(string:tokens(Hosts, ",")))
            end;
        {ok, {Addr, _Port}} ->
            inet_parse:ntoa(Addr);
        {error, enotconn} ->
            %% Client already disconnected: terminate quietly.
            exit(normal)
    end;
get(path, {?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
    %% Unquoted path component; memoized in the process dictionary.
    case erlang:get(?SAVE_PATH) of
        undefined ->
            {Path0, _, _} = mochiweb_util:urlsplit_path(RawPath),
            Path = mochiweb_util:unquote(Path0),
            put(?SAVE_PATH, Path),
            Path;
        Cached ->
            Cached
    end;
get(body_length, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% Memoized as {cached, Len} so a computed 'undefined' is cached too.
    case erlang:get(?SAVE_BODY_LENGTH) of
        undefined ->
            BodyLength = body_length(THIS),
            put(?SAVE_BODY_LENGTH, {cached, BodyLength}),
            BodyLength;
        {cached, Cached} ->
            Cached
    end;
get(range, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case get_header_value(range, THIS) of
        undefined ->
            undefined;
        RawRange ->
            mochiweb_http:parse_range_request(RawRange)
    end.
+
%% @spec dump(request()) -> {mochiweb_request, [{atom(), term()}]}
%% @doc Dump the internal representation to a "human readable" set of
%%      terms for debugging/inspection purposes. The socket is omitted.
dump({?MODULE, [_Socket, Method, RawPath, Version, Headers]}) ->
    Props = [{method, Method},
             {version, Version},
             {raw_path, RawPath},
             {headers, mochiweb_headers:to_list(Headers)}],
    {?MODULE, Props}.
+
%% @spec send(iodata(), request()) -> ok
%% @doc Send data over the socket; any send error terminates the
%%      request process quietly (the peer has gone away).
send(Data, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    case mochiweb_socket:send(Socket, Data) of
        ok ->
            ok;
        _ ->
            exit(normal)
    end.
+
%% @spec recv(integer(), request()) -> binary()
%% @doc Receive Length bytes from the client as a binary, with the default
%%      idle timeout (?IDLE_TIMEOUT).
recv(Length, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    recv(Length, ?IDLE_TIMEOUT, THIS).
+
%% @spec recv(integer(), integer(), request()) -> binary()
%% @doc Receive Length bytes from the client as a binary, with the given
%%      Timeout in msec. Exits the process on error or timeout.
recv(Length, Timeout, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
    case mochiweb_socket:recv(Socket, Length, Timeout) of
        {ok, Data} ->
            %% Record that body data has been read; should_close/1 uses
            %% this to decide whether Keep-Alive is safe.
            put(?SAVE_RECV, true),
            Data;
        _ ->
            exit(normal)
    end.
+
%% @spec body_length(request()) -> undefined | chunked | unknown_transfer_encoding | integer()
%% @doc Infer body length from transfer-encoding and content-length headers.
%% NOTE(review): only the exact lowercase value "chunked" is recognized;
%% other casings fall through to unknown_transfer_encoding -- confirm
%% whether upstream normalizes the header value.
body_length({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case get_header_value("transfer-encoding", THIS) of
        undefined ->
            %% Combined value tolerates a repeated Content-Length header.
            case get_combined_header_value("content-length", THIS) of
                undefined ->
                    undefined;
                Length ->
                    list_to_integer(Length)
            end;
        "chunked" ->
            chunked;
        Unknown ->
            {unknown_transfer_encoding, Unknown}
    end.
+
+
%% @spec recv_body(request()) -> binary()
%% @doc Receive the body of the HTTP request (defined by Content-Length).
%%      Will only receive up to the default max-body length of 1MB.
recv_body({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    recv_body(?MAX_RECV_BODY, THIS).
+
%% @spec recv_body(integer(), request()) -> binary()
%% @doc Receive the body of the HTTP request (defined by Content-Length).
%%      Will receive up to MaxBody bytes; exits with {body_too_large, _}
%%      beyond that. The result is memoized (?SAVE_BODY) because the body
%%      can only be read off the socket once.
recv_body(MaxBody, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case erlang:get(?SAVE_BODY) of
        undefined ->
            % we could use a sane constant for max chunk size
            Body = stream_body(?MAX_RECV_BODY, fun
                %% Final chunk: fold the reversed accumulator into one binary.
                ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
                    iolist_to_binary(lists:reverse(BinAcc));
                ({Length, Bin}, {LengthAcc, BinAcc}) ->
                    NewLength = Length + LengthAcc,
                    if NewLength > MaxBody ->
                        exit({body_too_large, chunked});
                    true ->
                        {NewLength, [Bin | BinAcc]}
                    end
                end, {0, []}, MaxBody, THIS),
            put(?SAVE_BODY, Body),
            Body;
        Cached -> Cached
    end.
+
%% @spec stream_body(integer(), fun(), term(), request()) -> term()
%% @doc Stream the request body through ChunkFun with no limit on the
%%      total body length (delegates to stream_body/5).
stream_body(MaxChunkSize, ChunkFun, FunState, {?MODULE,[_Socket,_Method,_RawPath,_Version,_Headers]}=THIS) ->
    stream_body(MaxChunkSize, ChunkFun, FunState, undefined, THIS).
+
%% @spec stream_body(integer(), fun(), term(), integer() | undefined, request()) -> term()
%% @doc Stream the request body through ChunkFun in chunks of at most
%%      MaxChunkSize bytes, threading FunState. Honours Expect:
%%      100-continue, and exits with {body_too_large, content_length}
%%      when a known Content-Length exceeds MaxBodyLength.
stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength,
            {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    Expect = case get_header_value("expect", THIS) of
                 undefined ->
                     undefined;
                 Value when is_list(Value) ->
                     string:to_lower(Value)
             end,
    case Expect of
        "100-continue" ->
            %% The client is waiting for permission to send the body.
            _ = start_raw_response({100, gb_trees:empty()}, THIS),
            ok;
        _Else ->
            ok
    end,
    case body_length(THIS) of
        undefined ->
            %% No body at all: ChunkFun is never invoked.
            undefined;
        {unknown_transfer_encoding, Unknown} ->
            exit({unknown_transfer_encoding, Unknown});
        chunked ->
            % In this case the MaxBody is actually used to
            % determine the maximum allowed size of a single
            % chunk.
            stream_chunked_body(MaxChunkSize, ChunkFun, FunState, THIS);
        0 ->
            %% Explicit Content-Length: 0 -- note ChunkFun is NOT called.
            <<>>;
        Length when is_integer(Length) ->
            case MaxBodyLength of
                MaxBodyLength when is_integer(MaxBodyLength), MaxBodyLength < Length ->
                    exit({body_too_large, content_length});
                _ ->
                    stream_unchunked_body(Length, ChunkFun, FunState, THIS)
            end
    end.
+
+
%% @spec start_response({integer(), ioheaders()}, request()) -> response()
%% @doc Start the HTTP response by sending the Code HTTP response and
%%      ResponseHeaders. The server will set header defaults such as Server
%%      and Date if not present in ResponseHeaders.
start_response({Code, ResponseHeaders}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    HResponse = mochiweb_headers:make(ResponseHeaders),
    %% Fill in Server/Date only where the caller did not supply them.
    HResponse1 = mochiweb_headers:default_from_list(server_headers(),
                                                    HResponse),
    start_raw_response({Code, HResponse1}, THIS).
+
%% @spec start_raw_response({integer(), headers()}, request()) -> response()
%% @doc Start the HTTP response by sending the Code HTTP response and
%%      ResponseHeaders exactly as given (no header defaulting).
start_raw_response({Code, ResponseHeaders}, {?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}=THIS) ->
    %% Render "Key: Value\r\n" per header, folded onto the blank line
    %% that terminates the header section.
    F = fun ({K, V}, Acc) ->
                [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
        end,
    End = lists:foldl(F, [<<"\r\n">>],
                      mochiweb_headers:to_list(ResponseHeaders)),
    send([make_version(Version), make_code(Code), <<"\r\n">> | End], THIS),
    mochiweb:new_response({THIS, Code, ResponseHeaders}).
+
+
%% @spec start_response_length({integer(), ioheaders(), integer()}, request()) -> response()
%% @doc Start the HTTP response by sending the Code HTTP response and
%%      ResponseHeaders including a Content-Length of Length. The server
%%      will set header defaults such as Server
%%      and Date if not present in ResponseHeaders.
start_response_length({Code, ResponseHeaders, Length},
                      {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    HResponse = mochiweb_headers:make(ResponseHeaders),
    %% enter/3 overwrites any caller-supplied Content-Length.
    HResponse1 = mochiweb_headers:enter("Content-Length", Length, HResponse),
    start_response({Code, HResponse1}, THIS).
+
%% @spec respond({integer(), ioheaders(), iodata() | chunked | {file, IoDevice}}, request()) -> response()
%% @doc Start the HTTP response with start_response, and send Body to the
%%      client (if the get(method) /= 'HEAD'). The Content-Length header
%%      will be set by the Body length, and the server will insert header
%%      defaults.
respond({Code, ResponseHeaders, {file, IoDevice}},
        {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% File body: send headers with the device size, then stream the
    %% contents (HEAD requests get headers only).
    Length = mochiweb_io:iodevice_size(IoDevice),
    Response = start_response_length({Code, ResponseHeaders, Length}, THIS),
    case Method of
        'HEAD' ->
            ok;
        _ ->
            mochiweb_io:iodevice_stream(
              fun (Body) -> send(Body, THIS) end,
              IoDevice)
    end,
    Response;
respond({Code, ResponseHeaders, chunked}, {?MODULE, [_Socket, Method, _RawPath, Version, _Headers]}=THIS) ->
    HResponse = mochiweb_headers:make(ResponseHeaders),
    HResponse1 = case Method of
                     'HEAD' ->
                         %% This is what Google does, http://www.google.com/
                         %% is chunked but HEAD gets Content-Length: 0.
                         %% The RFC is ambiguous so emulating Google is smart.
                         mochiweb_headers:enter("Content-Length", "0",
                                                HResponse);
                     _ when Version >= {1, 1} ->
                         %% Only use chunked encoding for HTTP/1.1
                         mochiweb_headers:enter("Transfer-Encoding", "chunked",
                                                HResponse);
                     _ ->
                         %% For pre-1.1 clients we send the data as-is
                         %% without a Content-Length header and without
                         %% chunk delimiters. Since the end of the document
                         %% is now ambiguous we must force a close.
                         put(?SAVE_FORCE_CLOSE, true),
                         HResponse
                 end,
    start_response({Code, HResponse1}, THIS);
respond({Code, ResponseHeaders, Body}, {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}=THIS) ->
    %% In-memory iodata body.
    Response = start_response_length({Code, ResponseHeaders, iolist_size(Body)}, THIS),
    case Method of
        'HEAD' ->
            ok;
        _ ->
            send(Body, THIS)
    end,
    Response.
+
%% @spec not_found(request()) -> response()
%% @doc Alias for <code>not_found([])</code>.
not_found({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=Req) ->
    not_found([], Req).
+
%% @spec not_found(ExtraHeaders, request()) -> response()
%% @doc Send a plain-text 404 "Not found." response with the given
%%      extra headers appended after Content-Type.
not_found(ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    Headers = [{"Content-Type", "text/plain"} | ExtraHeaders],
    respond({404, Headers, <<"Not found.">>}, THIS).
+
%% @spec ok({value(), iodata()} | {value(), ioheaders(), iodata() | {file, IoDevice}}, request()) ->
%%           response()
%% @doc respond({200, [{"Content-Type", ContentType} | Headers], Body}),
%%      honouring a Range header with a 206 partial response when possible.
ok({ContentType, Body}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    ok({ContentType, [], Body}, THIS);
ok({ContentType, ResponseHeaders, Body}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    HResponse = mochiweb_headers:make(ResponseHeaders),
    case THIS:get(range) of
        X when (X =:= undefined orelse X =:= fail) orelse Body =:= chunked ->
            %% http://code.google.com/p/mochiweb/issues/detail?id=54
            %% Range header not supported when chunked, return 200 and provide
            %% full response.
            HResponse1 = mochiweb_headers:enter("Content-Type", ContentType,
                                                HResponse),
            respond({200, HResponse1, Body}, THIS);
        Ranges ->
            {PartList, Size} = range_parts(Body, Ranges),
            case PartList of
                [] -> %% no valid ranges
                    HResponse1 = mochiweb_headers:enter("Content-Type",
                                                        ContentType,
                                                        HResponse),
                    %% could be 416, for now we'll just return 200
                    respond({200, HResponse1, Body}, THIS);
                PartList ->
                    %% Valid ranges: multipart/byteranges body with 206.
                    {RangeHeaders, RangeBody} =
                        mochiweb_multipart:parts_to_body(PartList, ContentType, Size),
                    HResponse1 = mochiweb_headers:enter_from_list(
                                   [{"Accept-Ranges", "bytes"} |
                                    RangeHeaders],
                                   HResponse),
                    respond({206, HResponse1, RangeBody}, THIS)
            end
    end.
+
%% @spec should_close(request()) -> bool()
%% @doc Return true if the connection must be closed. If false, using
%%      Keep-Alive should be safe. Inputs: a force-close flag (set when a
%%      pre-1.1 chunked response was sent), the HTTP version, the
%%      Connection header, and whether body data is still unread.
should_close({?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}=THIS) ->
    %% ?SAVE_FORCE_CLOSE is put in the process dictionary by respond/2
    %% when the end of the response body is ambiguous to the client.
    ForceClose = erlang:get(?SAVE_FORCE_CLOSE) =/= undefined,
    %% ?SAVE_RECV is set once body data has been read from the socket;
    %% if unset, any request body is still sitting unread there.
    DidNotRecv = erlang:get(?SAVE_RECV) =:= undefined,
    ForceClose orelse Version < {1, 0}
        %% Connection: close
        orelse is_close(get_header_value("connection", THIS))
        %% HTTP 1.0 requires Connection: Keep-Alive
        orelse (Version =:= {1, 0}
                andalso get_header_value("connection", THIS) =/= "Keep-Alive")
        %% unread data left on the socket, can't safely continue
        orelse (DidNotRecv
                andalso get_combined_header_value("content-length", THIS) =/= undefined
                andalso list_to_integer(get_combined_header_value("content-length", THIS)) > 0)
        orelse (DidNotRecv
                andalso get_header_value("transfer-encoding", THIS) =:= "chunked").
+
%% Case-insensitively test whether a Connection header value is "close".
%% Anything that is not a five-character list yields false.
is_close(Value = [_, _, _, _, _]) ->
    "close" =:= string:to_lower(Value);
is_close(_) ->
    false.
+
%% @spec cleanup(request()) -> ok
%% @doc Clean up any junk in the process dictionary, required before continuing
%%      a Keep-Alive request.
cleanup({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}) ->
    Keys = [?SAVE_QS, ?SAVE_PATH, ?SAVE_RECV, ?SAVE_BODY, ?SAVE_BODY_LENGTH,
            ?SAVE_POST, ?SAVE_COOKIE, ?SAVE_FORCE_CLOSE],
    %% erase/1 returns the old value; discard it.
    _ = [erase(K) || K <- Keys],
    ok.
+
%% @spec parse_qs(request()) -> [{Key::string(), Value::string()}]
%% @doc Parse the query string of the URL. The result is memoized in the
%%      process dictionary (?SAVE_QS), so repeated calls are cheap.
parse_qs({?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
    case erlang:get(?SAVE_QS) of
        undefined ->
            %% urlsplit_path/1 returns {Path, QueryString, Fragment}.
            {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
            Parsed = mochiweb_util:parse_qs(QueryString),
            put(?SAVE_QS, Parsed),
            Parsed;
        Cached ->
            Cached
    end.
+
%% @spec get_cookie_value(Key::string, request()) -> string() | undefined
%% @doc Get the value of the given cookie, or undefined when absent.
get_cookie_value(Key, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    Cookies = parse_cookie(THIS),
    proplists:get_value(Key, Cookies).
+
%% @spec parse_cookie(request()) -> [{Key::string(), Value::string()}]
%% @doc Parse the cookie header. The result is memoized in the process
%%      dictionary (?SAVE_COOKIE); a missing Cookie header yields [].
parse_cookie({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case erlang:get(?SAVE_COOKIE) of
        undefined ->
            Cookies = case get_header_value("cookie", THIS) of
                          undefined ->
                              [];
                          Value ->
                              mochiweb_cookies:parse_cookie(Value)
                      end,
            put(?SAVE_COOKIE, Cookies),
            Cookies;
        Cached ->
            Cached
    end.
+
%% @spec parse_post(request()) -> [{Key::string(), Value::string()}]
%% @doc Parse an application/x-www-form-urlencoded form POST. This
%%      has the side-effect of calling recv_body(). The result is
%%      memoized (?SAVE_POST); any other content type, or an absent
%%      body, yields [].
parse_post({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case erlang:get(?SAVE_POST) of
        undefined ->
            Parsed = case recv_body(THIS) of
                         undefined ->
                             [];
                         Binary ->
                             %% Primary value is used so parameters such as
                             %% "; charset=..." do not defeat the prefix match.
                             case get_primary_header_value("content-type",THIS) of
                                 "application/x-www-form-urlencoded" ++ _ ->
                                     mochiweb_util:parse_qs(Binary);
                                 _ ->
                                     []
                             end
                     end,
            put(?SAVE_POST, Parsed),
            Parsed;
        Cached ->
            Cached
    end.
+
%% @spec stream_chunked_body(integer(), fun(), term(), request()) -> term()
%% @doc The function is called for each chunk.
%%      Used internally by read_chunked_body.
stream_chunked_body(MaxChunkSize, Fun, FunState,
                    {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
    case read_chunk_length(THIS) of
        0 ->
            %% Final (zero-length) chunk: read_chunk(0, ...) consumes any
            %% trailers and Fun sees them as {0, Footers}.
            Fun({0, read_chunk(0, THIS)}, FunState);
        Length when Length > MaxChunkSize ->
            %% Oversized chunk: deliver it to Fun in MaxChunkSize slices.
            NewState = read_sub_chunks(Length, MaxChunkSize, Fun, FunState, THIS),
            stream_chunked_body(MaxChunkSize, Fun, NewState, THIS);
        Length ->
            NewState = Fun({Length, read_chunk(Length, THIS)}, FunState),
            stream_chunked_body(MaxChunkSize, Fun, NewState, THIS)
    end.
+
+%% @spec stream_unchunked_body(integer(), fun(), term(), request()) -> term()
+%% @doc Stream a non-chunked body of Length bytes to Fun in pieces of at
+%% most ?RECBUF_SIZE bytes; the final call is Fun({0, <<>>}, State).
+stream_unchunked_body(0, Fun, FunState, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}) ->
+    Fun({0, <<>>}, FunState);
+stream_unchunked_body(Length, Fun, FunState,
+                      {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) when Length > 0 ->
+    %% Never ask the socket for more than the receive buffer size at once.
+    PktSize = erlang:min(Length, ?RECBUF_SIZE),
+    Bin = recv(PktSize, THIS),
+    NewState = Fun({PktSize, Bin}, FunState),
+    stream_unchunked_body(Length - PktSize, Fun, NewState, THIS).
+
+%% @spec read_chunk_length(request()) -> integer()
+%% @doc Read the length of the next HTTP chunk.
+%% The chunk-size line is read in {packet, line} mode; the hex digits
+%% before any CR, LF or space (e.g. before chunk extensions) give the
+%% length. Exits normally on receive error (peer close / timeout).
+read_chunk_length({?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ ok = mochiweb_socket:setopts(Socket, [{packet, line}]),
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, Header} ->
+ ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Splitter = fun (C) ->
+ C =/= $\r andalso C =/= $\n andalso C =/= $ 
+ end,
+ {Hex, _Rest} = lists:splitwith(Splitter, binary_to_list(Header)),
+ mochihex:to_int(Hex);
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec read_chunk(integer(), request()) -> Chunk::binary() | [Footer::binary()]
+%% @doc Read in a HTTP chunk of the given length. If Length is 0, then read the
+%% HTTP footers (as a list of binaries, since they're nominal).
+%% Reading the footers also marks the body as received (?SAVE_RECV).
+read_chunk(0, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ ok = mochiweb_socket:setopts(Socket, [{packet, line}]),
+ F = fun (F1, Acc) ->
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, <<"\r\n">>} ->
+ Acc;
+ {ok, Footer} ->
+ F1(F1, [Footer | Acc]);
+ _ ->
+ exit(normal)
+ end
+ end,
+ Footers = F(F, []),
+ ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ put(?SAVE_RECV, true),
+ Footers;
+read_chunk(Length, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
+ case mochiweb_socket:recv(Socket, 2 + Length, ?IDLE_TIMEOUT) of
+ {ok, <<Chunk:Length/binary, "\r\n">>} ->
+ Chunk;
+ _ ->
+ exit(normal)
+ end.
+
+%% @doc Deliver an oversized chunk to Fun in pieces of at most
+%% MaxChunkSize bytes. The final piece (=< MaxChunkSize) is read via
+%% read_chunk/2 so the chunk's trailing CRLF is consumed as well.
+read_sub_chunks(Length, MaxChunkSize, Fun, FunState,
+                {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) when Length > MaxChunkSize ->
+    Bin = recv(MaxChunkSize, THIS),
+    %% byte_size/1 rather than the ambiguous size/1: Bin is a binary.
+    NewState = Fun({byte_size(Bin), Bin}, FunState),
+    read_sub_chunks(Length - MaxChunkSize, MaxChunkSize, Fun, NewState, THIS);
+
+read_sub_chunks(Length, _MaxChunkSize, Fun, FunState,
+                {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+    Fun({Length, read_chunk(Length, THIS)}, FunState).
+
+%% @spec serve_file(Path, DocRoot, request()) -> Response
+%% @doc Serve a file relative to DocRoot with no extra response headers.
+serve_file(Path, DocRoot, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+    ExtraHeaders = [],
+    serve_file(Path, DocRoot, ExtraHeaders, THIS).
+
+%% @spec serve_file(Path, DocRoot, ExtraHeaders, request()) -> Response
+%% @doc Serve a file relative to DocRoot.
+%% Paths that would escape DocRoot (rejected by safe_relative_path) get
+%% a 404; directories are delegated to maybe_redirect/4.
+serve_file(Path, DocRoot, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case mochiweb_util:safe_relative_path(Path) of
+ undefined ->
+ not_found(ExtraHeaders, THIS);
+ RelPath ->
+ FullPath = filename:join([DocRoot, RelPath]),
+ case filelib:is_dir(FullPath) of
+ true ->
+ maybe_redirect(RelPath, FullPath, ExtraHeaders, THIS);
+ false ->
+ maybe_serve_file(FullPath, ExtraHeaders, THIS)
+ end
+ end.
+
+%% Internal API
+
+%% This has the same effect as the DirectoryIndex directive in httpd:
+%% map a directory path to its default index document.
+directory_index(FullPath) ->
+    filename:join(FullPath, "index.html").
+
+%% @doc Serve the directory index for an empty relative path, or 301
+%% redirect "dir" to "dir/" so relative links resolve correctly.
+maybe_redirect([], FullPath, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ maybe_serve_file(directory_index(FullPath), ExtraHeaders, THIS);
+
+maybe_redirect(RelPath, FullPath, ExtraHeaders,
+ {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}=THIS) ->
+ case string:right(RelPath, 1) of
+ "/" ->
+ maybe_serve_file(directory_index(FullPath), ExtraHeaders, THIS);
+ _ ->
+ %% NOTE(review): assumes a Host header is present; a request
+ %% without one would crash the ++ below -- confirm callers.
+ Host = mochiweb_headers:get_value("host", Headers),
+ Location = "http://" ++ Host ++ "/" ++ RelPath ++ "/",
+ LocationBin = list_to_binary(Location),
+ MoreHeaders = [{"Location", Location},
+ {"Content-Type", "text/html"} | ExtraHeaders],
+ Top = <<"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">"
+ "<html><head>"
+ "<title>301 Moved Permanently</title>"
+ "</head><body>"
+ "<h1>Moved Permanently</h1>"
+ "<p>The document has moved <a href=\"">>,
+ Bottom = <<">here</a>.</p></body></html>\n">>,
+ Body = <<Top/binary, LocationBin/binary, Bottom/binary>>,
+ respond({301, MoreHeaders, Body}, THIS)
+ end.
+
+%% @doc Serve File if it exists, honoring If-Modified-Since (304 on an
+%% exact Last-Modified string match); respond 404 otherwise.
+maybe_serve_file(File, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ case file:read_file_info(File) of
+ {ok, FileInfo} ->
+ LastModified = httpd_util:rfc1123_date(FileInfo#file_info.mtime),
+ case get_header_value("if-modified-since", THIS) of
+ LastModified ->
+ respond({304, ExtraHeaders, ""}, THIS);
+ _ ->
+ case file:open(File, [raw, binary]) of
+ {ok, IoDevice} ->
+ ContentType = mochiweb_util:guess_mime(File),
+ Res = ok({ContentType,
+ [{"last-modified", LastModified}
+ | ExtraHeaders],
+ {file, IoDevice}}, THIS),
+ ok = file:close(IoDevice),
+ Res;
+ _ ->
+ not_found(ExtraHeaders, THIS)
+ end
+ end;
+ {error, _} ->
+ not_found(ExtraHeaders, THIS)
+ end.
+
+%% @doc Default response headers: Server identification and current date.
+server_headers() ->
+ [{"Server", "MochiWeb/1.0 (" ++ ?QUIP ++ ")"},
+ {"Date", httpd_util:rfc1123_date()}].
+
+%% Render a status code for the status line: integers become iodata like
+%% ["200", " OK"] via httpd_util:reason_phrase/1; iodata passes through.
+make_code(Code) when is_integer(Code) ->
+    Phrase = httpd_util:reason_phrase(Code),
+    [integer_to_list(Code), [" " | Phrase]];
+make_code(Io) when is_list(Io); is_binary(Io) ->
+    Io.
+
+%% Status-line prefix: only an exact HTTP/1.0 request version gets an
+%% HTTP/1.0 status line; anything else is answered as HTTP/1.1.
+make_version(Version) ->
+    case Version of
+        {1, 0} -> <<"HTTP/1.0 ">>;
+        _ -> <<"HTTP/1.1 ">>
+    end.
+
+%% @doc Compute the byte ranges of a response body for a Range request.
+%% Returns {[{Start, End, PartialBody}], Size}; invalid range specs are
+%% silently skipped. The file clause reads all ranges with one pread/2.
+range_parts({file, IoDevice}, Ranges) ->
+    Size = mochiweb_io:iodevice_size(IoDevice),
+    F = fun (Spec, Acc) ->
+            case mochiweb_http:range_skip_length(Spec, Size) of
+                invalid_range ->
+                    Acc;
+                V ->
+                    [V | Acc]
+            end
+        end,
+    LocNums = lists:foldr(F, [], Ranges),
+    {ok, Data} = file:pread(IoDevice, LocNums),
+    Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
+                               {Skip, Skip + Length - 1, PartialBody}
+                           end,
+                           LocNums, Data),
+    {Bodies, Size};
+range_parts(Body0, Ranges) ->
+    Body = iolist_to_binary(Body0),
+    %% byte_size/1 rather than the ambiguous size/1: Body is a binary.
+    Size = byte_size(Body),
+    F = fun (Spec, Acc) ->
+            case mochiweb_http:range_skip_length(Spec, Size) of
+                invalid_range ->
+                    Acc;
+                {Skip, Length} ->
+                    <<_:Skip/binary, PartialBody:Length/binary, _/binary>> = Body,
+                    [{Skip, Skip + Length - 1, PartialBody} | Acc]
+            end
+        end,
+    {lists:foldr(F, [], Ranges), Size}.
+
+%% @spec accepted_encodings([encoding()], request()) -> [encoding()] | bad_accept_encoding_value
+%% @type encoding() = string().
+%%
+%% @doc Returns a list of encodings accepted by a request. Encodings that are
+%% not supported by the server will not be included in the return list.
+%% This list is computed from the "Accept-Encoding" header and
+%% its elements are ordered, descendingly, according to their Q values.
+%%
+%% Section 14.3 of the RFC 2616 (HTTP 1.1) describes the "Accept-Encoding"
+%% header and the process of determining which server supported encodings
+%% can be used for encoding the body for the request's response.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept-Encoding" header:
+%% accepted_encodings(["gzip", "identity"]) -> ["identity"]
+%%
+%% 2) For an "Accept-Encoding" header with value "gzip, deflate":
+%% accepted_encodings(["gzip", "identity"]) -> ["gzip", "identity"]
+%%
+%% 3) For an "Accept-Encoding" header with value "gzip;q=0.5, deflate":
+%% accepted_encodings(["gzip", "deflate", "identity"]) ->
+%% ["deflate", "gzip", "identity"]
+%%
+accepted_encodings(SupportedEncodings, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ %% A missing header is treated as the empty string; "identity" is the
+ %% default encoding passed to pick_accepted_encodings.
+ AcceptEncodingHeader = case get_header_value("Accept-Encoding", THIS) of
+ undefined ->
+ "";
+ Value ->
+ Value
+ end,
+ case mochiweb_util:parse_qvalues(AcceptEncodingHeader) of
+ invalid_qvalue_string ->
+ bad_accept_encoding_value;
+ QList ->
+ mochiweb_util:pick_accepted_encodings(
+ QList, SupportedEncodings, "identity"
+ )
+ end.
+
+%% @spec accepts_content_type(string() | binary(), request()) -> boolean() | bad_accept_header
+%%
+%% @doc Determines whether a request accepts a given media type by analyzing its
+%% "Accept" header.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept" header:
+%% accepts_content_type("application/json") -> true
+%%
+%% 2) For an "Accept" header with value "text/plain, application/*":
+%% accepts_content_type("application/json") -> true
+%%
+%% 3) For an "Accept" header with value "text/plain, */*; q=0.0":
+%% accepts_content_type("application/json") -> false
+%%
+%% 4) For an "Accept" header with value "text/plain; q=0.5, */*; q=0.1":
+%% accepts_content_type("application/json") -> true
+%%
+%% 5) For an "Accept" header with value "text/*; q=0.0, */*":
+%% accepts_content_type("text/plain") -> false
+%%
+accepts_content_type(ContentType1, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ %% Whitespace is insignificant in media types; strip it before matching.
+ ContentType = re:replace(ContentType1, "\\s", "", [global, {return, list}]),
+ AcceptHeader = accept_header(THIS),
+ case mochiweb_util:parse_qvalues(AcceptHeader) of
+ invalid_qvalue_string ->
+ bad_accept_header;
+ QList ->
+ %% Accepted iff some pattern (exact type, "type/*" or "*/*")
+ %% carries Q > 0 AND neither the exact type nor its "type/*"
+ %% form is explicitly excluded with Q = 0.
+ [MainType, _SubType] = string:tokens(ContentType, "/"),
+ SuperType = MainType ++ "/*",
+ lists:any(
+ fun({"*/*", Q}) when Q > 0.0 ->
+ true;
+ ({Type, Q}) when Q > 0.0 ->
+ Type =:= ContentType orelse Type =:= SuperType;
+ (_) ->
+ false
+ end,
+ QList
+ ) andalso
+ (not lists:member({ContentType, 0.0}, QList)) andalso
+ (not lists:member({SuperType, 0.0}, QList))
+ end.
+
+%% @spec accepted_content_types([string() | binary()], request()) -> [string()] | bad_accept_header
+%%
+%% @doc Filters which of the given media types this request accepts. This filtering
+%% is performed by analyzing the "Accept" header. The returned list is sorted
+%% according to the preferences specified in the "Accept" header (higher Q values
+%% first). If two or more types have the same preference (Q value), their order
+%% in the returned list is the same as their order in the input list.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept" header:
+%% accepted_content_types(["text/html", "application/json"]) ->
+%% ["text/html", "application/json"]
+%%
+%% 2) For an "Accept" header with value "text/html, application/*":
+%% accepted_content_types(["application/json", "text/html"]) ->
+%% ["application/json", "text/html"]
+%%
+%% 3) For an "Accept" header with value "text/html, */*; q=0.0":
+%% accepted_content_types(["text/html", "application/json"]) ->
+%% ["text/html"]
+%%
+%% 4) For an "Accept" header with value "text/html; q=0.5, */*; q=0.1":
+%% accepted_content_types(["application/json", "text/html"]) ->
+%% ["text/html", "application/json"]
+%%
+accepted_content_types(Types1, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+ %% Whitespace is insignificant in media types; strip it before matching.
+ Types = lists:map(
+ fun(T) -> re:replace(T, "\\s", "", [global, {return, list}]) end,
+ Types1),
+ AcceptHeader = accept_header(THIS),
+ case mochiweb_util:parse_qvalues(AcceptHeader) of
+ invalid_qvalue_string ->
+ bad_accept_header;
+ QList ->
+ %% For each candidate find its Q value by most specific match:
+ %% exact type, then "type/*", then "*/*"; keep only Q > 0.
+ TypesQ = lists:foldr(
+ fun(T, Acc) ->
+ case proplists:get_value(T, QList) of
+ undefined ->
+ [MainType, _SubType] = string:tokens(T, "/"),
+ case proplists:get_value(MainType ++ "/*", QList) of
+ undefined ->
+ case proplists:get_value("*/*", QList) of
+ Q when is_float(Q), Q > 0.0 ->
+ [{Q, T} | Acc];
+ _ ->
+ Acc
+ end;
+ Q when Q > 0.0 ->
+ [{Q, T} | Acc];
+ _ ->
+ Acc
+ end;
+ Q when Q > 0.0 ->
+ [{Q, T} | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ [], Types),
+ % Note: Stable sort. If 2 types have the same Q value we leave them in the
+ % same order as in the input list.
+ SortFun = fun({Q1, _}, {Q2, _}) -> Q1 >= Q2 end,
+ [Type || {_Q, Type} <- lists:sort(SortFun, TypesQ)]
+ end.
+
+%% A missing Accept header is treated as "*/*" (accept anything).
+accept_header({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
+    case get_header_value("Accept", THIS) of
+        undefined -> "*/*";
+        Header -> Header
+    end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+-module(mochiweb_request_tests).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Exercises accepts_content_type/1 across exact types, "type/*" and
+%% "*/*" wildcards, q=0 exclusions, and media-type parameters.
+accepts_content_type_test() ->
+ Req1 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "multipart/related"}])),
+ ?assertEqual(true, Req1:accepts_content_type("multipart/related")),
+ ?assertEqual(true, Req1:accepts_content_type(<<"multipart/related">>)),
+
+ Req2 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html"}])),
+ ?assertEqual(false, Req2:accepts_content_type("multipart/related")),
+
+ Req3 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, multipart/*"}])),
+ ?assertEqual(true, Req3:accepts_content_type("multipart/related")),
+
+ Req4 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, multipart/*; q=0.0"}])),
+ ?assertEqual(false, Req4:accepts_content_type("multipart/related")),
+
+ Req5 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, multipart/*; q=0"}])),
+ ?assertEqual(false, Req5:accepts_content_type("multipart/related")),
+
+ Req6 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, */*; q=0.0"}])),
+ ?assertEqual(false, Req6:accepts_content_type("multipart/related")),
+
+ Req7 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "multipart/*; q=0.0, */*"}])),
+ ?assertEqual(false, Req7:accepts_content_type("multipart/related")),
+
+ Req8 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "*/*; q=0.0, multipart/*"}])),
+ ?assertEqual(true, Req8:accepts_content_type("multipart/related")),
+
+ Req9 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "*/*; q=0.0, multipart/related"}])),
+ ?assertEqual(true, Req9:accepts_content_type("multipart/related")),
+
+ Req10 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1"}])),
+ ?assertEqual(true, Req10:accepts_content_type("text/html;level=1")),
+
+ Req11 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1, text/html"}])),
+ ?assertEqual(true, Req11:accepts_content_type("text/html")),
+
+ Req12 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1; q=0.0, text/html"}])),
+ ?assertEqual(false, Req12:accepts_content_type("text/html;level=1")),
+
+ Req13 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1; q=0.0, text/html"}])),
+ ?assertEqual(false, Req13:accepts_content_type("text/html; level=1")),
+
+ Req14 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html;level=1;q=0.1, text/html"}])),
+ ?assertEqual(true, Req14:accepts_content_type("text/html; level=1")).
+
+%% Exercises accepted_encodings/1: identity default, q-value ordering,
+%% "*;q=0" exclusions, and malformed q-values.
+accepted_encodings_test() ->
+ Req1 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([])),
+ ?assertEqual(["identity"],
+ Req1:accepted_encodings(["gzip", "identity"])),
+
+ Req2 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "gzip, deflate"}])),
+ ?assertEqual(["gzip", "identity"],
+ Req2:accepted_encodings(["gzip", "identity"])),
+
+ Req3 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "gzip;q=0.5, deflate"}])),
+ ?assertEqual(["deflate", "gzip", "identity"],
+ Req3:accepted_encodings(["gzip", "deflate", "identity"])),
+
+ Req4 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "identity, *;q=0"}])),
+ ?assertEqual(["identity"],
+ Req4:accepted_encodings(["gzip", "deflate", "identity"])),
+
+ Req5 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "gzip; q=0.1, *;q=0"}])),
+ ?assertEqual(["gzip"],
+ Req5:accepted_encodings(["gzip", "deflate", "identity"])),
+
+ Req6 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "gzip; q=, *;q=0"}])),
+ ?assertEqual(bad_accept_encoding_value,
+ Req6:accepted_encodings(["gzip", "deflate", "identity"])),
+
+ Req7 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "gzip;q=2.0, *;q=0"}])),
+ ?assertEqual(bad_accept_encoding_value,
+ Req7:accepted_encodings(["gzip", "identity"])),
+
+ Req8 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept-Encoding", "deflate, *;q=0.0"}])),
+ ?assertEqual([],
+ Req8:accepted_encodings(["gzip", "identity"])).
+
+%% Exercises accepted_content_types/1: wildcard matching, q-value
+%% ordering, and stable ordering for equal q-values.
+accepted_content_types_test() ->
+ Req1 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html"}])),
+ ?assertEqual(["text/html"],
+ Req1:accepted_content_types(["text/html", "application/json"])),
+
+ Req2 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, */*;q=0"}])),
+ ?assertEqual(["text/html"],
+ Req2:accepted_content_types(["text/html", "application/json"])),
+
+ Req3 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/*, */*;q=0"}])),
+ ?assertEqual(["text/html"],
+ Req3:accepted_content_types(["text/html", "application/json"])),
+
+ Req4 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/*;q=0.8, */*;q=0.5"}])),
+ ?assertEqual(["text/html", "application/json"],
+ Req4:accepted_content_types(["application/json", "text/html"])),
+
+ Req5 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/*;q=0.8, */*;q=0.5"}])),
+ ?assertEqual(["text/html", "application/json"],
+ Req5:accepted_content_types(["text/html", "application/json"])),
+
+ Req6 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/*;q=0.5, */*;q=0.5"}])),
+ ?assertEqual(["application/json", "text/html"],
+ Req6:accepted_content_types(["application/json", "text/html"])),
+
+ Req7 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make(
+ [{"Accept", "text/html;q=0.5, application/json;q=0.5"}])),
+ ?assertEqual(["application/json", "text/html"],
+ Req7:accepted_content_types(["application/json", "text/html"])),
+
+ Req8 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html"}])),
+ ?assertEqual([],
+ Req8:accepted_content_types(["application/json"])),
+
+ Req9 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/*;q=0.9, text/html;q=0.5, */*;q=0.7"}])),
+ ?assertEqual(["application/json", "text/html"],
+ Req9:accepted_content_types(["text/html", "application/json"])).
+
+%% Table-driven check of should_close/0 across Connection-header and
+%% HTTP-version combinations (close vs. keep-alive defaults per version).
+should_close_test() ->
+    ShouldClose =
+        fun (Version, HeaderList) ->
+                Req = mochiweb_request:new(
+                        nil, 'GET', "/", Version,
+                        mochiweb_headers:make(HeaderList)),
+                Req:should_close()
+        end,
+    Cases = [{true,  {1, 1}, [{"Connection", "close"}]},
+             {true,  {1, 0}, [{"Connection", "close"}]},
+             {true,  {1, 1}, [{"Connection", "ClOSe"}]},
+             {false, {1, 1}, [{"Connection", "closer"}]},
+             {false, {1, 1}, []},
+             {true,  {1, 0}, []},
+             {false, {1, 0}, [{"Connection", "Keep-Alive"}]}],
+    lists:foreach(
+      fun ({Expected, Version, HeaderList}) ->
+              ?assertEqual(Expected, ShouldClose(Version, HeaderList))
+      end,
+      Cases),
+    ok.
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Response abstraction.
+
+-module(mochiweb_response).
+-author('bob@mochimedia.com').
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([new/3, get_header_value/2, get/2, dump/1]).
+-export([send/2, write_chunk/2]).
+
+%% @type response(). A mochiweb_response parameterized module instance.
+
+%% @spec new(Request, Code, Headers) -> response()
+%% @doc Create a new mochiweb_response instance.
+new(Request, Code, Headers) ->
+ {?MODULE, [Request, Code, Headers]}.
+
+%% @spec get_header_value(string() | atom() | binary(), response()) ->
+%% string() | undefined
+%% @doc Get the value of the given response header.
+get_header_value(K, {?MODULE, [_Request, _Code, Headers]}) ->
+ mochiweb_headers:get_value(K, Headers).
+
+%% @spec get(request | code | headers, response()) -> term()
+%% @doc Return the internal representation of the given field.
+get(request, {?MODULE, [Request, _Code, _Headers]}) ->
+ Request;
+get(code, {?MODULE, [_Request, Code, _Headers]}) ->
+ Code;
+get(headers, {?MODULE, [_Request, _Code, Headers]}) ->
+ Headers.
+
+%% @spec dump(response()) -> [{atom(), term()}]
+%% @doc Dump the internal representation to a "human readable" set of terms
+%% for debugging/inspection purposes.
+dump({?MODULE, [Request, Code, Headers]}) ->
+ [{request, Request:dump()},
+ {code, Code},
+ {headers, mochiweb_headers:to_list(Headers)}].
+
+%% @spec send(iodata(), response()) -> ok
+%% @doc Send data over the socket if the method is not HEAD.
+%% HEAD responses must carry no body, so the data is dropped.
+send(Data, {?MODULE, [Request, _Code, _Headers]}) ->
+ case Request:get(method) of
+ 'HEAD' ->
+ ok;
+ _ ->
+ Request:send(Data)
+ end.
+
+%% @spec write_chunk(iodata(), response()) -> ok
+%% @doc Write a chunk of a HTTP chunked response. If Data is zero length,
+%% then the chunked response will be finished.
+%% Chunked framing (hex size line + CRLF) only exists from HTTP/1.1;
+%% on older request versions the data is written raw.
+write_chunk(Data, {?MODULE, [Request, _Code, _Headers]}=THIS) ->
+ case Request:get(version) of
+ Version when Version >= {1, 1} ->
+ Length = iolist_size(Data),
+ send([io_lib:format("~.16b\r\n", [Length]), Data, <<"\r\n">>], THIS);
+ _ ->
+ send(Data, THIS)
+ end.
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% @author Asier Azkuenaga Batiz <asier@zebixe.com>
+
+%% @doc HTTP Cookie session. Note that the expiration time travels unencrypted
+%% as far as this module is concerned. In order to achieve more security,
+%% it is advised to use https.
+%% Based on the paper
+%% <a href="http://www.cse.msu.edu/~alexliu/publications/Cookie/cookie.pdf">
+%% "A Secure Cookie Protocol"</a>.
+%% This module is only supported on R15B02 and later, the AES CFB mode is not
+%% available in earlier releases of crypto.
+-module(mochiweb_session).
+-export([generate_session_data/4, generate_session_cookie/4,
+ check_session_cookie/4]).
+
+-export_types([expiration_time/0]).
+-type expiration_time() :: integer().
+-type key_fun() :: fun((string()) -> iolist()).
+
+%% TODO: Import this from elsewhere after attribute types refactor.
+-type header() :: {string(), string()}.
+
+%% @doc Generates a secure encrypted binary combining all the parameters.
+%% The expiration time must be a 32-bit integer. The cookie layout is:
+%% base64url(<<ExpirationTime:32, Hmac:20/binary, IV-and-ciphertext>>)
+%% where the HMAC covers expiry, payload and the caller's session key.
+%% -spec generate_session_data(
+%% ExpirationTime :: expiration_time(),
+%% Data :: iolist(),
+%% FSessionKey :: key_fun(),
+%% ServerKey :: iolist()) -> binary().
+generate_session_data(ExpirationTime, Data, FSessionKey, ServerKey)
+ when is_integer(ExpirationTime), is_function(FSessionKey)->
+ BData = ensure_binary(Data),
+ ExpTime = integer_to_list(ExpirationTime),
+ Key = gen_key(ExpTime, ServerKey),
+ Hmac = gen_hmac(ExpTime, BData, FSessionKey(ExpTime), Key),
+ EData = encrypt_data(BData, Key),
+ mochiweb_base64url:encode(
+ <<ExpirationTime:32/integer, Hmac/binary, EData/binary>>).
+
+%% @doc Convenience wrapper for generate_session_data that returns a
+%% mochiweb cookie with "id" as the key, a max_age of 20000 seconds,
+%% and the current local time as local time.
+%% Callers needing other cookie attributes should build the cookie
+%% themselves from generate_session_data/4.
+%% -spec generate_session_cookie(
+%% ExpirationTime :: expiration_time(),
+%% Data :: iolist(),
+%% FSessionKey :: key_fun(),
+%% ServerKey :: iolist()) -> header().
+generate_session_cookie(ExpirationTime, Data, FSessionKey, ServerKey)
+ when is_integer(ExpirationTime), is_function(FSessionKey)->
+ CookieData = generate_session_data(ExpirationTime, Data,
+ FSessionKey, ServerKey),
+ mochiweb_cookies:cookie("id", CookieData,
+ [{max_age, 20000},
+ {local_time,
+ calendar:universal_time_to_local_time(
+ calendar:universal_time())}]).
+
+%% TODO: This return type is messy to express in the type system.
+%% -spec check_session_cookie(
+ %% ECookie :: binary(),
+ %% ExpirationTime :: expiration_time(),
+ %% FSessionKey :: key_fun(),
+ %% ServerKey :: iolist()) ->
+ %% {Success :: boolean(),
+ %% ExpTimeAndData :: [integer() | binary()]}.
+%% @doc Validate a cookie produced by generate_session_data/4: decode,
+%% re-derive the HMAC and compare it in constant time. Success requires
+%% both the embedded expiry to still be >= ExpirationTime and a MAC match.
+check_session_cookie(ECookie, ExpirationTime, FSessionKey, ServerKey)
+ when is_binary(ECookie), is_integer(ExpirationTime),
+ is_function(FSessionKey) ->
+ case mochiweb_base64url:decode(ECookie) of
+ <<ExpirationTime1:32/integer, BHmac:20/binary, EData/binary>> ->
+ ETString = integer_to_list(ExpirationTime1),
+ Key = gen_key(ETString, ServerKey),
+ Data = decrypt_data(EData, Key),
+ Hmac2 = gen_hmac(ETString,
+ Data,
+ FSessionKey(ETString),
+ Key),
+ {ExpirationTime1 >= ExpirationTime andalso eq(Hmac2, BHmac),
+ [ExpirationTime1, binary_to_list(Data)]};
+ _ ->
+ {false, []}
+ end;
+check_session_cookie(_ECookie, _ExpirationTime, _FSessionKey, _ServerKey) ->
+ {false, []}.
+
+%% 'Constant' time =:= operator for binary, to mitigate timing attacks:
+%% the XOR of every byte pair is folded into an accumulator so the work
+%% done does not depend on where the first difference occurs.
+%% -spec eq(binary(), binary()) -> boolean().
+eq(A, B) when is_binary(A) andalso is_binary(B) ->
+    eq(A, B, 0).
+
+eq(<<X, RestA/binary>>, <<Y, RestB/binary>>, Diff) ->
+    eq(RestA, RestB, Diff bor (X bxor Y));
+eq(<<>>, <<>>, 0) ->
+    true;
+eq(_A, _B, _Diff) ->
+    false.
+
+%% Coerce iodata to a binary; binaries pass through untouched.
+%% -spec ensure_binary(iolist()) -> binary().
+ensure_binary(Data) when is_binary(Data) ->
+    Data;
+ensure_binary(Data) when is_list(Data) ->
+    iolist_to_binary(Data).
+
+%% Encrypt Data under Key with AES-128 in CFB mode and a fresh random
+%% IV; the result is <<IV:16/binary, CipherText/binary>>.
+%% -spec encrypt_data(binary(), binary()) -> binary().
+encrypt_data(Data, Key) ->
+    %% strong_rand_bytes/1 (available since R14B03, so within this
+    %% module's stated R15B02+ requirement) draws from the
+    %% cryptographically strong generator; rand_bytes/1 made no such
+    %% guarantee and is deprecated.
+    IV = crypto:strong_rand_bytes(16),
+    Crypt = crypto:aes_cfb_128_encrypt(Key, IV, Data),
+    <<IV/binary, Crypt/binary>>.
+
+%% Inverse of encrypt_data/2: split off the 16-byte IV, then decrypt.
+%% -spec decrypt_data(binary(), binary()) -> binary().
+decrypt_data(<<IV:16/binary, Crypt/binary>>, Key) ->
+ crypto:aes_cfb_128_decrypt(Key, IV, Crypt).
+
+%% Derive the per-expiry encryption key: MD5-HMAC of the expiration time
+%% string under the server key (16 bytes, matching AES-128).
+%% -spec gen_key(iolist(), iolist()) -> binary().
+gen_key(ExpirationTime, ServerKey)->
+ crypto:md5_mac(ServerKey, [ExpirationTime]).
+
+%% 20-byte SHA-1 HMAC binding expiry, payload and session key together.
+%% -spec gen_hmac(iolist(), binary(), iolist(), binary()) -> binary().
+gen_hmac(ExpirationTime, Data, SessionKey, Key) ->
+ crypto:sha_mac(Key, [ExpirationTime, Data, SessionKey]).
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% EUnit fixture: start crypto once, then run the round-trip cases.
+generate_check_session_cookie_test_() ->
+ {setup,
+ fun setup_server_key/0,
+ fun generate_check_session_cookie/1}.
+
+%% Returns [ServerKey, ReferenceTimestamp] consumed by the cases below.
+setup_server_key() ->
+ crypto:start(),
+ ["adfasdfasfs",30000].
+
+%% Round-trip cases: cookies generated for a future expiry validate
+%% against the reference timestamp; a past expiry yields {false, _};
+%% non-iodata payloads crash generation (function_clause).
+generate_check_session_cookie([ServerKey, TS]) ->
+ Id = fun (A) -> A end,
+ TSFuture = TS + 1000,
+ TSPast = TS - 1,
+ [?_assertEqual(
+ {true, [TSFuture, "alice"]},
+ check_session_cookie(
+ generate_session_data(TSFuture, "alice", Id, ServerKey),
+ TS, Id, ServerKey)),
+ ?_assertEqual(
+ {true, [TSFuture, "alice and"]},
+ check_session_cookie(
+ generate_session_data(TSFuture, "alice and", Id, ServerKey),
+ TS, Id, ServerKey)),
+ ?_assertEqual(
+ {true, [TSFuture, "alice and"]},
+ check_session_cookie(
+ generate_session_data(TSFuture, "alice and", Id, ServerKey),
+ TS, Id,ServerKey)),
+ ?_assertEqual(
+ {true, [TSFuture, "alice and bob"]},
+ check_session_cookie(
+ generate_session_data(TSFuture, "alice and bob",
+ Id, ServerKey),
+ TS, Id, ServerKey)),
+ ?_assertEqual(
+ {true, [TSFuture, "alice jlkjfkjsdfg sdkfjgldsjgl"]},
+ check_session_cookie(
+ generate_session_data(TSFuture, "alice jlkjfkjsdfg sdkfjgldsjgl",
+ Id, ServerKey),
+ TS, Id, ServerKey)),
+ ?_assertEqual(
+ {true, [TSFuture, "alice .'¡'ç+-$%/(&\""]},
+ check_session_cookie(
+ generate_session_data(TSFuture, "alice .'¡'ç+-$%/(&\""
+ ,Id, ServerKey),
+ TS, Id, ServerKey)),
+ ?_assertEqual(
+ {true,[TSFuture,"alice456689875"]},
+ check_session_cookie(
+ generate_session_data(TSFuture, ["alice","456689875"],
+ Id, ServerKey),
+ TS, Id, ServerKey)),
+ ?_assertError(
+ function_clause,
+ check_session_cookie(
+ generate_session_data(TSFuture, {tuple,one},
+ Id, ServerKey),
+ TS, Id,ServerKey)),
+ ?_assertEqual(
+ {false, [TSPast, "bob"]},
+ check_session_cookie(
+ generate_session_data(TSPast, "bob", Id,ServerKey),
+ TS, Id, ServerKey))
+ ].
+-endif.
--- /dev/null
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc MochiWeb socket - wrapper for plain and ssl sockets.
+%% An ssl socket is carried as {ssl, Socket}; a plain gen_tcp socket is
+%% carried bare, and every wrapper dispatches on that shape.
+
+-module(mochiweb_socket).
+
+-export([listen/4, accept/1, recv/3, send/2, close/1, port/1, peername/1,
+ setopts/2, type/1]).
+
+-define(ACCEPT_TIMEOUT, 2000).
+
+%% @doc Open a listen socket on Port. With Ssl=true the SslOpts are
+%% appended and the result is tagged {ssl, ListenSocket}.
+listen(Ssl, Port, Opts, SslOpts) ->
+ case Ssl of
+ true ->
+ case ssl:listen(Port, Opts ++ SslOpts) of
+ {ok, ListenSocket} ->
+ {ok, {ssl, ListenSocket}};
+ {error, _} = Err ->
+ Err
+ end;
+ false ->
+ gen_tcp:listen(Port, Opts)
+ end.
+
+%% @doc Accept a connection, completing the SSL handshake for ssl
+%% listeners; plain accepts use a ?ACCEPT_TIMEOUT ms timeout.
+accept({ssl, ListenSocket}) ->
+ % There's a bug in ssl:transport_accept/2 at the moment, which is the
+ % reason for the try...catch block. Should be fixed in OTP R14.
+ try ssl:transport_accept(ListenSocket) of
+ {ok, Socket} ->
+ case ssl:ssl_accept(Socket) of
+ ok ->
+ {ok, {ssl, Socket}};
+ {error, _} = Err ->
+ Err
+ end;
+ {error, _} = Err ->
+ Err
+ catch
+ error:{badmatch, {error, Reason}} ->
+ {error, Reason}
+ end;
+accept(ListenSocket) ->
+ gen_tcp:accept(ListenSocket, ?ACCEPT_TIMEOUT).
+
+%% @doc Receive Length bytes (0 = whatever is available) within Timeout.
+recv({ssl, Socket}, Length, Timeout) ->
+ ssl:recv(Socket, Length, Timeout);
+recv(Socket, Length, Timeout) ->
+ gen_tcp:recv(Socket, Length, Timeout).
+
+%% @doc Send iodata on the socket.
+send({ssl, Socket}, Data) ->
+ ssl:send(Socket, Data);
+send(Socket, Data) ->
+ gen_tcp:send(Socket, Data).
+
+%% @doc Close the socket.
+close({ssl, Socket}) ->
+ ssl:close(Socket);
+close(Socket) ->
+ gen_tcp:close(Socket).
+
+%% @doc Local port number of the socket.
+port({ssl, Socket}) ->
+ case ssl:sockname(Socket) of
+ {ok, {_, Port}} ->
+ {ok, Port};
+ {error, _} = Err ->
+ Err
+ end;
+port(Socket) ->
+ inet:port(Socket).
+
+%% @doc Address and port of the connected peer.
+peername({ssl, Socket}) ->
+ ssl:peername(Socket);
+peername(Socket) ->
+ inet:peername(Socket).
+
+%% @doc Set socket options (e.g. {packet, line}).
+setopts({ssl, Socket}, Opts) ->
+ ssl:setopts(Socket, Opts);
+setopts(Socket, Opts) ->
+ inet:setopts(Socket, Opts).
+
+%% @doc Transport type of the socket: ssl | plain.
+type({ssl, _}) ->
+ ssl;
+type(_) ->
+ plain.
+
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb socket server.
+
+-module(mochiweb_socket_server).
+-author('bob@mochimedia.com').
+-behaviour(gen_server).
+
+-include("internal.hrl").
+
+-export([start/1, start_link/1, stop/1]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2, code_change/3,
+ handle_info/2]).
+-export([get/2, set/3]).
+
+-record(mochiweb_socket_server,
+ {port,
+ loop,
+ name=undefined,
+ %% NOTE: This is currently ignored.
+ max=2048,
+ ip=any,
+ listen=null,
+ nodelay=false,
+ backlog=128,
+ active_sockets=0,
+ acceptor_pool_size=16,
+ ssl=false,
+ ssl_opts=[{ssl_imp, new}],
+ acceptor_pool=sets:new(),
+ profile_fun=undefined}).
+
+-define(is_old_state(State), not is_record(State, mochiweb_socket_server)).
+
+%% @doc Start the socket server linked to the calling process.
+start_link(Options) ->
+ start_server(start_link, parse_options(Options)).
+
+%% @doc Start the socket server. Only an explicit {link, false} in
+%% Options yields a truly unlinked server today; see the phased plan.
+start(Options) ->
+ case lists:keytake(link, 1, Options) of
+ {value, {_Key, false}, Options1} ->
+ start_server(start, parse_options(Options1));
+ _ ->
+ %% TODO: https://github.com/mochi/mochiweb/issues/58
+ %% [X] Phase 1: Add new APIs (Sep 2011)
+ %% [_] Phase 2: Add deprecation warning
+ %% [_] Phase 3: Change default to {link, false} and ignore link
+ %% [_] Phase 4: Add deprecation warning for {link, _} option
+ %% [_] Phase 5: Remove support for {link, _} option
+ start_link(Options)
+ end.
+
+%% @doc Fetch a property (e.g. port, active_sockets) from a running server.
+get(Name, Property) ->
+ gen_server:call(Name, {get, Property}).
+
+%% @doc Update a property on a running server. Only profile_fun is
+%% settable; any other property just logs a notice.
+set(Name, profile_fun, Fun) ->
+    gen_server:cast(Name, {set, profile_fun, Fun});
+set(Name, Property, _Value) ->
+    %% The module name was previously embedded in the string literal, so
+    %% the log printed the text "?MODULE" verbatim (macros do not expand
+    %% inside strings); interpolate it instead.
+    error_logger:info_msg("~s:set for ~p with ~p not implemented~n",
+                          [?MODULE, Name, Property]).
+
+%% @doc Stop a running server. Accepts a registered name or pid, a
+%% {local, Name} / {global, Name} tuple, or the same option list that was
+%% used to start the server (from which the name is re-derived).
+stop(Name) when is_atom(Name) orelse is_pid(Name) ->
+ gen_server:call(Name, stop);
+stop({Scope, Name}) when Scope =:= local orelse Scope =:= global ->
+ stop(Name);
+stop(Options) ->
+ State = parse_options(Options),
+ stop(State#mochiweb_socket_server.name).
+
+%% Internal API
+
+%% @doc Coerce the argument to a server state record: a record passes
+%% through untouched, anything else is folded as an option proplist onto
+%% the defaults.
+parse_options(#mochiweb_socket_server{} = State) ->
+    State;
+parse_options(Options) ->
+    Defaults = #mochiweb_socket_server{},
+    parse_options(Options, Defaults).
+
+%% Fold an option proplist into the state record. Clause order matters:
+%% {name, undefined} is matched before the is_atom/1 clause so that
+%% 'undefined' means "unregistered" instead of becoming {local, undefined}.
+parse_options([], State) ->
+ State;
+parse_options([{name, L} | Rest], State) when is_list(L) ->
+ Name = {local, list_to_atom(L)},
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{name, A} | Rest], State) when A =:= undefined ->
+ parse_options(Rest, State#mochiweb_socket_server{name=A});
+parse_options([{name, A} | Rest], State) when is_atom(A) ->
+ Name = {local, A},
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{name, Name} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{port, L} | Rest], State) when is_list(L) ->
+ Port = list_to_integer(L),
+ parse_options(Rest, State#mochiweb_socket_server{port=Port});
+parse_options([{port, Port} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{port=Port});
+parse_options([{ip, Ip} | Rest], State) ->
+ %% ip may be the atom 'any', an already-parsed address tuple, or a
+ %% dotted/colon string (parsed via inet_parse:address/1).
+ ParsedIp = case Ip of
+ any ->
+ any;
+ Ip when is_tuple(Ip) ->
+ Ip;
+ Ip when is_list(Ip) ->
+ {ok, IpTuple} = inet_parse:address(Ip),
+ IpTuple
+ end,
+ parse_options(Rest, State#mochiweb_socket_server{ip=ParsedIp});
+parse_options([{loop, Loop} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{loop=Loop});
+parse_options([{backlog, Backlog} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{backlog=Backlog});
+parse_options([{nodelay, NoDelay} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{nodelay=NoDelay});
+parse_options([{acceptor_pool_size, Max} | Rest], State) ->
+ MaxInt = ensure_int(Max),
+ parse_options(Rest,
+ State#mochiweb_socket_server{acceptor_pool_size=MaxInt});
+parse_options([{max, Max} | Rest], State) ->
+ %% Accepted for compatibility but ignored by the server (see record).
+ error_logger:info_report([{warning, "TODO: max is currently unsupported"},
+ {max, Max}]),
+ MaxInt = ensure_int(Max),
+ parse_options(Rest, State#mochiweb_socket_server{max=MaxInt});
+parse_options([{ssl, Ssl} | Rest], State) when is_boolean(Ssl) ->
+ parse_options(Rest, State#mochiweb_socket_server{ssl=Ssl});
+parse_options([{ssl_opts, SslOpts} | Rest], State) when is_list(SslOpts) ->
+ %% {ssl_imp, new} is always forced to the front of the ssl options.
+ SslOpts1 = [{ssl_imp, new} | proplists:delete(ssl_imp, SslOpts)],
+ parse_options(Rest, State#mochiweb_socket_server{ssl_opts=SslOpts1});
+parse_options([{profile_fun, ProfileFun} | Rest], State) when is_function(ProfileFun) ->
+ parse_options(Rest, State#mochiweb_socket_server{profile_fun=ProfileFun}).
+
+
+%% Common start path: F is the gen_server entry point to use
+%% (start | start_link). SSL prerequisites are started first; a server
+%% without a name is started anonymously.
+start_server(F, State=#mochiweb_socket_server{ssl=Ssl, name=Name}) ->
+ ok = prep_ssl(Ssl),
+ case Name of
+ undefined ->
+ gen_server:F(?MODULE, State, []);
+ _ ->
+ gen_server:F(Name, ?MODULE, State, [])
+ end.
+
+%% Start the applications SSL listening depends on (in dependency order);
+%% a no-op for plain sockets. Crashes (badmatch) if any fails to start.
+prep_ssl(false) ->
+    ok;
+prep_ssl(true) ->
+    lists:foreach(
+      fun (App) -> ok = mochiweb:ensure_started(App) end,
+      [crypto, asn1, public_key, ssl]).
+
+%% Coerce an integer-or-numeric-string option value to an integer.
+ensure_int(Value) when is_integer(Value) ->
+    Value;
+ensure_int(Value) when is_list(Value) ->
+    list_to_integer(Value).
+
+%% @doc Probe for IPv6 support by resolving "localhost" over inet6.
+%% Returns true only on a successful lookup. Uses try/catch rather than
+%% the old-style `catch`: previously an exception from inet:getaddr/2
+%% produced {'EXIT', _}, which matched neither case clause and crashed
+%% with case_clause; now any failure simply reports false.
+ipv6_supported() ->
+    try inet:getaddr("localhost", inet6) of
+        {ok, _Addr} ->
+            true;
+        {error, _} ->
+            false
+    catch
+        _:_ ->
+            false
+    end.
+
+%% gen_server init: builds the listen options and opens the listen socket.
+%% trap_exit is set so acceptor deaths arrive as 'EXIT' messages in
+%% handle_info/2 instead of killing the server.
+init(State=#mochiweb_socket_server{ip=Ip, port=Port, backlog=Backlog, nodelay=NoDelay}) ->
+ process_flag(trap_exit, true),
+ BaseOpts = [binary,
+ {reuseaddr, true},
+ {packet, 0},
+ {backlog, Backlog},
+ {recbuf, ?RECBUF_SIZE},
+ {exit_on_close, false},
+ {active, false},
+ {nodelay, NoDelay}],
+ %% The address family is selected from the shape of the configured ip:
+ %% 'any' listens on IPv4 plus IPv6 when available; a 4-tuple is IPv4;
+ %% an 8-tuple is IPv6.
+ Opts = case Ip of
+ any ->
+ case ipv6_supported() of % IPv4, and IPv6 if supported
+ true -> [inet, inet6 | BaseOpts];
+ _ -> BaseOpts
+ end;
+ {_, _, _, _} -> % IPv4
+ [inet, {ip, Ip} | BaseOpts];
+ {_, _, _, _, _, _, _, _} -> % IPv6
+ [inet6, {ip, Ip} | BaseOpts]
+ end,
+ listen(Port, Opts, State).
+
+%% Spawn acceptor_pool_size linked acceptor processes on the listen
+%% socket and record their pids in the pool set.
+new_acceptor_pool(Listen,
+ State=#mochiweb_socket_server{acceptor_pool=Pool,
+ acceptor_pool_size=Size,
+ loop=Loop}) ->
+ F = fun (_, S) ->
+ Pid = mochiweb_acceptor:start_link(self(), Listen, Loop),
+ sets:add_element(Pid, S)
+ end,
+ Pool1 = lists:foldl(F, Pool, lists:seq(1, Size)),
+ State#mochiweb_socket_server{acceptor_pool=Pool1}.
+
+%% Open the listen socket and start the acceptor pool. The port stored in
+%% state is re-read from the socket so that a requested port of 0 is
+%% replaced by the actual ephemeral port.
+listen(Port, Opts, State=#mochiweb_socket_server{ssl=Ssl, ssl_opts=SslOpts}) ->
+ case mochiweb_socket:listen(Ssl, Port, Opts, SslOpts) of
+ {ok, Listen} ->
+ {ok, ListenPort} = mochiweb_socket:port(Listen),
+ {ok, new_acceptor_pool(
+ Listen,
+ State#mochiweb_socket_server{listen=Listen,
+ port=ListenPort})};
+ {error, Reason} ->
+ {stop, Reason}
+ end.
+
+%% Read a single queryable property (port | active_sockets) from state;
+%% any other property is a deliberate function_clause error.
+do_get(port, State) ->
+    State#mochiweb_socket_server.port;
+do_get(active_sockets, State) ->
+    State#mochiweb_socket_server.active_sockets.
+
+
+%% Render the externally interesting parts of the state as a proplist;
+%% this is what profile_fun callbacks receive.
+state_to_proplist(State) ->
+    [{name, State#mochiweb_socket_server.name},
+     {port, State#mochiweb_socket_server.port},
+     {active_sockets, State#mochiweb_socket_server.active_sockets}].
+
+%% Convert a pre-profile_fun state tuple (14 elements, no profile_fun
+%% field) left over from a hot upgrade into the current record; current
+%% records pass through unchanged.
+upgrade_state(State = #mochiweb_socket_server{}) ->
+ State;
+upgrade_state({mochiweb_socket_server, Port, Loop, Name,
+ Max, IP, Listen, NoDelay, Backlog, ActiveSockets,
+ AcceptorPoolSize, SSL, SSL_opts,
+ AcceptorPool}) ->
+ #mochiweb_socket_server{port=Port, loop=Loop, name=Name, max=Max, ip=IP,
+ listen=Listen, nodelay=NoDelay, backlog=Backlog,
+ active_sockets=ActiveSockets,
+ acceptor_pool_size=AcceptorPoolSize,
+ ssl=SSL,
+ ssl_opts=SSL_opts,
+ acceptor_pool=AcceptorPool}.
+
+%% gen_server call handler. An old-format state (see ?is_old_state) is
+%% upgraded lazily on first use; unknown requests reply 'error'.
+handle_call(Req, From, State) when ?is_old_state(State) ->
+ handle_call(Req, From, upgrade_state(State));
+handle_call({get, Property}, _From, State) ->
+ Res = do_get(Property, State),
+ {reply, Res, State};
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+handle_call(_Message, _From, State) ->
+ Res = error,
+ {reply, Res, State}.
+
+
+%% gen_server cast handler; old-format state is upgraded lazily.
+handle_cast(Req, State) when ?is_old_state(State) ->
+ handle_cast(Req, upgrade_state(State));
+handle_cast({accepted, Pid, Timing},
+ State=#mochiweb_socket_server{active_sockets=ActiveSockets}) ->
+ %% An acceptor took a connection: count it, optionally report timing to
+ %% the profile callback (under catch so a bad callback cannot kill the
+ %% server), and replace the acceptor in the pool.
+ State1 = State#mochiweb_socket_server{active_sockets=1 + ActiveSockets},
+ case State#mochiweb_socket_server.profile_fun of
+ undefined ->
+ undefined;
+ F when is_function(F) ->
+ catch F([{timing, Timing} | state_to_proplist(State1)])
+ end,
+ {noreply, recycle_acceptor(Pid, State1)};
+handle_cast({set, profile_fun, ProfileFun}, State) ->
+ %% Only a fun or 'undefined' is accepted; other values are ignored.
+ State1 = case ProfileFun of
+ ProfileFun when is_function(ProfileFun); ProfileFun =:= undefined ->
+ State#mochiweb_socket_server{profile_fun=ProfileFun};
+ _ ->
+ State
+ end,
+ {noreply, State1}.
+
+
+%% gen_server terminate: close the listen socket so the port is released.
+terminate(Reason, State) when ?is_old_state(State) ->
+ terminate(Reason, upgrade_state(State));
+terminate(_Reason, #mochiweb_socket_server{listen=Listen}) ->
+ mochiweb_socket:close(Listen).
+
+%% gen_server code_change callback. The behaviour contract requires
+%% {ok, NewState}; returning the bare state makes sys:change_code/4
+%% treat the result as an error and aborts a release upgrade. State
+%% format migration itself is handled lazily via ?is_old_state in the
+%% handle_* callbacks.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% A process exited or accepted a connection. If it was a pooled acceptor,
+%% replace it with a fresh one; otherwise it was a connection process, so
+%% decrement the active socket count.
+recycle_acceptor(Pid, State=#mochiweb_socket_server{
+ acceptor_pool=Pool,
+ listen=Listen,
+ loop=Loop,
+ active_sockets=ActiveSockets}) ->
+ case sets:is_element(Pid, Pool) of
+ true ->
+ Acceptor = mochiweb_acceptor:start_link(self(), Listen, Loop),
+ Pool1 = sets:add_element(Acceptor, sets:del_element(Pid, Pool)),
+ State#mochiweb_socket_server{acceptor_pool=Pool1};
+ false ->
+ State#mochiweb_socket_server{active_sockets=ActiveSockets - 1}
+ end.
+
+%% gen_server info handler: 'EXIT' messages from linked acceptors and
+%% connection processes (trap_exit is set in init/1), plus the
+%% release_handler get_modules protocol.
+handle_info(Msg, State) when ?is_old_state(State) ->
+ handle_info(Msg, upgrade_state(State));
+handle_info({'EXIT', Pid, normal}, State) ->
+ {noreply, recycle_acceptor(Pid, State)};
+handle_info({'EXIT', Pid, Reason},
+ State=#mochiweb_socket_server{acceptor_pool=Pool}) ->
+ case sets:is_element(Pid, Pool) of
+ true ->
+ %% If there was an unexpected error accepting, log and sleep.
+ error_logger:error_report({?MODULE, ?LINE,
+ {acceptor_error, Reason}}),
+ timer:sleep(100);
+ false ->
+ ok
+ end,
+ {noreply, recycle_acceptor(Pid, State)};
+
+% this is what release_handler needs to get a list of modules,
+% since our supervisor modules list is set to 'dynamic'
+% see sasl-2.1.9.2/src/release_handler_1.erl get_dynamic_mods
+handle_info({From, Tag, get_modules}, State = #mochiweb_socket_server{name={local,Mod}}) ->
+ From ! {element(2,Tag), [Mod]},
+ {noreply, State};
+
+% If for some reason we can't get the module name, send empty list to avoid release_handler timeout:
+handle_info({From, Tag, get_modules}, State) ->
+ error_logger:info_msg("mochiweb_socket_server replying to dynamic modules request as '[]'~n",[]),
+ From ! {element(2,Tag), []},
+ {noreply, State};
+
+%% Catch-all: log and drop so the mailbox cannot accumulate.
+handle_info(Info, State) ->
+ error_logger:info_report([{'INFO', Info}, {'State', State}]),
+ {noreply, State}.
+
+
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Verifies upgrade_state/1 maps each positional field of the legacy
+%% 14-element tuple to the right record field, defaulting profile_fun.
+upgrade_state_test() ->
+ OldState = {mochiweb_socket_server,
+ port, loop, name,
+ max, ip, listen,
+ nodelay, backlog,
+ active_sockets,
+ acceptor_pool_size,
+ ssl, ssl_opts, acceptor_pool},
+ State = upgrade_state(OldState),
+ CmpState = #mochiweb_socket_server{port=port, loop=loop,
+ name=name, max=max, ip=ip,
+ listen=listen, nodelay=nodelay,
+ backlog=backlog,
+ active_sockets=active_sockets,
+ acceptor_pool_size=acceptor_pool_size,
+ ssl=ssl, ssl_opts=ssl_opts,
+ acceptor_pool=acceptor_pool,
+ profile_fun=undefined},
+ ?assertEqual(CmpState, State).
+
+-endif.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing and quoting.
+
+-module(mochiweb_util).
+-author('bob@mochimedia.com').
+-export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
+-export([path_split/1]).
+-export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
+-export([guess_mime/1, parse_header/1]).
+-export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1, cmd_status/2]).
+-export([record_to_proplist/2, record_to_proplist/3]).
+-export([safe_relative_path/1, partition/2]).
+-export([parse_qvalues/1, pick_accepted_encodings/3]).
+-export([make_io/1, rand_bytes/1, rand_uniform/2]).
+
+-define(PERCENT, 37). % $\%
+-define(FULLSTOP, 46). % $\.
+-define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
+ (C >= $a andalso C =< $f) orelse
+ (C >= $A andalso C =< $F))).
+-define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
+ C =:= $_))).
+
+%% Map an integer 0..15 to its uppercase hexadecimal character.
+hexdigit(N) when N < 10 ->
+    N + $0;
+hexdigit(N) when N < 16 ->
+    N - 10 + $A.
+
+%% Map a hexadecimal character (either case) to its value 0..15.
+unhexdigit(D) when D >= $0, D =< $9 ->
+    D - $0;
+unhexdigit(D) when D >= $a, D =< $f ->
+    10 + (D - $a);
+unhexdigit(D) when D >= $A, D =< $F ->
+    10 + (D - $A).
+
+%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
+%% @doc Inspired by Python 2.5's str.partition:
+%% partition("foo/bar", "/") = {"foo", "/", "bar"},
+%% partition("foo", "/") = {"foo", "", ""}.
+%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
+%% @doc Split String at the first occurrence of Sep (Python-style
+%% str.partition): when Sep is absent the whole input is the prefix.
+partition(String, Sep) ->
+    Split = partition(String, Sep, []),
+    case Split of
+        undefined ->
+            {String, "", ""};
+        _ ->
+            Split
+    end.
+
+%% Scan forward one character at a time, trying Sep as a prefix at each
+%% position (via partition2/2); undefined when Sep never matches.
+partition("", _Sep, _Acc) ->
+ undefined;
+partition(S, Sep, Acc) ->
+ case partition2(S, Sep) of
+ undefined ->
+ [C | Rest] = S,
+ partition(Rest, Sep, [C | Acc]);
+ Rest ->
+ {lists:reverse(Acc), Sep, Rest}
+ end.
+
+%% Try Sep as a prefix of S: return the remainder of S after the prefix,
+%% or undefined when Sep is not a prefix.
+partition2(S, []) ->
+    S;
+partition2([Same | SRest], [Same | SepRest]) ->
+    partition2(SRest, SepRest);
+partition2(_S, _Sep) ->
+    undefined.
+
+
+
+%% @spec safe_relative_path(string()) -> string() | undefined
+%% @doc Return the reduced version of a relative path or undefined if it
+%% is not safe. safe relative paths can be joined with an absolute path
+%% and will result in a subdirectory of the absolute path. Safe paths
+%% never contain a backslash character.
+%% @spec safe_relative_path(string()) -> string() | undefined
+%% @doc Return the reduced version of a relative path or undefined if it
+%% is not safe. safe relative paths can be joined with an absolute path
+%% and will result in a subdirectory of the absolute path. Safe paths
+%% never contain a backslash character.
+safe_relative_path("/" ++ _) ->
+    undefined;
+safe_relative_path(P) ->
+    %% lists:member/2 replaces string:chr/2 (part of the old string API,
+    %% deprecated since OTP 21); any backslash makes the path unsafe.
+    case lists:member($\\, P) of
+        false ->
+            safe_relative_path(P, []);
+        true ->
+            undefined
+    end.
+
+%% Reduce the path segment by segment; Acc holds already-accepted
+%% segments in reverse. "" segments (leading or doubled slash) and ".."
+%% past the top are unsafe; ".." otherwise pops a segment. A trailing "/"
+%% is preserved by pushing an empty final segment.
+safe_relative_path("", Acc) ->
+ case Acc of
+ [] ->
+ "";
+ _ ->
+ string:join(lists:reverse(Acc), "/")
+ end;
+safe_relative_path(P, Acc) ->
+ case partition(P, "/") of
+ {"", "/", _} ->
+ %% /foo or foo//bar
+ undefined;
+ {"..", _, _} when Acc =:= [] ->
+ undefined;
+ {"..", _, Rest} ->
+ safe_relative_path(Rest, tl(Acc));
+ {Part, "/", ""} ->
+ safe_relative_path("", ["", Part | Acc]);
+ {Part, _, Rest} ->
+ safe_relative_path(Rest, [Part | Acc])
+ end.
+
+%% @spec shell_quote(string()) -> string()
+%% @doc Quote a string according to UNIX shell quoting rules, returns a string
+%% surrounded by double quotes.
+%% @spec shell_quote(string()) -> string()
+%% @doc Quote a string according to UNIX shell quoting rules, returns a string
+%% surrounded by double quotes. The opening quote seeds the accumulator
+%% of shell_quote/2.
+shell_quote(L) ->
+ shell_quote(L, [$\"]).
+
+%% @spec cmd_port([string()], Options) -> port()
+%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
+%% @spec cmd_port([string()], Options) -> port()
+%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
+%% Argv is shell-quoted element by element before spawning.
+cmd_port(Argv, Options) ->
+ open_port({spawn, cmd_string(Argv)}, Options).
+
+%% @spec cmd([string()]) -> string()
+%% @doc os:cmd(cmd_string(Argv)).
+%% @spec cmd([string()]) -> string()
+%% @doc os:cmd(cmd_string(Argv)). Runs the quoted argv and returns stdout.
+cmd(Argv) ->
+ os:cmd(cmd_string(Argv)).
+
+%% @spec cmd_string([string()]) -> string()
+%% @doc Create a shell quoted command string from a list of arguments.
+%% @spec cmd_string([string()]) -> string()
+%% @doc Create a shell quoted command string from a list of arguments.
+cmd_string(Argv) ->
+ string:join([shell_quote(X) || X <- Argv], " ").
+
+%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%% will be spawned with cmd_port/2.
+%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%% will be spawned with cmd_port/2.
+cmd_status(Argv) ->
+ cmd_status(Argv, []).
+
+%% @spec cmd_status([string()], [atom()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%% will be spawned with cmd_port/2.
+%% @spec cmd_status([string()], [atom()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%% will be spawned with cmd_port/2. The port is always closed in the
+%% `after` clause (port_close under catch: it may already be gone).
+cmd_status(Argv, Options) ->
+ Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
+ use_stdio, binary | Options]),
+ try cmd_loop(Port, [])
+ after catch port_close(Port)
+ end.
+
+%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from a port.
+%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from a port. No `after`
+%% timeout: relies on the exit_status port option to terminate the loop.
+cmd_loop(Port, Acc) ->
+ receive
+ {Port, {exit_status, Status}} ->
+ {Status, iolist_to_binary(lists:reverse(Acc))};
+ {Port, {data, Data}} ->
+ cmd_loop(Port, [Data | Acc])
+ end.
+
+%% @spec join([iolist()], iolist()) -> iolist()
+%% @doc Join a list of strings or binaries together with the given separator
+%% string or char or binary. The output is flattened, but may be an
+%% iolist() instead of a string() if any of the inputs are binary().
+%% @spec join([iolist()], iolist()) -> iolist()
+%% @doc Join a list of strings or binaries together with the given separator
+%% string or char or binary. The output is flattened, but may be an
+%% iolist() instead of a string() if any of the inputs are binary().
+join([], _Separator) ->
+    [];
+join([Single], _Separator) ->
+    lists:flatten(Single);
+join(Parts, Separator) ->
+    Interleaved = revjoin(lists:reverse(Parts), Separator, []),
+    lists:flatten(Interleaved).
+
+%% Interleave Separator between the elements of a reversed list,
+%% producing the forward-order result as a deep list.
+revjoin([], _Separator, Acc) ->
+    Acc;
+revjoin([Last | Rest], Separator, [] = _Acc) ->
+    revjoin(Rest, Separator, [Last]);
+revjoin([S | Rest], Separator, Acc) ->
+    revjoin(Rest, Separator, [S, Separator | Acc]).
+
+%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
+%% @doc URL safe encoding of the given term.
+%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
+%% @doc URL safe encoding of the given term. Non-string terms are first
+%% rendered to strings (floats via mochinum:digits/1).
+quote_plus(Atom) when is_atom(Atom) ->
+ quote_plus(atom_to_list(Atom));
+quote_plus(Int) when is_integer(Int) ->
+ quote_plus(integer_to_list(Int));
+quote_plus(Binary) when is_binary(Binary) ->
+ quote_plus(binary_to_list(Binary));
+quote_plus(Float) when is_float(Float) ->
+ quote_plus(mochinum:digits(Float));
+quote_plus(String) ->
+ quote_plus(String, []).
+
+%% Encode one character at a time: safe characters (?QS_SAFE) pass
+%% through, space becomes '+', everything else becomes %XY with the two
+%% hex nibbles taken from the byte.
+quote_plus([], Acc) ->
+ lists:reverse(Acc);
+quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
+ quote_plus(Rest, [C | Acc]);
+quote_plus([$\s | Rest], Acc) ->
+ quote_plus(Rest, [$+ | Acc]);
+quote_plus([C | Rest], Acc) ->
+ <<Hi:4, Lo:4>> = <<C>>,
+ quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
+
+%% @spec urlencode([{Key, Value}]) -> string()
+%% @doc URL encode the property list.
+%% @spec urlencode([{Key, Value}]) -> string()
+%% @doc URL encode the property list as Key=Value pairs joined by "&".
+urlencode(Props) ->
+    Pairs = lists:map(fun ({K, V}) ->
+                              quote_plus(K) ++ "=" ++ quote_plus(V)
+                      end, Props),
+    string:join(Pairs, "&").
+
+%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
+%% @doc Parse a query string or application/x-www-form-urlencoded.
+%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
+%% @doc Parse a query string or application/x-www-form-urlencoded.
+%% Binaries are converted to lists first; keys/values come back decoded.
+parse_qs(Binary) when is_binary(Binary) ->
+ parse_qs(binary_to_list(Binary));
+parse_qs(String) ->
+ parse_qs(String, []).
+
+%% Repeatedly peel off one key and one value; pairs accumulate in
+%% reverse and are flipped at the end.
+parse_qs([], Acc) ->
+ lists:reverse(Acc);
+parse_qs(String, Acc) ->
+ {Key, Rest} = parse_qs_key(String),
+ {Value, Rest1} = parse_qs_value(Rest),
+ parse_qs(Rest1, [{Key, Value} | Acc]).
+
+%% Entry point for key scanning with an empty accumulator.
+parse_qs_key(String) ->
+ parse_qs_key(String, []).
+
+%% A key ends at '=', ';', '&' or end of input; '=' is consumed, the
+%% pair separators are left for the value scanner (yielding "" values).
+%% The reversed accumulator is percent-decoded by qs_revdecode/1.
+parse_qs_key([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_key([$= | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$; | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$& | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key([C | Rest], Acc) ->
+ parse_qs_key(Rest, [C | Acc]).
+
+%% Entry point for value scanning with an empty accumulator.
+parse_qs_value(String) ->
+ parse_qs_value(String, []).
+
+%% A value ends at ';', '&' (consumed) or end of input; the reversed
+%% accumulator is percent-decoded by qs_revdecode/1.
+parse_qs_value([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_value([$; | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([$& | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([C | Rest], Acc) ->
+ parse_qs_value(Rest, [C | Acc]).
+
+%% @spec unquote(string() | binary()) -> string()
+%% @doc Unquote a URL encoded string.
+%% @spec unquote(string() | binary()) -> string()
+%% @doc Unquote a URL encoded string. The input is reversed because
+%% qs_revdecode/1 consumes reversed character lists.
+unquote(Binary) when is_binary(Binary) ->
+ unquote(binary_to_list(Binary));
+unquote(String) ->
+ qs_revdecode(lists:reverse(String)).
+
+%% Decode a REVERSED percent-encoded character list; returns the decoded
+%% string in forward order.
+qs_revdecode(S) ->
+ qs_revdecode(S, []).
+
+%% Because the input is reversed, a %XY escape appears as [Y, X, $%]; the
+%% decoded byte is rebuilt as (X bsl 4) bor Y. '+' maps back to space.
+qs_revdecode([], Acc) ->
+ Acc;
+qs_revdecode([$+ | Rest], Acc) ->
+ qs_revdecode(Rest, [$\s | Acc]);
+qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
+ qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
+qs_revdecode([C | Rest], Acc) ->
+ qs_revdecode(Rest, [C | Acc]).
+
+%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
+%% @doc Return a 5-tuple, does not expand % escapes. Only supports HTTP style
+%% URLs.
+%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
+%% @doc Return a 5-tuple, does not expand % escapes. Only supports HTTP style
+%% URLs. Missing components come back as "".
+urlsplit(Url) ->
+ {Scheme, Url1} = urlsplit_scheme(Url),
+ {Netloc, Url2} = urlsplit_netloc(Url1),
+ {Path, Query, Fragment} = urlsplit_path(Url2),
+ {Scheme, Netloc, Path, Query, Fragment}.
+
+%% Split a leading "scheme:" off the URL; a scheme-less URL yields
+%% {"", Url} unchanged.
+urlsplit_scheme(Url) ->
+ case urlsplit_scheme(Url, []) of
+ no_scheme ->
+ {"", Url};
+ Res ->
+ Res
+ end.
+
+%% Accumulate scheme characters (alnum, '+', '-', '.'); at ':' with a
+%% non-empty accumulator return the lowercased scheme and the remainder.
+%% Any other character (or an empty scheme) means there is no scheme.
+urlsplit_scheme([C | Rest], Acc)
+  when (C >= $a andalso C =< $z) orelse
+       (C >= $A andalso C =< $Z) orelse
+       (C >= $0 andalso C =< $9) orelse
+       C =:= $+ orelse C =:= $- orelse C =:= $. ->
+    urlsplit_scheme(Rest, [C | Acc]);
+urlsplit_scheme([$: | Rest], [_ | _] = Acc) ->
+    {string:to_lower(lists:reverse(Acc)), Rest};
+urlsplit_scheme(_Rest, _Acc) ->
+    no_scheme.
+
+%% A netloc is only present after a leading "//"; otherwise the whole
+%% remainder is path material.
+urlsplit_netloc("//" ++ Rest) ->
+ urlsplit_netloc(Rest, []);
+urlsplit_netloc(Path) ->
+ {"", Path}.
+
+%% Accumulate netloc characters until end of input or a path/query/
+%% fragment delimiter ('/', '?', '#'), which is left on the remainder.
+urlsplit_netloc(Rest = [C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+    {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | More], Acc) ->
+    urlsplit_netloc(More, [C | Acc]);
+urlsplit_netloc([], Acc) ->
+    {lists:reverse(Acc), ""}.
+
+
+%% @spec path_split(string()) -> {Part, Rest}
+%% @doc Split a path starting from the left, as in URL traversal.
+%% path_split("foo/bar") = {"foo", "bar"},
+%% path_split("/foo/bar") = {"", "foo/bar"}.
+%% @spec path_split(string()) -> {Part, Rest}
+%% @doc Split a path starting from the left, as in URL traversal.
+%% path_split("foo/bar") = {"foo", "bar"},
+%% path_split("/foo/bar") = {"", "foo/bar"}.
+path_split(S) ->
+ path_split(S, []).
+
+%% Accumulate characters until the first "/" (which is consumed) or the
+%% end of the string.
+path_split([$/ | Rest], Acc) ->
+    {lists:reverse(Acc), Rest};
+path_split([C | Rest], Acc) ->
+    path_split(Rest, [C | Acc]);
+path_split([], Acc) ->
+    {lists:reverse(Acc), []}.
+
+
+%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
+%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL from the 5-tuple. Path must be absolute. An empty
+%% scheme emits no "scheme://" prefix.
+urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
+ lists:flatten([case Scheme of "" -> ""; _ -> [Scheme, "://"] end,
+ Netloc,
+ urlunsplit_path({Path, Query, Fragment})]).
+
+%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL path from the 3-tuple.
+%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL path from the 3-tuple; empty query/fragment emit
+%% no '?' / '#' marker.
+urlunsplit_path({Path, Query, Fragment}) ->
+    QueryPart =
+        case Query of
+            "" -> "";
+            _ -> [$? | Query]
+        end,
+    FragmentPart =
+        case Fragment of
+            "" -> "";
+            _ -> [$# | Fragment]
+        end,
+    lists:flatten([Path, QueryPart, FragmentPart]).
+
+%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
+%% @doc Return a 3-tuple, does not expand % escapes. Only supports HTTP style
+%% paths.
+%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
+%% @doc Return a 3-tuple, does not expand % escapes. Only supports HTTP style
+%% paths.
+urlsplit_path(Path) ->
+ urlsplit_path(Path, []).
+
+%% Scan for the first '?' (query follows, possibly with a fragment) or
+%% '#' (fragment follows, query stays empty); both markers are consumed.
+urlsplit_path("", Acc) ->
+ {lists:reverse(Acc), "", ""};
+urlsplit_path("?" ++ Rest, Acc) ->
+ {Query, Fragment} = urlsplit_query(Rest),
+ {lists:reverse(Acc), Query, Fragment};
+urlsplit_path("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), "", Rest};
+urlsplit_path([C | Rest], Acc) ->
+ urlsplit_path(Rest, [C | Acc]).
+
+%% Split "query#fragment" into its two halves.
+urlsplit_query(Query) ->
+ urlsplit_query(Query, []).
+
+%% Accumulate query characters until "#" (consumed; remainder is the
+%% fragment) or the end of input.
+urlsplit_query([$# | Fragment], Acc) ->
+    {lists:reverse(Acc), Fragment};
+urlsplit_query([C | Rest], Acc) ->
+    urlsplit_query(Rest, [C | Acc]);
+urlsplit_query([], Acc) ->
+    {lists:reverse(Acc), ""}.
+
+%% @spec guess_mime(string()) -> string()
+%% @doc Guess the mime type of a file by the extension of its filename.
+%% @spec guess_mime(string()) -> string()
+%% @doc Guess the mime type of a file by the extension of its filename.
+%% Unknown extensions fall back to "text/plain".
+guess_mime(File) ->
+ case mochiweb_mime:from_extension(filename:extension(File)) of
+ undefined ->
+ "text/plain";
+ Mime ->
+ Mime
+ end.
+
+%% @spec parse_header(string()) -> {Type, [{K, V}]}
+%% @doc Parse a Content-Type like header, return the main Content-Type
+%% and a property list of options.
+%% @spec parse_header(string()) -> {Type, [{K, V}]}
+%% @doc Parse a Content-Type like header, return the main Content-Type
+%% and a property list of options. Splits on ';' then on the first '='
+%% of each part; names are lowercased, quoted values are unquoted.
+parse_header(String) ->
+ %% TODO: This is exactly as broken as Python's cgi module.
+ %% Should parse properly like mochiweb_cookies.
+ [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
+ F = fun (S, Acc) ->
+ case lists:splitwith(fun (C) -> C =/= $= end, S) of
+ {"", _} ->
+ %% Skip anything with no name
+ Acc;
+ {_, ""} ->
+ %% Skip anything with no value
+ Acc;
+ {Name, [$\= | Value]} ->
+ [{string:to_lower(string:strip(Name)),
+ unquote_header(string:strip(Value))} | Acc]
+ end
+ end,
+ {string:to_lower(Type),
+ lists:foldr(F, [], Parts)}.
+
+%% Strip surrounding double quotes (and backslash escapes) from a header
+%% value; unquoted values pass through untouched.
+unquote_header("\"" ++ Rest) ->
+ unquote_header(Rest, []);
+unquote_header(S) ->
+ S.
+
+%% Consume a quoted-string body: a backslash escapes the next character,
+%% a lone trailing quote terminates; anything else (including an interior
+%% quote) is kept as-is, matching the cgi-compatible behavior.
+unquote_header([$\\, Escaped | Rest], Acc) ->
+    unquote_header(Rest, [Escaped | Acc]);
+unquote_header([$\"], Acc) ->
+    lists:reverse(Acc);
+unquote_header([], Acc) ->
+    lists:reverse(Acc);
+unquote_header([C | Rest], Acc) ->
+    unquote_header(Rest, [C | Acc]).
+
+%% @spec record_to_proplist(Record, Fields) -> proplist()
+%% @doc calls record_to_proplist/3 with a default TypeKey of '__record'
+%% @spec record_to_proplist(Record, Fields) -> proplist()
+%% @doc calls record_to_proplist/3 with a default TypeKey of '__record'
+record_to_proplist(Record, Fields) ->
+ record_to_proplist(Record, Fields, '__record').
+
+%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
+%% @doc Return a proplist of the given Record with each field in the
+%% Fields list set as a key with the corresponding value in the Record.
+%% TypeKey is the key that is used to store the record type
+%% Fields should be obtained by calling record_info(fields, record_type)
+%% where record_type is the record type of Record
+%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
+%% @doc Return a proplist of the given Record with each field in the
+%% Fields list set as a key with the corresponding value in the Record.
+%% TypeKey is the key that is used to store the record type
+%% Fields should be obtained by calling record_info(fields, record_type)
+%% where record_type is the record type of Record
+record_to_proplist(Record, Fields, TypeKey)
+  when tuple_size(Record) - 1 =:= length(Fields) ->
+    Keys = [TypeKey | Fields],
+    Values = tuple_to_list(Record),
+    lists:zip(Keys, Values).
+
+
+%% Reverse-accumulate the string, escaping the shell metacharacters
+%% ", `, \ and $ with a backslash; terminate with the closing quote.
+shell_quote([C | Rest], Acc)
+  when C =:= $\" orelse C =:= $\` orelse C =:= $\\ orelse C =:= $\$ ->
+    shell_quote(Rest, [C, $\\ | Acc]);
+shell_quote([C | Rest], Acc) ->
+    shell_quote(Rest, [C | Acc]);
+shell_quote([], Acc) ->
+    lists:reverse([$\" | Acc]).
+
+%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
+%% @type qvalue() = {media_type() | encoding() , float()}.
+%% @type media_type() = string().
+%% @type encoding() = string().
+%%
+%% @doc Parses a list (given as a string) of elements with Q values associated
+%% to them. Elements are separated by commas and each element is separated
+%% from its Q value by a semicolon. Q values are optional but when missing
+%% the value of an element is considered as 1.0. A Q value is always in the
+%% range [0.0, 1.0]. A Q value list is used for example as the value of the
+%% HTTP "Accept" and "Accept-Encoding" headers.
+%%
+%% Q values are described in section 2.9 of the RFC 2616 (HTTP 1.1).
+%%
+%% Example:
+%%
+%% parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
+%% [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
+%%
+%% Parse the comma-separated element list; each element's params are
+%% whitespace-normalized and its q parameter (default 1.0) extracted.
+%% Any malformed input crashes inside the mapped fun and the blanket
+%% catch converts that into invalid_qvalue_string — the catch IS the
+%% validation mechanism here.
+parse_qvalues(QValuesStr) ->
+ try
+ lists:map(
+ fun(Pair) ->
+ [Type | Params] = string:tokens(Pair, ";"),
+ NormParams = normalize_media_params(Params),
+ {Q, NonQParams} = extract_q(NormParams),
+ {string:join([string:strip(Type) | NonQParams], ";"), Q}
+ end,
+ string:tokens(string:to_lower(QValuesStr), ",")
+ )
+ catch
+ _Type:_Error ->
+ invalid_qvalue_string
+ end.
+
+%% Compile the whitespace regex once, then strip whitespace from every
+%% media parameter.
+normalize_media_params(Params) ->
+ {ok, Re} = re:compile("\\s"),
+ normalize_media_params(Re, Params, []).
+
+%% Strip all whitespace from each parameter using the precompiled regex,
+%% preserving order (reversed accumulator prepended to the mapped tail).
+normalize_media_params(Re, Params, Acc) ->
+    Strip = fun (P) -> re:replace(P, Re, "", [global, {return, list}]) end,
+    lists:reverse(Acc, [Strip(P) || P <- Params]).
+
+%% Compile the key=value and q-value regexes, then scan the params for a
+%% q entry; returns {Q, ParamsWithoutQ}.
+extract_q(NormParams) ->
+ {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
+ {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
+ extract_q(KVRe, QRe, NormParams, []).
+
+%% Scan params for "q=Value". Note the cases are deliberately
+%% non-exhaustive: a param that is not key=value, a q value rejected by
+%% QRe, or a q outside [0.0, 1.0] raises (match/case_clause), and
+%% parse_qvalues/1's try converts that into invalid_qvalue_string.
+extract_q(_KVRe, _QRe, [], Acc) ->
+ {1.0, lists:reverse(Acc)};
+extract_q(KVRe, QRe, [Param | Rest], Acc) ->
+ case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
+ {match, [Name, Value]} ->
+ case Name of
+ "q" ->
+ {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
+ QVal = case Q of
+ "0" ->
+ 0.0;
+ "1" ->
+ 1.0;
+ Else ->
+ list_to_float(Else)
+ end,
+ case QVal < 0.0 orelse QVal > 1.0 of
+ false ->
+ {QVal, lists:reverse(Acc) ++ Rest}
+ end;
+ _ ->
+ extract_q(KVRe, QRe, Rest, [Param | Acc])
+ end
+ end.
+
+%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
+%% [encoding()]
+%%
+%% @doc Determines which encodings specified in the given Q values list are
+%% valid according to a list of supported encodings and a default encoding.
+%%
+%% The returned list of encodings is sorted, descendingly, according to the
+%% Q values of the given list. The last element of this list is the given
+%% default encoding unless this encoding is explicitily or implicitily
+%% marked with a Q value of 0.0 in the given Q values list.
+%% Note: encodings with the same Q value are kept in the same order as
+%% found in the input Q values list.
+%%
+%% This encoding picking process is described in section 14.3 of the
+%% RFC 2616 (HTTP 1.1).
+%%
+%% Example:
+%%
+%% pick_accepted_encodings(
+%% [{"gzip", 0.5}, {"deflate", 1.0}],
+%% ["gzip", "identity"],
+%% "identity"
+%% ) ->
+%% ["gzip", "identity"]
+%%
+%% See the @doc block above: filter/order SupportedEncs by the client's
+%% Q values, appending DefaultEnc unless it was (explicitly or via "*")
+%% refused with q=0.
+pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
+ %% Stable descending sort by Q (reverse of a stable ascending sort
+ %% keeps equal-Q encodings in input order).
+ SortedQList = lists:reverse(
+ lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
+ ),
+ %% Split into encodings with q > 0 (accepted) and q = 0 (refused).
+ {Accepted, Refused} = lists:foldr(
+ fun({E, Q}, {A, R}) ->
+ case Q > 0.0 of
+ true ->
+ {[E | A], R};
+ false ->
+ {A, [E | R]}
+ end
+ end,
+ {[], []},
+ SortedQList
+ ),
+ %% A refused "*" expands to every supported encoding not explicitly
+ %% accepted.
+ Refused1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Refused
+ ),
+ %% An accepted "*" expands to every supported encoding not already
+ %% mentioned on either list.
+ Accepted1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Accepted
+ ),
+ %% The default encoding is implicitly acceptable (lowest priority).
+ Accepted2 = case lists:member(DefaultEnc, Accepted1) of
+ true ->
+ Accepted1;
+ false ->
+ Accepted1 ++ [DefaultEnc]
+ end,
+ [E || E <- Accepted2, lists:member(E, SupportedEncs),
+ not lists:member(E, Refused1)].
+
+%% Coerce atoms and integers to their printable string form; lists and
+%% binaries (io data) pass through unchanged.
+make_io(Atom) when is_atom(Atom) ->
+    atom_to_list(Atom);
+make_io(Int) when is_integer(Int) ->
+    integer_to_list(Int);
+make_io(IoData) when is_list(IoData); is_binary(IoData) ->
+    IoData.
+
+%% Return Count random bytes as a binary. NOTE: not cryptographically
+%% secure (backed by rand_uniform/2 below).
+rand_bytes(Count) ->
+ list_to_binary([rand_uniform(0, 16#FF + 1) || _ <- lists:seq(1, Count)]).
+
+%% @doc Return a uniformly distributed integer in [Lo, Hi).
+%% Uses the rand module (auto-seeded per process); the legacy random
+%% module is deprecated and, unseeded, produced the same sequence in
+%% every process.
+rand_uniform(Lo, Hi) ->
+    rand:uniform(Hi - Lo) + Lo - 1.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+make_io_test() ->
+ ?assertEqual(
+ <<"atom">>,
+ iolist_to_binary(make_io(atom))),
+ ?assertEqual(
+ <<"20">>,
+ iolist_to_binary(make_io(20))),
+ ?assertEqual(
+ <<"list">>,
+ iolist_to_binary(make_io("list"))),
+ ?assertEqual(
+ <<"binary">>,
+ iolist_to_binary(make_io(<<"binary">>))),
+ ok.
+
+-record(test_record, {field1=f1, field2=f2}).
+record_to_proplist_test() ->
+ ?assertEqual(
+ [{'__record', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{}, record_info(fields, test_record))),
+ ?assertEqual(
+ [{'typekey', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{},
+ record_info(fields, test_record),
+ typekey)),
+ ok.
+
+shell_quote_test() ->
+ ?assertEqual(
+ "\"foo \\$bar\\\"\\`' baz\"",
+ shell_quote("foo $bar\"`' baz")),
+ ok.
+
+cmd_port_test_spool(Port, Acc) ->
+ receive
+ {Port, eof} ->
+ Acc;
+ {Port, {data, {eol, Data}}} ->
+ cmd_port_test_spool(Port, ["\n", Data | Acc]);
+ {Port, Unknown} ->
+ throw({unknown, Unknown})
+ after 1000 ->
+ throw(timeout)
+ end.
+
+cmd_port_test() ->
+ Port = cmd_port(["echo", "$bling$ `word`!"],
+ [eof, stream, {line, 4096}]),
+ Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
+ after catch port_close(Port)
+ end,
+ self() ! {Port, wtf},
+ try cmd_port_test_spool(Port, [])
+ catch throw:{unknown, wtf} -> ok
+ end,
+ try cmd_port_test_spool(Port, [])
+ catch throw:timeout -> ok
+ end,
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ Res).
+
+cmd_test() ->
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ cmd(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_string_test() ->
+ ?assertEqual(
+ "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
+ cmd_string(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_status_test() ->
+ ?assertEqual(
+ {0, <<"$bling$ `word`!\n">>},
+ cmd_status(["echo", "$bling$ `word`!"])),
+ ok.
+
+
+parse_header_test() ->
+ ?assertEqual(
+ {"multipart/form-data", [{"boundary", "AaB03x"}]},
+ parse_header("multipart/form-data; boundary=AaB03x")),
+ %% This tests (currently) intentionally broken behavior
+ ?assertEqual(
+ {"multipart/form-data",
+ [{"b", ""},
+ {"cgi", "is"},
+ {"broken", "true\"e"}]},
+ parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
+ ok.
+
+guess_mime_test() ->
+ "text/plain" = guess_mime(""),
+ "text/plain" = guess_mime(".text"),
+ "application/zip" = guess_mime(".zip"),
+ "application/zip" = guess_mime("x.zip"),
+ "text/html" = guess_mime("x.html"),
+ "application/xhtml+xml" = guess_mime("x.xhtml"),
+ ok.
+
+path_split_test() ->
+ {"", "foo/bar"} = path_split("/foo/bar"),
+ {"foo", "bar"} = path_split("foo/bar"),
+ {"bar", ""} = path_split("bar"),
+ ok.
+
+urlsplit_test() ->
+ {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
+ {"http", "host:port", "/foo", "", "bar?baz"} =
+ urlsplit("http://host:port/foo#bar?baz"),
+ {"http", "host", "", "", ""} = urlsplit("http://host"),
+ {"", "", "/wiki/Category:Fruit", "", ""} =
+ urlsplit("/wiki/Category:Fruit"),
+ ok.
+
+%% urlsplit_path/1 splits a path into {Path, Query, Fragment}; once a '#'
+%% is seen, everything after it (including '?') belongs to the fragment.
+urlsplit_path_test() ->
+ {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
+ {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
+ {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
+ {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
+ {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
+ {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
+ ok.
+
+%% urlunsplit/1 joins {Scheme, Netloc, Path, Query, Fragment} back into a URL.
+urlunsplit_test() ->
+ "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
+ "http://host:port/foo#bar?baz" =
+ urlunsplit({"http", "host:port", "/foo", "", "bar?baz"}),
+ ok.
+
+%% urlunsplit_path/1 is the inverse of urlsplit_path/1 for each case above.
+urlunsplit_path_test() ->
+ "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
+ "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
+ "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
+ "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
+ "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
+ "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
+ ok.
+
+%% join/2 concatenates iolists with a separator; note that a character,
+%% a string, an empty string and an empty binary are all valid separators,
+%% and that binary separators/elements survive into the result unflattened.
+join_test() ->
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], $,)),
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], ",")),
+ ?assertEqual("foo bar",
+ join([["foo", " bar"]], ",")),
+ ?assertEqual("foo bar,baz",
+ join([["foo", " bar"], "baz"], ",")),
+ ?assertEqual("foo",
+ join(["foo"], ",")),
+ ?assertEqual("foobarbaz",
+ join(["foo", "bar", "baz"], "")),
+ ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
+ join(["foo", "bar", "baz"], <<>>)),
+ ?assertEqual("foobar" ++ [<<"baz">>],
+ join(["foo", "bar", <<"baz">>], "")),
+ ?assertEqual("",
+ join([], "any")),
+ ok.
+
+%% quote_plus/1 percent-encodes atoms, numbers, strings and binaries,
+%% mapping space to '+'.
+%% NOTE(review): the "foo\n" assertion is duplicated below — harmless,
+%% but one copy could be dropped.
+quote_plus_test() ->
+ "foo" = quote_plus(foo),
+ "1" = quote_plus(1),
+ "1.1" = quote_plus(1.1),
+ "foo" = quote_plus("foo"),
+ "foo+bar" = quote_plus("foo bar"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%3B%26%3D" = quote_plus("foo;&="),
+ "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
+ ok.
+
+%% unquote/1 reverses quote_plus for both list and binary input.
+unquote_test() ->
+ ?assertEqual("foo bar",
+ unquote("foo+bar")),
+ ?assertEqual("foo bar",
+ unquote("foo%20bar")),
+ ?assertEqual("foo\r\n",
+ unquote("foo%0D%0A")),
+ ?assertEqual("foo\r\n",
+ unquote(<<"foo%0D%0A">>)),
+ ok.
+
+%% urlencode/1 builds an application/x-www-form-urlencoded string from a
+%% proplist; keys and values may be atoms, strings or numbers.
+urlencode_test() ->
+ "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
+ {"baz", "wibble \r\n"},
+ {z, 1}]),
+ ok.
+
+%% parse_qs/1 decodes a query string to a proplist; ';' also separates
+%% pairs, missing keys/values become "", and binaries are accepted.
+parse_qs_test() ->
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
+ ?assertEqual(
+ [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
+ parse_qs("=bar&baz=wibble+%0D%0a&z=")),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
+ ?assertEqual(
+ [],
+ parse_qs("")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}, {"baz", ""}],
+ parse_qs("foo;bar&baz")),
+ ok.
+
+%% partition/2 splits a string on the first occurrence of a (multi-char)
+%% separator, returning {Before, Sep, After}; Sep is "" when not found.
+partition_test() ->
+ {"foo", "", ""} = partition("foo", "/"),
+ {"foo", "/", "bar"} = partition("foo/bar", "/"),
+ {"foo", "/", ""} = partition("foo/", "/"),
+ {"", "/", "bar"} = partition("/bar", "/"),
+ {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
+ ok.
+
+%% safe_relative_path/1 normalizes ".." within a relative path and returns
+%% 'undefined' for anything that escapes the root, is absolute, contains
+%% an empty segment ("//") or a backslash.
+safe_relative_path_test() ->
+ "foo" = safe_relative_path("foo"),
+ "foo/" = safe_relative_path("foo/"),
+ "foo" = safe_relative_path("foo/bar/.."),
+ "bar" = safe_relative_path("foo/../bar"),
+ "bar/" = safe_relative_path("foo/../bar/"),
+ "" = safe_relative_path("foo/.."),
+ "" = safe_relative_path("foo/../"),
+ undefined = safe_relative_path("/foo"),
+ undefined = safe_relative_path("../foo"),
+ undefined = safe_relative_path("foo/../.."),
+ undefined = safe_relative_path("foo//"),
+ undefined = safe_relative_path("foo\\bar"),
+ ok.
+
+%% parse_qvalues/1 parses an Accept-Encoding style header into
+%% {Token, QValue} pairs; a missing q defaults to 1.0, whitespace around
+%% ';' and '=' is tolerated, and malformed q-values (q > 1, non-numeric,
+%% too many decimals) yield the atom 'invalid_qvalue_string'.
+parse_qvalues_test() ->
+ [] = parse_qvalues(""),
+ [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
+ [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip,deflate,identity;q=0.0"
+ ),
+ [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "deflate,gzip,identity;q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
+ parse_qvalues("gzip,deflate,gzip,identity;q=0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip, deflate , identity; q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=1, deflate;q=1.0, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=1.0, identity;q=0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate , identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
+ "gzip; q=0.5,deflate,identity"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
+ parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
+ [{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
+ parse_qvalues("text/html;level=1, text/plain;q=0.5"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;level=1;q=0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;q=0.3;level=1, text/plain"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
+ invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
+ invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
+ ok.
+
+%% pick_accepted_encodings/3 orders SupportedEncodings by the client's
+%% q-values (higher first), dropping q=0 entries; "*" matches any encoding
+%% not listed explicitly, and the default encoding (here "identity") is
+%% included unless the client explicitly forbids it.
+pick_accepted_encodings_test() ->
+ ["identity"] = pick_accepted_encodings(
+ [],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}, {"deflate", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ [] = pick_accepted_encodings(
+ [{"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 0.6}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"deflate", 0.0}, {"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}, {"deflate", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ok.
+
+-endif.
--- /dev/null
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Matthew Dempsky <matthew@mochimedia.com>
+%%
+%% @doc Erlang module for automatically reloading modified modules
+%% during development.
+
+-module(reloader).
+-author("Matthew Dempsky <matthew@mochimedia.com>").
+
+-include_lib("kernel/include/file.hrl").
+
+-behaviour(gen_server).
+-export([start/0, start_link/0]).
+-export([stop/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
+-export([all_changed/0]).
+-export([is_changed/1]).
+-export([reload_modules/1]).
+%% last = timestamp of the previous scan; tref = interval timer reference.
+-record(state, {last, tref}).
+
+%% External API
+
+%% @spec start() -> ServerRet
+%% @doc Start the reloader.
+start() ->
+ gen_server:start({local, ?MODULE}, ?MODULE, [], []).
+
+%% @spec start_link() -> ServerRet
+%% @doc Start the reloader.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @spec stop() -> ok
+%% @doc Stop the reloader.
+stop() ->
+ gen_server:call(?MODULE, stop).
+
+%% gen_server callbacks
+
+%% @spec init([]) -> {ok, State}
+%% @doc gen_server init, opens the server in an initial state.
+%% Schedules a 'doit' message to itself once per second.
+init([]) ->
+ {ok, TRef} = timer:send_interval(timer:seconds(1), doit),
+ {ok, #state{last = stamp(), tref = TRef}}.
+
+%% @spec handle_call(Args, From, State) -> tuple()
+%% @doc gen_server callback.
+handle_call(stop, _From, State) ->
+ {stop, shutdown, stopped, State};
+handle_call(_Req, _From, State) ->
+ {reply, {error, badrequest}, State}.
+
+%% @spec handle_cast(Cast, State) -> tuple()
+%% @doc gen_server callback.
+handle_cast(_Req, State) ->
+ {noreply, State}.
+
+%% @spec handle_info(Info, State) -> tuple()
+%% @doc gen_server callback. On each 'doit' tick, reload modules whose
+%% beam files changed since the previous tick.
+handle_info(doit, State) ->
+ Now = stamp(),
+ _ = doit(State#state.last, Now),
+ {noreply, State#state{last = Now}};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% @spec terminate(Reason, State) -> ok
+%% @doc gen_server termination callback. Cancels the interval timer.
+terminate(_Reason, State) ->
+ {ok, cancel} = timer:cancel(State#state.tref),
+ ok.
+
+
+%% @spec code_change(_OldVsn, State, _Extra) -> State
+%% @doc gen_server code_change callback (trivial).
+code_change(_Vsn, State, _Extra) ->
+ {ok, State}.
+
+%% @spec reload_modules([atom()]) -> [{module, atom()} | {error, term()}]
+%% @doc code:purge/1 and code:load_file/1 the given list of modules in order,
+%% return the results of code:load_file/1.
+reload_modules(Modules) ->
+ [begin code:purge(M), code:load_file(M) end || M <- Modules].
+
+%% @spec all_changed() -> [atom()]
+%% @doc Return a list of beam modules that have changed.
+%% Only modules loaded from a file path (is_list(Fn)) are considered.
+all_changed() ->
+ [M || {M, Fn} <- code:all_loaded(), is_list(Fn), is_changed(M)].
+
+%% @spec is_changed(atom()) -> boolean()
+%% @doc true if the loaded module is a beam with a vsn attribute
+%% and does not match the on-disk beam file, returns false otherwise.
+is_changed(M) ->
+ try
+ module_vsn(M:module_info()) =/= module_vsn(code:get_object_code(M))
+ catch _:_ ->
+ false
+ end.
+
+%% Internal API
+
+%% Extract the vsn of a module, either from a {Module, Beam, Filename}
+%% tuple as returned by code:get_object_code/1 (first clause) or from the
+%% module_info/0 attribute list of a loaded module (second clause).
+module_vsn({M, Beam, _Fn}) ->
+ {ok, {M, Vsn}} = beam_lib:version(Beam),
+ Vsn;
+module_vsn(L) when is_list(L) ->
+ {_, Attrs} = lists:keyfind(attributes, 1, L),
+ {_, Vsn} = lists:keyfind(vsn, 1, Attrs),
+ Vsn.
+
+%% Scan every loaded module that has an on-disk file and reload those whose
+%% beam mtime falls in [From, To).
+%% NOTE(review): timestamps come from erlang:localtime/0, so a backwards
+%% wall-clock change (e.g. DST) can cause a window to be missed — confirm
+%% whether that matters for this development-only tool.
+doit(From, To) ->
+ [case file:read_file_info(Filename) of
+ {ok, #file_info{mtime = Mtime}} when Mtime >= From, Mtime < To ->
+ reload(Module);
+ {ok, _} ->
+ unmodified;
+ {error, enoent} ->
+ %% The Erlang compiler deletes existing .beam files if
+ %% recompiling fails. Maybe it's worth spitting out a
+ %% warning here, but I'd want to limit it to just once.
+ gone;
+ {error, Reason} ->
+ io:format("Error reading ~s's file info: ~p~n",
+ [Filename, Reason]),
+ error
+ end || {Module, Filename} <- code:all_loaded(), is_list(Filename)].
+
+%% Purge and reload Module; if the new code exports test/0, run it and
+%% report the outcome. Returns a status atom for each case.
+reload(Module) ->
+ io:format("Reloading ~p ...", [Module]),
+ code:purge(Module),
+ case code:load_file(Module) of
+ {module, Module} ->
+ io:format(" ok.~n"),
+ case erlang:function_exported(Module, test, 0) of
+ true ->
+ io:format(" - Calling ~p:test() ...", [Module]),
+ case catch Module:test() of
+ ok ->
+ io:format(" ok.~n"),
+ reload;
+ Reason ->
+ io:format(" fail: ~p.~n", [Reason]),
+ reload_but_test_failed
+ end;
+ false ->
+ reload
+ end;
+ {error, Reason} ->
+ io:format(" fail: ~p.~n", [Reason]),
+ error
+ end.
+
+
+%% Wall-clock timestamp used to bound each scan window (see doit/2).
+stamp() ->
+ erlang:localtime().
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
--- /dev/null
+%% -*- erlang -*-
+%% rebar template descriptor for the mochiwebapp skeleton: declares the
+%% substitution variables, then the templates/files copied into {{dest}}
+%% (which defaults to the appid).
+{variables, [{appid, "mochiwebapp"},
+ {author, "Mochi Media <dev@mochimedia.com>"},
+ {year, "2010"},
+ {version, "0.1"},
+ {port, 8080},
+ {dest, "{{appid}}"}]}.
+{dir, "{{dest}}"}.
+{template, "mochiwebapp_skel/src/mochiapp.app.src", "{{dest}}/src/{{appid}}.app.src"}.
+{template, "mochiwebapp_skel/src/mochiapp.erl", "{{dest}}/src/{{appid}}.erl"}.
+{template, "mochiwebapp_skel/src/mochiapp_app.erl", "{{dest}}/src/{{appid}}_app.erl"}.
+{template, "mochiwebapp_skel/src/mochiapp_deps.erl", "{{dest}}/src/{{appid}}_deps.erl"}.
+{template, "mochiwebapp_skel/src/mochiapp_sup.erl", "{{dest}}/src/{{appid}}_sup.erl"}.
+{template, "mochiwebapp_skel/src/mochiapp_web.erl", "{{dest}}/src/{{appid}}_web.erl"}.
+{template, "mochiwebapp_skel/start-dev.sh", "{{dest}}/start-dev.sh"}.
+{template, "mochiwebapp_skel/priv/www/index.html", "{{dest}}/priv/www/index.html"}.
+{file, "../../.gitignore", "{{dest}}/.gitignore"}.
+{file, "../../Makefile", "{{dest}}/Makefile"}.
+{file, "mochiwebapp_skel/rebar.config", "{{dest}}/rebar.config"}.
+{file, "../../rebar", "{{dest}}/rebar"}.
+%% Make the bundled rebar escript and the dev launcher executable.
+{chmod, 8#755, "{{dest}}/rebar"}.
+{chmod, 8#755, "{{dest}}/start-dev.sh"}.
--- /dev/null
+<html>
+<head>
+<title>It Worked</title>
+</head>
+<body>
+{{appid}} running.
+</body>
+</html>
--- /dev/null
+%% -*- erlang -*-
+%% rebar configuration for the generated application: debug_info for the
+%% compiler, mochiweb fetched from git, coverage and surefire-style eunit
+%% reports enabled.
+%% NOTE(review): GitHub no longer serves the git:// protocol; consider
+%% https:// when next touching this template.
+{erl_opts, [debug_info]}.
+{deps, [
+ {mochiweb, ".*",
+ {git, "git://github.com/mochi/mochiweb.git", {branch, "master"}}}]}.
+{cover_enabled, true}.
+{eunit_opts, [verbose, {report,{eunit_surefire,[{dir,"."}]}}]}.
--- /dev/null
+%% -*- erlang -*-
+%% OTP application resource template; {{appid}}/{{version}} are substituted
+%% by the rebar template engine, and rebar fills in the modules list.
+{application, {{appid}},
+ [{description, "{{appid}}"},
+ {vsn, "{{version}}"},
+ {modules, []},
+ {registered, []},
+ {mod, {'{{appid}}_app', []}},
+ {env, []},
+ {applications, [kernel, stdlib, crypto]}]}.
--- /dev/null
+%% @author {{author}}
+%% @copyright {{year}} {{author}}
+
+%% @doc {{appid}}.
+
+-module({{appid}}).
+-author("{{author}}").
+-export([start/0, stop/0]).
+
+%% Start App if it is not already running; any other start error raises a
+%% case_clause (intentional crash — there is no sensible fallback here).
+ensure_started(App) ->
+ case application:start(App) of
+ ok ->
+ ok;
+ {error, {already_started, App}} ->
+ ok
+ end.
+
+
+%% @spec start() -> ok
+%% @doc Start the {{appid}} server.
+%% Ensures dependency paths are on the code path and crypto is running
+%% before starting the application itself.
+start() ->
+ {{appid}}_deps:ensure(),
+ ensure_started(crypto),
+ application:start({{appid}}).
+
+
+%% @spec stop() -> ok
+%% @doc Stop the {{appid}} server.
+stop() ->
+ application:stop({{appid}}).
--- /dev/null
+%% @author {{author}}
+%% @copyright {{year}} {{author}}
+
+%% @doc Callbacks for the {{appid}} application.
+
+-module({{appid}}_app).
+-author("{{author}}").
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for {{appid}}.
+%% Ensures dependency paths are set up, then starts the top supervisor.
+start(_Type, _StartArgs) ->
+ {{appid}}_deps:ensure(),
+ {{appid}}_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for {{appid}}.
+stop(_State) ->
+ ok.
--- /dev/null
+%% @author {{author}}
+%% @copyright {{year}} {{author}}
+
+%% @doc Ensure that the relatively-installed dependencies are on the code
+%% loading path, and locate resources relative
+%% to this application's path.
+
+-module({{appid}}_deps).
+-author("{{author}}").
+
+-export([ensure/0, ensure/1]).
+-export([get_base_dir/0, get_base_dir/1]).
+-export([local_path/1, local_path/2]).
+-export([deps_on_path/0, new_siblings/1]).
+
+%% @spec deps_on_path() -> [ProjNameAndVers]
+%% @doc List of project dependencies on the path.
+%% A dependency is recognized by an "ebin" directory whose grandparent
+%% directory is named "deps".
+deps_on_path() ->
+ F = fun (X, Acc) ->
+ ProjDir = filename:dirname(X),
+ case {filename:basename(X),
+ filename:basename(filename:dirname(ProjDir))} of
+ {"ebin", "deps"} ->
+ [filename:basename(ProjDir) | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ ordsets:from_list(lists:foldl(F, [], code:get_path())).
+
+%% @spec new_siblings(Module) -> [Dir]
+%% @doc Find new siblings paths relative to Module that aren't already on the
+%% code path. Returns the existing ebin/include dirs of each new sibling.
+new_siblings(Module) ->
+ Existing = deps_on_path(),
+ SiblingEbin = filelib:wildcard(local_path(["deps", "*", "ebin"], Module)),
+ Siblings = [filename:dirname(X) || X <- SiblingEbin,
+ ordsets:is_element(
+ filename:basename(filename:dirname(X)),
+ Existing) =:= false],
+ lists:filter(fun filelib:is_dir/1,
+ lists:append([[filename:join([X, "ebin"]),
+ filename:join([X, "include"])] ||
+ X <- Siblings])).
+
+
+%% @spec ensure(Module) -> ok
+%% @doc Ensure that all ebin and include paths for dependencies
+%% of the application for Module are on the code path.
+%% code:clash/0 prints a report of any duplicate modules found.
+ensure(Module) ->
+ code:add_paths(new_siblings(Module)),
+ code:clash(),
+ ok.
+
+%% @spec ensure() -> ok
+%% @doc Ensure that the ebin and include paths for dependencies of
+%% this application are on the code path. Equivalent to
+%% ensure(?MODULE).
+ensure() ->
+ ensure(?MODULE).
+
+%% @spec get_base_dir(Module) -> string()
+%% @doc Return the application directory for Module. It assumes Module is in
+%% a standard OTP layout application in the ebin or src directory.
+get_base_dir(Module) ->
+ {file, Here} = code:is_loaded(Module),
+ filename:dirname(filename:dirname(Here)).
+
+%% @spec get_base_dir() -> string()
+%% @doc Return the application directory for this application. Equivalent to
+%% get_base_dir(?MODULE).
+get_base_dir() ->
+ get_base_dir(?MODULE).
+
+%% @spec local_path([string()], Module) -> string()
+%% @doc Return an application-relative directory from Module's application.
+local_path(Components, Module) ->
+ filename:join([get_base_dir(Module) | Components]).
+
+%% @spec local_path(Components) -> string()
+%% @doc Return an application-relative directory for this application.
+%% Equivalent to local_path(Components, ?MODULE).
+local_path(Components) ->
+ local_path(Components, ?MODULE).
+%% @author {{author}}
+%% @copyright {{year}} {{author}}
+
+%% @doc Supervisor for the {{appid}} application.
+
+-module({{appid}}_sup).
+-author("{{author}}").
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Add processes if necessary.
+%% Diffs the running children against the specs returned by init/1:
+%% children no longer in the specs are terminated and deleted, then any
+%% spec not already running is started.
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+
+ Old = sets:from_list(
+ [Name || {Name, _, _, _} <- supervisor:which_children(?MODULE)]),
+ New = sets:from_list([Name || {Name, _, _, _, _, _} <- Specs]),
+ Kill = sets:subtract(Old, New),
+
+ sets:fold(fun (Id, ok) ->
+ supervisor:terminate_child(?MODULE, Id),
+ supervisor:delete_child(?MODULE, Id),
+ ok
+ end, ok, Kill),
+
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback.
+%% one_for_one with at most 10 restarts in 10 seconds.
+init([]) ->
+ Web = web_specs({{appid}}_web, {{port}}),
+ Processes = [Web],
+ Strategy = {one_for_one, 10, 10},
+ {ok,
+ {Strategy, lists:flatten(Processes)}}.
+
+%% Child spec for the mochiweb HTTP listener, bound to all interfaces on
+%% the templated port, serving priv/www as the docroot.
+web_specs(Mod, Port) ->
+ WebConfig = [{ip, {0,0,0,0}},
+ {port, Port},
+ {docroot, {{appid}}_deps:local_path(["priv", "www"])}],
+ {Mod,
+ {Mod, start, [WebConfig]},
+ permanent, 5000, worker, dynamic}.
--- /dev/null
+%% @author {{author}}
+%% @copyright {{year}} {{author}}
+
+%% @doc Web server for {{appid}}.
+
+-module({{appid}}_web).
+-author("{{author}}").
+
+-export([start/1, stop/0, loop/2]).
+
+%% External API
+
+%% Start the mochiweb listener; the docroot option is consumed here and
+%% the rest is passed through. ?MODULE:loop/2 is called fully qualified
+%% so the handler picks up new code on reload.
+start(Options) ->
+ {DocRoot, Options1} = get_option(docroot, Options),
+ Loop = fun (Req) ->
+ ?MODULE:loop(Req, DocRoot)
+ end,
+ mochiweb_http:start([{name, ?MODULE}, {loop, Loop} | Options1]).
+
+stop() ->
+ mochiweb_http:stop(?MODULE).
+
+%% Per-request handler: GET/HEAD serve static files from DocRoot, POST
+%% returns 404, anything else 501. Any exception becomes a logged 500.
+%% NOTE(review): erlang:get_stacktrace/0 was deprecated in OTP 21 and
+%% removed in OTP 24 — confirm the target OTP before reusing this skeleton.
+loop(Req, DocRoot) ->
+ "/" ++ Path = Req:get(path),
+ try
+ case Req:get(method) of
+ Method when Method =:= 'GET'; Method =:= 'HEAD' ->
+ case Path of
+ _ ->
+ Req:serve_file(Path, DocRoot)
+ end;
+ 'POST' ->
+ case Path of
+ _ ->
+ Req:not_found()
+ end;
+ _ ->
+ Req:respond({501, [], []})
+ end
+ catch
+ Type:What ->
+ Report = ["web request failed",
+ {path, Path},
+ {type, Type}, {what, What},
+ {trace, erlang:get_stacktrace()}],
+ error_logger:error_report(Report),
+ %% NOTE: mustache templates need \\ because they are not awesome.
+ Req:respond({500, [{"Content-Type", "text/plain"}],
+ "request failed, sorry\\n"})
+ end.
+
+%% Internal API
+
+%% Pop Option from an option proplist: {Value | undefined, RestOptions}.
+get_option(Option, Options) ->
+ {proplists:get_value(Option, Options), proplists:delete(Option, Options)}.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+you_should_write_a_test() ->
+ ?assertEqual(
+ "No, but I will!",
+ "Have you written any tests?"),
+ ok.
+
+-endif.
--- /dev/null
+#!/bin/sh
+# NOTE: mustache templates need \\ because they are not awesome.
+exec erl -pa ebin edit deps/*/ebin -boot start_sasl \\
+ -sname {{appid}}_dev \\
+ -s {{appid}} \\
+ -s reloader
--- /dev/null
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIJAJLkNZzERPIUMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
+BAMTCWxvY2FsaG9zdDAeFw0xMDAzMTgxOTM5MThaFw0yMDAzMTUxOTM5MThaMBQx
+EjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAJeUCOZxbmtngF4S5lXckjSDLc+8C+XjMBYBPyy5eKdJY20AQ1s9/hhp3ulI
+8pAvl+xVo4wQ+iBSvOzcy248Q+Xi6+zjceF7UNRgoYPgtJjKhdwcHV3mvFFrS/fp
+9ggoAChaJQWDO1OCfUgTWXImhkw+vcDR11OVMAJ/h73dqzJPI9mfq44PTTHfYtgr
+v4LAQAOlhXIAa2B+a6PlF6sqDqJaW5jLTcERjsBwnRhUGi7JevQzkejujX/vdA+N
+jRBjKH/KLU5h3Q7wUchvIez0PXWVTCnZjpA9aR4m7YV05nKQfxtGd71czYDYk+j8
+hd005jetT4ir7JkAWValBybJVksCAwEAAaN1MHMwHQYDVR0OBBYEFJl9s51SnjJt
+V/wgKWqV5Q6jnv1ZMEQGA1UdIwQ9MDuAFJl9s51SnjJtV/wgKWqV5Q6jnv1ZoRik
+FjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCS5DWcxETyFDAMBgNVHRMEBTADAQH/
+MA0GCSqGSIb3DQEBBQUAA4IBAQB2ldLeLCc+lxK5i0EZquLamMBJwDIjGpT0JMP9
+b4XQOK2JABIu54BQIZhwcjk3FDJz/uOW5vm8k1kYni8FCjNZAaRZzCUfiUYTbTKL
+Rq9LuIAODyP2dnTqyKaQOOJHvrx9MRZ3XVecXPS0Tib4aO57vCaAbIkmhtYpTWmw
+e3t8CAIDVtgvjR6Se0a1JA4LktR7hBu22tDImvCSJn1nVAaHpani6iPBPPdMuMsP
+TBoeQfj8VpqBUjCStqJGa8ytjDFX73YaxV2mgrtGwPNme1x3YNRR11yTu7tksyMO
+GrmgxNriqYRchBhNEf72AKF0LR1ByKwfbDB9rIsV00HtCgOp
+-----END CERTIFICATE-----
--- /dev/null
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAl5QI5nFua2eAXhLmVdySNIMtz7wL5eMwFgE/LLl4p0ljbQBD
+Wz3+GGne6UjykC+X7FWjjBD6IFK87NzLbjxD5eLr7ONx4XtQ1GChg+C0mMqF3Bwd
+Xea8UWtL9+n2CCgAKFolBYM7U4J9SBNZciaGTD69wNHXU5UwAn+Hvd2rMk8j2Z+r
+jg9NMd9i2Cu/gsBAA6WFcgBrYH5ro+UXqyoOolpbmMtNwRGOwHCdGFQaLsl69DOR
+6O6Nf+90D42NEGMof8otTmHdDvBRyG8h7PQ9dZVMKdmOkD1pHibthXTmcpB/G0Z3
+vVzNgNiT6PyF3TTmN61PiKvsmQBZVqUHJslWSwIDAQABAoIBACI8Ky5xHDFh9RpK
+Rn/KC7OUlTpADKflgizWJ0Cgu2F9L9mkn5HyFHvLHa+u7CootbWJOiEejH/UcBtH
+WyMQtX0snYCpdkUpJv5wvMoebGu+AjHOn8tfm9T/2O6rhwgckLyMb6QpGbMo28b1
+p9QiY17BJPZx7qJQJcHKsAvwDwSThlb7MFmWf42LYWlzybpeYQvwpd+UY4I0WXLu
+/dqJIS9Npq+5Y5vbo2kAEAssb2hSCvhCfHmwFdKmBzlvgOn4qxgZ1iHQgfKI6Z3Y
+J0573ZgOVTuacn+lewtdg5AaHFcl/zIYEr9SNqRoPNGbPliuv6k6N2EYcufWL5lR
+sCmmmHECgYEAxm+7OpepGr++K3+O1e1MUhD7vSPkKJrCzNtUxbOi2NWj3FFUSPRU
+adWhuxvUnZgTcgM1+KuQ0fB2VmxXe9IDcrSFS7PKFGtd2kMs/5mBw4UgDZkOQh+q
+kDiBEV3HYYJWRq0w3NQ/9Iy1jxxdENHtGmG9aqamHxNtuO608wGW2S8CgYEAw4yG
+ZyAic0Q/U9V2OHI0MLxLCzuQz17C2wRT1+hBywNZuil5YeTuIt2I46jro6mJmWI2
+fH4S/geSZzg2RNOIZ28+aK79ab2jWBmMnvFCvaru+odAuser4N9pfAlHZvY0pT+S
+1zYX3f44ygiio+oosabLC5nWI0zB2gG8pwaJlaUCgYEAgr7poRB+ZlaCCY0RYtjo
+mYYBKD02vp5BzdKSB3V1zeLuBWM84pjB6b3Nw0fyDig+X7fH3uHEGN+USRs3hSj6
+BqD01s1OT6fyfbYXNw5A1r+nP+5h26Wbr0zblcKxdQj4qbbBZC8hOJNhqTqqA0Qe
+MmzF7jiBaiZV/Cyj4x1f9BcCgYEAhjL6SeuTuOctTqs/5pz5lDikh6DpUGcH8qaV
+o6aRAHHcMhYkZzpk8yh1uUdD7516APmVyvn6rrsjjhLVq4ZAJjwB6HWvE9JBN0TR
+bILF+sREHUqU8Zn2Ku0nxyfXCKIOnxlx/J/y4TaGYqBqfXNFWiXNUrjQbIlQv/xR
+K48g/MECgYBZdQlYbMSDmfPCC5cxkdjrkmAl0EgV051PWAi4wR+hLxIMRjHBvAk7
+IweobkFvT4TICulgroLkYcSa5eOZGxB/DHqcQCbWj3reFV0VpzmTDoFKG54sqBRl
+vVntGt0pfA40fF17VoS7riAdHF53ippTtsovHEsg5tq5NrBl5uKm2g==
+-----END RSA PRIVATE KEY-----
--- /dev/null
+-module(mochiweb_base64url_tests).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Round-trip property: decode(encode(X)) =:= X, for both binary and
+%% list input/output forms.
+id(X) ->
+ ?assertEqual(
+ X,
+ mochiweb_base64url:decode(mochiweb_base64url:encode(X))),
+ ?assertEqual(
+ X,
+ mochiweb_base64url:decode(
+ binary_to_list(mochiweb_base64url:encode(binary_to_list(X))))).
+
+%% Random binary of length in [Short, Long].
+%% NOTE(review): the 'random' module is deprecated since OTP 18 (use
+%% 'rand') — fine here as long as the target OTP still ships it.
+random_binary(Short,Long) ->
+ << <<(random:uniform(256) - 1)>>
+ || _ <- lists:seq(1, Short + random:uniform(1 + Long - Short) - 1) >>.
+
+empty_test() ->
+ id(<<>>).
+
+%% Round-trip every possible single byte.
+onechar_test() ->
+ [id(<<C>>) || C <- lists:seq(0,255)],
+ ok.
+
+%% 1000 random round-trips of 2-6 byte binaries.
+nchar_test() ->
+ %% 1000 tests of 2-6 char strings
+ [id(B) || _ <- lists:seq(1,1000), B <- [random_binary(2, 6)]],
+ ok.
--- /dev/null
+-module(mochiweb_html_tests).
+-include_lib("eunit/include/eunit.hrl").
+
+%% to_html/1 serializes the mochiweb HTML tree: atoms/binaries as tags,
+%% {'=', Bin} emits raw data, {comment, Bin} a comment, {doctype, [...]}
+%% a DOCTYPE, and {pi, Tag, Attrs} a processing instruction.
+to_html_test() ->
+ ?assertEqual(
+ <<"<html><head><title>hey!</title></head><body><p class=\"foo\">what's up<br /></p><div>sucka</div>RAW!<!-- comment! --></body></html>">>,
+ iolist_to_binary(
+ mochiweb_html:to_html({html, [],
+ [{<<"head">>, [],
+ [{title, <<"hey!">>}]},
+ {body, [],
+ [{p, [{class, foo}], [<<"what's">>, <<" up">>, {br}]},
+ {'div', <<"sucka">>},
+ {'=', <<"RAW!">>},
+ {comment, <<" comment! ">>}]}]}))),
+ ?assertEqual(
+ <<"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">">>,
+ iolist_to_binary(
+ mochiweb_html:to_html({doctype,
+ [<<"html">>, <<"PUBLIC">>,
+ <<"-//W3C//DTD XHTML 1.0 Transitional//EN">>,
+ <<"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">>]}))),
+ ?assertEqual(
+ <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>,
+ iolist_to_binary(
+ mochiweb_html:to_html({<<"html">>,[],
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]}))),
+ ok.
+
+%% mochiweb_html:escape/1 escapes '&', '<' and '>' (but not quotes) for
+%% binary, list and atom input alike.
+%% NOTE(review): this hunk was HTML-entity-decoded in transit, leaving
+%% invalid Erlang (e.g. <<""\"word ...); the &amp;/&gt;/&lt; sequences
+%% below restore the upstream mochiweb test literals.
+escape_test() ->
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ mochiweb_html:escape(<<"&quot;\"word ><<up!&quot;">>)),
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ mochiweb_html:escape("&quot;\"word ><<up!&quot;")),
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ mochiweb_html:escape('&quot;\"word ><<up!&quot;')),
+ ok.
+
+%% mochiweb_html:escape_attr/1 additionally escapes '"' to &quot; and
+%% stringifies integers and floats.
+%% NOTE(review): restored from upstream — this hunk was HTML-entity-decoded
+%% in transit, leaving invalid Erlang literals.
+escape_attr_test() ->
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ mochiweb_html:escape_attr(<<"&quot;\"word ><<up!&quot;">>)),
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ mochiweb_html:escape_attr("&quot;\"word ><<up!&quot;")),
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ mochiweb_html:escape_attr('&quot;\"word ><<up!&quot;')),
+ ?assertEqual(
+ <<"12345">>,
+ mochiweb_html:escape_attr(12345)),
+ ?assertEqual(
+ <<"1.5">>,
+ mochiweb_html:escape_attr(1.5)),
+ ok.
+
+%% tokens/1 lexes HTML into start_tag/end_tag/data/comment/pi tokens.
+%% Covers quoted/unquoted/bare attributes, self-closing tags, conditional
+%% comments, raw-text elements (script/textarea), unterminated PIs and
+%% stray '<' characters.
+tokens_test() ->
+ ?assertEqual(
+ [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"alice">>, <<"bob">>}], true}],
+ mochiweb_html:tokens(<<"<foo bar=baz wibble='wibble' alice=\"bob\"/>">>)),
+ ?assertEqual(
+ [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"alice">>, <<"bob">>}], true}],
+ mochiweb_html:tokens(<<"<foo bar=baz wibble='wibble' alice=bob/>">>)),
+ ?assertEqual(
+ [{comment, <<"[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]">>}],
+ mochiweb_html:tokens(<<"<!--[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]-->">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ mochiweb_html:tokens(<<"<script type=\"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ mochiweb_html:tokens(<<"<script type =\"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ mochiweb_html:tokens(<<"<script type = \"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ mochiweb_html:tokens(<<"<script type= \"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"textarea">>, [], false},
+ {data, <<"<html></body>">>, false},
+ {end_tag, <<"textarea">>}],
+ mochiweb_html:tokens(<<"<textarea><html></body></textarea>">>)),
+ ?assertEqual(
+ [{start_tag, <<"textarea">>, [], false},
+ {data, <<"<html></body></textareaz>">>, false}],
+ mochiweb_html:tokens(<<"<textarea ><html></body></textareaz>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ mochiweb_html:tokens(<<"<?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ mochiweb_html:tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office \n?>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ mochiweb_html:tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office">>)),
+ ?assertEqual(
+ [{data, <<"<">>, false}],
+ mochiweb_html:tokens(<<"<">>)),
+ ?assertEqual(
+ [{data, <<"not html ">>, false},
+ {data, <<"< at all">>, false}],
+ mochiweb_html:tokens(<<"not html < at all">>)),
+ ok.
+
+%% parse/1 builds a tree from a full document: whitespace-only text is
+%% dropped, CDATA becomes raw text, singletons (link/br/meta) close
+%% themselves, unclosed dd/dt are auto-closed, and tag/attribute names are
+%% lower-cased while attribute values keep their case.
+parse_test() ->
+ D0 = <<"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">
+<html>
+ <head>
+ <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">
+ <title>Foo</title>
+ <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/rel/dojo/resources/dojo.css\" media=\"screen\">
+ <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/foo.css\" media=\"screen\">
+ <!--[if lt IE 7]>
+ <style type=\"text/css\">
+ .no_ie { display: none; }
+ </style>
+ <![endif]-->
+ <link rel=\"icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
+ <link rel=\"shortcut icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
+ </head>
+ <body id=\"home\" class=\"tundra\"><![CDATA[<<this<!-- is -->CDATA>>]]></body>
+</html>">>,
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [],
+ [{<<"meta">>,
+ [{<<"http-equiv">>,<<"Content-Type">>},
+ {<<"content">>,<<"text/html; charset=UTF-8">>}],
+ []},
+ {<<"title">>,[],[<<"Foo">>]},
+ {<<"link">>,
+ [{<<"rel">>,<<"stylesheet">>},
+ {<<"type">>,<<"text/css">>},
+ {<<"href">>,<<"/static/rel/dojo/resources/dojo.css">>},
+ {<<"media">>,<<"screen">>}],
+ []},
+ {<<"link">>,
+ [{<<"rel">>,<<"stylesheet">>},
+ {<<"type">>,<<"text/css">>},
+ {<<"href">>,<<"/static/foo.css">>},
+ {<<"media">>,<<"screen">>}],
+ []},
+ {comment,<<"[if lt IE 7]>\n <style type=\"text/css\">\n .no_ie { display: none; }\n </style>\n <![endif]">>},
+ {<<"link">>,
+ [{<<"rel">>,<<"icon">>},
+ {<<"href">>,<<"/static/images/favicon.ico">>},
+ {<<"type">>,<<"image/x-icon">>}],
+ []},
+ {<<"link">>,
+ [{<<"rel">>,<<"shortcut icon">>},
+ {<<"href">>,<<"/static/images/favicon.ico">>},
+ {<<"type">>,<<"image/x-icon">>}],
+ []}]},
+ {<<"body">>,
+ [{<<"id">>,<<"home">>},
+ {<<"class">>,<<"tundra">>}],
+ [<<"<<this<!-- is -->CDATA>>">>]}]},
+ mochiweb_html:parse(D0)),
+ ?assertEqual(
+ {<<"html">>,[],
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]},
+ mochiweb_html:parse(
+ <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>)),
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"dd">>, [], [<<"foo">>]},
+ {<<"dt">>, [], [<<"bar">>]}]},
+ mochiweb_html:parse(<<"<html><dd>foo<dt>bar</html>">>)),
+ %% Singleton sadness
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"link">>, [], []},
+ <<"foo">>,
+ {<<"br">>, [], []},
+ <<"bar">>]},
+ mochiweb_html:parse(<<"<html><link>foo<br>bar</html>">>)),
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"link">>, [], [<<"foo">>,
+ {<<"br">>, [], []},
+ <<"bar">>]}]},
+ mochiweb_html:parse(<<"<html><link>foo<br>bar</link></html>">>)),
+ %% Case insensitive tags
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [], [<<"foo">>,
+ {<<"br">>, [], []},
+ <<"BAR">>]},
+ {<<"body">>, [{<<"class">>, <<"">>}, {<<"bgcolor">>, <<"#Aa01fF">>}], []}
+ ]},
+ mochiweb_html:parse(<<"<html><Head>foo<bR>BAR</head><body Class=\"\" bgcolor=\"#Aa01fF\"></BODY></html>">>)),
+ ok.
+
+%% Checks every clause of mochiweb_html:is_singleton/1 via the cover
+%% clause-lookup table, so new singletons can't be missed by tests.
+exhaustive_is_singleton_test() ->
+ T = mochiweb_cover:clause_lookup_table(mochiweb_html, is_singleton),
+ [?assertEqual(V, mochiweb_html:is_singleton(K)) || {K, V} <- T].
+
+%% Attribute tokenizing edge cases: an entity inside a quoted value, a
+%% bare (valueless) attribute, a multi-byte (UTF-8 copyright sign)
+%% attribute name, and a trailing bare attribute with no '>'.
+%% NOTE(review): the parse input below is restored from upstream — this
+%% hunk was entity/byte-mangled in transit (the &quot; and the explicit
+%% 16#c2, 16#a9 bytes had been decoded in place, leaving invalid Erlang).
+tokenize_attributes_test() ->
+ ?assertEqual(
+ {<<"foo">>,
+ [{<<"bar">>, <<"b\"az">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"taco", 16#c2, 16#a9>>, <<"bell">>},
+ {<<"quux">>, <<"quux">>}],
+ []},
+ mochiweb_html:parse(<<"<foo bar=\"b&quot;az\" wibble taco", 16#c2, 16#a9, "=bell quux">>)),
+ ok.
+
+%% Token stream for a flat RSS-like document. Note the <link> start_tag
+%% carries the singleton flag (true) because HTML treats link as a
+%% singleton tag, even though data and an end_tag follow it here.
+tokens2_test() ->
+ D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org</link><description>Bob's Rants</description></channel>">>,
+ ?assertEqual(
+ [{start_tag,<<"channel">>,[],false},
+ {start_tag,<<"title">>,[],false},
+ {data,<<"from __future__ import *">>,false},
+ {end_tag,<<"title">>},
+ {start_tag,<<"link">>,[],true},
+ {data,<<"http://bob.pythonmac.org">>,false},
+ {end_tag,<<"link">>},
+ {start_tag,<<"description">>,[],false},
+ {data,<<"Bob's Rants">>,false},
+ {end_tag,<<"description">>},
+ {end_tag,<<"channel">>}],
+ mochiweb_html:tokens(D0)),
+ ok.
+
+%% to_tokens/1 accepts several convenience input shapes: a full
+%% {Tag, Attrs, Children} triple, a bare {Tag} tuple, raw {'=', Data}
+%% and {comment, Text} tokens, and (in sub-tags only) the two-element
+%% {Tag, Attrs} shorthand.
+to_tokens_test() ->
+ ?assertEqual(
+ [{start_tag, <<"p">>, [{class, 1}], false},
+ {end_tag, <<"p">>}],
+ mochiweb_html:to_tokens({p, [{class, 1}], []})),
+ ?assertEqual(
+ [{start_tag, <<"p">>, [], false},
+ {end_tag, <<"p">>}],
+ mochiweb_html:to_tokens({p})),
+ ?assertEqual(
+ [{'=', <<"data">>}],
+ mochiweb_html:to_tokens({'=', <<"data">>})),
+ ?assertEqual(
+ [{comment, <<"comment">>}],
+ mochiweb_html:to_tokens({comment, <<"comment">>})),
+ %% This is only allowed in sub-tags:
+ %% {p, [{"class", "foo"}]} as {p, [{"class", "foo"}], []}
+ %% On the outside it's always treated as follows:
+ %% {p, [], [{"class", "foo"}]} as {p, [], [{"class", "foo"}]}
+ ?assertEqual(
+ [{start_tag, <<"html">>, [], false},
+ {start_tag, <<"p">>, [{class, 1}], false},
+ {end_tag, <<"p">>},
+ {end_tag, <<"html">>}],
+ mochiweb_html:to_tokens({html, [{p, [{class, 1}]}]})),
+ ok.
+
+%% Parsing a document where a singleton <br> appears inside a non-HTML
+%% <link> element: the br becomes a child-less node and the surrounding
+%% text stays inside the link element.
+parse2_test() ->
+ D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org<br>foo</link><description>Bob's Rants</description></channel>">>,
+ ?assertEqual(
+ {<<"channel">>,[],
+ [{<<"title">>,[],[<<"from __future__ import *">>]},
+ {<<"link">>,[],[
+ <<"http://bob.pythonmac.org">>,
+ {<<"br">>,[],[]},
+ <<"foo">>]},
+ {<<"description">>,[],[<<"Bob's Rants">>]}]},
+ mochiweb_html:parse(D0)),
+ ok.
+
+%% parse_tokens/1 on hand-built token streams: doctype and whitespace
+%% before <html> are dropped, unclosed elements are auto-closed
+%% (destacked), repeated <li> start tags implicitly close the previous
+%% one, adjacent data tokens are merged, and whitespace-only bodies
+%% produce no children.
+parse_tokens_test() ->
+ D0 = [{doctype,[<<"HTML">>,<<"PUBLIC">>,<<"-//W3C//DTD HTML 4.01 Transitional//EN">>]},
+ {data,<<"\n">>,true},
+ {start_tag,<<"html">>,[],false}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ mochiweb_html:parse_tokens(D0)),
+ D1 = D0 ++ [{end_tag, <<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ mochiweb_html:parse_tokens(D1)),
+ D2 = D0 ++ [{start_tag, <<"body">>, [], false}],
+ ?assertEqual(
+ {<<"html">>, [], [{<<"body">>, [], []}]},
+ mochiweb_html:parse_tokens(D2)),
+ D3 = D0 ++ [{start_tag, <<"head">>, [], false},
+ {end_tag, <<"head">>},
+ {start_tag, <<"body">>, [], false}],
+ ?assertEqual(
+ {<<"html">>, [], [{<<"head">>, [], []}, {<<"body">>, [], []}]},
+ mochiweb_html:parse_tokens(D3)),
+ D4 = D3 ++ [{data,<<"\n">>,true},
+ {start_tag,<<"div">>,[{<<"class">>,<<"a">>}],false},
+ {start_tag,<<"a">>,[{<<"name">>,<<"#anchor">>}],false},
+ {end_tag,<<"a">>},
+ {end_tag,<<"div">>},
+ {start_tag,<<"div">>,[{<<"class">>,<<"b">>}],false},
+ {start_tag,<<"div">>,[{<<"class">>,<<"c">>}],false},
+ {end_tag,<<"div">>},
+ {end_tag,<<"div">>}],
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [], []},
+ {<<"body">>, [],
+ [{<<"div">>, [{<<"class">>, <<"a">>}], [{<<"a">>, [{<<"name">>, <<"#anchor">>}], []}]},
+ {<<"div">>, [{<<"class">>, <<"b">>}], [{<<"div">>, [{<<"class">>, <<"c">>}], []}]}
+ ]}]},
+ mochiweb_html:parse_tokens(D4)),
+ D5 = [{start_tag,<<"html">>,[],false},
+ {data,<<"\n">>,true},
+ {data,<<"boo">>,false},
+ {data,<<"hoo">>,false},
+ {data,<<"\n">>,true},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], [<<"\nboohoo\n">>]},
+ mochiweb_html:parse_tokens(D5)),
+ D6 = [{start_tag,<<"html">>,[],false},
+ {data,<<"\n">>,true},
+ {data,<<"\n">>,true},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ mochiweb_html:parse_tokens(D6)),
+ D7 = [{start_tag,<<"html">>,[],false},
+ {start_tag,<<"ul">>,[],false},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"word">>,false},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"up">>,false},
+ {end_tag,<<"li">>},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"fdsa">>,false},
+ {start_tag,<<"br">>,[],true},
+ {data,<<"asdf">>,false},
+ {end_tag,<<"ul">>},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"ul">>, [],
+ [{<<"li">>, [], [<<"word">>]},
+ {<<"li">>, [], [<<"up">>]},
+ {<<"li">>, [], [<<"fdsa">>,{<<"br">>, [], []}, <<"asdf">>]}]}]},
+ mochiweb_html:parse_tokens(D7)),
+ ok.
+
+%% destack/1 collapses a full open-element stack into one tree;
+%% destack/2 unwinds only down to (and including) the named element,
+%% returning the remaining stack.
+destack_test() ->
+ ?assertEqual(
+ {<<"a">>, [], []},
+ mochiweb_html:destack([{<<"a">>, [], []}])),
+ ?assertEqual(
+ {<<"a">>, [], [{<<"b">>, [], []}]},
+ mochiweb_html:destack([{<<"b">>, [], []}, {<<"a">>, [], []}])),
+ ?assertEqual(
+ {<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]},
+ mochiweb_html:destack(
+ [{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}])),
+ ?assertEqual(
+ [{<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]}],
+ mochiweb_html:destack(
+ <<"b">>,
+ [{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}])),
+ ?assertEqual(
+ [{<<"b">>, [], [{<<"c">>, [], []}]}, {<<"a">>, [], []}],
+ mochiweb_html:destack(
+ <<"c">>,
+ [{<<"c">>, [], []}, {<<"b">>, [], []},{<<"a">>, [], []}])),
+ ok.
+
+%% Doctype declarations are tolerated before, inside, and in
+%% self-closed form, and never appear in the parsed tree. The stray
+%% </body> without a matching open tag is also ignored.
+doctype_test() ->
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
+ "<html><head></head></body></html>")),
+ %% http://code.google.com/p/mochiweb/issues/detail?id=52
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<html>"
+ "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
+ "<head></head></body></html>")),
+ %% http://github.com/mochi/mochiweb/pull/13
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\"/>"
+ "<html>"
+ "<head></head></body></html>")),
+ ok.
+
+%% Closing tags for the singleton <br> (</br>, </br/>) are discarded
+%% rather than producing extra or mismatched nodes.
+dumb_br_test() ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=71
+ ?assertEqual(
+ {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>]},
+ mochiweb_html:parse("<div><br/><br/>z</br/></br/></div>")),
+ ?assertEqual(
+ {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>]},
+ mochiweb_html:parse("<div><br><br>z</br/></br/></div>")),
+ ?assertEqual(
+ {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>, {<<"br">>, [], []}, {<<"br">>, [], []}]},
+ mochiweb_html:parse("<div><br><br>z<br/><br/></div>")),
+ ?assertEqual(
+ {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>]},
+ mochiweb_html:parse("<div><br><br>z</br></br></div>")).
+
+
+%% PHP-style processing instructions (<?php ... ?>) survive both
+%% tokenization and parsing as {pi, Content} nodes.
+php_test() ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=71
+ ?assertEqual(
+ [{pi, <<"php\n">>}],
+ mochiweb_html:tokens(
+ "<?php\n?>")),
+ ?assertEqual(
+ {<<"div">>, [], [{pi, <<"php\n">>}]},
+ mochiweb_html:parse(
+ "<div><?php\n?></div>")),
+ ok.
+
+%% Unquoted attribute values: a trailing "/" is the self-close marker,
+%% not part of the value, while an embedded ">" inside an unquoted
+%% value is kept literally and does not terminate the tag.
+parse_unquoted_attr_test() ->
+ D0 = <<"<html><img src=/images/icon.png/></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"img">>, [ { <<"src">>, <<"/images/icon.png">> } ], [] }
+ ]},
+ mochiweb_html:parse(D0)),
+
+ D1 = <<"<html><img src=/images/icon.png></img></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"img">>, [ { <<"src">>, <<"/images/icon.png">> } ], [] }
+ ]},
+ mochiweb_html:parse(D1)),
+
+ D2 = <<"<html><img src=/images/icon>.png width=100></img></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"img">>, [ { <<"src">>, <<"/images/icon>.png">> }, { <<"width">>, <<"100">> } ], [] }
+ ]},
+ mochiweb_html:parse(D2)),
+ ok.
+
+%% Quoted attribute values: single quotes work, an unterminated double
+%% quote swallows the rest of the input into the value, ">" inside a
+%% quoted value is literal, and whitespace/newlines are preserved.
+parse_quoted_attr_test() ->
+ D0 = <<"<html><img src='/images/icon.png'></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"img">>, [ { <<"src">>, <<"/images/icon.png">> } ], [] }
+ ]},
+ mochiweb_html:parse(D0)),
+
+ D1 = <<"<html><img src=\"/images/icon.png'></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"img">>, [ { <<"src">>, <<"/images/icon.png'></html>">> } ], [] }
+ ]},
+ mochiweb_html:parse(D1)),
+
+ D2 = <<"<html><img src=\"/images/icon>.png\"></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"img">>, [ { <<"src">>, <<"/images/icon>.png">> } ], [] }
+ ]},
+ mochiweb_html:parse(D2)),
+
+ %% Quoted attributes can contain whitespace and newlines
+ D3 = <<"<html><a href=\"#\" onclick=\"javascript: test(1,\ntrue);\"></html>">>,
+ ?assertEqual(
+ {<<"html">>,[],[
+ { <<"a">>, [ { <<"href">>, <<"#">> }, {<<"onclick">>, <<"javascript: test(1,\ntrue);">>} ], [] }
+ ]},
+ mochiweb_html:parse(D3)),
+ ok.
+
+%% An attribute with no name ("=black") degrades to two valueless
+%% attributes rather than crashing the tokenizer.
+parse_missing_attr_name_test() ->
+ D0 = <<"<html =black></html>">>,
+ ?assertEqual(
+ {<<"html">>, [ { <<"=">>, <<"=">> }, { <<"black">>, <<"black">> } ], [] },
+ mochiweb_html:parse(D0)),
+ ok.
+
+%% A processing instruction with spaces around "=" and a trailing "/"
+%% still yields a {pi, Tag, Attrs} node with clean attribute values.
+parse_broken_pi_test() ->
+ D0 = <<"<html><?xml:namespace prefix = o ns = \"urn:schemas-microsoft-com:office:office\" /></html>">>,
+ ?assertEqual(
+ {<<"html">>, [], [
+ { pi, <<"xml:namespace">>, [ { <<"prefix">>, <<"o">> },
+ { <<"ns">>, <<"urn:schemas-microsoft-com:office:office">> } ] }
+ ] },
+ mochiweb_html:parse(D0)),
+ ok.
+
+%% <input> is treated as a singleton when bare, but an explicit
+%% </input> end tag lets the second input keep its text child.
+parse_funny_singletons_test() ->
+ D0 = <<"<html><input><input>x</input></input></html>">>,
+ ?assertEqual(
+ {<<"html">>, [], [
+ { <<"input">>, [], [] },
+ { <<"input">>, [], [ <<"x">> ] }
+ ] },
+ mochiweb_html:parse(D0)),
+ ok.
+
+%% to_html/1 renders singleton tags in self-closing "<link />" form
+%% and silently drops any children given to a singleton node.
+to_html_singleton_test() ->
+ D0 = <<"<link />">>,
+ T0 = {<<"link">>,[],[]},
+ ?assertEqual(D0, iolist_to_binary(mochiweb_html:to_html(T0))),
+
+ D1 = <<"<head><link /></head>">>,
+ T1 = {<<"head">>,[],[{<<"link">>,[],[]}]},
+ ?assertEqual(D1, iolist_to_binary(mochiweb_html:to_html(T1))),
+
+ D2 = <<"<head><link /><link /></head>">>,
+ T2 = {<<"head">>,[],[{<<"link">>,[],[]}, {<<"link">>,[],[]}]},
+ ?assertEqual(D2, iolist_to_binary(mochiweb_html:to_html(T2))),
+
+ %% Make sure singletons are converted to singletons.
+ D3 = <<"<head><link /></head>">>,
+ T3 = {<<"head">>,[],[{<<"link">>,[],[<<"funny">>]}]},
+ ?assertEqual(D3, iolist_to_binary(mochiweb_html:to_html(T3))),
+
+ D4 = <<"<link />">>,
+ T4 = {<<"link">>,[],[<<"funny">>]},
+ ?assertEqual(D4, iolist_to_binary(mochiweb_html:to_html(T4))),
+
+ ok.
+
+%% EUnit generator: bare ampersands (not forming a valid entity) are
+%% passed through literally in both attribute values and text data.
+parse_amp_test_() ->
+ [?_assertEqual(
+ {<<"html">>,[],
+ [{<<"body">>,[{<<"onload">>,<<"javascript:A('1&2')">>}],[]}]},
+ mochiweb_html:parse("<html><body onload=\"javascript:A('1&2')\"></body></html>")),
+ ?_assertEqual(
+ {<<"html">>,[],
+ [{<<"body">>,[{<<"onload">>,<<"javascript:A('1& 2')">>}],[]}]},
+ mochiweb_html:parse("<html><body onload=\"javascript:A('1& 2')\"></body></html>")),
+ ?_assertEqual(
+ {<<"html">>,[],
+ [{<<"body">>,[],[<<"& ">>]}]},
+ mochiweb_html:parse("<html><body>& </body></html>")),
+ ?_assertEqual(
+ {<<"html">>,[],
+ [{<<"body">>,[],[<<"&">>]}]},
+ mochiweb_html:parse("<html><body>&</body></html>"))].
+
+%% A "<" that does not begin a tag (followed by whitespace or another
+%% "<") is kept as literal text data.
+parse_unescaped_lt_test() ->
+ D1 = <<"<div> < < <a href=\"/\">Back</a></div>">>,
+ ?assertEqual(
+ {<<"div">>, [], [<<" < < ">>, {<<"a">>, [{<<"href">>, <<"/">>}],
+ [<<"Back">>]}]},
+ mochiweb_html:parse(D1)),
+
+ D2 = <<"<div> << <a href=\"/\">Back</a></div>">>,
+ ?assertEqual(
+ {<<"div">>, [], [<<" << ">>, {<<"a">>, [{<<"href">>, <<"/">>}],
+ [<<"Back">>]}]},
+ mochiweb_html:parse(D2)).
+
+%% A lowercase HTML5 "<!doctype html>" tokenizes to a doctype token
+%% with a single name element.
+html5_doctype_test() ->
+ ?assertEqual(
+ [{doctype,[<<"html">>]},
+ {start_tag,<<"head">>,[],false},
+ {end_tag,<<"head">>},
+ {start_tag,<<"body">>,[],false},
+ {end_tag,<<"body">>}],
+ mochiweb_html:tokens("<!doctype html><head></head><body></body>")).
+
+%% A document without an explicit <html> element still parses to a
+%% tree rooted at <<"html">>.
+implicit_html_test() ->
+ %% https://github.com/mochi/mochiweb/issues/110
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [], []},
+ {<<"body">>, [], []}]},
+ mochiweb_html:parse("<!doctype html><head></head><body></body>")).
--- /dev/null
+%% Regression tests for the R15B gen_tcp acceptor bug: when the
+%% workaround is compiled in (gen_tcp_r15b_workaround), large request
+%% headers must NOT cause the server to drop connections.
+-module(mochiweb_http_tests).
+-include_lib("eunit/include/eunit.hrl").
+
+%% ?SHOULD_HAVE_BUG reflects whether this build was compiled with the
+%% workaround flag; the 10000-byte test asserts against it either way.
+-ifdef(gen_tcp_r15b_workaround).
+-define(SHOULD_HAVE_BUG, true).
+-else.
+-define(SHOULD_HAVE_BUG, false).
+-endif.
+
+%% EUnit setup fixture: start one server, run both probes, stop it.
+has_acceptor_bug_test_() ->
+ {setup,
+ fun start_server/0,
+ fun mochiweb_http:stop/1,
+ fun has_acceptor_bug_tests/1}.
+
+%% Starts inets (for the httpc client) and a mochiweb server on an
+%% ephemeral port ({port, 0}); returns the server pid for the fixture.
+start_server() ->
+ application:start(inets),
+ {ok, Pid} = mochiweb_http:start_link([{port, 0},
+ {loop, fun responder/1}]),
+ Pid.
+
+%% Probe with a small and a large X-Random header; only the large one
+%% is expected to trip the bug when present.
+has_acceptor_bug_tests(Server) ->
+ Port = mochiweb_socket_server:get(Server, port),
+ [{"1000 should be fine even with the bug",
+ ?_assertEqual(false, has_bug(Port, 1000))},
+ {"10000 should trigger the bug if present",
+ ?_assertEqual(?SHOULD_HAVE_BUG, has_bug(Port, 10000))}].
+
+%% Trivial request loop: always 200 with a fixed HTML body.
+responder(Req) ->
+ Req:respond({200,
+ [{"Content-Type", "text/html"}],
+ ["<html><body>Hello</body></html>"]}).
+
+%% true iff the server closed the socket on a request with a Len-byte
+%% header value; a 200 (served) or 400 (rejected cleanly) means no bug.
+has_bug(Port, Len) ->
+ case
+ httpc:request(get, {"http://127.0.0.1:" ++ integer_to_list(Port) ++ "/",
+ [{"X-Random", lists:duplicate(Len, $a)}]}, [], [])
+ of
+ {error, socket_closed_remotely} ->
+ true;
+ {ok, {{"HTTP/1.1", 200, "OK"}, _, "<html><body>Hello</body></html>"}} ->
+ false;
+ {ok, {{"HTTP/1.1", 400, "Bad Request"}, _, []}} ->
+ false
+ end.
--- /dev/null
+%% End-to-end HTTP/HTTPS tests for mochiweb: GET and POST round-trips
+%% over plain TCP and SSL, driven by a raw-socket client that checks
+%% the response line, headers, and body byte-for-byte.
+-module(mochiweb_tests).
+-include_lib("eunit/include/eunit.hrl").
+
+%% One test request: path to fetch, request body, expected reply body.
+-record(treq, {path, body= <<>>, xreply= <<>>}).
+
+%% SSL options pointing at the test certificate/key shipped under
+%% support/test-materials, located relative to this module's ebin dir.
+ssl_cert_opts() ->
+ EbinDir = filename:dirname(code:which(?MODULE)),
+ CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
+ CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
+ KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
+ [{certfile, CertFile}, {keyfile, KeyFile}].
+
+%% Runs ClientFun against a freshly started server (ephemeral port,
+%% plain or ssl transport) and always stops the server, returning the
+%% client's result (exceptions are caught into the result).
+with_server(Transport, ServerFun, ClientFun) ->
+ ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
+ ServerOpts = case Transport of
+ plain ->
+ ServerOpts0;
+ ssl ->
+ ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
+ end,
+ {ok, Server} = mochiweb_http:start_link(ServerOpts),
+ Port = mochiweb_socket_server:get(Server, port),
+ Res = (catch ClientFun(Transport, Port)),
+ mochiweb_http:stop(Server),
+ Res.
+
+%% Path decoding: %20 and + both decode to a space in get(path).
+request_test() ->
+ R = mochiweb_request:new(z, z, "/foo/bar/baz%20wibble+quux?qs=2", z, []),
+ "/foo/bar/baz wibble quux" = R:get(path),
+ ok.
+
+%% Timeout (seconds) for the 100-request generator tests below.
+-define(LARGE_TIMEOUT, 60).
+
+single_http_GET_test() ->
+ do_GET(plain, 1).
+
+single_https_GET_test() ->
+ do_GET(ssl, 1).
+
+multiple_http_GET_test() ->
+ do_GET(plain, 3).
+
+multiple_https_GET_test() ->
+ do_GET(ssl, 3).
+
+hundred_http_GET_test_() -> % note the underscore
+ {timeout, ?LARGE_TIMEOUT,
+ fun() -> ?assertEqual(ok, do_GET(plain,100)) end}.
+
+hundred_https_GET_test_() -> % note the underscore
+ {timeout, ?LARGE_TIMEOUT,
+ fun() -> ?assertEqual(ok, do_GET(ssl,100)) end}.
+
+single_128_http_POST_test() ->
+ do_POST(plain, 128, 1).
+
+single_128_https_POST_test() ->
+ do_POST(ssl, 128, 1).
+
+single_2k_http_POST_test() ->
+ do_POST(plain, 2048, 1).
+
+single_2k_https_POST_test() ->
+ do_POST(ssl, 2048, 1).
+
+single_100k_http_POST_test() ->
+ do_POST(plain, 102400, 1).
+
+single_100k_https_POST_test() ->
+ do_POST(ssl, 102400, 1).
+
+multiple_100k_http_POST_test() ->
+ do_POST(plain, 102400, 3).
+
+multiple_100K_https_POST_test() ->
+ do_POST(ssl, 102400, 3).
+
+hundred_128_http_POST_test_() -> % note the underscore
+ {timeout, ?LARGE_TIMEOUT,
+ fun() -> ?assertEqual(ok, do_POST(plain, 128, 100)) end}.
+
+hundred_128_https_POST_test_() -> % note the underscore
+ {timeout, ?LARGE_TIMEOUT,
+ fun() -> ?assertEqual(ok, do_POST(ssl, 128, 100)) end}.
+
+%% Issues Times pipelined GETs; the server echoes the request path in
+%% the body, so each expected reply is computed up front.
+do_GET(Transport, Times) ->
+ PathPrefix = "/whatever/",
+ ReplyPrefix = "You requested: ",
+ ServerFun = fun (Req) ->
+ Reply = ReplyPrefix ++ Req:get(path),
+ Req:ok({"text/plain", Reply})
+ end,
+ TestReqs = [begin
+ Path = PathPrefix ++ integer_to_list(N),
+ ExpectedReply = list_to_binary(ReplyPrefix ++ Path),
+ #treq{path=Path, xreply=ExpectedReply}
+ end || N <- lists:seq(1, Times)],
+ ClientFun = new_client_fun('GET', TestReqs),
+ ok = with_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Issues Times POSTs of Size random bytes each; the server echoes the
+%% body back with 201, so reply must equal the posted body.
+%% NOTE(review): crypto:rand_bytes/1 is deprecated in modern OTP in
+%% favour of crypto:strong_rand_bytes/1 — kept as-is in this patch.
+do_POST(Transport, Size, Times) ->
+ ServerFun = fun (Req) ->
+ Body = Req:recv_body(),
+ Headers = [{"Content-Type", "application/octet-stream"}],
+ Req:respond({201, Headers, Body})
+ end,
+ TestReqs = [begin
+ Path = "/stuff/" ++ integer_to_list(N),
+ Body = crypto:rand_bytes(Size),
+ #treq{path=Path, body=Body, xreply=Body}
+ end || N <- lists:seq(1, Times)],
+ ClientFun = new_client_fun('POST', TestReqs),
+ ok = with_server(Transport, ServerFun, ClientFun),
+ ok.
+
+%% Curries client_request/4 into the (Transport, Port) shape that
+%% with_server/3 expects.
+new_client_fun(Method, TestReqs) ->
+ fun (Transport, Port) ->
+ client_request(Transport, Port, Method, TestReqs)
+ end.
+
+%% Connects once, then wraps the transport-specific socket operations
+%% (recv/send/setopts) in a single fun so the request loop below is
+%% transport-agnostic.
+client_request(Transport, Port, Method, TestReqs) ->
+ Opts = [binary, {active, false}, {packet, http}],
+ SockFun = case Transport of
+ plain ->
+ {ok, Socket} = gen_tcp:connect("127.0.0.1", Port, Opts),
+ fun (recv) ->
+ gen_tcp:recv(Socket, 0);
+ ({recv, Length}) ->
+ gen_tcp:recv(Socket, Length);
+ ({send, Data}) ->
+ gen_tcp:send(Socket, Data);
+ ({setopts, L}) ->
+ inet:setopts(Socket, L)
+ end;
+ ssl ->
+ {ok, Socket} = ssl:connect("127.0.0.1", Port, [{ssl_imp, new} | Opts]),
+ fun (recv) ->
+ ssl:recv(Socket, 0);
+ ({recv, Length}) ->
+ ssl:recv(Socket, Length);
+ ({send, Data}) ->
+ ssl:send(Socket, Data);
+ ({setopts, L}) ->
+ ssl:setopts(Socket, L)
+ end
+ end,
+ client_request(SockFun, Method, TestReqs).
+
+%% Pipelined request loop over one connection. The last request sends
+%% "Connection: close", so after the list is drained the next recv
+%% must report {error, closed}. Asserts status line, the standard
+%% response headers, and the exact body for every request.
+client_request(SockFun, _Method, []) ->
+ {the_end, {error, closed}} = {the_end, SockFun(recv)},
+ ok;
+client_request(SockFun, Method,
+ [#treq{path=Path, body=Body, xreply=ExReply} | Rest]) ->
+ Request = [atom_to_list(Method), " ", Path, " HTTP/1.1\r\n",
+ client_headers(Body, Rest =:= []),
+ "\r\n",
+ Body],
+ ok = SockFun({send, Request}),
+ case Method of
+ 'GET' ->
+ {ok, {http_response, {1,1}, 200, "OK"}} = SockFun(recv);
+ 'POST' ->
+ {ok, {http_response, {1,1}, 201, "Created"}} = SockFun(recv)
+ end,
+ ok = SockFun({setopts, [{packet, httph}]}),
+ {ok, {http_header, _, 'Server', _, "MochiWeb" ++ _}} = SockFun(recv),
+ {ok, {http_header, _, 'Date', _, _}} = SockFun(recv),
+ {ok, {http_header, _, 'Content-Type', _, _}} = SockFun(recv),
+ {ok, {http_header, _, 'Content-Length', _, ConLenStr}} = SockFun(recv),
+ ContentLength = list_to_integer(ConLenStr),
+ {ok, http_eoh} = SockFun(recv),
+ ok = SockFun({setopts, [{packet, raw}]}),
+ {payload, ExReply} = {payload, drain_reply(SockFun, ContentLength, <<>>)},
+ ok = SockFun({setopts, [{packet, http}]}),
+ client_request(SockFun, Method, Rest).
+
+%% Standard request headers; Content-Type/Length only when a body is
+%% present, and Connection: close only on the final request.
+client_headers(Body, IsLastRequest) ->
+ ["Host: localhost\r\n",
+ case Body of
+ <<>> ->
+ "";
+ _ ->
+ ["Content-Type: application/octet-stream\r\n",
+ "Content-Length: ", integer_to_list(byte_size(Body)), "\r\n"]
+ end,
+ case IsLastRequest of
+ true ->
+ "Connection: close\r\n";
+ false ->
+ ""
+ end].
+
+%% Reads exactly Length bytes of response body in chunks of up to 1024
+%% bytes, accumulating into a binary.
+drain_reply(_SockFun, 0, Acc) ->
+ Acc;
+drain_reply(SockFun, Length, Acc) ->
+ Sz = erlang:min(Length, 1024),
+ {ok, B} = SockFun({recv, Sz}),
+ drain_reply(SockFun, Length - Sz, <<Acc/bytes, B/bytes>>).
--- /dev/null
+# Umbrella wrapper package for the mochiweb dependency: clones a
+# pinned upstream revision, applies local patches, and preserves the
+# upstream .app file and MIT license.
+APP_NAME:=mochiweb
+
+UPSTREAM_GIT:=https://github.com/rabbitmq/mochiweb.git
+UPSTREAM_REVISION:=680dba8a8a0dd8ee18d03bf814cfb2340bf3bbff
+RETAIN_ORIGINAL_VERSION:=true
+WRAPPER_PATCHES:=10-build-on-R12B-5.patch \
+ 20-MAX_RECV_BODY.patch \
+ 30-remove-crypto-ssl-dependencies.patch \
+ 40-remove-compiler-syntax_tools-dependencies.patch \
+ 50-remove-json.patch
+
+# internal.hrl is used by webmachine
+UPSTREAM_INCLUDE_DIRS+=$(CLONE_DIR)/src
+
+# Use the upstream-provided .app.src verbatim instead of generating one.
+ORIGINAL_APP_FILE:=$(CLONE_DIR)/$(APP_NAME).app
+DO_NOT_GENERATE_APP_FILE=true
+
+define package_rules
+
+$(CLONE_DIR)/src/$(APP_NAME).app.src: $(CLONE_DIR)/.done
+
+$(ORIGINAL_APP_FILE): $(CLONE_DIR)/src/$(APP_NAME).app.src
+	cp $(CLONE_DIR)/src/$(APP_NAME).app.src $(ORIGINAL_APP_FILE)
+
+$(PACKAGE_DIR)+clean::
+	rm -rf $(ORIGINAL_APP_FILE)
+
+# This rule is run *before* the one in do_package.mk
+$(PLUGINS_SRC_DIST_DIR)/$(PACKAGE_DIR)/.srcdist_done::
+	cp $(CLONE_DIR)/LICENSE $(PACKAGE_DIR)/LICENSE-MIT-Mochi
+
+# The mochifmt_* parameterized modules need the pmod_pt parse
+# transform compiled first.
+$(CLONE_DIR)/ebin/mochifmt_records.beam: $(CLONE_DIR)/ebin/pmod_pt.beam
+
+$(CLONE_DIR)/ebin/mochifmt_std.beam: $(CLONE_DIR)/ebin/pmod_pt.beam
+
+$(CLONE_DIR)/ebin/mochifmt_request.beam: $(CLONE_DIR)/ebin/pmod_pt.beam
+
+$(CLONE_DIR)/ebin/mochifmt_response.beam: $(CLONE_DIR)/ebin/pmod_pt.beam
+
+endef
--- /dev/null
+# Delegate all build logic to the shared umbrella build system.
+include ../umbrella.mk
--- /dev/null
+# AMQP 1.0 support for RabbitMQ
+
+This plugin adds AMQP 1.0 support to RabbitMQ.
+
+# Status
+
+This is a prototype. You can send and receive messages between 0-9-1
+or 0-8 clients and 1.0 clients with broadly the same semantics as you
+would get with 0-9-1.
+
+# Building and configuring
+
+The plugin uses the standard RabbitMQ plugin build environment; see <http://www.rabbitmq.com/plugin-development.html>.
+
+It will listen on the standard AMQP port, 5672. To reconfigure this,
+do so as you would for 0-9-1. Clients connecting with 0-9-1 and 0-8
+will continue to work on the same port.
+
+The following two configuration options (which are specific to the AMQP 1.0 adapter)
+are accepted in the `rabbitmq_amqp1_0` section of the configuration file.
+
+AMQP 1.0 conceptually allows connections that are not authenticated
+with SASL (i.e. where no username and password is supplied). By
+default these will connect as the "guest" user. To change this, set
+`default_user` to a string with the name of the user to use, or the
+atom `none` to prevent unauthenticated connections.
+
+ {default_user, "guest"}
+
+The default virtual host can be specified using the `default_vhost` setting.
+See the "Virtual Hosts" section below for a description.
+
+ {default_vhost, <<"/">>}
+
+The `protocol_strict_mode` setting controls how strictly peers must conform
+to the specification. The default is not to enforce strictness, which allows
+non-fatal byte-counts in frames and inaccuracies in flow-control from peers.
+
+ {protocol_strict_mode, false}
+
+# Clients we have tested
+
+The current field of AMQP 1.0 clients is somewhat limited. Therefore
+we have not achieved as much interoperability as we might like.
+
+We have tested against:
+
+ * SwiftMQ Java client [1]
+ We have done most of our testing against this client and things seem
+ to work.
+
+ * QPid / Proton C client [2]
+ We have successfully tested against the "proton" command line tool
+ this client ships with.
+
+ * QPid / Proton Java client [2]
+ We have not been able to get this client to get as far as opening a
+ network connection (tested against 0.2 and 0.4).
+
+ * Windows Azure Service Bus [3]
+ It seems that the URI scheme used by this client assumes that it is
+ connecting to Azure; it does not seem to be possible to get it to
+ connect to another server.
+
+[1] http://www.swiftmq.com/products/router/swiftlets/sys_amqp/client/index.html
+
+[2] http://qpid.apache.org/proton/
+
+[3] http://www.windowsazure.com/en-us/develop/net/how-to-guides/service-bus-amqp/
+
+As new clients appear we will of course work on interoperability with them.
+
+# Interoperability with AMQP 0-9-1
+
+## Message payloads
+
+This implementation as a plugin aims for useful interoperability with
+AMQP 0-9-1 clients. AMQP 1.0 messages can be far more structured than
+AMQP 0-9-1 messages, which simply have a payload of bytes.
+
+The way we deal with this is that an AMQP 1.0 message with a single
+data section will be transcoded to an AMQP 0-9-1 message with just the
+bytes from that section, and vice versa. An AMQP 1.0 with any other
+payload will keep exactly that payload (i.e., encoded AMQP 1.0
+sections, concatenated), and for AMQP 0-9-1 clients the `type` field
+of the `basic.properties` will contain the value `"amqp-1.0"`.
+
+Thus, AMQP 0-9-1 clients may receive messages that they cannot
+understand (if they don't have an AMQP 1.0 codec handy, anyway);
+however, these will at least be labelled. AMQP 1.0 clients shall
+receive exactly what they expect.
+
+## Message properties, annotations, headers, etc.
+
+The headers and properties map as follows:
+
+ AMQP 1.0 AMQP 0-9-1
+ Header Properties
+ durable <---------------> delivery-mode [1]
+ priority <---------------> priority
+ ttl <---------------> expiration [2]
+ first-acquirer [3]
+ delivery-count [4]
+ Properties
+ message-id <---------------> message-id [5]
+ user-id <---------------> user-id
+ to [6]
+ subject [6]
+ reply-to <---------------> reply-to [6]
+ correlation-id <---------------> correlation-id
+ content-type <---------------> content-type
+ content-encoding <---------------> content-encoding
+ absolute-expiry-time [7]
+ creation-time <---------------> timestamp
+ Application headers <-------/-------> headers [8]
+
+[1] `durable` is `true` if and only if `delivery-mode` is `2`.
+
+[2] `expiration` is a shortstr; since RabbitMQ will expect this to be
+an encoded string, we translate a `ttl` to the string representation
+of its integer value.
+
+[3] `first-acquirer` is true if and only if the `basic.deliver` field
+`redelivered` is false.
+
+[4] `delivery-count` is left null.
+
+[5] AMQP 0-9-1 expects this to be a shortstr.
+
+[6] See Routing and Addressing below.
+
+[7] `absolute-expiry-time` has no corresponding field in AMQP 0-9-1,
+and is not supported in RabbitMQ in any case.
+
+[8] The application headers section and the `basic.properties` field
+`headers` are natural analogues. However, rather than try to transcode
+an AMQP 1.0 map to an AMQP 0-9-1 field-table, currently we discard
+application headers (of AMQP 1.0 messages) and headers (of AMQP 0-9-1
+messages sent through to AMQP 1.0). In other words, the (AMQP 1.0)
+application headers section is only available to AMQP 1.0 clients, and
+the (AMQP 0-9-1) headers field is only available to AMQP 0-9-1
+clients.
+
+Note that properties (in both AMQP 1.0 and AMQP 0-9-1) and application
+properties (in AMQP 1.0) are immutable; however, this can only apply
+when the sending and receiving clients are using the same protocol.
+
+## Routing and Addressing
+
+In AMQP 1.0 source and destination addresses are opaque values, and
+each message may have a `subject` field value.
+
+For targets, addresses are:
+
+ = "/exchange/" X "/" RK Publish to exchange X with routing key RK
+ | "/exchange/" X Publish to exchange X with message subject as routing key
+ | "/topic/" RK Publish to amq.topic with routing key RK
+ | "/amq/queue/" Q Publish to default exchange with routing key Q
+ | "/queue/" Q Publish to default exchange with routing key Q
+ | "/queue" Publish to default exchange with message subj as routing key
+
+For sources, addresses are:
+
+ = "/exchange/" X "/" RK Consume from temp queue bound to X with routing key RK
+ | "/topic/" RK Consume from temp queue bound to amq.topic with routing key RK
+ | "/amq/queue/" Q Consume from Q
+ | "/queue/" Q Consume from Q
+
+## Virtual Hosts
+
+AMQP 1.0 has no equivalent of AMQP 0-9-1 virtual hosts. A virtual host
+on the broker may be addressed when opening an AMQP 1.0 connection by setting
+the `hostname` field, prefixing with "vhost:". Setting the `hostname` field
+to "vhost:/" addresses the default virtual host. If the `hostname` field
+does not start with "vhost:" then the `default_vhost` configuration
+setting will be consulted.
+
+# Limitations and unsupported features
+
+At present, the RabbitMQ AMQP 1.0 adapter does not support:
+
+ - "Exactly once" delivery [9]
+ - Link recovery [9]
+ - Full message fragmentation [10]
+ - Resuming messages
+ - "Modified" outcome
+ - Filters [11]
+ - Transactions
+ - Source/target expiry-policy other than link-detach and timeout
+ other than 0
+ - Max message size for links
+ - Aborted transfers
+ - TLS negotiation via the AMQP2100 handshake (although SSL is supported)
+
+[9] We do not deduplicate as a target, though we may resend as a
+source (messages that have no settled outcome when an outgoing link is
+detached will be requeued).
+
+[10] We do fragment messages over multiple frames; however, if this
+would overflow the session window we may discard or requeue messages.
+
+[11] In principle, filters for consuming from an exchange could
+translate to AMQP 0-9-1 bindings. This is not implemented, so
+effectively only consuming from fanout exchanges and queues is useful
+currently.
--- /dev/null
+#!/usr/bin/python
+import sys
+import os
+import re
+from xml.dom.minidom import parse
+
+# Converts an AMQP XML name into a valid Erlang identifier fragment
+# (hyphens are not allowed in unquoted Erlang atoms).
+def safe(str):
+ return str.replace('-', '_')
+
+# One described AMQP 1.0 type parsed from a spec <type> element:
+# captures its symbolic descriptor, numeric code, and field names.
+class AMQPType:
+ def __init__(self, dom):
+ self.name = safe(dom.getAttribute('name'))
+ self.source = dom.getAttribute('source')
+ self.desc = dom.getElementsByTagName('descriptor')[0].getAttribute('name')
+ self.code = dom.getElementsByTagName('descriptor')[0].getAttribute('code')
+ self.number = parse_code(self.code)
+ self.fields = [safe(el.getAttribute('name')) for el in
+ dom.getElementsByTagName('field')]
+ # These are 'restricted' types, rather than composite, so they
+ # do not have defined fields.
+ if self.desc in ['amqp:data:binary', 'amqp:amqp-sequence:list',
+ 'amqp:amqp-value:*', 'amqp:application-properties:map',
+ 'amqp:delivery-annotations:map',
+ 'amqp:message-annotations:map', 'amqp:footer:map']:
+ self.fields = ['content']
+
+ # (macro name, descriptor symbol) pair for the generated -define.
+ def define(self):
+ return ('SYMBOL_%s' % self.name.upper(), self.desc)
+
+# A restricted AMQP type whose <choice> children become a group of
+# (MACRO_NAME, value) constants in the generated .hrl.
+class AMQPDefines:
+ def __init__(self, dom):
+ self.name = safe(dom.getAttribute('name'))
+ self.source = dom.getAttribute('source')
+ self.options = [(self.name.upper() + '_' +
+ (safe(el.getAttribute('name')).upper()),
+ el.getAttribute('value')) for el in
+ dom.getElementsByTagName('choice')]
+
+# Emits the rabbit_amqp1_0_framing0 module to stdout: lookup functions
+# mapping descriptors (symbolic and numeric) to framing records, field
+# lists, encoders, and reverse symbol/number lookups. Python 2 print
+# statements; each function ends with a catch-all clause.
+def print_erl(types):
+ print """-module(rabbit_amqp1_0_framing0).
+-export([record_for/1, fields/1, encode/1, symbol_for/1, number_for/1]).
+-include("rabbit_amqp1_0.hrl")."""
+ for t in types:
+ print """record_for({symbol, <<"%s">>}) ->
+ #'v1_0.%s'{};""" % (t.desc, t.name)
+ if t.code:
+ print """record_for({_, %d}) ->
+ #'v1_0.%s'{};""" % (t.number, t.name)
+ print "%% %s\n" % t.code
+
+ print """record_for(Other) -> exit({unknown, Other}).
+
+"""
+ for t in types:
+ print """fields(#'v1_0.%s'{}) -> record_info(fields, 'v1_0.%s');""" % (t.name, t.name)
+ print """fields(_Other) -> unknown.
+
+"""
+ for t in types:
+ print """encode(Frame = #'v1_0.%s'{}) ->
+ rabbit_amqp1_0_framing:encode_described('%s', %s, Frame);""" % (t.name, t.source, t.number)
+ print """encode(undefined) -> null;
+encode(Other) -> Other.
+
+"""
+ for t in types:
+ print """symbol_for(#'v1_0.%s'{}) ->
+ {symbol, <<"%s">>};""" % (t.name, t.desc)
+ print """symbol_for(Other) -> exit({unknown, Other}).
+
+"""
+ for t in types:
+ print """number_for(#'v1_0.%s'{}) ->
+ {ulong, %s};""" % (t.name, t.number)
+ print """number_for(Other) -> exit({unknown, Other})."""
+
+# Emits the framing header to stdout: one -record per composite type
+# plus -define constants for descriptor symbols and restricted-type
+# choice values.
+def print_hrl(types, defines):
+ for t in types:
+ print """-record('v1_0.%s', {%s}).""" % (t.name, ", ".join(t.fields))
+ print_define(t.define(), 'symbol')
+ for d in defines:
+ if len(d.options) > 0:
+ print """ %% %s""" % (d.name)
+ for opt in d.options:
+ print_define(opt, d.source)
+
+# Emits one -define(V_1_0_NAME, {source, value}); symbol values are
+# quoted so they become Erlang strings.
+def print_define(opt, source):
+ (name, value) = opt
+ if source == 'symbol':
+ quoted = '"%s"' % value
+ else:
+ quoted = value
+ print """-define(V_1_0_%s, {%s, %s}).""" % (name, source, quoted)
+
+# A <type> element is a described (composite) type iff it carries a
+# <descriptor> child.
+def want_type(el):
+ descriptors = el.getElementsByTagName('descriptor')
+ return len(descriptors) > 0
+
+# Restricted types contribute choice-value defines to the .hrl.
+def want_define(el):
+ klass = el.getAttribute('class')
+ return klass == 'restricted'
+
+# Parses a "0xXXXXXXXX:0xXXXXXXXX" descriptor code into one 64-bit
+# integer; returns None (falsy) when the code attribute is empty.
+def parse_code(code):
+ res = re.match('0x([0-9a-fA-F]{8,8}):0x([0-9a-fA-F]{8,8})', code)
+ return res and int(res.group(1) + res.group(2), 16)
+
+# Entry point: usage is `codegen.py (erl|hrl) spec1.xml spec2.xml ...`.
+# Collects described types and restricted defines from every spec
+# file, then prints either the .erl module or the .hrl header.
+types = []
+defines = []
+mode = sys.argv[1]
+
+for file in sys.argv[2:]:
+ tree = parse(file)
+ types.extend([AMQPType(el) for el in tree.getElementsByTagName('type')
+ if want_type(el)])
+ defines.extend([AMQPDefines(el) for el in tree.getElementsByTagName('type')
+ if want_define(el)])
+
+if mode == 'erl':
+ print_erl(types)
+elif mode == 'hrl':
+ print_hrl(types, defines)
+else:
+ # NOTE(review): raising a string is Python 2.5-era style and is
+ # rejected by later interpreters — kept as-is in this patch.
+ raise "Mode != erl or hrl"
--- /dev/null
+%% Debug and protocol constants for the AMQP 1.0 plugin. Uncomment the
+%% next line to enable the DEBUG macros below.
+%%-define(debug, true).
+
+-ifdef(debug).
+-define(DEBUG0(F), ?SAFE(io:format(F, []))).
+-define(DEBUG(F, A), ?SAFE(io:format(F, A))).
+-else.
+-define(DEBUG0(F), ok).
+-define(DEBUG(F, A), ok).
+-endif.
+
+%% Pretty-prints a framing record via rabbit_amqp1_0_framing:pprint/1.
+-define(pprint(F), io:format("~p~n", [rabbit_amqp1_0_framing:pprint(F)])).
+
+%% Evaluates F, logging (instead of propagating) any exception — so a
+%% failing debug statement cannot crash the calling process.
+-define(SAFE(F),
+ ((fun() ->
+ try F
+ catch __T:__E ->
+ io:format("~p:~p thrown debugging~n~p~n",
+ [__T, __E, erlang:get_stacktrace()])
+ end
+ end)())).
+
+%% General consts
+
+-define(FRAME_1_0_MIN_SIZE, 512).
+
+%% AMQP 1.0 link role booleans: sender = false, receiver = true.
+-define(SEND_ROLE, false).
+-define(RECV_ROLE, true).
+
+%% Encoding
+
+%% Leading byte that marks a described type in the AMQP 1.0 encoding.
+-define(DESCRIBED, 0:8).
+-define(DESCRIBED_BIN, <<?DESCRIBED>>).
+
+-include_lib("rabbit_amqp1_0_framing.hrl").
--- /dev/null
+# Build glue for the rabbitmq-amqp1.0 plugin: the framing codec module and
+# header are generated from the AMQP 1.0 spec XML files by codegen.py.
+RELEASABLE:=true
+APP_NAME=rabbitmq_amqp1_0
+DEPS:=rabbitmq-server rabbitmq-erlang-client
+STANDALONE_TEST_COMMANDS:=eunit:test(rabbit_amqp1_0_test,[verbose])
+WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/swiftmq/run-tests.sh
+
+# Generated codec sources, the generator script, and the spec files the
+# generated code is derived from.
+FRAMING_HRL=$(PACKAGE_DIR)/include/rabbit_amqp1_0_framing.hrl
+FRAMING_ERL=$(PACKAGE_DIR)/src/rabbit_amqp1_0_framing0.erl
+CODEGEN=$(PACKAGE_DIR)/codegen.py
+CODEGEN_SPECS=$(PACKAGE_DIR)/spec/messaging.xml $(PACKAGE_DIR)/spec/security.xml $(PACKAGE_DIR)/spec/transport.xml $(PACKAGE_DIR)/spec/transactions.xml
+
+# Register the generated files with the normal build.
+INCLUDE_HRLS+=$(FRAMING_HRL)
+SOURCE_ERLS+=$(FRAMING_ERL)
+
+# These rules are expanded later via $(eval), hence the $$@ escaping of the
+# automatic variable.
+define package_rules
+
+$(FRAMING_ERL): $(CODEGEN) $(CODEGEN_SPECS)
+	$(CODEGEN) erl $(CODEGEN_SPECS) > $$@
+
+$(FRAMING_HRL): $(CODEGEN) $(CODEGEN_SPECS)
+	$(CODEGEN) hrl $(CODEGEN_SPECS) > $$@
+
+$(PACKAGE_DIR)+clean::
+	rm -f $(FRAMING_HRL) $(FRAMING_ERL)
+
+endef
--- /dev/null
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="messaging" xmlns="http://www.amqp.org/schema/amqp.xsd">
+ <section name="message-format">
+ <type name="header" class="composite" source="list" provides="section">
+ <descriptor name="amqp:header:list" code="0x00000000:0x00000070"/>
+ <field name="durable" type="boolean"/>
+ <field name="priority" type="ubyte"/>
+ <field name="ttl" type="milliseconds"/>
+ <field name="first-acquirer" type="boolean"/>
+ <field name="delivery-count" type="uint"/>
+ </type>
+ <type name="delivery-annotations" class="restricted" source="annotations" provides="section">
+ <descriptor name="amqp:delivery-annotations:map" code="0x00000000:0x00000071"/>
+ </type>
+ <type name="message-annotations" class="restricted" source="annotations" provides="section">
+ <descriptor name="amqp:message-annotations:map" code="0x00000000:0x00000072"/>
+ </type>
+ <type name="properties" class="composite" source="list" provides="section">
+ <descriptor name="amqp:properties:list" code="0x00000000:0x00000073"/>
+ <field name="message-id" type="*" requires="message-id"/>
+ <field name="user-id" type="binary"/>
+ <field name="to" type="*" requires="address"/>
+ <field name="subject" type="string"/>
+ <field name="reply-to" type="*" requires="address"/>
+ <field name="correlation-id" type="*" requires="message-id"/>
+ <field name="content-type" type="symbol"/>
+ <field name="content-encoding" type="symbol"/>
+ <field name="absolute-expiry-time" type="timestamp"/>
+ <field name="creation-time" type="timestamp"/>
+ <field name="group-id" type="string"/>
+ <field name="group-sequence" type="sequence-no"/>
+ <field name="reply-to-group-id" type="string"/>
+ </type>
+ <type name="application-properties" class="restricted" source="map" provides="section">
+ <descriptor name="amqp:application-properties:map" code="0x00000000:0x00000074"/>
+ </type>
+ <type name="data" class="restricted" source="binary" provides="section">
+ <descriptor name="amqp:data:binary" code="0x00000000:0x00000075"/>
+ </type>
+ <type name="amqp-sequence" class="restricted" source="list" provides="section">
+ <descriptor name="amqp:amqp-sequence:list" code="0x00000000:0x00000076"/>
+ </type>
+ <type name="amqp-value" class="restricted" source="*" provides="section">
+ <descriptor name="amqp:amqp-value:*" code="0x00000000:0x00000077"/>
+ </type>
+ <type name="footer" class="restricted" source="annotations" provides="section">
+ <descriptor name="amqp:footer:map" code="0x00000000:0x00000078"/>
+ </type>
+ <type name="annotations" class="restricted" source="map"/>
+ <type name="message-id-ulong" class="restricted" source="ulong" provides="message-id"/>
+ <type name="message-id-uuid" class="restricted" source="uuid" provides="message-id"/>
+ <type name="message-id-binary" class="restricted" source="binary" provides="message-id"/>
+ <type name="message-id-string" class="restricted" source="string" provides="message-id"/>
+ <type name="address-string" class="restricted" source="string" provides="address"/>
+ <definition name="MESSAGE-FORMAT" value="0"/>
+ </section>
+ <section name="delivery-state">
+ <type name="received" class="composite" source="list" provides="delivery-state">
+ <descriptor name="amqp:received:list" code="0x00000000:0x00000023"/>
+ <field name="section-number" type="uint" mandatory="true"/>
+ <field name="section-offset" type="ulong" mandatory="true"/>
+ </type>
+ <type name="accepted" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:accepted:list" code="0x00000000:0x00000024"/>
+ </type>
+ <type name="rejected" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:rejected:list" code="0x00000000:0x00000025"/>
+ <field name="error" type="error"/>
+ </type>
+ <type name="released" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:released:list" code="0x00000000:0x00000026"/>
+ </type>
+ <type name="modified" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:modified:list" code="0x00000000:0x00000027"/>
+ <field name="delivery-failed" type="boolean"/>
+ <field name="undeliverable-here" type="boolean"/>
+ <field name="message-annotations" type="fields"/>
+ </type>
+ </section>
+ <section name="addressing">
+ <type name="source" class="composite" source="list" provides="source">
+ <descriptor name="amqp:source:list" code="0x00000000:0x00000028"/>
+ <field name="address" type="*" requires="address"/>
+ <field name="durable" type="terminus-durability" default="none"/>
+ <field name="expiry-policy" type="terminus-expiry-policy" default="session-end"/>
+ <field name="timeout" type="seconds" default="0"/>
+ <field name="dynamic" type="boolean" default="false"/>
+ <field name="dynamic-node-properties" type="node-properties"/>
+ <field name="distribution-mode" type="symbol" requires="distribution-mode"/>
+ <field name="filter" type="filter-set"/>
+ <field name="default-outcome" type="*" requires="outcome"/>
+ <field name="outcomes" type="symbol" multiple="true"/>
+ <field name="capabilities" type="symbol" multiple="true"/>
+ </type>
+ <type name="target" class="composite" source="list" provides="target">
+ <descriptor name="amqp:target:list" code="0x00000000:0x00000029"/>
+ <field name="address" type="*" requires="address"/>
+ <field name="durable" type="terminus-durability" default="none"/>
+ <field name="expiry-policy" type="terminus-expiry-policy" default="session-end"/>
+ <field name="timeout" type="seconds" default="0"/>
+ <field name="dynamic" type="boolean" default="false"/>
+ <field name="dynamic-node-properties" type="node-properties"/>
+ <field name="capabilities" type="symbol" multiple="true"/>
+ </type>
+ <type name="terminus-durability" class="restricted" source="uint">
+ <choice name="none" value="0"/>
+ <choice name="configuration" value="1"/>
+ <choice name="unsettled-state" value="2"/>
+ </type>
+ <type name="terminus-expiry-policy" class="restricted" source="symbol">
+ <choice name="link-detach" value="link-detach"/>
+ <choice name="session-end" value="session-end"/>
+ <choice name="connection-close" value="connection-close"/>
+ <choice name="never" value="never"/>
+ </type>
+ <type name="std-dist-mode" class="restricted" source="symbol" provides="distribution-mode">
+ <choice name="move" value="move"/>
+ <choice name="copy" value="copy"/>
+ </type>
+ <type name="filter-set" class="restricted" source="map"/>
+ <type name="node-properties" class="restricted" source="fields"/>
+ <type name="delete-on-close" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-close:list" code="0x00000000:0x0000002b"/>
+ </type>
+ <type name="delete-on-no-links" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-no-links:list" code="0x00000000:0x0000002c"/>
+ </type>
+ <type name="delete-on-no-messages" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-no-messages:list" code="0x00000000:0x0000002d"/>
+ </type>
+ <type name="delete-on-no-links-or-messages" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-no-links-or-messages:list" code="0x00000000:0x0000002e"/>
+ </type>
+ </section>
+</amqp>
--- /dev/null
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="security" xmlns="http://www.amqp.org/schema/amqp.xsd">
+ <section name="tls">
+ <definition name="TLS-MAJOR" value="1"/>
+ <definition name="TLS-MINOR" value="0"/>
+ <definition name="TLS-REVISION" value="0"/>
+ </section>
+ <section name="sasl">
+ <type name="sasl-mechanisms" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-mechanisms:list" code="0x00000000:0x00000040"/>
+ <field name="sasl-server-mechanisms" type="symbol" mandatory="true" multiple="true"/>
+ </type>
+ <type name="sasl-init" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-init:list" code="0x00000000:0x00000041"/>
+ <field name="mechanism" type="symbol" mandatory="true"/>
+ <field name="initial-response" type="binary"/>
+ <field name="hostname" type="string"/>
+ </type>
+ <type name="sasl-challenge" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-challenge:list" code="0x00000000:0x00000042"/>
+ <field name="challenge" type="binary" mandatory="true"/>
+ </type>
+ <type name="sasl-response" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-response:list" code="0x00000000:0x00000043"/>
+ <field name="response" type="binary" mandatory="true"/>
+ </type>
+ <type name="sasl-outcome" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-outcome:list" code="0x00000000:0x00000044"/>
+ <field name="code" type="sasl-code" mandatory="true"/>
+ <field name="additional-data" type="binary"/>
+ </type>
+ <type name="sasl-code" class="restricted" source="ubyte">
+ <choice name="ok" value="0"/>
+ <choice name="auth" value="1"/>
+ <choice name="sys" value="2"/>
+ <choice name="sys-perm" value="3"/>
+ <choice name="sys-temp" value="4"/>
+ </type>
+ <definition name="SASL-MAJOR" value="1"/>
+ <definition name="SASL-MINOR" value="0"/>
+ <definition name="SASL-REVISION" value="0"/>
+ </section>
+</amqp>
--- /dev/null
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="transactions" xmlns="http://www.amqp.org/schema/amqp.xsd">
+ <section name="coordination">
+ <type name="coordinator" class="composite" source="list" provides="target">
+ <descriptor name="amqp:coordinator:list" code="0x00000000:0x00000030"/>
+ <field name="capabilities" type="symbol" requires="txn-capability" multiple="true"/>
+ </type>
+ <type name="declare" class="composite" source="list">
+ <descriptor name="amqp:declare:list" code="0x00000000:0x00000031"/>
+ <field name="global-id" type="*" requires="global-tx-id"/>
+ </type>
+ <type name="discharge" class="composite" source="list">
+ <descriptor name="amqp:discharge:list" code="0x00000000:0x00000032"/>
+ <field name="txn-id" type="*" mandatory="true" requires="txn-id"/>
+ <field name="fail" type="boolean"/>
+ </type>
+ <type name="transaction-id" class="restricted" source="binary" provides="txn-id"/>
+ <type name="declared" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:declared:list" code="0x00000000:0x00000033"/>
+ <field name="txn-id" type="*" mandatory="true" requires="txn-id"/>
+ </type>
+ <type name="transactional-state" class="composite" source="list" provides="delivery-state">
+ <descriptor name="amqp:transactional-state:list" code="0x00000000:0x00000034"/>
+ <field name="txn-id" type="*" mandatory="true" requires="txn-id"/>
+ <field name="outcome" type="*" requires="outcome"/>
+ </type>
+ <type name="txn-capability" class="restricted" source="symbol" provides="txn-capability">
+ <choice name="local-transactions" value="amqp:local-transactions"/>
+ <choice name="distributed-transactions" value="amqp:distributed-transactions"/>
+ <choice name="promotable-transactions" value="amqp:promotable-transactions"/>
+ <choice name="multi-txns-per-ssn" value="amqp:multi-txns-per-ssn"/>
+ <choice name="multi-ssns-per-txn" value="amqp:multi-ssns-per-txn"/>
+ </type>
+ <type name="transaction-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="unknown-id" value="amqp:transaction:unknown-id"/>
+ <choice name="transaction-rollback" value="amqp:transaction:rollback"/>
+ <choice name="transaction-timeout" value="amqp:transaction:timeout"/>
+ </type>
+ </section>
+</amqp>
--- /dev/null
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="transport" xmlns="http://www.amqp.org/schema/amqp.xsd">
+ <section name="performatives">
+ <type name="open" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:open:list" code="0x00000000:0x00000010"/>
+ <field name="container-id" type="string" mandatory="true"/>
+ <field name="hostname" type="string"/>
+ <field name="max-frame-size" type="uint" default="4294967295"/>
+ <field name="channel-max" type="ushort" default="65535"/>
+ <field name="idle-time-out" type="milliseconds"/>
+ <field name="outgoing-locales" type="ietf-language-tag" multiple="true"/>
+ <field name="incoming-locales" type="ietf-language-tag" multiple="true"/>
+ <field name="offered-capabilities" type="symbol" multiple="true"/>
+ <field name="desired-capabilities" type="symbol" multiple="true"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="begin" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:begin:list" code="0x00000000:0x00000011"/>
+ <field name="remote-channel" type="ushort"/>
+ <field name="next-outgoing-id" type="transfer-number" mandatory="true"/>
+ <field name="incoming-window" type="uint" mandatory="true"/>
+ <field name="outgoing-window" type="uint" mandatory="true"/>
+ <field name="handle-max" type="handle" default="4294967295"/>
+ <field name="offered-capabilities" type="symbol" multiple="true"/>
+ <field name="desired-capabilities" type="symbol" multiple="true"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="attach" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:attach:list" code="0x00000000:0x00000012"/>
+ <field name="name" type="string" mandatory="true"/>
+ <field name="handle" type="handle" mandatory="true"/>
+ <field name="role" type="role" mandatory="true"/>
+ <field name="snd-settle-mode" type="sender-settle-mode" default="mixed"/>
+ <field name="rcv-settle-mode" type="receiver-settle-mode" default="first"/>
+ <field name="source" type="*" requires="source"/>
+ <field name="target" type="*" requires="target"/>
+ <field name="unsettled" type="map"/>
+ <field name="incomplete-unsettled" type="boolean" default="false"/>
+ <field name="initial-delivery-count" type="sequence-no"/>
+ <field name="max-message-size" type="ulong"/>
+ <field name="offered-capabilities" type="symbol" multiple="true"/>
+ <field name="desired-capabilities" type="symbol" multiple="true"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="flow" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:flow:list" code="0x00000000:0x00000013"/>
+ <field name="next-incoming-id" type="transfer-number"/>
+ <field name="incoming-window" type="uint" mandatory="true"/>
+ <field name="next-outgoing-id" type="transfer-number" mandatory="true"/>
+ <field name="outgoing-window" type="uint" mandatory="true"/>
+ <field name="handle" type="handle"/>
+ <field name="delivery-count" type="sequence-no"/>
+ <field name="link-credit" type="uint"/>
+ <field name="available" type="uint"/>
+ <field name="drain" type="boolean" default="false"/>
+ <field name="echo" type="boolean" default="false"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="transfer" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:transfer:list" code="0x00000000:0x00000014"/>
+ <field name="handle" type="handle" mandatory="true"/>
+ <field name="delivery-id" type="delivery-number"/>
+ <field name="delivery-tag" type="delivery-tag"/>
+ <field name="message-format" type="message-format"/>
+ <field name="settled" type="boolean"/>
+ <field name="more" type="boolean" default="false"/>
+ <field name="rcv-settle-mode" type="receiver-settle-mode"/>
+ <field name="state" type="*" requires="delivery-state"/>
+ <field name="resume" type="boolean" default="false"/>
+ <field name="aborted" type="boolean" default="false"/>
+ <field name="batchable" type="boolean" default="false"/>
+ </type>
+ <type name="disposition" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:disposition:list" code="0x00000000:0x00000015"/>
+ <field name="role" type="role" mandatory="true"/>
+ <field name="first" type="delivery-number" mandatory="true"/>
+ <field name="last" type="delivery-number"/>
+ <field name="settled" type="boolean" default="false"/>
+ <field name="state" type="*" requires="delivery-state"/>
+ <field name="batchable" type="boolean" default="false"/>
+ </type>
+ <type name="detach" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:detach:list" code="0x00000000:0x00000016"/>
+ <field name="handle" type="handle" mandatory="true"/>
+ <field name="closed" type="boolean" default="false"/>
+ <field name="error" type="error"/>
+ </type>
+ <type name="end" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:end:list" code="0x00000000:0x00000017"/>
+ <field name="error" type="error"/>
+ </type>
+ <type name="close" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:close:list" code="0x00000000:0x00000018"/>
+ <field name="error" type="error"/>
+ </type>
+ </section>
+ <section name="definitions">
+ <type name="role" class="restricted" source="boolean">
+ <choice name="sender" value="false"/>
+ <choice name="receiver" value="true"/>
+ </type>
+ <type name="sender-settle-mode" class="restricted" source="ubyte">
+ <choice name="unsettled" value="0"/>
+ <choice name="settled" value="1"/>
+ <choice name="mixed" value="2"/>
+ </type>
+ <type name="receiver-settle-mode" class="restricted" source="ubyte">
+ <choice name="first" value="0"/>
+ <choice name="second" value="1"/>
+ </type>
+ <type name="handle" class="restricted" source="uint"/>
+ <type name="seconds" class="restricted" source="uint"/>
+ <type name="milliseconds" class="restricted" source="uint"/>
+ <type name="delivery-tag" class="restricted" source="binary"/>
+ <type name="delivery-number" class="restricted" source="sequence-no"/>
+ <type name="transfer-number" class="restricted" source="sequence-no"/>
+ <type name="sequence-no" class="restricted" source="uint"/>
+ <type name="message-format" class="restricted" source="uint"/>
+ <type name="ietf-language-tag" class="restricted" source="symbol"/>
+ <type name="fields" class="restricted" source="map"/>
+ <type name="error" class="composite" source="list">
+ <descriptor name="amqp:error:list" code="0x00000000:0x0000001d"/>
+ <field name="condition" type="symbol" mandatory="true" requires="error-condition"/>
+ <field name="description" type="string"/>
+ <field name="info" type="fields"/>
+ </type>
+ <type name="amqp-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="internal-error" value="amqp:internal-error"/>
+ <choice name="not-found" value="amqp:not-found"/>
+ <choice name="unauthorized-access" value="amqp:unauthorized-access"/>
+ <choice name="decode-error" value="amqp:decode-error"/>
+ <choice name="resource-limit-exceeded" value="amqp:resource-limit-exceeded"/>
+ <choice name="not-allowed" value="amqp:not-allowed"/>
+ <choice name="invalid-field" value="amqp:invalid-field"/>
+ <choice name="not-implemented" value="amqp:not-implemented"/>
+ <choice name="resource-locked" value="amqp:resource-locked"/>
+ <choice name="precondition-failed" value="amqp:precondition-failed"/>
+ <choice name="resource-deleted" value="amqp:resource-deleted"/>
+ <choice name="illegal-state" value="amqp:illegal-state"/>
+ <choice name="frame-size-too-small" value="amqp:frame-size-too-small"/>
+ </type>
+ <type name="connection-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="connection-forced" value="amqp:connection:forced"/>
+ <choice name="framing-error" value="amqp:connection:framing-error"/>
+ <choice name="redirect" value="amqp:connection:redirect"/>
+ </type>
+ <type name="session-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="window-violation" value="amqp:session:window-violation"/>
+ <choice name="errant-link" value="amqp:session:errant-link"/>
+ <choice name="handle-in-use" value="amqp:session:handle-in-use"/>
+ <choice name="unattached-handle" value="amqp:session:unattached-handle"/>
+ </type>
+ <type name="link-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="detach-forced" value="amqp:link:detach-forced"/>
+ <choice name="transfer-limit-exceeded" value="amqp:link:transfer-limit-exceeded"/>
+ <choice name="message-size-exceeded" value="amqp:link:message-size-exceeded"/>
+ <choice name="redirect" value="amqp:link:redirect"/>
+ <choice name="stolen" value="amqp:link:stolen"/>
+ </type>
+ <definition name="PORT" value="5672"/>
+ <definition name="SECURE-PORT" value="5671"/>
+ <definition name="MAJOR" value="1"/>
+ <definition name="MINOR" value="0"/>
+ <definition name="REVISION" value="0"/>
+ <definition name="MIN-MAX-FRAME-SIZE" value="512"/>
+ </section>
+</amqp>
--- /dev/null
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="types" xmlns="http://www.amqp.org/schema/amqp.xsd">
+ <section name="encodings">
+ <type name="null" class="primitive">
+ <encoding code="0x40" category="fixed" width="0"/>
+ </type>
+ <type name="boolean" class="primitive">
+ <encoding code="0x56" category="fixed" width="1"/>
+ <encoding name="true" code="0x41" category="fixed" width="0"/>
+ <encoding name="false" code="0x42" category="fixed" width="0"/>
+ </type>
+ <type name="ubyte" class="primitive">
+ <encoding code="0x50" category="fixed" width="1"/>
+ </type>
+ <type name="ushort" class="primitive">
+ <encoding code="0x60" category="fixed" width="2"/>
+ </type>
+ <type name="uint" class="primitive">
+ <encoding code="0x70" category="fixed" width="4"/>
+ <encoding name="smalluint" code="0x52" category="fixed" width="1"/>
+ <encoding name="uint0" code="0x43" category="fixed" width="0"/>
+ </type>
+ <type name="ulong" class="primitive">
+ <encoding code="0x80" category="fixed" width="8"/>
+ <encoding name="smallulong" code="0x53" category="fixed" width="1"/>
+ <encoding name="ulong0" code="0x44" category="fixed" width="0"/>
+ </type>
+ <type name="byte" class="primitive">
+ <encoding code="0x51" category="fixed" width="1"/>
+ </type>
+ <type name="short" class="primitive">
+ <encoding code="0x61" category="fixed" width="2"/>
+ </type>
+ <type name="int" class="primitive">
+ <encoding code="0x71" category="fixed" width="4"/>
+ <encoding name="smallint" code="0x54" category="fixed" width="1"/>
+ </type>
+ <type name="long" class="primitive">
+ <encoding code="0x81" category="fixed" width="8"/>
+ <encoding name="smalllong" code="0x55" category="fixed" width="1"/>
+ </type>
+ <type name="float" class="primitive">
+ <encoding name="ieee-754" code="0x72" category="fixed" width="4"/>
+ </type>
+ <type name="double" class="primitive">
+ <encoding name="ieee-754" code="0x82" category="fixed" width="8"/>
+ </type>
+ <type name="decimal32" class="primitive">
+ <encoding name="ieee-754" code="0x74" category="fixed" width="4"/>
+ </type>
+ <type name="decimal64" class="primitive">
+ <encoding name="ieee-754" code="0x84" category="fixed" width="8"/>
+ </type>
+ <type name="decimal128" class="primitive">
+ <encoding name="ieee-754" code="0x94" category="fixed" width="16"/>
+ </type>
+ <type name="char" class="primitive">
+ <encoding name="utf32" code="0x73" category="fixed" width="4"/>
+ </type>
+ <type name="timestamp" class="primitive">
+ <encoding name="ms64" code="0x83" category="fixed" width="8"/>
+ </type>
+ <type name="uuid" class="primitive">
+ <encoding code="0x98" category="fixed" width="16"/>
+ </type>
+ <type name="binary" class="primitive">
+ <encoding name="vbin8" code="0xa0" category="variable" width="1"/>
+ <encoding name="vbin32" code="0xb0" category="variable" width="4"/>
+ </type>
+ <type name="string" class="primitive">
+ <encoding name="str8-utf8" code="0xa1" category="variable" width="1"/>
+ <encoding name="str32-utf8" code="0xb1" category="variable" width="4"/>
+ </type>
+ <type name="symbol" class="primitive">
+ <encoding name="sym8" code="0xa3" category="variable" width="1"/>
+ <encoding name="sym32" code="0xb3" category="variable" width="4"/>
+ </type>
+ <type name="list" class="primitive">
+ <encoding name="list0" code="0x45" category="fixed" width="0"/>
+ <encoding name="list8" code="0xc0" category="compound" width="1"/>
+ <encoding name="list32" code="0xd0" category="compound" width="4"/>
+ </type>
+ <type name="map" class="primitive">
+ <encoding name="map8" code="0xc1" category="compound" width="1"/>
+ <encoding name="map32" code="0xd1" category="compound" width="4"/>
+ </type>
+ <type name="array" class="primitive">
+ <encoding name="array8" code="0xe0" category="array" width="1"/>
+ <encoding name="array32" code="0xf0" category="array" width="4"/>
+ </type>
+ </section>
+</amqp>
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_binary_generator).
+
+-export([generate/1, build_frame/2, build_frame/3,
+ build_heartbeat_frame/0]).
+
+-include("rabbit_amqp1_0.hrl").
+
+-ifdef(use_specs).
+-spec(generate/1 :: (tuple()) -> iolist()).
+-spec(build_frame/2 :: (int(), iolist()) -> iolist()).
+-endif.
+
+-define(AMQP_FRAME_TYPE, 0).
+-define(DOFF, 2).
+-define(VAR_1_LIMIT, 16#FF).
+
+%% Frame a payload as a standard AMQP frame (frame type 0) on the
+%% given channel number.
+build_frame(Channel, Payload) ->
+    build_frame(Channel, ?AMQP_FRAME_TYPE, Payload).
+
+%% Prepend the 8-byte AMQP 1.0 frame header (size, doff, type,
+%% channel) to Payload. The size field includes the header itself.
+%% NOTE(review): the literal 2 below is the data offset in 4-byte
+%% words (no extended header) — same value as ?DOFF used by
+%% build_heartbeat_frame/0; consider using the macro for consistency.
+build_frame(Channel, FrameType, Payload) ->
+    Size = iolist_size(Payload) + 8, % frame header and no extension
+    [ <<Size:32/unsigned, 2:8, FrameType:8, Channel:16/unsigned>>, Payload ].
+
+%% An empty AMQP frame (header only) used as a heartbeat: 8-byte
+%% length, standard data offset, AMQP frame type, channel 0.
+build_heartbeat_frame() ->
+    %% length is inclusive
+    <<8:32, ?DOFF:8, ?AMQP_FRAME_TYPE:8, 0:16>>.
+
+%% generate(Term) -> iolist()
+%%
+%% Encode one parsed AMQP 1.0 term (the representation produced by
+%% rabbit_amqp1_0_binary_parser) into its wire form. Returns an
+%% iolist; flatten with iolist_to_binary/1 if a binary is needed.
+generate({described, Descriptor, Value}) ->
+    DescBin = generate(Descriptor),
+    ValueBin = generate(Value),
+    [ ?DESCRIBED_BIN, DescBin, ValueBin ];
+
+%% Zero-width constants.
+generate(null)  -> <<16#40>>;
+generate(true)  -> <<16#41>>;
+generate(false) -> <<16#42>>;
+
+%% some integral types have a compact encoding as a byte; this is in
+%% particular for the descriptors of AMQP types, which have the domain
+%% bits set to zero and values < 256.
+generate({ubyte, V})  -> <<16#50,V:8/unsigned>>;
+generate({ushort, V}) -> <<16#60,V:16/unsigned>>;
+generate({uint, V}) when V =:= 0 -> <<16#43>>;
+generate({uint, V}) when V < 256 -> <<16#52,V:8/unsigned>>;
+generate({uint, V})   -> <<16#70,V:32/unsigned>>;
+generate({ulong, V}) when V =:= 0 -> <<16#44>>;
+generate({ulong, V}) when V < 256 -> <<16#53,V:8/unsigned>>;
+generate({ulong, V})  -> <<16#80,V:64/unsigned>>;
+generate({byte, V})   -> <<16#51,V:8/signed>>;
+generate({short, V})  -> <<16#61,V:16/signed>>;
+generate({int, V}) when V<128 andalso V>-129 -> <<16#54,V:8/signed>>;
+generate({int, V})    -> <<16#71,V:32/signed>>;
+generate({long, V}) when V<128 andalso V>-129 -> <<16#55,V:8/signed>>;
+generate({long, V})   -> <<16#81,V:64/signed>>;
+generate({float, V})  -> <<16#72,V:32/float>>;
+generate({double, V}) -> <<16#82,V:64/float>>;
+%% char is carried as a 4-byte UTF-32 binary, matching the parser.
+generate({char, V})   -> <<16#73,V:4/binary>>;
+generate({timestamp,V})  -> <<16#83,V:64/signed>>;
+generate({uuid, V})   -> <<16#98,V:16/binary>>;
+
+%% Variable-width encodings: a 1-byte size prefix when the payload is
+%% short enough, otherwise a 4-byte size prefix.
+generate({utf8, V}) when byte_size(V) < ?VAR_1_LIMIT ->
+    [<<16#a1,(byte_size(V)):8>>, V];
+generate({utf8, V}) ->
+    [<<16#b1,(byte_size(V)):32>>, V];
+%% BUG FIX: symbols of VAR_1_LIMIT bytes or more previously overflowed
+%% the 8-bit size field (silently truncating the length); emit the
+%% sym32 (0xb3) encoding for those, mirroring the utf8 clauses.
+generate({symbol, V}) when byte_size(V) < ?VAR_1_LIMIT ->
+    [<<16#a3,(byte_size(V)):8>>, V];
+generate({symbol, V}) ->
+    [<<16#b3,(byte_size(V)):32>>, V];
+generate({binary, V}) ->
+    Size = iolist_size(V),
+    if Size < ?VAR_1_LIMIT -> [<<16#a0,Size:8>>, V];
+       true                -> [<<16#b0,Size:32>>, V]
+    end;
+
+%% The empty list has its own zero-width encoding (list0).
+generate({list, []}) ->
+    <<16#45>>;
+generate({list, List}) ->
+    Count = length(List),
+    Compound = lists:map(fun generate/1, List),
+    S = iolist_size(Compound),
+    %% The stored size covers the count field too, hence S + 4 / S + 1.
+    %% S < 256 -> Count < 256
+    if S > 255 -> [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound];
+       true    -> [<<16#c0, (S + 1):8/unsigned,  Count:8/unsigned>>,  Compound]
+    end;
+
+generate({map, ListOfPairs}) ->
+    %% Maps count keys and values individually on the wire.
+    Count = length(ListOfPairs) * 2,
+    Compound = lists:map(fun ({Key, Val}) ->
+                                 [(generate(Key)),
+                                  (generate(Val))]
+                         end, ListOfPairs),
+    S = iolist_size(Compound),
+    if S > 255 -> [<<16#d1,(S + 4):32,Count:32>>, Compound];
+       true    -> [<<16#c1,(S + 1):8, Count:8>>,  Compound]
+    end;
+
+generate({array, Type, List}) ->
+    %% All elements share a single constructor, emitted once.
+    Count = length(List),
+    Body = iolist_to_binary(
+             [constructor(Type), [generate(Type, I) || I <- List]]),
+    S = byte_size(Body),
+    %% S < 256 -> Count < 256
+    if S > 255 -> [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body];
+       true    -> [<<16#e0, (S + 1):8/unsigned,  Count:8/unsigned>>,  Body]
+    end;
+
+generate({as_is, TypeCode, Bin}) ->
+    %% BUG FIX: Bin is a binary, so it needs a /binary segment;
+    %% <<TypeCode, Bin>> raised badarg at runtime.
+    <<TypeCode, Bin/binary>>.
+
+%% TODO these remain a stub to get SASL working; a future codec could
+%% generalise array-element encoding beyond symbols (see generate/1,
+%% which would then be split along the same lines).
+
+%% The single-byte constructor emitted once per array (sym8).
+constructor(symbol) ->
+    <<16#a3>>.
+
+%% Encode one array element of the given type, without a constructor.
+%% Value is a plain string (list of characters).
+generate(symbol, Value) ->
+    Size = length(Value),
+    [<<Size:8>>, list_to_binary(Value)].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_binary_parser).
+
+-export([parse/1, parse_all/1]).
+
+-include("rabbit_amqp1_0.hrl").
+
+-ifdef(use_specs).
+-spec(parse/1 :: (binary()) -> tuple()).
+-endif.
+
+%% Parse a binary containing a sequence of AMQP values, returning all
+%% of them in order. Crashes (badmatch inside parse/1) on trailing
+%% garbage that is not a complete value.
+parse_all(ValueBin) when is_binary(ValueBin) ->
+    collect_values(parse(ValueBin), []).
+
+%% Accumulate parsed values until the remaining binary is exhausted.
+collect_values({Value, <<>>}, Acc) ->
+    lists:reverse([Value | Acc]);
+collect_values({Value, Rest}, Acc) ->
+    collect_values(parse(Rest), [Value | Acc]).
+
+%% Parse one AMQP 1.0 value from the front of a binary, returning
+%% {Value, RestOfBinary}. A leading ?DESCRIBED byte marks a described
+%% type (descriptor + value); anything else is a primitive.
+parse(<<?DESCRIBED,Rest/binary>>) ->
+    parse_described(Rest);
+parse(Rest) ->
+    parse_primitive0(Rest).
+
+%% A described value is two consecutive values: the descriptor
+%% followed by the value proper.
+parse_described(Bin) ->
+    {Descriptor, Rest1} = parse(Bin),
+    {Value, Rest2} = parse(Rest1),
+    {{described, Descriptor, Value}, Rest2}.
+
+%% Split off the one-byte format code and dispatch on it.
+parse_primitive0(<<Type,Rest/binary>>) ->
+    parse_primitive(Type, Rest).
+
+%% parse_primitive(FormatCode, Rest) -> {Value, Remainder}
+%%
+%% Decode one primitive value given its format code. NOTE: clause
+%% order matters — see the NaN/inf clauses for 16#72/16#82 below.
+
+%% Constants
+parse_primitive(16#40, Rest) -> {null, Rest};
+parse_primitive(16#41, Rest) -> {true, Rest};
+parse_primitive(16#42, Rest) -> {false, Rest};
+parse_primitive(16#43, Rest) -> {{uint, 0}, Rest};
+parse_primitive(16#44, Rest) -> {{ulong, 0}, Rest};
+
+%% Fixed-widths. Most integral types have a compact encoding as a byte.
+parse_primitive(16#50, <<V:8/unsigned,  R/binary>>) -> {{ubyte, V}, R};
+parse_primitive(16#51, <<V:8/signed,    R/binary>>) -> {{byte, V}, R};
+parse_primitive(16#52, <<V:8/unsigned,  R/binary>>) -> {{uint, V}, R};
+parse_primitive(16#53, <<V:8/unsigned,  R/binary>>) -> {{ulong, V}, R};
+parse_primitive(16#54, <<V:8/signed,    R/binary>>) -> {{int, V}, R};
+parse_primitive(16#55, <<V:8/signed,    R/binary>>) -> {{long, V}, R};
+%% Full-width boolean: only 0 and 1 are accepted; any other byte falls
+%% through to the catch-all and is rejected.
+parse_primitive(16#56, <<0:8/unsigned,  R/binary>>) -> {false, R};
+parse_primitive(16#56, <<1:8/unsigned,  R/binary>>) -> {true, R};
+parse_primitive(16#60, <<V:16/unsigned, R/binary>>) -> {{ushort, V}, R};
+parse_primitive(16#61, <<V:16/signed,   R/binary>>) -> {{short, V}, R};
+parse_primitive(16#70, <<V:32/unsigned, R/binary>>) -> {{uint, V}, R};
+parse_primitive(16#71, <<V:32/signed,   R/binary>>) -> {{int, V}, R};
+parse_primitive(16#72, <<V:32/float,    R/binary>>) -> {{float, V}, R};
+%% char is kept as its raw 4-byte UTF-32 encoding, not converted.
+parse_primitive(16#73, <<Utf32:4/binary,R/binary>>) -> {{char, Utf32}, R};
+parse_primitive(16#80, <<V:64/unsigned, R/binary>>) -> {{ulong, V}, R};
+parse_primitive(16#81, <<V:64/signed,   R/binary>>) -> {{long, V}, R};
+parse_primitive(16#82, <<V:64/float,    R/binary>>) -> {{double, V}, R};
+parse_primitive(16#83, <<TS:64/signed,  R/binary>>) -> {{timestamp, TS}, R};
+parse_primitive(16#98, <<Uuid:16/binary,R/binary>>) -> {{uuid, Uuid}, R};
+
+%% Variable-widths
+parse_primitive(16#a0,<<S:8/unsigned, V:S/binary,R/binary>>)-> {{binary, V}, R};
+parse_primitive(16#a1,<<S:8/unsigned, V:S/binary,R/binary>>)-> {{utf8, V}, R};
+parse_primitive(16#a3,<<S:8/unsigned, V:S/binary,R/binary>>)-> {{symbol, V}, R};
+parse_primitive(16#b3,<<S:32/unsigned,V:S/binary,R/binary>>)-> {{symbol, V}, R};
+parse_primitive(16#b0,<<S:32/unsigned,V:S/binary,R/binary>>)-> {{binary, V}, R};
+parse_primitive(16#b1,<<S:32/unsigned,V:S/binary,R/binary>>)-> {{utf8, V}, R};
+
+%% Compounds
+parse_primitive(16#45, R) ->
+    {{list, []}, R};
+parse_primitive(16#c0,<<S:8/unsigned,CountAndValue:S/binary,R/binary>>) ->
+    {{list, parse_compound(8, CountAndValue)}, R};
+parse_primitive(16#c1,<<S:8/unsigned,CountAndValue:S/binary,R/binary>>) ->
+    List = parse_compound(8, CountAndValue),
+    {{map, mapify(List)}, R};
+parse_primitive(16#d0,<<S:32/unsigned,CountAndValue:S/binary,R/binary>>) ->
+    {{list, parse_compound(32, CountAndValue)}, R};
+parse_primitive(16#d1,<<S:32/unsigned,CountAndValue:S/binary,R/binary>>) ->
+    List = parse_compound(32, CountAndValue),
+    {{map, mapify(List)}, R};
+
+%% Arrays
+parse_primitive(16#e0,<<S:8/unsigned,CountAndV:S/binary,R/binary>>) ->
+    {{list, parse_array(8, CountAndV)}, R};
+parse_primitive(16#f0,<<S:32/unsigned,CountAndV:S/binary,R/binary>>) ->
+    {{list, parse_array(32, CountAndV)}, R};
+
+%% NaN or +-inf: Erlang's /float binary segments do not match these
+%% bit patterns, so the earlier 16#72/16#82 clauses fall through here
+%% and the raw bytes are preserved opaquely as {as_is, Code, Bin}.
+parse_primitive(16#72, <<V:32, R/binary>>) ->
+    {{as_is, 16#72, <<V:32>>}, R};
+parse_primitive(16#82, <<V:64, R/binary>>) ->
+    {{as_is, 16#82, <<V:64>>}, R};
+
+%% decimals: carried opaquely (no Erlang decimal type).
+parse_primitive(16#74, <<V:32, R/binary>>) ->
+    {{as_is, 16#74, <<V:32>>}, R};
+parse_primitive(16#84, <<V:64, R/binary>>) ->
+    {{as_is, 16#84, <<V:64>>}, R};
+parse_primitive(16#94, <<V:128, R/binary>>) ->
+    {{as_is, 16#94, <<V:128>>}, R};
+
+%% Anything else is an unknown or malformed format code.
+parse_primitive(Type, _) ->
+    throw({primitive_type_unsupported, Type}).
+
+%% Parse the count-prefixed body of a list8/list32/map8/map32
+%% encoding. UnitSize (8 | 32) is the width of the embedded count
+%% field; the rest of Bin holds Count consecutive values.
+parse_compound(UnitSize, Bin) ->
+    <<Count:UnitSize, Bin1/binary>> = Bin,
+    parse_compound1(Count, Bin1, []).
+
+parse_compound1(0, <<>>, List) ->
+    lists:reverse(List);
+parse_compound1(_Left, <<>>, List) ->
+    %% Ran out of bytes before consuming the declared element count.
+    %% In strict mode this is a protocol error; otherwise tolerate it.
+    %% ROBUSTNESS FIX: an unset protocol_strict_mode app env
+    %% previously crashed with case_clause; it now falls back to the
+    %% lenient behaviour.
+    case application:get_env(rabbitmq_amqp1_0, protocol_strict_mode) of
+        {ok, true} -> throw(compound_datatype_miscount);
+        _          -> lists:reverse(List) %% ignore miscount
+    end;
+parse_compound1(Count, Bin, Acc) ->
+    {Value, Rest} = parse(Bin),
+    parse_compound1(Count - 1, Rest, [Value | Acc]).
+
+%% Parse the count-prefixed body of an array8/array32 encoding.
+%% UnitSize (8 | 32) is the width of the embedded count field.
+parse_array(UnitSize, Bin) ->
+    <<Count:UnitSize, Bin1/binary>> = Bin,
+    parse_array1(Count, Bin1).
+
+%% An array of described types carries ?DESCRIBED + descriptor once,
+%% before the element constructor; re-wrap every element with it.
+parse_array1(Count, <<?DESCRIBED,Rest/binary>>) ->
+    {Descriptor, Rest1} = parse(Rest),
+    List = parse_array1(Count, Rest1),
+    lists:map(fun (Value) ->
+                      {described, Descriptor, Value}
+              end, List);
+%% Otherwise the single element constructor (format code) comes first.
+parse_array1(Count, <<Type,ArrayBin/binary>>) ->
+    parse_array2(Count, Type, ArrayBin, []).
+
+%% Decode Count elements, all sharing the same format code. Unlike
+%% parse_compound1/3, a miscount here is always a badmatch crash.
+parse_array2(0, _Type, <<>>, Acc) ->
+    lists:reverse(Acc);
+parse_array2(Count, Type, Bin, Acc) ->
+    {Value, Rest} = parse_primitive(Type, Bin),
+    parse_array2(Count - 1, Type, Rest, [Value | Acc]).
+
+%% Pair up a flat [K1, V1, K2, V2, ...] list (as decoded from a map
+%% encoding) into [{K1, V1}, {K2, V2}, ...]. Crashes (function_clause)
+%% on an odd-length list, which would indicate a malformed map.
+mapify(Flat) ->
+    mapify(Flat, []).
+
+mapify([], Acc) ->
+    lists:reverse(Acc);
+mapify([Key, Value | Rest], Acc) ->
+    mapify(Rest, [{Key, Value} | Acc]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_channel).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([call/2, call/3, cast/2, cast/3, cast_flow/3, subscribe/3]).
+-export([convert_error/1]).
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+
+%% Thin wrappers over amqp_channel operations; each one translates a
+%% server-initiated 0-9-1 channel close into an AMQP 1.0 protocol
+%% error via convert_error/1.
+
+%% Synchronous method call.
+call(Ch, Method) ->
+    convert_error(fun () -> amqp_channel:call(Ch, Method) end).
+
+%% Synchronous method call with message content.
+call(Ch, Method, Content) ->
+    convert_error(fun () -> amqp_channel:call(Ch, Method, Content) end).
+
+%% Asynchronous method cast.
+cast(Ch, Method) ->
+    convert_error(fun () -> amqp_channel:cast(Ch, Method) end).
+
+%% Asynchronous method cast with message content.
+cast(Ch, Method, Content) ->
+    convert_error(fun () -> amqp_channel:cast(Ch, Method, Content) end).
+
+%% As cast/3 via amqp_channel:cast_flow/3 (presumably subject to the
+%% client's flow control — confirm against amqp_channel docs).
+cast_flow(Ch, Method, Content) ->
+    convert_error(fun () -> amqp_channel:cast_flow(Ch, Method, Content) end).
+
+%% Register Subscriber as the consumer process for Method.
+subscribe(Ch, Method, Subscriber) ->
+    convert_error(fun () -> amqp_channel:subscribe(Ch, Method, Subscriber) end).
+
+%% Run Fun(), converting an exit caused by a server-initiated channel
+%% close into an AMQP 1.0 protocol error (which protocol_error/3
+%% raises; this function therefore does not return in that case).
+%% Any other exit or error propagates unchanged.
+convert_error(Fun) ->
+    try
+        Fun()
+    catch exit:{{shutdown, {server_initiated_close, Code, Msg}}, _} ->
+            protocol_error(convert_code(Code), Msg, [])
+    end.
+
+%% Map an AMQP 0-9-1 reply code to the closest AMQP 1.0 error
+%% condition symbol. Unlisted codes crash with function_clause.
+%% TODO this was completely off the top of my head. Check these make sense.
+convert_code(?CONTENT_TOO_LARGE)   -> ?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL;
+convert_code(?NO_ROUTE)            -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED;
+convert_code(?NO_CONSUMERS)        -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED;
+convert_code(?ACCESS_REFUSED)      -> ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS;
+convert_code(?NOT_FOUND)           -> ?V_1_0_AMQP_ERROR_NOT_FOUND;
+convert_code(?RESOURCE_LOCKED)     -> ?V_1_0_AMQP_ERROR_RESOURCE_LOCKED;
+convert_code(?PRECONDITION_FAILED) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED;
+convert_code(?CONNECTION_FORCED)   -> ?V_1_0_CONNECTION_ERROR_CONNECTION_FORCED;
+convert_code(?INVALID_PATH)        -> ?V_1_0_AMQP_ERROR_INVALID_FIELD;
+convert_code(?FRAME_ERROR)         -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?SYNTAX_ERROR)        -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?COMMAND_INVALID)     -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?CHANNEL_ERROR)       -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?UNEXPECTED_FRAME)    -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?RESOURCE_ERROR)      -> ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED;
+convert_code(?NOT_ALLOWED)         -> ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS;
+convert_code(?NOT_IMPLEMENTED)     -> ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED;
+convert_code(?INTERNAL_ERROR)      -> ?V_1_0_AMQP_ERROR_INTERNAL_ERROR.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_framing).
+
+-export([encode/1, encode_described/3, decode/1, version/0,
+ symbol_for/1, number_for/1, encode_bin/1, decode_bin/1, pprint/1]).
+
+%% debug
+-export([fill_from_list/2, fill_from_map/2]).
+
+-include("rabbit_amqp1_0.hrl").
+
+%% The AMQP protocol version this framing module implements, as
+%% {Major, Minor, Revision}.
+version() ->
+    {1, 0, 0}.
+
+%% These are essentially in lieu of code generation ..
+
+%% Populate Record positionally from a list of encoded field values,
+%% decoding each one. Element 1 of the record tuple is its tag, so
+%% filling starts at position 2. Surplus record fields keep their
+%% defaults; surplus list entries crash setelement/3.
+fill_from_list(Record, Fields) ->
+    fill_positions(Fields, 2, Record).
+
+fill_positions([], _Pos, Record) ->
+    Record;
+fill_positions([Field | Rest], Pos, Record) ->
+    fill_positions(Rest, Pos + 1, setelement(Pos, Record, decode(Field))).
+
+%% Populate Record from a proplist of {Key, EncodedValue} pairs,
+%% walking the record's own key list (keys/1) in field order so each
+%% key lands at its positional slot. Keys absent from Fields leave
+%% the record default in place.
+fill_from_map(Record, Fields) ->
+    fill_keys(keys(Record), 2, Record, Fields).
+
+fill_keys([], _Pos, Record, _Fields) ->
+    Record;
+fill_keys([Key | Rest], Pos, Record, Fields) ->
+    case proplists:get_value(Key, Fields) of
+        undefined ->
+            fill_keys(Rest, Pos + 1, Record, Fields);
+        Value ->
+            fill_keys(Rest, Pos + 1,
+                      setelement(Pos, Record, decode(Value)), Fields)
+    end.
+
+%% Set the raw binary payload of a data section.
+%% TODO should this be part of a more general handler for AMQP values etc?
+fill_from_binary(F = #'v1_0.data'{}, Field) ->
+    F#'v1_0.data'{content = Field}.
+
+%% Set the (already decoded) content of an amqp-value section.
+%% TODO so should this?
+fill_from_amqp(F = #'v1_0.amqp_value'{}, Field) ->
+    F#'v1_0.amqp_value'{content = Field}.
+
+%% The record's field names as AMQP symbols, in field order:
+%% atoms from the generated framing0 module, underscores hyphenated.
+keys(Record) ->
+    [{symbol, symbolify(K)} || K <- rabbit_amqp1_0_framing0:fields(Record)].
+
+%% Convert a record field name to its wire spelling: every underscore
+%% becomes a hyphen (e.g. delivery_count -> "delivery-count").
+symbolify(FieldName) when is_atom(FieldName) ->
+    [case Ch of $_ -> $-; _ -> Ch end || Ch <- atom_to_list(FieldName)].
+
+%% TODO: in fields of composite types with multiple=true, "a null
+%% value and a zero-length array (with a correct type for its
+%% elements) both describe an absence of a value and should be treated
+%% as semantically identical." (see section 1.3)
+
+%% A sequence comes as an arbitrary list of values; it's not a
+%% composite type.
+%% Turn a parsed term into its framing record (or plain value).
+%% Described lists normally map onto generated records filled
+%% positionally, except amqp-sequence, whose body is an arbitrary
+%% list of values rather than a composite type.
+decode({described, Descriptor, {list, Fields}}) ->
+    case rabbit_amqp1_0_framing0:record_for(Descriptor) of
+        #'v1_0.amqp_sequence'{} ->
+            #'v1_0.amqp_sequence'{content = [decode(F) || F <- Fields]};
+        Else ->
+            fill_from_list(Else, Fields)
+    end;
+%% Described maps: the four free-form map sections keep their content
+%% as a decoded key/value list; everything else is a composite whose
+%% keys name record fields.
+decode({described, Descriptor, {map, Fields}}) ->
+    case rabbit_amqp1_0_framing0:record_for(Descriptor) of
+        #'v1_0.application_properties'{} ->
+            #'v1_0.application_properties'{content = decode_map(Fields)};
+        #'v1_0.delivery_annotations'{} ->
+            #'v1_0.delivery_annotations'{content = decode_map(Fields)};
+        #'v1_0.message_annotations'{} ->
+            #'v1_0.message_annotations'{content = decode_map(Fields)};
+        #'v1_0.footer'{} ->
+            #'v1_0.footer'{content = decode_map(Fields)};
+        Else ->
+            fill_from_map(Else, Fields)
+    end;
+%% Described binary: a data section carrying an opaque payload.
+decode({described, Descriptor, {binary, Field}}) ->
+    fill_from_binary(rabbit_amqp1_0_framing0:record_for(Descriptor), Field);
+%% Any other described value: an amqp-value section.
+decode({described, Descriptor, Field}) ->
+    fill_from_amqp(rabbit_amqp1_0_framing0:record_for(Descriptor), Field);
+%% Wire null becomes Erlang's conventional 'undefined'.
+decode(null) ->
+    undefined;
+%% Plain (non-described) values pass through untouched.
+decode(Other) ->
+    Other.
+
+%% Decode both halves of every key/value pair in a parsed map body.
+decode_map(Fields) ->
+    lists:map(fun ({Key, Val}) -> {decode(Key), decode(Val)} end, Fields).
+
+%% Wrap a framing record as a described wire term. The first argument
+%% is the record's wire shape (from the generated code); the second is
+%% its descriptor, either a symbol string or a numeric code.
+
+%% Composite encoded as a described list: fields in record order.
+encode_described(list, ListOrNumber, Frame) ->
+    Desc = descriptor(ListOrNumber),
+    {described, Desc,
+     {list, lists:map(fun encode/1, tl(tuple_to_list(Frame)))}};
+%% Composite encoded as a described map: field names paired with
+%% field values.
+encode_described(map, ListOrNumber, Frame) ->
+    Desc = descriptor(ListOrNumber),
+    {described, Desc,
+     {map, lists:zip(keys(Frame),
+                     lists:map(fun encode/1, tl(tuple_to_list(Frame))))}};
+%% Data section: opaque binary payload.
+encode_described(binary, ListOrNumber, #'v1_0.data'{content = Content}) ->
+    Desc = descriptor(ListOrNumber),
+    {described, Desc, {binary, Content}};
+%% amqp-value section: content is already a wire term.
+encode_described('*', ListOrNumber, #'v1_0.amqp_value'{content = Content}) ->
+    Desc = descriptor(ListOrNumber),
+    {described, Desc, Content};
+%% Annotation sections share the map encoding.
+encode_described(annotations, ListOrNumber, Frame) ->
+    encode_described(map, ListOrNumber, Frame).
+
+%% Encode a framing record (or plain value) to a parsed wire term,
+%% via the generated framing0 module.
+encode(X) ->
+    rabbit_amqp1_0_framing0:encode(X).
+
+%% Encode all the way to wire bytes (an iolist).
+encode_bin(X) ->
+    rabbit_amqp1_0_binary_generator:generate(encode(X)).
+
+%% Decode a binary into the list of framing records/values it holds.
+decode_bin(Bin) ->
+    lists:map(fun decode/1, decode_bin0(Bin)).
+
+%% Parse the binary into raw terms, preserving order.
+decode_bin0(<<>>) ->
+    [];
+decode_bin0(Bin) ->
+    {Term, Rest} = rabbit_amqp1_0_binary_parser:parse(Bin),
+    [Term | decode_bin0(Rest)].
+
+%% The descriptor symbol for a framing record (delegated to the
+%% generated framing0 module).
+symbol_for(X) ->
+    rabbit_amqp1_0_framing0:symbol_for(X).
+
+%% The numeric descriptor for a framing record (ditto).
+number_for(X) ->
+    rabbit_amqp1_0_framing0:number_for(X).
+
+%% Wrap a raw descriptor as a wire term: strings become symbols,
+%% numbers become ulongs. Anything else is a function_clause error.
+descriptor(Symbol) when is_list(Symbol) ->
+    {symbol, Symbol};
+descriptor(Number) when is_number(Number) ->
+    {ulong, Number}.
+
+
+%% Pretty-print helper: render a framing record as {Tag, [{FieldName,
+%% PrettyValue}, ...]}, recursing into nested records. Tuples the
+%% generated code does not know, and non-tuples, pass through as-is.
+pprint(Thing) when is_tuple(Thing) ->
+    case rabbit_amqp1_0_framing0:fields(Thing) of
+        unknown -> Thing;
+        Names   -> [T|L] = tuple_to_list(Thing),
+                   {T, lists:zip(Names, [pprint(I) || I <- L])}
+    end;
+pprint(Other) -> Other.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_incoming_link).
+
+-export([attach/3, transfer/4]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+
+%% Just make these constant for the time being.
+-define(INCOMING_CREDIT, 65536).
+
+-record(incoming_link, {name, exchange, routing_key,
+ delivery_id = undefined,
+ delivery_count = 0,
+ send_settle_mode = undefined,
+ recv_settle_mode = undefined,
+ credit_used = ?INCOMING_CREDIT div 2,
+ msg_acc = [],
+ route_state}).
+
+%% Handle an incoming-link attach. Resolves the target to a RabbitMQ
+%% exchange/routing key (declaring via DCh, the "direct" channel as
+%% needed), decides whether publishes need confirms based on the
+%% sender's settle mode, and replies with our attach + an initial
+%% flow granting ?INCOMING_CREDIT. Returns {ok, [Attach, Flow],
+%% Link, Confirm}; on a bad target it raises a protocol error.
+attach(#'v1_0.attach'{name = Name,
+                      handle = Handle,
+                      source = Source,
+                      snd_settle_mode = SndSettleMode,
+                      rcv_settle_mode = RcvSettleMode,
+                      target = Target,
+                      initial_delivery_count = {uint, InitTransfer}},
+       BCh, DCh) ->
+    %% TODO associate link name with target
+    case ensure_target(Target,
+                       #incoming_link{
+                         name        = Name,
+                         route_state = rabbit_routing_util:init_state() },
+                       DCh) of
+        {ok, ServerTarget,
+         IncomingLink = #incoming_link{ delivery_count = InitTransfer }} ->
+            {_, _Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source),
+            %% Default is mixed: settled transfers need no confirm,
+            %% anything else gets publisher confirms on BCh so we can
+            %% send dispositions.
+            Confirm =
+                case SndSettleMode of
+                    ?V_1_0_SENDER_SETTLE_MODE_SETTLED ->
+                        false;
+                    _ when SndSettleMode == undefined;
+                           SndSettleMode == ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED;
+                           SndSettleMode == ?V_1_0_SENDER_SETTLE_MODE_MIXED ->
+                        amqp_channel:register_confirm_handler(BCh, self()),
+                        rabbit_amqp1_0_channel:call(BCh, #'confirm.select'{}),
+                        true
+                end,
+            %% Grant the sender its full credit up front.
+            Flow = #'v1_0.flow'{ handle      = Handle,
+                                 link_credit = {uint, ?INCOMING_CREDIT},
+                                 drain       = false,
+                                 echo        = false },
+            Attach = #'v1_0.attach'{
+              name = Name,
+              handle = Handle,
+              source = Source,
+              snd_settle_mode = SndSettleMode,
+              rcv_settle_mode = RcvSettleMode,
+              target = ServerTarget,
+              initial_delivery_count = undefined, % must be, I am the receiver
+              role = ?RECV_ROLE}, %% server is receiver
+            IncomingLink1 =
+                IncomingLink#incoming_link{recv_settle_mode = RcvSettleMode},
+            {ok, [Attach, Flow], IncomingLink1, Confirm};
+        {error, Reason} ->
+            rabbit_log:warning("AMQP 1.0 attach rejected ~p~n", [Reason]),
+            %% TODO proper link establishment protocol here?
+            protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
+                               "Attach rejected: ~p", [Reason])
+    end.
+
+%% Record the delivery-id of the first transfer frame of a delivery;
+%% continuation frames may omit it or must repeat the same value —
+%% any other combination is a function_clause crash by design.
+set_delivery_id({uint, D},
+                #incoming_link{delivery_id = undefined} = Link) ->
+    Link#incoming_link{delivery_id = D};
+set_delivery_id(DeliveryId,
+                #incoming_link{delivery_id = D} = Link)
+  when DeliveryId == {uint, D} orelse DeliveryId == undefined ->
+    Link.
+
+%% Combine the 'settled' flag of the current transfer frame with the
+%% mode recorded from earlier frames of the same delivery. Both
+%% absent defaults to unsettled (false); one present wins; both
+%% present must agree, otherwise this crashes (function_clause).
+effective_send_settle_mode(undefined, undefined) ->
+    false;
+effective_send_settle_mode(undefined, Mode) when is_boolean(Mode) ->
+    Mode;
+effective_send_settle_mode(Mode, Prior)
+  when is_boolean(Mode),
+       Prior =:= undefined orelse Prior =:= Mode ->
+    Mode.
+
+%% Combine the rcv-settle-mode of the current transfer frame with the
+%% mode recorded at attach time: the frame's value wins, then the
+%% recorded value, then the protocol default (first).
+effective_recv_settle_mode(undefined, undefined) ->
+    ?V_1_0_RECEIVER_SETTLE_MODE_FIRST;
+effective_recv_settle_mode(undefined, Mode) ->
+    Mode;
+effective_recv_settle_mode(Mode, _) ->
+    Mode.
+
+% TODO: validate effective settle modes against
+% those declared during attach
+
+% TODO: handle aborted transfers
+
+%% Handle a transfer frame. With more=true, just accumulate the
+%% payload fragment and remember delivery-id/settled for the final
+%% frame.
+transfer(#'v1_0.transfer'{delivery_id = DeliveryId,
+                          more        = true,
+                          settled     = Settled}, MsgPart,
+         #incoming_link{msg_acc          = MsgAcc,
+                        send_settle_mode = SSM} = Link, _BCh) ->
+    {ok, set_delivery_id(
+           DeliveryId,
+           Link#incoming_link{msg_acc = [MsgPart | MsgAcc],
+                              send_settle_mode =
+                                  effective_send_settle_mode(Settled, SSM)})};
+%% Final (or only) frame of a delivery: assemble the full message,
+%% publish it, and hand back flow/disposition information.
+transfer(#'v1_0.transfer'{delivery_id     = DeliveryId0,
+                          settled         = Settled,
+                          rcv_settle_mode = RcvSettleMode,
+                          handle          = Handle},
+         MsgPart,
+         #incoming_link{exchange         = X,
+                        routing_key      = LinkRKey,
+                        delivery_count   = Count,
+                        credit_used      = CreditUsed,
+                        msg_acc          = MsgAcc,
+                        send_settle_mode = SSM,
+                        recv_settle_mode = RSM} = Link, BCh) ->
+    %% Fragments were accumulated in reverse order.
+    MsgBin = iolist_to_binary(lists:reverse([MsgPart | MsgAcc])),
+    ?DEBUG("Inbound content:~n  ~p~n",
+           [[rabbit_amqp1_0_framing:pprint(Section) ||
+                Section <- rabbit_amqp1_0_framing:decode_bin(MsgBin)]]),
+    {MsgRKey, Msg} = rabbit_amqp1_0_message:assemble(MsgBin),
+    %% A routing key fixed at attach time overrides the per-message one.
+    RKey = case LinkRKey of
+               undefined -> MsgRKey;
+               _         -> LinkRKey
+           end,
+    rabbit_amqp1_0_channel:cast_flow(
+      BCh, #'basic.publish'{exchange    = X,
+                            routing_key = RKey}, Msg),
+    %% Top the sender's credit back up once half of it is consumed.
+    {SendFlow, CreditUsed1} = case CreditUsed - 1 of
+                                  C when C =< 0 ->
+                                      {true,  ?INCOMING_CREDIT div 2};
+                                  D ->
+                                      {false, D}
+                              end,
+    %% Validates the id against earlier frames and extracts it.
+    #incoming_link{delivery_id = DeliveryId} =
+        set_delivery_id(DeliveryId0, Link),
+    %% Reset per-delivery state, bump the delivery count (serial
+    %% arithmetic per the spec).
+    NewLink = Link#incoming_link{
+                delivery_id      = undefined,
+                send_settle_mode = undefined,
+                delivery_count   = rabbit_amqp1_0_util:serial_add(Count, 1),
+                credit_used      = CreditUsed1,
+                msg_acc          = []},
+    Reply = case SendFlow of
+                true  -> ?DEBUG("sending flow for incoming ~p", [NewLink]),
+                         [incoming_flow(NewLink, Handle)];
+                false -> []
+            end,
+    EffectiveSendSettleMode = effective_send_settle_mode(Settled, SSM),
+    EffectiveRecvSettleMode = effective_recv_settle_mode(RcvSettleMode, RSM),
+    %% rcv-settle-mode=second on unsettled deliveries is unsupported.
+    case not EffectiveSendSettleMode andalso
+         EffectiveRecvSettleMode =:= ?V_1_0_RECEIVER_SETTLE_MODE_SECOND of
+        false -> ok;
+        true  -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                                "rcv-settle-mode second not supported", [])
+    end,
+    {message, Reply, NewLink, DeliveryId,
+     EffectiveSendSettleMode}.
+
+%% TODO default-outcome and outcomes, dynamic lifetimes
+
+%% Resolve the attach target to a RabbitMQ endpoint, declaring it on
+%% the direct channel DCh if necessary. Returns {ok, Target, Link1}
+%% with the exchange and (optional) routing key filled in, or
+%% {error, Reason}. Dynamic targets raise a protocol error.
+ensure_target(Target = #'v1_0.target'{address       = Address,
+                                      dynamic       = Dynamic,
+                                      durable       = Durable,
+                                      %% TODO
+                                      expiry_policy = _ExpiryPolicy,
+                                      %% TODO
+                                      timeout       = _Timeout},
+              Link = #incoming_link{ route_state = RouteState }, DCh) ->
+    DeclareParams = [{durable,        rabbit_amqp1_0_link_util:durable(Durable)},
+                     {check_exchange, true}],
+    case Dynamic of
+        true ->
+            protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                           "Dynamic targets not supported", []);
+        _ ->
+            ok
+    end,
+    case Address of
+        {utf8, Destination} ->
+            case rabbit_routing_util:parse_endpoint(Destination, true) of
+                {ok, Dest} ->
+                    %% Declaration failures surface as 1.0 protocol
+                    %% errors via convert_error.
+                    {ok, _Queue, RouteState1} =
+                        rabbit_amqp1_0_channel:convert_error(
+                          fun () ->
+                                  rabbit_routing_util:ensure_endpoint(
+                                    dest, DCh, Dest, DeclareParams,
+                                    RouteState)
+                          end),
+                    {XName, RK} = rabbit_routing_util:parse_routing(Dest),
+                    {ok, Target, Link#incoming_link{
+                                   route_state = RouteState1,
+                                   exchange    = list_to_binary(XName),
+                                   %% Empty routing key means "use the
+                                   %% message's own key" downstream.
+                                   routing_key = case RK of
+                                                     undefined -> undefined;
+                                                     []        -> undefined;
+                                                     _         -> list_to_binary(RK)
+                                                 end}};
+                {error, _} = E ->
+                    E
+            end;
+        _Else ->
+            {error, {unknown_address, Address}}
+    end.
+
+%% Build a flow frame replenishing the sender's link credit to the
+%% full ?INCOMING_CREDIT, echoing our current delivery count.
+incoming_flow(#incoming_link{ delivery_count = Count }, Handle) ->
+    #'v1_0.flow'{handle         = Handle,
+                 delivery_count = {uint, Count},
+                 link_credit    = {uint, ?INCOMING_CREDIT}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_link_util).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([outcomes/1, ctag_to_handle/1, handle_to_ctag/1, durable/1]).
+
+-define(EXCHANGE_SUB_LIFETIME, "delete-on-close").
+-define(DEFAULT_OUTCOME, #'v1_0.released'{}).
+-define(OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED,
+ ?V_1_0_SYMBOL_REJECTED,
+ ?V_1_0_SYMBOL_RELEASED]).
+
+%% Extract the default outcome and supported outcomes from a source
+%% terminus, substituting our defaults where absent. Returns
+%% {DefaultOutcome, {array, symbol, OutcomeSymbols}}; requesting an
+%% outcome outside ?OUTCOMES raises a not-implemented protocol error.
+outcomes(Source) ->
+    {DefaultOutcome, Outcomes} =
+        case Source of
+            #'v1_0.source' {
+                      default_outcome = DO,
+                      outcomes = Os
+                     } ->
+                DO1 = case DO of
+                          undefined -> ?DEFAULT_OUTCOME;
+                          _         -> DO
+                      end,
+                Os1 = case Os of
+                          undefined -> ?OUTCOMES;
+                          _         -> Os
+                      end,
+                {DO1, Os1};
+            _ ->
+                {?DEFAULT_OUTCOME, ?OUTCOMES}
+        end,
+    case [O || O <- Outcomes, not lists:member(O, ?OUTCOMES)] of
+        []  -> {DefaultOutcome, {array, symbol, [X || {symbol, X} <- Outcomes]}};
+        Bad -> rabbit_amqp1_0_util:protocol_error(
+                 ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                 "Outcomes not supported: ~p", [Bad])
+    end.
+
+%% Derive a consumer tag from a link handle: the literal prefix
+%% "ctag-" followed by the handle as a 32-bit big-endian integer.
+%% Inverse of ctag_to_handle/1.
+handle_to_ctag({uint, Handle}) ->
+    <<"ctag-", Handle:32/integer>>.
+
+%% Recover the link handle from a consumer tag built by
+%% handle_to_ctag/1; any other tag shape is a function_clause crash.
+ctag_to_handle(<<"ctag-", Handle:32/integer>>) ->
+    {uint, Handle}.
+
+%% Map a terminus durability field to RabbitMQ's boolean durability.
+durable(undefined)                                  -> false; %% default: none
+durable(?V_1_0_TERMINUS_DURABILITY_NONE)            -> false;
+%% This one means "existence of the thing is durable, but unacked msgs
+%% aren't". We choose to upgrade that.
+durable(?V_1_0_TERMINUS_DURABILITY_CONFIGURATION)   -> true;
+durable(?V_1_0_TERMINUS_DURABILITY_UNSETTLED_STATE) -> true.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_message).
+
+-export([assemble/1, annotated_message/3]).
+
+-define(PROPERTIES_HEADER, <<"x-amqp-1.0-properties">>).
+-define(APP_PROPERTIES_HEADER, <<"x-amqp-1.0-app-properties">>).
+-define(MESSAGE_ANNOTATIONS_HEADER, <<"x-amqp-1.0-message-annotations">>).
+-define(FOOTER, <<"x-amqp-1.0-footer">>).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+%% Parse an encoded AMQP 1.0 message into a routing key plus an 0-9-1
+%% #amqp_msg{} (properties + payload), by walking the message sections
+%% with the assemble/4 state machine below.
+assemble(MsgBin) ->
+ {RKey, Props, Content} = assemble(header, {<<"">>, #'P_basic'{}, []},
+ decode_section(MsgBin), MsgBin),
+ {RKey, #amqp_msg{props = Props, payload = Content}}.
+
+%% State-machine walk over the decoded sections of a 1.0 message.
+%% The first argument names the section we expect next; every section
+%% is optional, so each state has a fall-through clause that advances
+%% to the next state without consuming the current section.
+%% {R, P, C} accumulates {RoutingKey, #'P_basic'{}, Content}; Uneaten
+%% is the raw binary the current section was decoded from, used by
+%% chunk/2 to recover a section's exact encoded bytes.
+assemble(header, {R, P, C}, {H = #'v1_0.header'{}, Rest}, _Uneaten) ->
+ assemble(delivery_annotations, {R, translate_header(H, P), C},
+ decode_section(Rest), Rest);
+assemble(header, {R, P, C}, Else, Uneaten) ->
+ assemble(delivery_annotations, {R, P, C}, Else, Uneaten);
+
+assemble(delivery_annotations, RPC, {#'v1_0.delivery_annotations'{}, Rest},
+ _Uneaten) ->
+ %% ignore delivery annotations for now
+ %% TODO: handle "rejected" error
+ assemble(message_annotations, RPC, decode_section(Rest), Rest);
+assemble(delivery_annotations, RPC, Else, Uneaten) ->
+ assemble(message_annotations, RPC, Else, Uneaten);
+
+%% Message annotations are preserved verbatim in an 0-9-1 header so
+%% they can be replayed on the way back out (see annotated_message/3).
+assemble(message_annotations, {R, P = #'P_basic'{headers = Headers}, C},
+ {#'v1_0.message_annotations'{}, Rest}, Uneaten) ->
+ MsgAnnoBin = chunk(Rest, Uneaten),
+ assemble(properties, {R, P#'P_basic'{
+ headers = set_header(?MESSAGE_ANNOTATIONS_HEADER,
+ MsgAnnoBin, Headers)}, C},
+ decode_section(Rest), Rest);
+assemble(message_annotations, {R, P, C}, Else, Uneaten) ->
+ assemble(properties, {R, P, C}, Else, Uneaten);
+
+%% Properties both map onto #'P_basic'{} fields and are kept verbatim
+%% (the raw encoded section) in a header, for lossless round-tripping.
+assemble(properties, {_R, P, C}, {X = #'v1_0.properties'{}, Rest}, Uneaten) ->
+ PropsBin = chunk(Rest, Uneaten),
+ assemble(app_properties, {routing_key(X),
+ translate_properties(X, PropsBin, P), C},
+ decode_section(Rest), Rest);
+assemble(properties, {R, P, C}, Else, Uneaten) ->
+ assemble(app_properties, {R, P, C}, Else, Uneaten);
+
+assemble(app_properties, {R, P = #'P_basic'{headers = Headers}, C},
+ {#'v1_0.application_properties'{}, Rest}, Uneaten) ->
+ AppPropsBin = chunk(Rest, Uneaten),
+ assemble(body, {R, P#'P_basic'{
+ headers = set_header(?APP_PROPERTIES_HEADER,
+ AppPropsBin, Headers)}, C},
+ decode_section(Rest), Rest);
+assemble(app_properties, {R, P, C}, Else, Uneaten) ->
+ assemble(body, {R, P, C}, Else, Uneaten);
+
+%% The only 'interoperable' content is a single amqp-data section.
+%% Everything else we will leave as-is. We still have to parse the
+%% sections one-by-one, however, to see when we hit the footer or
+%% whatever comes next.
+
+%% NB we do not strictly enforce the (slightly random) rules
+%% pertaining to body sections, that is:
+%% - one amqp-value; OR
+%% - one or more amqp-sequence; OR
+%% - one or more amqp-data.
+%% We allow any number of each kind, in any permutation.
+
+assemble(body, {R, P, _}, {#'v1_0.data'{content = Content}, Rest}, Uneaten) ->
+ Chunk = chunk(Rest, Uneaten),
+ assemble(amqp10body, {R, set_1_0_type(<<"binary">>, P),
+ {data, Content, Chunk}},
+ decode_section(Rest), Rest);
+assemble(body, {R, P, C}, Else, Uneaten) ->
+ assemble(amqp10body, {R, P, C}, Else, Uneaten);
+
+%% Any further body section demotes the message from the single-data
+%% 'binary' representation to an opaque amqp-1.0 section list.
+assemble(amqp10body, {R, P, C}, {{Type, _}, Rest}, Uneaten)
+ when Type =:= 'v1_0.data' orelse
+ Type =:= 'v1_0.amqp_sequence' orelse
+ Type =:= 'v1_0.amqp_value' ->
+ Encoded = chunk(Rest, Uneaten),
+ assemble(amqp10body,
+ {R, set_1_0_type(<<"amqp-1.0">>, P), add_body_section(Encoded, C)},
+ decode_section(Rest), Rest);
+assemble(amqp10body, {R, P, C}, Else, Uneaten) ->
+ assemble(footer, {R, P, compile_body(C)}, Else, Uneaten);
+
+%% A footer must be the final section; anything after it is an error.
+assemble(footer, {R, P = #'P_basic'{headers = Headers}, C},
+ {#'v1_0.footer'{}, <<>>}, Uneaten) ->
+ {R, P#'P_basic'{headers = set_header(?FOOTER, Uneaten, Headers)}, C};
+assemble(footer, {R, P, C}, none, _) ->
+ {R, P, C};
+assemble(footer, _, Else, _) ->
+ exit({unexpected_trailing_sections, Else});
+
+assemble(Expected, _, Actual, _) ->
+ exit({expected_section, Expected, Actual}).
+
+%% Decode the next section from a message binary; 'none' on exhaustion,
+%% otherwise {DecodedSection, RemainingBinary}.
+decode_section(<<>>) ->
+ none;
+decode_section(MsgBin) ->
+ {AmqpValue, Rest} = rabbit_amqp1_0_binary_parser:parse(MsgBin),
+ {rabbit_amqp1_0_framing:decode(AmqpValue), Rest}.
+
+%% Recover the exact encoded bytes of the section just parsed: Uneaten
+%% is the binary before parsing, Rest what was left after, so the
+%% section occupies the leading size(Uneaten) - size(Rest) bytes.
+chunk(Rest, Uneaten) ->
+ ChunkLen = size(Uneaten) - size(Rest),
+ <<Chunk:ChunkLen/binary, _ActuallyRest/binary>> = Uneaten,
+ Chunk.
+
+%% Accumulate encoded body sections in reverse order. A previous
+%% single-data body {data, _, Bin} is demoted to its raw bytes when a
+%% second section arrives; compile_body/1 undoes the reversal (or
+%% returns the bare content for the single-data case).
+add_body_section(C, {data, _, Bin}) ->
+ [C, Bin];
+add_body_section(C, Cs) ->
+ [C | Cs].
+
+compile_body({data, Content, _}) ->
+ Content;
+compile_body(Sections) ->
+ lists:reverse(Sections).
+
+%% Fold a 1.0 header section into 0-9-1 basic properties: durable maps
+%% to delivery_mode 2 (anything else, including undefined, to 1), ttl
+%% to the expiration string; type/app_id/cluster_id are cleared.
+translate_header(Header10, Props) ->
+ Props#'P_basic'{
+ delivery_mode = case Header10#'v1_0.header'.durable of
+ true -> 2;
+ _ -> 1
+ end,
+ priority = unwrap(Header10#'v1_0.header'.priority),
+ expiration = to_expiration(Header10#'v1_0.header'.ttl),
+ type = undefined,
+ app_id = undefined,
+ cluster_id = undefined}.
+
+%% Fold a 1.0 properties section into 0-9-1 basic properties, also
+%% stashing the raw encoded section in a header for round-tripping.
+%% A reply-to of the form "/queue/Q" is unwrapped to the bare queue
+%% name; other forms pass through unchanged.
+translate_properties(Props10, Props10Bin,
+ Props = #'P_basic'{headers = Headers}) ->
+ Props#'P_basic'{
+ headers = set_header(?PROPERTIES_HEADER, Props10Bin,
+ Headers),
+ content_type = unwrap(Props10#'v1_0.properties'.content_type),
+ content_encoding = unwrap(Props10#'v1_0.properties'.content_encoding),
+ correlation_id = unwrap(Props10#'v1_0.properties'.correlation_id),
+ reply_to = case unwrap(Props10#'v1_0.properties'.reply_to) of
+ <<"/queue/", Q/binary>> -> Q;
+ Else -> Else
+ end,
+ message_id = unwrap(Props10#'v1_0.properties'.message_id),
+ user_id = unwrap(Props10#'v1_0.properties'.user_id),
+ timestamp = unwrap(Props10#'v1_0.properties'.creation_time)}.
+
+%% The 1.0 'subject' property carries the routing key.
+routing_key(Props10) ->
+ unwrap(Props10#'v1_0.properties'.subject).
+
+%% Strip the {Type, Value} wrapper from a decoded 1.0 primitive;
+%% 'undefined' (absent field) passes through.
+unwrap(undefined) -> undefined;
+unwrap({_Type, Thing}) -> Thing.
+
+%% Convert a 1.0 ttl/timestamp into the 0-9-1 expiration string (a
+%% binary of decimal digits), and back. A non-numeric expiration maps
+%% to 'undefined' rather than crashing, since 0-9-1 publishers may set
+%% arbitrary expiration strings.
+to_expiration(undefined) ->
+ undefined;
+to_expiration({timestamp, Num}) ->
+ list_to_binary(integer_to_list(Num)).
+
+from_expiration(undefined) ->
+ undefined;
+from_expiration(MaybeIntegerBin) ->
+ %% scoped try/catch rather than old-style 'catch': only a badarg
+ %% from list_to_integer is a non-numeric string; anything else
+ %% should propagate.
+ try
+ {timestamp, list_to_integer(binary_to_list(MaybeIntegerBin))}
+ catch error:badarg ->
+ undefined
+ end.
+
+%% Insert/replace a longstr value in an 0-9-1 headers table, creating
+%% the table when the properties had none ('undefined').
+set_header(Header, Value, undefined) ->
+ set_header(Header, Value, []);
+set_header(Header, Value, Headers) ->
+ rabbit_misc:set_table_value(Headers, Header, longstr, Value).
+
+%% Record which representation the payload uses (<<"binary">> or
+%% <<"amqp-1.0">>) in the basic-properties 'type' field.
+set_1_0_type(Type, Props = #'P_basic'{}) ->
+ Props#'P_basic'{type = Type}.
+
+%%--------------------------------------------------------------------
+
+%% TODO create delivery-annotations
+
+%% Re-encode an 0-9-1 delivery as an AMQP 1.0 message (an iolist of
+%% encoded sections: header, message-annotations, properties,
+%% application-properties, body, footer). Sections that were stashed
+%% verbatim in headers by assemble/4 are replayed byte-for-byte;
+%% otherwise they are synthesised from the basic properties.
+annotated_message(RKey, #'basic.deliver'{redelivered = Redelivered},
+ #amqp_msg{props = Props,
+ payload = Content}) ->
+ #'P_basic'{ headers = Headers } = Props,
+ Header10 = #'v1_0.header'
+ {durable = case Props#'P_basic'.delivery_mode of
+ 2 -> true;
+ _ -> false
+ end,
+ priority = wrap(ubyte, Props#'P_basic'.priority),
+ ttl = from_expiration(Props#'P_basic'.expiration),
+ first_acquirer = not Redelivered,
+ delivery_count = undefined},
+ HeadersBin = rabbit_amqp1_0_framing:encode_bin(Header10),
+ MsgAnnoBin =
+ case table_lookup(Headers, ?MESSAGE_ANNOTATIONS_HEADER) of
+ undefined -> <<>>;
+ {_, MABin} -> MABin
+ end,
+ %% Prefer the original encoded properties section, if we kept one.
+ PropsBin =
+ case table_lookup(Headers, ?PROPERTIES_HEADER) of
+ {_, Props10Bin} ->
+ Props10Bin;
+ undefined ->
+ Props10 = #'v1_0.properties'{
+ message_id = wrap(utf8, Props#'P_basic'.message_id),
+ user_id = wrap(utf8, Props#'P_basic'.user_id),
+ to = undefined,
+ subject = wrap(utf8, RKey),
+ reply_to = case Props#'P_basic'.reply_to of
+ undefined ->
+ undefined;
+ _ ->
+ wrap(utf8,
+ <<"/queue/",
+ (Props#'P_basic'.reply_to)/binary>>)
+ end,
+ correlation_id = wrap(utf8, Props#'P_basic'.correlation_id),
+ content_type = wrap(symbol, Props#'P_basic'.content_type),
+ content_encoding = wrap(symbol, Props#'P_basic'.content_encoding),
+ creation_time = wrap(timestamp, Props#'P_basic'.timestamp)},
+ rabbit_amqp1_0_framing:encode_bin(Props10)
+ end,
+ AppPropsBin =
+ case table_lookup(Headers, ?APP_PROPERTIES_HEADER) of
+ {_, AppProps10Bin} ->
+ AppProps10Bin;
+ undefined ->
+ []
+ end,
+ %% A payload marked <<"amqp-1.0">> is already a sequence of encoded
+ %% body sections; anything else is wrapped in a single data section.
+ DataBin = case Props#'P_basic'.type of
+ <<"amqp-1.0">> ->
+ Content;
+ _Else -> % e.g., <<"binary">> if originally from 1.0
+ rabbit_amqp1_0_framing:encode_bin(
+ #'v1_0.data'{content = Content})
+ end,
+ FooterBin =
+ case table_lookup(Headers, ?FOOTER) of
+ undefined -> <<>>;
+ {_, FBin} -> FBin
+ end,
+ [HeadersBin, MsgAnnoBin, PropsBin, AppPropsBin, DataBin, FooterBin].
+
+%% Inverse of unwrap/1: tag a value with its 1.0 primitive type,
+%% leaving 'undefined' (absent field) untouched.
+wrap(_Type, undefined) ->
+ undefined;
+wrap(Type, Val) ->
+ {Type, Val}.
+
+%% table_lookup that tolerates properties with no headers table.
+table_lookup(undefined, _) -> undefined;
+table_lookup(Headers, Header) -> rabbit_misc:table_lookup(Headers, Header).
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_outgoing_link).
+
+-export([attach/3, delivery/6, transferred/3, credit_drained/4, flow/3]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [protocol_error/3, serial_add/2]).
+-import(rabbit_amqp1_0_link_util, [handle_to_ctag/1]).
+
+-define(INIT_TXFR_COUNT, 0).
+-define(DEFAULT_SEND_SETTLED, false).
+
+-record(outgoing_link, {queue,
+ delivery_count = 0,
+ send_settled,
+ default_outcome,
+ route_state}).
+
+%% Handle an incoming attach for a sending (us -> peer) link: resolve
+%% the source address to a queue (declaring/binding via DCh as needed),
+%% start a basic.consume with credit 0 on BCh, and reply with our own
+%% attach frame echoing the negotiated settle modes and outcomes.
+%% Returns {ok, [AttachReply], #outgoing_link{}} on success; on a
+%% source-resolution error, replies with an attach whose source is
+%% undefined (TODO below: proper detach handling).
+attach(#'v1_0.attach'{name = Name,
+ handle = Handle,
+ source = Source,
+ snd_settle_mode = SndSettleMode,
+ rcv_settle_mode = RcvSettleMode}, BCh, DCh) ->
+ {DefaultOutcome, Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source),
+ SndSettled =
+ case SndSettleMode of
+ ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> true;
+ ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED -> false;
+ _ -> ?DEFAULT_SEND_SETTLED
+ end,
+ DOSym = rabbit_amqp1_0_framing:symbol_for(DefaultOutcome),
+ case ensure_source(Source,
+ #outgoing_link{delivery_count = ?INIT_TXFR_COUNT,
+ send_settled = SndSettled,
+ default_outcome = DOSym,
+ route_state =
+ rabbit_routing_util:init_state()},
+ DCh) of
+ {ok, Source1, OutgoingLink = #outgoing_link{queue = QueueName}} ->
+ CTag = handle_to_ctag(Handle),
+ case rabbit_amqp1_0_channel:subscribe(
+ BCh, #'basic.consume'{
+ queue = QueueName,
+ consumer_tag = CTag,
+ %% we will ack when we've transferred
+ %% a message, or when we get an ack
+ %% from the client.
+ no_ack = false,
+ %% TODO exclusive?
+ exclusive = false,
+ arguments = [{<<"x-credit">>, table,
+ [{<<"credit">>, long, 0},
+ {<<"drain">>, boolean, false}]}]},
+ self()) of
+ #'basic.consume_ok'{} ->
+ %% TODO we should avoid the race by getting the queue to send
+ %% attach back, but a.t.m. it would use the wrong codec.
+ {ok, [#'v1_0.attach'{
+ name = Name,
+ handle = Handle,
+ initial_delivery_count = {uint, ?INIT_TXFR_COUNT},
+ snd_settle_mode =
+ case SndSettled of
+ true -> ?V_1_0_SENDER_SETTLE_MODE_SETTLED;
+ false -> ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED
+ end,
+ rcv_settle_mode = RcvSettleMode,
+ source = Source1#'v1_0.source'{
+ default_outcome = DefaultOutcome,
+ outcomes = Outcomes
+ },
+ role = ?SEND_ROLE}], OutgoingLink};
+ Fail ->
+ protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Consume failed: ~p", [Fail])
+ end;
+ {error, _Reason} ->
+ %% TODO Deal with this properly -- detach and what have you
+ {ok, [#'v1_0.attach'{source = undefined}]}
+ end.
+
+%% React to the queue reporting a credit drain: advance our delivery
+%% count by the drained amount, tell the peer (flow with zero credit,
+%% zero available, drain=true) and store the new count in the link.
+credit_drained(#'basic.credit_drained'{credit_drained = CreditDrained},
+ Handle, Link = #outgoing_link{delivery_count = Count0},
+ WriterPid) ->
+ Count = Count0 + CreditDrained,
+ %% The transfer count that is given by the queue should be at
+ %% least that we have locally, since we will either have received
+ %% all the deliveries and transferred them, or the queue will have
+ %% advanced it due to drain. So we adopt the queue's idea of the
+ %% count.
+ %% TODO account for it not being there any more
+ F = #'v1_0.flow'{ handle = Handle,
+ delivery_count = {uint, Count},
+ link_credit = {uint, 0},
+ available = {uint, 0},
+ drain = true },
+ rabbit_amqp1_0_writer:send_command(WriterPid, F),
+ Link#outgoing_link{delivery_count = Count}.
+
+%% Handle a flow frame from the peer for this outgoing link: convert
+%% the peer's (delivery-count, link-credit) pair into a local credit
+%% figure per spec section 2.6.7 and forward it to the queue via
+%% basic.credit. Replies with an echo flow frame unless the queue
+%% reports available = -1 (meaning it could not tell us).
+flow(#outgoing_link{delivery_count = LocalCount},
+ #'v1_0.flow'{handle = Handle,
+ delivery_count = Count0,
+ link_credit = {uint, RemoteCredit},
+ drain = Drain0}, BCh) ->
+ {uint, RemoteCount} = default(Count0, {uint, LocalCount}),
+ Drain = default(Drain0, false),
+ %% See section 2.6.7
+ LocalCredit = RemoteCount + RemoteCredit - LocalCount,
+ CTag = handle_to_ctag(Handle),
+ #'basic.credit_ok'{available = Available} =
+ rabbit_amqp1_0_channel:call(
+ BCh, #'basic.credit'{consumer_tag = CTag,
+ credit = LocalCredit,
+ drain = Drain}),
+ case Available of
+ -1 ->
+ {ok, []};
+ %% We don't know - probably because this flow relates
+ %% to a handle that does not yet exist
+ %% TODO is this an error?
+ _ ->
+ {ok, [#'v1_0.flow'{
+ handle = Handle,
+ delivery_count = {uint, LocalCount},
+ link_credit = {uint, LocalCredit},
+ available = {uint, Available},
+ drain = Drain}]}
+ end.
+
+%% Substitute a default for an absent ('undefined') frame field.
+default(undefined, Default) -> Default;
+default(Thing, _Default) -> Thing.
+
+%% Resolve the source terminus of an attach to a concrete queue,
+%% declaring the endpoint and binding as required (using the dedicated
+%% channel DCh). Returns {ok, Source, Link#outgoing_link{queue = Q}} on
+%% success or {error, Reason}; dynamic sources raise a not-implemented
+%% protocol error.
+ensure_source(Source = #'v1_0.source'{address = Address,
+ dynamic = Dynamic,
+ durable = Durable,
+ %% TODO
+ expiry_policy = _ExpiryPolicy,
+ %% TODO
+ timeout = _Timeout},
+ Link = #outgoing_link{ route_state = RouteState }, DCh) ->
+ DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)},
+ {check_exchange, true}],
+ case Dynamic of
+ true -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Dynamic sources not supported", []);
+ _ -> ok
+ end,
+ case Address of
+ {utf8, Destination} ->
+ case rabbit_routing_util:parse_endpoint(Destination, false) of
+ {ok, Dest} ->
+ {ok, Queue, RouteState1} =
+ rabbit_amqp1_0_channel:convert_error(
+ fun() ->
+ rabbit_routing_util:ensure_endpoint(
+ source, DCh, Dest, DeclareParams,
+ RouteState)
+ end),
+ ER = rabbit_routing_util:parse_routing(Dest),
+ ok = rabbit_routing_util:ensure_binding(Queue, ER, DCh),
+ {ok, Source, Link#outgoing_link{route_state = RouteState1,
+ queue = Queue}};
+ %% propagate a parse failure as {error, _} so attach/3
+ %% can reply gracefully, rather than crashing with a
+ %% case_clause on an unparsable address
+ {error, _} = Err ->
+ Err
+ end;
+ _ ->
+ {error, {unknown_address, Address}}
+ end.
+
+%% Turn an 0-9-1 delivery into one or more 1.0 transfer frames for this
+%% link: allocate the next delivery-id, record the outgoing delivery in
+%% the session, build the transfer performative and the annotated 1.0
+%% message, and split it across frames if it exceeds FrameMax.
+%% Returns {ok, Frames, UpdatedSession}.
+delivery(Deliver = #'basic.deliver'{delivery_tag = DeliveryTag,
+ routing_key = RKey},
+ Msg, FrameMax, Handle, Session,
+ #outgoing_link{send_settled = SendSettled,
+ default_outcome = DefaultOutcome}) ->
+ DeliveryId = rabbit_amqp1_0_session:next_delivery_id(Session),
+ Session1 = rabbit_amqp1_0_session:record_outgoing(
+ DeliveryTag, SendSettled, DefaultOutcome, Session),
+ Txfr = #'v1_0.transfer'{handle = Handle,
+ delivery_tag = {binary, <<DeliveryTag:64>>},
+ delivery_id = {uint, DeliveryId},
+ %% The only message format defined in AMQP 1.0
+ message_format = {uint, 0},
+ settled = SendSettled,
+ resume = false,
+ more = false,
+ aborted = false,
+ %% TODO: actually batchable would be fine,
+ %% but in any case it's only a hint
+ batchable = false},
+ Msg1_0 = rabbit_amqp1_0_message:annotated_message(
+ RKey, Deliver, Msg),
+ ?DEBUG("Outbound content:~n ~p~n",
+ [[rabbit_amqp1_0_framing:pprint(Section) ||
+ Section <- rabbit_amqp1_0_framing:decode_bin(
+ iolist_to_binary(Msg1_0))]]),
+ %% TODO Ugh
+ %% Budget for content per frame = FrameMax minus the encoded size
+ %% of the transfer performative itself.
+ TLen = iolist_size(rabbit_amqp1_0_framing:encode_bin(Txfr)),
+ Frames = case FrameMax of
+ unlimited ->
+ [[Txfr, Msg1_0]];
+ _ ->
+ encode_frames(Txfr, Msg1_0, FrameMax - TLen, [])
+ end,
+ {ok, Frames, Session1}.
+
+%% Split message content across transfer frames of at most
+%% MaxContentLen payload bytes each. Intermediate frames carry the
+%% transfer with more=true (T1); the final frame reuses the original T,
+%% which has more=false. Errors out if the frame size leaves no room
+%% for any content at all.
+encode_frames(_T, _Msg, MaxContentLen, _Transfers) when MaxContentLen =< 0 ->
+ protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL,
+ "Frame size is too small by ~p bytes", [-MaxContentLen]);
+encode_frames(T, Msg, MaxContentLen, Transfers) ->
+ case iolist_size(Msg) > MaxContentLen of
+ true ->
+ <<Chunk:MaxContentLen/binary, Rest/binary>> =
+ iolist_to_binary(Msg),
+ T1 = T#'v1_0.transfer'{more = true},
+ encode_frames(T, Rest, MaxContentLen,
+ [[T1, Chunk] | Transfers]);
+ false ->
+ lists:reverse([[T, Msg] | Transfers])
+ end.
+
+%% Called after a message has been sent to the peer: for send-settled
+%% links we ack back to the queue immediately (the peer will not send a
+%% disposition); otherwise the ack happens when the peer settles.
+%% Advances the link's delivery count using serial arithmetic.
+transferred(DeliveryTag, Channel,
+ Link = #outgoing_link{ delivery_count = Count,
+ send_settled = SendSettled }) ->
+ if SendSettled ->
+ rabbit_amqp1_0_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = DeliveryTag });
+ true ->
+ ok
+ end,
+ Link#outgoing_link{delivery_count = serial_add(Count, 1)}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_reader).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include_lib("kernel/include/inet.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([init/2, mainloop/2]).
+
+%% TODO which of these are needed?
+-export([shutdown/2]).
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+-export([conserve_resources/3]).
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+
+-define(HANDSHAKE_TIMEOUT, 10).
+-define(NORMAL_TIMEOUT, 3).
+-define(CLOSING_TIMEOUT, 30).
+-define(SILENT_CLOSE_DELAY, 3).
+
+%%--------------------------------------------------------------------------
+
+-record(v1, {parent, sock, connection, callback, recv_len, pending_recv,
+ connection_state, queue_collector, heartbeater, helper_sup,
+ channel_sup_sup_pid, buf, buf_len, throttle}).
+
+-record(connection, {user, timeout_sec, frame_max, auth_mechanism, auth_state,
+ hostname}).
+
+-record(throttle, {alarmed_by, last_blocked_by, last_blocked_at}).
+
+-define(IS_RUNNING(State),
+ (State#v1.connection_state =:= running orelse
+ State#v1.connection_state =:= blocking orelse
+ State#v1.connection_state =:= blocked)).
+
+%%--------------------------------------------------------------------------
+
+%% Build this reader's initial #v1{} state from the tuple handed over
+%% by the 0-9-1 reader when the protocol header identified an AMQP 1.0
+%% client: connection starts in pre_init, expecting a handshake, with
+%% no user authenticated yet and throttling disabled.
+unpack_from_0_9_1({Parent, Sock,RecvLen, PendingRecv,
+ HelperSupPid, Buf, BufLen}) ->
+ #v1{parent = Parent,
+ sock = Sock,
+ callback = handshake,
+ recv_len = RecvLen,
+ pending_recv = PendingRecv,
+ connection_state = pre_init,
+ queue_collector = undefined,
+ heartbeater = none,
+ helper_sup = HelperSupPid,
+ buf = Buf,
+ buf_len = BufLen,
+ throttle = #throttle{alarmed_by = [],
+ last_blocked_by = none,
+ last_blocked_at = never},
+ connection = #connection{user = none,
+ timeout_sec = ?HANDSHAKE_TIMEOUT,
+ frame_max = ?FRAME_MIN_SIZE,
+ auth_mechanism = none,
+ auth_state = none}}.
+
+%% Ask a reader process to shut down, blocking until it has replied
+%% (handled in handle_other/2 as a '$gen_call').
+shutdown(Pid, Explanation) ->
+ gen_server:call(Pid, {shutdown, Explanation}, infinity).
+
+%% sys special-process callbacks: resume the main loop after a system
+%% message, terminate on request, and pass state through unchanged on
+%% code change.
+system_continue(Parent, Deb, State) ->
+ ?MODULE:mainloop(Deb, State#v1{parent = Parent}).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+%% rabbit_alarm callback: deliver resource alarms to the reader's
+%% mailbox; consumed by handle_other/2.
+conserve_resources(Pid, Source, Conserve) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+%% Server properties for the open frame, converted from the 0-9-1
+%% table form to a 1.0 map with symbol keys and utf8 values (the
+%% capabilities entry is 0-9-1 specific and is dropped).
+server_properties() ->
+ %% The atom doesn't match anything, it's just "not 0-9-1".
+ Raw = lists:keydelete(
+ <<"capabilities">>, 1, rabbit_reader:server_properties(amqp_1_0)),
+ {map, [{{symbol, K}, {utf8, V}} || {K, longstr, V} <- Raw]}.
+
+%%--------------------------------------------------------------------------
+
+%% Log under the 'connection' category at the given level.
+log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
+
+%% Wrap a socket operation so failures throw {inet_error, Reason}.
+inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
+
+%% Consume buffered socket data: while a receive is outstanding or the
+%% connection is blocked, just wait in mainloop; when the buffer holds
+%% fewer bytes than the current parser needs, re-arm {active, once} and
+%% wait; otherwise peel off recv_len bytes and feed them to
+%% handle_input/3 for the current callback state.
+recvloop(Deb, State = #v1{pending_recv = true}) ->
+ mainloop(Deb, State);
+recvloop(Deb, State = #v1{connection_state = blocked}) ->
+ mainloop(Deb, State);
+recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen})
+ when BufLen < RecvLen ->
+ ok = rabbit_net:setopts(Sock, [{active, once}]),
+ mainloop(Deb, State#v1{pending_recv = true});
+recvloop(Deb, State = #v1{recv_len = RecvLen, buf = Buf, buf_len = BufLen}) ->
+ %% buf is a reversed list of binaries; flatten only when it holds
+ %% more than one chunk.
+ {Data, Rest} = split_binary(case Buf of
+ [B] -> B;
+ _ -> list_to_binary(lists:reverse(Buf))
+ end, RecvLen),
+ recvloop(Deb, handle_input(State#v1.callback, Data,
+ State#v1{buf = [Rest],
+ buf_len = BufLen - RecvLen})).
+
+%% Block on the socket/mailbox: accumulate incoming data into the
+%% buffer, dispatch sys system messages, and hand every other Erlang
+%% message to handle_other/2. An abrupt close is only tolerated once
+%% the connection has reached the 'closed' state.
+mainloop(Deb, State = #v1{sock = Sock, buf = Buf, buf_len = BufLen}) ->
+ case rabbit_net:recv(Sock) of
+ {data, Data} ->
+ recvloop(Deb, State#v1{buf = [Data | Buf],
+ buf_len = BufLen + size(Data),
+ pending_recv = false});
+ closed when State#v1.connection_state =:= closed ->
+ ok;
+ closed ->
+ throw(connection_closed_abruptly);
+ {error, Reason} ->
+ throw({inet_error, Reason});
+ {other, {system, From, Request}} ->
+ sys:handle_system_msg(Request, From, State#v1.parent,
+ ?MODULE, Deb, State);
+ {other, Other} ->
+ case handle_other(Other, State) of
+ stop -> ok;
+ NewState -> recvloop(Deb, NewState)
+ end
+ end.
+
+%% Dispatch non-socket messages arriving in the reader's mailbox.
+%% Returns the (possibly updated) state, or the atom 'stop' to end the
+%% main loop; unknown messages crash the reader deliberately.
+handle_other({conserve_resources, Source, Conserve},
+ State = #v1{throttle = Throttle =
+ #throttle{alarmed_by = CR}}) ->
+ %% Track which resources are alarmed and re-evaluate throttling.
+ CR1 = case Conserve of
+ true -> lists:usort([Source | CR]);
+ false -> CR -- [Source]
+ end,
+ Throttle1 = Throttle#throttle{alarmed_by = CR1},
+ control_throttle(State#v1{throttle = Throttle1});
+handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
+ terminate(io_lib:format("broker forced connection closure "
+ "with reason '~w'", [Reason]), State),
+ %% this is what we are expected to do according to
+ %% http://www.erlang.org/doc/man/sys.html
+ %%
+ %% If we wanted to be *really* nice we should wait for a while for
+ %% clients to close the socket at their end, just as we do in the
+ %% ordinary error case. However, since this termination is
+ %% initiated by our parent it is probably more important to exit
+ %% quickly.
+ exit(Reason);
+handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
+ handle_dependent_exit(ChPid, Reason, State);
+handle_other(handshake_timeout, State)
+ when ?IS_RUNNING(State) orelse
+ State#v1.connection_state =:= closing orelse
+ State#v1.connection_state =:= closed ->
+ State;
+handle_other(handshake_timeout, State) ->
+ throw({handshake_timeout, State#v1.callback});
+handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
+ State;
+handle_other(heartbeat_timeout, #v1{connection_state = S}) ->
+ throw({heartbeat_timeout, S});
+handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
+ %% see shutdown/2; reply before deciding whether to stop the loop.
+ {ForceTermination, NewState} = terminate(Explanation, State),
+ gen_server:reply(From, ok),
+ case ForceTermination of
+ force -> stop;
+ normal -> NewState
+ end;
+handle_other({'$gen_cast', force_event_refresh}, State) ->
+ %% Ignore, the broker sent us this as it thinks we are a 0-9-1 connection
+ State;
+handle_other({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ control_throttle(State);
+handle_other(terminate_connection, State) ->
+ State;
+handle_other(Other, _State) ->
+ %% internal error -> something worth dying for
+ exit({unexpected_message, Other}).
+
+%% Move the input parser to a new callback state expecting Length
+%% bytes next (see handle_input/3 and recvloop/2).
+switch_callback(State, Callback, Length) ->
+ State#v1{callback = Callback, recv_len = Length}.
+
+%% Politely close a running connection (send the error to the peer and
+%% enter 'closed'); during handshake there is nothing to send, so the
+%% caller should stop the loop ('force').
+terminate(Reason, State) when ?IS_RUNNING(State) ->
+ {normal, handle_exception(State, 0,
+ {?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Connection forced: ~p~n", [Reason]})};
+terminate(_Reason, State) ->
+ {force, State}.
+
+%% Transition between running/blocking/blocked depending on whether
+%% any resource alarm or credit-flow backpressure is active; resuming
+%% from 'blocked' also resumes the heartbeat monitor paused when we
+%% blocked.
+control_throttle(State = #v1{connection_state = CS, throttle = Throttle}) ->
+ IsThrottled = ((Throttle#throttle.alarmed_by =/= []) orelse
+ credit_flow:blocked()),
+ case {CS, IsThrottled} of
+ {running, true} -> State#v1{connection_state = blocking};
+ {blocking, false} -> State#v1{connection_state = running};
+ {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
+ State#v1.heartbeater),
+ State#v1{connection_state = running};
+ {blocked, true} -> State#v1{throttle = update_last_blocked_by(
+ Throttle)};
+ {_, _} -> State
+ end.
+
+%% Record why we are (still) blocked: no alarms means credit flow.
+update_last_blocked_by(Throttle = #throttle{alarmed_by = []}) ->
+ Throttle#throttle{last_blocked_by = flow};
+update_last_blocked_by(Throttle) ->
+ Throttle#throttle{last_blocked_by = resource}.
+
+%%--------------------------------------------------------------------------
+%% error handling / termination
+
+%% Enter the 'closed' state and schedule a terminate_connection message
+%% so the socket is eventually torn down even if the peer never closes
+%% its end; the delay is the negotiated timeout, capped by
+%% ?CLOSING_TIMEOUT and defaulting to it when no timeout was agreed.
+close_connection(State = #v1{connection = #connection{
+ timeout_sec = TimeoutSec}}) ->
+ erlang:send_after((if TimeoutSec > 0 andalso
+ TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
+ true -> ?CLOSING_TIMEOUT
+ end) * 1000, self(), terminate_connection),
+ State#v1{connection_state = closed}.
+
+%% React to a monitored session (channel) process exiting: a normal
+%% exit may complete a pending close; an abnormal one is surfaced to
+%% the peer as an internal-error close.
+handle_dependent_exit(ChPid, Reason, State) ->
+ case {ChPid, termination_kind(Reason)} of
+ {undefined, uncontrolled} ->
+ exit({abnormal_dependent_exit, ChPid, Reason});
+ {_Channel, controlled} ->
+ maybe_close(control_throttle(State));
+ {Channel, uncontrolled} ->
+ {RealReason, Trace} = Reason,
+ R = {?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Session error: ~p~n~p~n", [RealReason, Trace]},
+ maybe_close(handle_exception(control_throttle(State), Channel, R))
+ end.
+
+termination_kind(normal) -> controlled;
+termination_kind(_) -> uncontrolled.
+
+%% If we are waiting to close, send the close performative on channel
+%% 0 and finish closing; otherwise leave the state alone.
+maybe_close(State = #v1{connection_state = closing,
+ sock = Sock}) ->
+ NewState = close_connection(State),
+ ok = send_on_channel0(Sock, #'v1_0.close'{}),
+ NewState;
+maybe_close(State) ->
+ State.
+
+%% Build a 1.0 error performative with a formatted description.
+error_frame(Condition, Fmt, Args) ->
+ #'v1_0.error'{condition = Condition,
+ description = {utf8, list_to_binary(
+ rabbit_misc:format(Fmt, Args))}}.
+
+%% Surface an error to the peer. Already-closed connections just log;
+%% running/closing connections log, send a close frame carrying the
+%% error and enter the closed state. In any earlier (handshake) state
+%% we assume a hostile or broken client: pause briefly to defeat
+%% rapid-fire retries, then abandon the handshake.
+handle_exception(State = #v1{connection_state = closed}, Channel,
+ #'v1_0.error'{description = {utf8, Desc}}) ->
+ log(error, "AMQP 1.0 connection ~p (~p), channel ~p - error:~n~p~n",
+ [self(), closed, Channel, Desc]),
+ State;
+handle_exception(State = #v1{connection_state = CS}, Channel,
+ ErrorFrame = #'v1_0.error'{description = {utf8, Desc}})
+ when ?IS_RUNNING(State) orelse CS =:= closing ->
+ log(error, "AMQP 1.0 connection ~p (~p), channel ~p - error:~n~p~n",
+ [self(), CS, Channel, Desc]),
+ %% TODO: session errors shouldn't force the connection to close
+ State1 = close_connection(State),
+ ok = send_on_channel0(State#v1.sock, #'v1_0.close'{error = ErrorFrame}),
+ State1;
+handle_exception(State, Channel, Error) ->
+ %% We don't trust the client at this point - force them to wait
+ %% for a bit so they can't DOS us with repeated failed logins etc.
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw({handshake_error, State#v1.connection_state, Channel, Error}).
+
+%%--------------------------------------------------------------------------
+
+%% Begin 1-0
+
+%% ----------------------------------------
+%% AMQP 1.0 frame handlers
+
+%% Only open and close are connection-scoped performatives; everything
+%% else belongs to a session.
+is_connection_frame(#'v1_0.open'{}) -> true;
+is_connection_frame(#'v1_0.close'{}) -> true;
+is_connection_frame(_) -> false.
+
+%% TODO Handle depending on connection state
+%% TODO It'd be nice to only decode up to the descriptor
+
+%% Process one 1.0 frame, converting any exception into a close with a
+%% suitable error performative: a thrown #'v1_0.error'{} is sent as-is,
+%% anything else becomes an internal-error with the stacktrace.
+%% NOTE(review): erlang:get_stacktrace/0 is deprecated from OTP 21 and
+%% removed in OTP 24 - fine for the OTP versions of this code's era,
+%% but needs 'catch _:Reason:Trace' on modern OTP.
+handle_1_0_frame(Mode, Channel, Payload, State) ->
+ try
+ handle_1_0_frame0(Mode, Channel, Payload, State)
+ catch
+ _:#'v1_0.error'{} = Reason ->
+ handle_exception(State, 0, Reason);
+ _:Reason ->
+ Trace = erlang:get_stacktrace(),
+ handle_exception(State, 0, error_frame(
+ ?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Reader error: ~p~n~p~n",
+ [Reason, Trace]))
+ end.
+
+%% Nothing specifies that connection methods have to be on a
+%% particular channel.
+%% While closing/closed only connection frames are still acted upon;
+%% session frames are silently dropped. Otherwise dispatch by mode:
+%% 'amqp' frames go to the connection or session handler, 'sasl'
+%% frames to the SASL handler (connection frames are invalid in SASL
+%% mode, hence no {sasl, true} clause).
+handle_1_0_frame0(_Mode, Channel, Payload,
+ State = #v1{ connection_state = CS}) when
+ CS =:= closing; CS =:= closed ->
+ Sections = parse_1_0_frame(Payload, Channel),
+ case is_connection_frame(Sections) of
+ true -> handle_1_0_connection_frame(Sections, State);
+ false -> State
+ end;
+handle_1_0_frame0(Mode, Channel, Payload, State) ->
+ Sections = parse_1_0_frame(Payload, Channel),
+ case {Mode, is_connection_frame(Sections)} of
+ {amqp, true} -> handle_1_0_connection_frame(Sections, State);
+ {amqp, false} -> handle_1_0_session_frame(Channel, Sections, State);
+ {sasl, false} -> handle_1_0_sasl_frame(Sections, State)
+ end.
+
+%% Decode the performative at the head of a frame payload; returns the
+%% performative alone, or {Performative, ContentBinary} when the frame
+%% carries message content after it (i.e. a transfer).
+parse_1_0_frame(Payload, _Channel) ->
+ {PerfDesc, Rest} = rabbit_amqp1_0_binary_parser:parse(Payload),
+ Perf = rabbit_amqp1_0_framing:decode(PerfDesc),
+ ?DEBUG("Channel ~p ->~n~p~n~s~n",
+ [_Channel, rabbit_amqp1_0_framing:pprint(Perf),
+ case Rest of
+ <<>> -> <<>>;
+ _ -> rabbit_misc:format(
+ " followed by ~p bytes of content~n", [size(Rest)])
+ end]),
+ case Rest of
+ <<>> -> Perf;
+ _ -> {Perf, Rest}
+ end.
+
+%% Handle the peer's open performative: negotiate frame size and
+%% heartbeats, start the queue collector and heartbeater, reply with
+%% our own open on channel 0 and register for resource alarms. Any
+%% other connection frame (i.e. close) moves us towards closing.
+handle_1_0_connection_frame(#'v1_0.open'{ max_frame_size = ClientFrameMax,
+ channel_max = ClientChannelMax,
+ idle_time_out = IdleTimeout,
+ hostname = Hostname,
+ properties = Props },
+ State = #v1{
+ connection_state = starting,
+ connection = Connection,
+ throttle = Throttle,
+ helper_sup = HelperSupPid,
+ sock = Sock}) ->
+ ClientProps = case Props of
+ undefined -> [];
+ {map, Ps} -> Ps
+ end,
+ %% 1.0 expresses idle timeout in milliseconds; we track seconds.
+ ClientHeartbeatSec = case IdleTimeout of
+ undefined -> 0;
+ {uint, Interval} -> Interval div 1000
+ end,
+ FrameMax = case ClientFrameMax of
+ undefined -> unlimited;
+ {_, FM} -> FM
+ end,
+ ChannelMax = case ClientChannelMax of
+ undefined -> unlimited;
+ {_, CM} -> CM
+ end,
+ {ok, HeartbeatSec} = application:get_env(rabbit, heartbeat),
+ State1 =
+ if (FrameMax =/= unlimited) and (FrameMax < ?FRAME_1_0_MIN_SIZE) ->
+ protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL,
+ "frame_max=~w < ~w min size",
+ [FrameMax, ?FRAME_1_0_MIN_SIZE]);
+ true ->
+ {ok, Collector} =
+ rabbit_connection_helper_sup:start_queue_collector(
+ HelperSupPid, <<"AMQP 1.0">>), %% TODO describe the connection
+ SendFun =
+ fun() ->
+ Frame =
+ rabbit_amqp1_0_binary_generator:build_heartbeat_frame(),
+ catch rabbit_net:send(Sock, Frame)
+ end,
+
+ Parent = self(),
+ ReceiveFun =
+ fun() ->
+ Parent ! heartbeat_timeout
+ end,
+ %% [2.4.5] the value in idle-time-out SHOULD be half the peer's
+ %% actual timeout threshold
+ ReceiverHeartbeatSec = lists:min([HeartbeatSec * 2, 4294967]),
+ %% TODO: only start heartbeat receive timer at next next frame
+ Heartbeater =
+ rabbit_heartbeat:start(HelperSupPid, Sock,
+ ClientHeartbeatSec, SendFun,
+ ReceiverHeartbeatSec, ReceiveFun),
+ State#v1{connection_state = running,
+ connection = Connection#connection{
+ frame_max = FrameMax,
+ hostname = Hostname},
+ heartbeater = Heartbeater,
+ queue_collector = Collector}
+ end,
+ %% TODO enforce channel_max
+ ok = send_on_channel0(
+ Sock,
+ #'v1_0.open'{channel_max = ClientChannelMax,
+ max_frame_size = ClientFrameMax,
+ idle_time_out = {uint, HeartbeatSec * 1000},
+ container_id = {utf8, rabbit_nodes:cluster_name()},
+ properties = server_properties()}),
+ Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ control_throttle(
+ State1#v1{throttle = Throttle#throttle{alarmed_by = Conserve}});
+
+handle_1_0_connection_frame(_Frame, State) ->
+ maybe_close(State#v1{connection_state = closing}).
+
+%% Route a session-scoped frame to the session process registered for
+%% its channel (tracked in the process dictionary as {channel, Ch}):
+%% an 'end' removes the mapping; a 'transfer' while throttling flips
+%% us to 'blocked' and pauses the heartbeat monitor. Frames for a
+%% channel marked 'closing' are dropped (except 'end', which clears the
+%% mark); frames for unknown channels spawn a new session if we are
+%% running, otherwise they are a protocol violation.
+handle_1_0_session_frame(Channel, Frame, State) ->
+ case get({channel, Channel}) of
+ {ch_fr_pid, SessionPid} ->
+ ok = rabbit_amqp1_0_session:process_frame(SessionPid, Frame),
+ case Frame of
+ #'v1_0.end'{} ->
+ erase({channel, Channel}),
+ State;
+ #'v1_0.transfer'{} ->
+ case (State#v1.connection_state =:= blocking) of
+ true ->
+ ok = rabbit_heartbeat:pause_monitor(
+ State#v1.heartbeater),
+ State#v1{connection_state = blocked};
+ false ->
+ State
+ end;
+ _ ->
+ State
+ end;
+ closing ->
+ case Frame of
+ #'v1_0.end'{} ->
+ erase({channel, Channel});
+ _Else ->
+ ok
+ end,
+ State;
+ undefined ->
+ case ?IS_RUNNING(State) of
+ true ->
+ ok = send_to_new_1_0_session(Channel, Frame, State),
+ State;
+ false ->
+ throw({channel_frame_while_starting,
+ Channel, State#v1.connection_state,
+ Frame})
+ end
+ end.
+
+%% TODO: write a proper ANONYMOUS plugin and unify with STOMP
+%% SASL negotiation. ANONYMOUS is special-cased against the configured
+%% default_user; otherwise we resolve the mechanism to a module and run
+%% the challenge/response exchange via auth_phase_1_0/2.
+handle_1_0_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, <<"ANONYMOUS">>},
+                                        hostname = _Hostname},
+                      State = #v1{connection_state = starting,
+                                  sock = Sock}) ->
+    case application:get_env(rabbitmq_amqp1_0, default_user) of
+        {ok, none} ->
+            %% No need to do anything, we will blow up in start_connection
+            ok;
+        {ok, _} ->
+            %% We only need to send the frame, again start_connection
+            %% will set up the default user.
+            Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 0}},
+            ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl),
+            %% Expect the client to restart the handshake with the plain
+            %% AMQP protocol header (8 bytes).
+            switch_callback(State#v1{connection_state = waiting_amqp0100},
+                            handshake, 8)
+    end;
+handle_1_0_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, Mechanism},
+                                        initial_response = {binary, Response},
+                                        hostname = _Hostname},
+                      State0 = #v1{connection_state = starting,
+                                   connection = Connection,
+                                   sock = Sock}) ->
+    AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
+    State = State0#v1{connection =
+                          Connection#connection{
+                            auth_mechanism = {Mechanism, AuthMechanism},
+                            auth_state = AuthMechanism:init(Sock)},
+                      connection_state = securing},
+    auth_phase_1_0(Response, State);
+handle_1_0_sasl_frame(#'v1_0.sasl_response'{response = {binary, Response}},
+                      State = #v1{connection_state = securing}) ->
+    %% A further step in an already-started multi-step exchange.
+    auth_phase_1_0(Response, State);
+handle_1_0_sasl_frame(Frame, State) ->
+    throw({unexpected_1_0_sasl_frame, Frame, State}).
+
+%% We need to handle restarts...
+%% Socket-input state machine: each clause is keyed by the callback set
+%% via switch_callback/3 and the exact number of bytes it asked for.
+%% We need to handle restarts...
+handle_input(handshake, <<"AMQP", 0, 1, 0, 0>>, State) ->
+    start_1_0_connection(amqp, State);
+
+%% 3 stands for "SASL" (keeping this here for when we do TLS)
+handle_input(handshake, <<"AMQP", 3, 1, 0, 0>>, State) ->
+    start_1_0_connection(sasl, State);
+
+%% An 8-byte 1.0 frame header; DOff < 2 would place the body before the
+%% end of the header, which is malformed ([2.3.1]).
+handle_input({frame_header_1_0, Mode},
+             Header = <<Size:32, DOff:8, Type:8, Channel:16>>,
+             State) when DOff >= 2 ->
+    case {Mode, Type} of
+        {amqp, 0} -> ok;
+        {sasl, 1} -> ok;
+        _         -> throw({bad_1_0_header_type, Header, Mode})
+    end,
+    case Size of
+        8 -> % length inclusive
+            {State, {frame_header_1_0, Mode}, 8}; %% heartbeat
+        _ ->
+            switch_callback(State, {frame_payload_1_0, Mode, DOff, Channel}, Size - 8)
+    end;
+handle_input({frame_header_1_0, _Mode}, Malformed, _State) ->
+    throw({bad_1_0_header, Malformed});
+handle_input({frame_payload_1_0, Mode, DOff, Channel},
+             FrameBin, State) ->
+    %% Skip any extended header before the frame body proper.
+    SkipBits = (DOff * 32 - 64), % DOff = 4-byte words, we've read 8 already
+    <<Skip:SkipBits, FramePayload/binary>> = FrameBin,
+    Skip = Skip, %% hide warning when debug is off
+    handle_1_0_frame(Mode, Channel, FramePayload,
+                     switch_callback(State, {frame_header_1_0, Mode}, 8));
+
+handle_input(Callback, Data, _State) ->
+    throw({bad_input, Callback, Data}).
+
+%% Entry point from the 0-9-1 reader handover: unpack the reader state
+%% and enter the receive loop as an AMQP 1.0 connection.
+init(Mode, PackedState) ->
+    %% By invoking recvloop here we become 1.0.
+    recvloop(sys:debug_options([]),
+             start_1_0_connection(Mode, unpack_from_0_9_1(PackedState))).
+
+%% Respond to the client's protocol header. For sasl, echo the SASL
+%% header and advertise mechanisms; for plain amqp, either accept an
+%% already-authenticated user, fall back to the configured default_user,
+%% or reject by replying with the SASL header (telling the client it
+%% must authenticate) and bailing out.
+start_1_0_connection(sasl, State = #v1{sock = Sock}) ->
+    send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>),
+    Ms = {array, symbol,
+          case application:get_env(rabbitmq_amqp1_0, default_user) of
+              {ok, none} -> [];
+              {ok, _}    -> ["ANONYMOUS"]
+          end ++ [ atom_to_list(M) || M <- auth_mechanisms(Sock)]},
+    Mechanisms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms},
+    ok = send_on_channel0(Sock, Mechanisms, rabbit_amqp1_0_sasl),
+    start_1_0_connection0(sasl, State);
+
+start_1_0_connection(amqp,
+                     State = #v1{sock       = Sock,
+                                 connection = C = #connection{user = User}}) ->
+    {ok, NoAuthUsername} = application:get_env(rabbitmq_amqp1_0, default_user),
+    case {User, NoAuthUsername} of
+        {none, none} ->
+            %% Unauthenticated and no default user configured: refuse.
+            send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>),
+            throw(banned_unauthenticated_connection);
+        {none, Username} ->
+            case rabbit_access_control:check_user_login(
+                   list_to_binary(Username), []) of
+                {ok, NoAuthUser} ->
+                    State1 = State#v1{
+                               connection = C#connection{user = NoAuthUser}},
+                    send_1_0_handshake(Sock, <<"AMQP",0,1,0,0>>),
+                    start_1_0_connection0(amqp, State1);
+                _ ->
+                    %% Configured default user doesn't exist in the broker.
+                    send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>),
+                    throw(default_user_missing)
+            end;
+        _ ->
+            %% Already authenticated via SASL earlier in the handshake.
+            send_1_0_handshake(Sock, <<"AMQP",0,1,0,0>>),
+            start_1_0_connection0(amqp, State)
+    end.
+
+%% Common tail of the handshake: for a full amqp connection start the
+%% session supervisor tree (not needed while still in SASL), then wait
+%% for the first 8-byte frame header.
+start_1_0_connection0(Mode, State = #v1{connection = Connection,
+                                        helper_sup = HelperSup}) ->
+    ChannelSupSupPid =
+        case Mode of
+            sasl -> undefined;
+            amqp -> {ok, Pid} =
+                        supervisor2:start_child(
+                          HelperSup,
+                          {channel_sup_sup,
+                           {rabbit_amqp1_0_session_sup_sup, start_link, []},
+                           intrinsic, infinity, supervisor,
+                           [rabbit_amqp1_0_session_sup_sup]}),
+                    Pid
+        end,
+    switch_callback(State#v1{connection = Connection#connection{
+                                            timeout_sec = ?NORMAL_TIMEOUT},
+                             channel_sup_sup_pid = ChannelSupSupPid,
+                             connection_state = starting},
+                    {frame_header_1_0, Mode}, 8).
+
+%% Write the 8-byte protocol header; inet_op wraps socket errors for the
+%% reader's error handling.
+send_1_0_handshake(Sock, Handshake) ->
+    ok = inet_op(fun () -> rabbit_net:send(Sock, Handshake) end).
+
+%% Send a performative on channel 0 (the connection channel), using the
+%% plain AMQP framing by default or an explicit Framing module (e.g.
+%% rabbit_amqp1_0_sasl for SASL frames).
+send_on_channel0(Sock, Method) ->
+    send_on_channel0(Sock, Method, rabbit_amqp1_0_framing).
+
+send_on_channel0(Sock, Method, Framing) ->
+    ok = rabbit_amqp1_0_writer:internal_send_command(
+           Sock, 0, Method, Framing).
+
+%% End 1-0
+
+%% Resolve a SASL mechanism name (binary) to its registered callback
+%% module, insisting that the mechanism is both registered and currently
+%% offered on this socket; otherwise raise a 1.0 not-found error.
+auth_mechanism_to_module(TypeBin, Sock) ->
+    case rabbit_registry:binary_to_type(TypeBin) of
+        {error, not_found} ->
+            protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND,
+                           "unknown authentication mechanism '~s'", [TypeBin]);
+        T ->
+            case {lists:member(T, auth_mechanisms(Sock)),
+                  rabbit_registry:lookup_module(auth_mechanism, T)} of
+                {true, {ok, Module}} ->
+                    Module;
+                _ ->
+                    %% Registered but not offered (or lookup failed).
+                    protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND,
+                                   "invalid authentication mechanism '~s'", [T])
+            end
+    end.
+
+%% The mechanisms we may offer: the intersection of those registered,
+%% those configured in the broker, and those willing to run on this
+%% particular socket (e.g. EXTERNAL may require TLS).
+auth_mechanisms(Sock) ->
+    {ok, Configured} = application:get_env(auth_mechanisms),
+    [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
+             Module:should_offer(Sock), lists:member(Name, Configured)].
+
+%% Begin 1-0
+
+%% Drive one step of the SASL exchange: feed the client's Response to the
+%% negotiated mechanism module and act on its verdict (refusal, protocol
+%% error, a further challenge, or success).  On success we enforce the
+%% loopback-user restriction, send sasl-outcome code 0 (OK) and expect
+%% the client to restart the handshake with the plain AMQP header.
+auth_phase_1_0(Response,
+               State = #v1{connection = Connection =
+                               #connection{auth_mechanism = {Name, AuthMechanism},
+                                           auth_state = AuthState},
+                           sock = Sock}) ->
+    case AuthMechanism:handle_response(Response, AuthState) of
+        {refused, Msg, Args} ->
+            protocol_error(
+              ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, "~s login refused: ~s",
+              [Name, io_lib:format(Msg, Args)]);
+        {protocol_error, Msg, Args} ->
+            protocol_error(?V_1_0_AMQP_ERROR_DECODE_ERROR, Msg, Args);
+        {challenge, Challenge, AuthState1} ->
+            Secure = #'v1_0.sasl_challenge'{challenge = {binary, Challenge}},
+            ok = send_on_channel0(Sock, Secure, rabbit_amqp1_0_sasl),
+            %% Update the auth state in the existing connection record.
+            %% (Previously this built a fresh #connection{} and matched it
+            %% against the bound Connection, which crashes with badmatch
+            %% on any multi-step mechanism and would discard the
+            %% negotiated mechanism and other connection fields.)
+            State#v1{connection =
+                         Connection#connection{auth_state = AuthState1}};
+        {ok, User = #user{username = Username}} ->
+            case rabbit_access_control:check_user_loopback(Username, Sock) of
+                ok          -> ok;
+                not_allowed -> protocol_error(
+                                 ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS,
+                                 "user '~s' can only connect via localhost",
+                                 [Username])
+            end,
+            Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 0}},
+            ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl),
+            switch_callback(
+              State#v1{connection_state = waiting_amqp0100,
+                       connection = Connection#connection{user = User}},
+              handshake, 8)
+    end.
+
+%% First frame on an unknown channel: start a session process for it,
+%% monitor it, record the channel<->pid mappings (three ways) in the
+%% process dictionary, and hand the frame over.
+send_to_new_1_0_session(Channel, Frame, State) ->
+    #v1{sock = Sock, queue_collector = Collector,
+        channel_sup_sup_pid = ChanSupSup,
+        connection = #connection{frame_max = FrameMax,
+                                 hostname  = Hostname,
+                                 user      = User}} = State,
+    {ok, ChSupPid, ChFrPid} =
+        %% Note: the equivalent, start_channel is in channel_sup_sup
+        rabbit_amqp1_0_session_sup_sup:start_session(
+          %% NB subtract fixed frame header size
+          ChanSupSup, {rabbit_amqp1_0_framing, Sock, Channel,
+                       case FrameMax of
+                           unlimited -> unlimited;
+                           _         -> FrameMax - 8
+                       end,
+                       self(), User, vhost(Hostname), Collector}),
+    erlang:monitor(process, ChFrPid),
+    put({channel, Channel}, {ch_fr_pid, ChFrPid}),
+    put({ch_sup_pid, ChSupPid}, {{channel, Channel}, {ch_fr_pid, ChFrPid}}),
+    put({ch_fr_pid, ChFrPid}, {channel, Channel}),
+    ok = rabbit_amqp1_0_session:process_frame(ChFrPid, Frame).
+
+%% Extract the virtual host from the open frame's hostname field using
+%% the "vhost:NAME" convention; anything else gets the configured
+%% default vhost.
+vhost({utf8, <<"vhost:", VHost/binary>>}) ->
+    VHost;
+vhost(_) ->
+    {ok, DefaultVHost} = application:get_env(default_vhost),
+    DefaultVHost.
+
+%% End 1-0
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session).
+
+-export([process_frame/2]).
+
+-export([init/1, begin_/2, maybe_init_publish_id/2, record_delivery/3,
+ incr_incoming_id/1, next_delivery_id/1, transfers_left/1,
+ record_transfers/2, bump_outgoing_window/1,
+ record_outgoing/4, settle/3, flow_fields/2, channel/1,
+ flow/2, ack/2, validate_attach/1]).
+
+-import(rabbit_amqp1_0_util, [protocol_error/3,
+ serial_add/2, serial_diff/2, serial_compare/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-define(MAX_SESSION_WINDOW_SIZE, 65535).
+-define(DEFAULT_MAX_HANDLE, 16#ffffffff).
+
+-record(session, {channel_num, %% we just use the incoming (AMQP 1.0) channel number
+ remote_incoming_window, % keep track of the window until we're told
+ remote_outgoing_window,
+ next_incoming_id, % just to keep a check
+ incoming_window_max, % )
+ incoming_window, % ) so we know when to open the session window
+ next_outgoing_id = 0, % arbitrary count of outgoing transfers
+ outgoing_window,
+ outgoing_window_max,
+ next_publish_id, %% the 0-9-1-side counter for confirms
+ next_delivery_id = 0,
+ incoming_unsettled_map,
+ outgoing_unsettled_map }).
+
+%% We record delivery_id -> #outgoing_delivery{}, so that we can
+%% respond to dispositions about messages we've sent. NB the
+%% delivery-tag doubles as the id we use when acking the rabbit
+%% delivery.
+-record(outgoing_delivery, {delivery_tag, expected_outcome}).
+
+%% We record confirm_id -> #incoming_delivery{} so we can relay
+%% confirms from the broker back to the sending client. NB we have
+%% only one possible outcome, so there's no need to record it here.
+-record(incoming_delivery, {delivery_id}).
+
+%% Forward Frame to the session process, registering the send with
+%% credit_flow so a slow session eventually throttles the caller.
+process_frame(Pid, Frame) ->
+    credit_flow:send(Pid),
+    gen_server2:cast(Pid, {frame, Frame, self()}).
+
+%% Fresh per-channel session state; window sizes are negotiated later in
+%% begin_/2.
+init(Channel) ->
+    #session{channel_num            = Channel,
+             next_publish_id        = 0,
+             incoming_unsettled_map = gb_trees:empty(),
+             outgoing_unsettled_map = gb_trees:empty()}.
+
+%% Session window:
+%%
+%% Each session has two abstract[1] buffers, one to record the
+%% unsettled state of incoming messages, one to record the unsettled
+%% state of outgoing messages. In general we want to bound these
+%% buffers; but if we bound them, and don't tell the other side, we
+%% may end up deadlocking the other party.
+%%
+%% Hence the flow frame contains a session window, expressed as the
+%% next-id and the window size for each of the buffers. The frame
+%% refers to the window of the sender of the frame, of course.
+%%
+%% The numbers work this way: for the outgoing window, the next-id
+%% counts the next transfer the session will send, and it will stop
+%% sending at next-id + window. For the incoming window, the next-id
+%% counts the next transfer id expected, and it will not accept
+%% messages beyond next-id + window (in fact it will probably close
+%% the session, since sending outside the window is a transgression of
+%% the protocol).
+%%
+%% We may as well just pick a value for the incoming and outgoing
+%% windows; choosing based on what the client says may just stop
+%% things dead, if the value is zero for instance.
+%%
+%% [1] Abstract because there probably won't be a data structure with
+%% a size directly related to transfers; settlement is done with
+%% delivery-id, which may refer to one or more transfers.
+%% Handle a begin frame: record the peer's windows and transfer-ids,
+%% pick our own (fixed) window sizes, and return the begin frame to send
+%% back plus the updated session.  The trailing OutWindow is used by the
+%% caller to size the basic.qos prefetch.
+begin_(#'v1_0.begin'{next_outgoing_id = {uint, RemoteNextOut},
+                     incoming_window  = {uint, RemoteInWindow},
+                     outgoing_window  = {uint, RemoteOutWindow},
+                     handle_max       = HandleMax0},
+       Session = #session{next_outgoing_id = LocalNextOut,
+                          channel_num      = Channel}) ->
+    InWindow = ?MAX_SESSION_WINDOW_SIZE,
+    OutWindow = ?MAX_SESSION_WINDOW_SIZE,
+    %% handle-max is optional; default to the maximum the type allows.
+    HandleMax = case HandleMax0 of
+                    {uint, Max} -> Max;
+                    _           -> ?DEFAULT_MAX_HANDLE
+                end,
+    {ok, #'v1_0.begin'{remote_channel   = {ushort, Channel},
+                       handle_max       = {uint, HandleMax},
+                       next_outgoing_id = {uint, LocalNextOut},
+                       incoming_window  = {uint, InWindow},
+                       outgoing_window  = {uint, OutWindow}},
+     Session#session{
+       outgoing_window        = OutWindow,
+       outgoing_window_max    = OutWindow,
+       next_incoming_id       = RemoteNextOut,
+       remote_incoming_window = RemoteInWindow,
+       remote_outgoing_window = RemoteOutWindow,
+       incoming_window        = InWindow,
+       incoming_window_max    = InWindow},
+     OutWindow}.
+
+%% Reject attach requests for features we do not implement
+%% (transactions, link recovery, rcv-settle-mode second); anything else
+%% is acceptable.
+validate_attach(#'v1_0.attach'{target = #'v1_0.coordinator'{}}) ->
+    protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                   "Transactions not supported", []);
+validate_attach(#'v1_0.attach'{unsettled = Unsettled,
+                               incomplete_unsettled = IncompleteSettled})
+  when Unsettled =/= undefined andalso Unsettled =/= {map, []} orelse
+       IncompleteSettled =:= true ->
+    protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                   "Link recovery not supported", []);
+validate_attach(
+  #'v1_0.attach'{snd_settle_mode = SndSettleMode,
+                 rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_SECOND})
+  when SndSettleMode =/= ?V_1_0_SENDER_SETTLE_MODE_SETTLED ->
+    protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                   "rcv-settle-mode second not supported", []);
+validate_attach(#'v1_0.attach'{}) ->
+    ok.
+
+%% When confirms are requested (first argument true), make sure the
+%% 0-9-1-side publish counter starts at 1; 0 means "confirms off".
+maybe_init_publish_id(false, Session) ->
+    Session;
+maybe_init_publish_id(true, Session = #session{next_publish_id = Id}) ->
+    Session#session{next_publish_id = erlang:max(1, Id)}.
+
+%% Note an incoming (published) delivery.  If it is unsettled we must
+%% remember the publish-id -> delivery-id mapping so a later broker
+%% confirm can be relayed back as a disposition; settled deliveries need
+%% no bookkeeping.  A publish id of 0 means confirms are off entirely.
+record_delivery(DeliveryId, Settled,
+                Session = #session{next_publish_id = Id,
+                                   incoming_unsettled_map = Unsettled}) ->
+    Id1 = case Id of
+              0 -> 0;
+              _ -> Id + 1 % this ought to be a serial number in the broker, but isn't
+          end,
+    Unsettled1 = case Settled of
+                     true ->
+                         Unsettled;
+                     false ->
+                         gb_trees:insert(Id,
+                                         #incoming_delivery{
+                                           delivery_id = DeliveryId },
+                                         Unsettled)
+                 end,
+    Session#session{
+      next_publish_id = Id1,
+      incoming_unsettled_map = Unsettled1}.
+
+%% Account for one received transfer: advance next-incoming-id (serial
+%% arithmetic), shrink both our incoming window and our view of the
+%% peer's outgoing window, and — once our window has drained to half of
+%% its maximum — emit a flow frame that reopens it fully.
+incr_incoming_id(Session = #session{ next_incoming_id = NextIn,
+                                     incoming_window = InWindow,
+                                     incoming_window_max = InWindowMax,
+                                     remote_outgoing_window = RemoteOut }) ->
+    NewOutWindow = RemoteOut - 1,
+    InWindow1 = InWindow - 1,
+    NewNextIn = serial_add(NextIn, 1),
+    %% If we've reached halfway, open the window
+    {Flows, NewInWindow} =
+        if InWindow1 =< (InWindowMax div 2) ->
+                {[#'v1_0.flow'{}], InWindowMax};
+           true ->
+                {[], InWindow1}
+        end,
+    {Flows, Session#session{ next_incoming_id = NewNextIn,
+                             incoming_window = NewInWindow,
+                             remote_outgoing_window = NewOutWindow}}.
+
+%% The delivery-id the next outgoing transfer will carry.
+next_delivery_id(#session{next_delivery_id = Num}) -> Num.
+
+%% How many transfers we may still send: limited by our own outgoing
+%% window and by the peer's advertised incoming window.
+transfers_left(#session{remote_incoming_window = RemoteWindow,
+                        outgoing_window = LocalWindow}) ->
+    {LocalWindow, RemoteWindow}.
+
+%% Note an outgoing delivery.  Unless the transfer is sent pre-settled,
+%% remember its delivery-tag and expected outcome keyed by delivery-id so
+%% a later disposition from the peer can be turned into an ack/reject on
+%% the backing channel.  Always advance next_delivery_id (serially).
+record_outgoing(DeliveryTag, SendSettled, DefaultOutcome,
+                Session = #session{next_delivery_id = DeliveryId,
+                                   outgoing_unsettled_map = Unsettled}) ->
+    Unsettled1 = case SendSettled of
+                     true ->
+                         Unsettled;
+                     false ->
+                         gb_trees:insert(DeliveryId,
+                                         #outgoing_delivery{
+                                           delivery_tag     = DeliveryTag,
+                                           expected_outcome = DefaultOutcome },
+                                         Unsettled)
+                 end,
+    Session#session{outgoing_unsettled_map = Unsettled1,
+                    next_delivery_id = serial_add(DeliveryId, 1)}.
+
+%% Account for NumTransfers frames just sent: both windows shrink and
+%% next-outgoing-id advances (serial arithmetic).
+record_transfers(NumTransfers,
+                 Session = #session{ remote_incoming_window = RemoteInWindow,
+                                     outgoing_window = OutWindow,
+                                     next_outgoing_id = NextOutId }) ->
+    Session#session{ remote_incoming_window = RemoteInWindow - NumTransfers,
+                     outgoing_window = OutWindow - NumTransfers,
+                     next_outgoing_id = serial_add(NextOutId, NumTransfers) }.
+
+%% Make sure we have "room" in our outgoing window by bumping the
+%% window if necessary. TODO this *could* be based on how much
+%% notional "room" there is in outgoing_unsettled.
+%% Reset the outgoing window to its maximum and return a flow frame to
+%% tell the peer so.
+bump_outgoing_window(Session = #session{ outgoing_window_max = OutMax }) ->
+    {#'v1_0.flow'{}, Session#session{ outgoing_window = OutMax }}.
+
+%% We've been told that the fate of a delivery has been determined.
+%% Generally if the other side has not settled it, we will do so. If
+%% the other side /has/ settled it, we don't need to reply -- it's
+%% already forgotten its state for the delivery anyway.
+%% We've been told that the fate of a delivery has been determined.
+%% Generally if the other side has not settled it, we will do so. If
+%% the other side /has/ settled it, we don't need to reply -- it's
+%% already forgotten its state for the delivery anyway.
+%%
+%% Walks the [first..last] delivery-id range of the disposition, acking
+%% upstream (via UpstreamAckFun) and dropping each entry from the
+%% outgoing-unsettled map.  Returns {ReplyOrNone, Session1}: the reply
+%% echoes the disposition with settled=true when the peer hasn't
+%% settled; 'none' when no response is needed.
+settle(Disp = #'v1_0.disposition'{first   = First0,
+                                  last    = Last0,
+                                  state   = _Outcome,
+                                  settled = Settled},
+       Session = #session{outgoing_unsettled_map = Unsettled},
+       UpstreamAckFun) ->
+    {uint, First} = First0,
+    %% Last may be omitted, in which case it's the same as first
+    Last = case Last0 of
+               {uint, L} -> L;
+               undefined -> First
+           end,
+    %% The other party may be talking about something we've already
+    %% forgotten; this isn't a crime, we can just ignore it.
+    case gb_trees:is_empty(Unsettled) of
+        true ->
+            {none, Session};
+        false ->
+            {LWM, _} = gb_trees:smallest(Unsettled),
+            {HWM, _} = gb_trees:largest(Unsettled),
+            if Last < LWM ->
+                    {none, Session};
+               %% TODO this should probably be an error, rather than ignored.
+               First > HWM ->
+                    {none, Session};
+               true ->
+                    %% Clip the range to what we actually still hold.
+                    Unsettled1 =
+                        lists:foldl(
+                          fun (Delivery, Map) ->
+                                  case gb_trees:lookup(Delivery, Map) of
+                                      none ->
+                                          Map;
+                                      {value, Entry} ->
+                                          #outgoing_delivery{delivery_tag = DeliveryTag } = Entry,
+                                          ?DEBUG("Settling ~p with ~p~n", [Delivery, _Outcome]),
+                                          UpstreamAckFun(DeliveryTag),
+                                          gb_trees:delete(Delivery, Map)
+                                  end
+                          end,
+                          Unsettled, lists:seq(erlang:max(LWM, First),
+                                               erlang:min(HWM, Last))),
+                    {case Settled of
+                         true  -> none;
+                         false -> Disp#'v1_0.disposition'{ settled = true,
+                                                           role = ?SEND_ROLE }
+                     end,
+                     Session#session{outgoing_unsettled_map = Unsettled1}}
+            end
+    end.
+
+%% Fill in the session-window fields of any flow frame(s) about to be
+%% sent; other frame types (and the list form) pass through unchanged.
+flow_fields(Frames, Session) when is_list(Frames) ->
+    [flow_fields(F, Session) || F <- Frames];
+
+flow_fields(Flow = #'v1_0.flow'{},
+            #session{next_outgoing_id = NextOut,
+                     next_incoming_id = NextIn,
+                     outgoing_window = OutWindow,
+                     incoming_window = InWindow}) ->
+    Flow#'v1_0.flow'{
+      next_outgoing_id = {uint, NextOut},
+      outgoing_window = {uint, OutWindow},
+      next_incoming_id = {uint, NextIn},
+      incoming_window = {uint, InWindow}};
+
+flow_fields(Frame, _Session) ->
+    Frame.
+
+%% The AMQP 1.0 channel number this session lives on.
+channel(#session{channel_num = Channel}) -> Channel.
+
+%% We should already know the next outgoing transfer sequence number,
+%% because it's one more than the last transfer we saw; and, we don't
+%% need to know the next incoming transfer sequence number (although
+%% we might use it to detect congestion -- e.g., if it's lagging far
+%% behind our outgoing sequence number). We probably care about the
+%% outgoing window, since we want to keep it open by sending back
+%% settlements, but there's not much we can do to hurry things along.
+%%
+%% We do care about the incoming window, because we must not send
+%% beyond it. This may cause us problems, even in normal operation,
+%% since we want our unsettled transfers to be exactly those that are
+%% held as unacked by the backing channel; however, the far side may
+%% close the window while we still have messages pending transfer, and
+%% indeed, an individual message may take more than one 'slot'.
+%%
+%% Note that this isn't a race so far as AMQP 1.0 is concerned; it's
+%% only because AMQP 0-9-1 defines QoS in terms of the total number of
+%% unacked messages, whereas 1.0 has an explicit window.
+%% Handle the session-window part of a flow frame from the peer: update
+%% our view of the peer's windows, cross-checking its transfer-ids
+%% against ours.  A mismatch on our incoming side is either quietly
+%% resynchronised or treated as a window violation, depending on
+%% protocol_strict_mode.
+flow(#'v1_0.flow'{next_incoming_id = FlowNextIn0,
+                  incoming_window  = {uint, FlowInWindow},
+                  next_outgoing_id = {uint, FlowNextOut},
+                  outgoing_window  = {uint, FlowOutWindow}},
+     Session = #session{next_incoming_id = LocalNextIn,
+                        next_outgoing_id = LocalNextOut}) ->
+    %% The far side may not have our begin{} with our next-transfer-id
+    FlowNextIn = case FlowNextIn0 of
+                     {uint, Id} -> Id;
+                     undefined  -> LocalNextOut
+                 end,
+    case serial_compare(FlowNextOut, LocalNextIn) of
+        equal ->
+            case serial_compare(FlowNextIn, LocalNextOut) of
+                greater ->
+                    %% The peer claims to have seen transfers we haven't
+                    %% sent yet: unrecoverable.
+                    protocol_error(?V_1_0_SESSION_ERROR_WINDOW_VIOLATION,
+                                   "Remote incoming id (~p) leads "
+                                   "local outgoing id (~p)",
+                                   [FlowNextIn, LocalNextOut]);
+                equal ->
+                    Session#session{
+                      remote_outgoing_window = FlowOutWindow,
+                      remote_incoming_window = FlowInWindow};
+                less ->
+                    %% The peer lags behind us; its effective window is
+                    %% measured from its next-incoming-id, not from ours.
+                    Session#session{
+                      remote_outgoing_window = FlowOutWindow,
+                      remote_incoming_window =
+                          serial_diff(serial_add(FlowNextIn, FlowInWindow),
+                                      LocalNextOut)}
+            end;
+        _ ->
+            case application:get_env(rabbitmq_amqp1_0, protocol_strict_mode) of
+                {ok, false} ->
+                    Session#session{next_incoming_id = FlowNextOut};
+                {ok, true} ->
+                    protocol_error(?V_1_0_SESSION_ERROR_WINDOW_VIOLATION,
+                                   "Remote outgoing id (~p) not equal to "
+                                   "local incoming id (~p)",
+                                   [FlowNextOut, LocalNextIn])
+            end
+    end.
+
+%% An acknowledgement from the queue, which we'll get if we are
+%% using confirms.
+%% An acknowledgement from the queue, which we'll get if we are
+%% using confirms.  Translate it into (at most one) disposition for the
+%% corresponding delivery-id range, removing the confirmed entries from
+%% the incoming-unsettled map.  multiple=true confirms everything up to
+%% and including DTag.
+ack(#'basic.ack'{delivery_tag = DTag, multiple = Multiple},
+    Session = #session{incoming_unsettled_map = Unsettled}) ->
+    {DeliveryIds, Unsettled1} =
+        case Multiple of
+            true  -> acknowledgement_range(DTag, Unsettled);
+            false -> case gb_trees:lookup(DTag, Unsettled) of
+                         {value, #incoming_delivery{ delivery_id = Id }} ->
+                             {[Id], gb_trees:delete(DTag, Unsettled)};
+                         none ->
+                             {[], Unsettled}
+                     end
+        end,
+    Disposition = case DeliveryIds of
+                      [] -> [];
+                      _  -> [acknowledgement(
+                               DeliveryIds,
+                               #'v1_0.disposition'{role = ?RECV_ROLE})]
+                  end,
+    {Disposition,
+     Session#session{incoming_unsettled_map = Unsettled1}}.
+
+%% Collect (and remove) the delivery-ids of all unsettled entries whose
+%% key is =< DTag, in ascending order — the 1.0 counterpart of a
+%% "multiple" 0-9-1 ack.  Relies on gb_trees keeping keys sorted.
+acknowledgement_range(DTag, Unsettled) ->
+    acknowledgement_range(DTag, Unsettled, []).
+
+acknowledgement_range(DTag, Unsettled, Acc) ->
+    case gb_trees:is_empty(Unsettled) of
+        true ->
+            {lists:reverse(Acc), Unsettled};
+        false ->
+            {DTag1, #incoming_delivery{ delivery_id = Id}} =
+                gb_trees:smallest(Unsettled),
+            case DTag1 =< DTag of
+                true ->
+                    {_K, _V, Unsettled1} = gb_trees:take_smallest(Unsettled),
+                    acknowledgement_range(DTag, Unsettled1,
+                                          [Id|Acc]);
+                false ->
+                    {lists:reverse(Acc), Unsettled}
+            end
+    end.
+
+%% Build an accepted+settled disposition covering the (ascending,
+%% non-empty) list of delivery-ids.
+acknowledgement(DeliveryIds, Disposition) ->
+    Disposition#'v1_0.disposition'{ first = {uint, hd(DeliveryIds)},
+                                    last = {uint, lists:last(DeliveryIds)},
+                                    settled = true,
+                                    state = #'v1_0.accepted'{} }.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session_process).
+
+-behaviour(gen_server2).
+
+-export([init/1, terminate/2, code_change/3,
+ handle_call/3, handle_cast/2, handle_info/2]).
+
+-export([start_link/1]).
+
+-record(state, {backing_connection, backing_channel, frame_max,
+ reader_pid, writer_pid, buffer, session}).
+
+-record(pending, {delivery_tag, frames, link_handle }).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+-import(rabbit_amqp1_0_link_util, [ctag_to_handle/1]).
+
+%% Start the session gen_server2; Args is the tuple handled by init/1.
+start_link(Args) ->
+    gen_server2:start_link(?MODULE, Args, []).
+
+%% ---------
+
+%% Open a direct (in-broker) AMQP 0-9-1 connection and channel to back
+%% this 1.0 session; trap exits so terminate/2 can close the connection.
+init({Channel, ReaderPid, WriterPid, #user{username = Username}, VHost,
+      FrameMax, AdapterInfo, _Collector}) ->
+    process_flag(trap_exit, true),
+    {ok, Conn} = amqp_connection:start(
+                   #amqp_params_direct{username     = Username,
+                                       virtual_host = VHost,
+                                       adapter_info = AdapterInfo}),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    {ok, #state{backing_connection = Conn,
+                backing_channel    = Ch,
+                reader_pid         = ReaderPid,
+                writer_pid         = WriterPid,
+                frame_max          = FrameMax,
+                buffer             = queue:new(),
+                session            = rabbit_amqp1_0_session:init(Channel)
+               }}.
+
+%% Best-effort close of the backing connection; with_exit_handler
+%% swallows the error if it is already gone.
+terminate(_Reason, _State = #state{backing_connection = Conn}) ->
+    rabbit_misc:with_exit_handler(fun () -> ok end,
+                                  fun () -> amqp_connection:close(Conn) end).
+
+%% No state migration needed across code upgrades.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% No synchronous API: reply with an error rather than crashing.
+handle_call(Msg, _From, State) ->
+    {reply, {error, not_understood, Msg}, State}.
+
+%% Messages from the backing channel/connection: deliveries to relay as
+%% 1.0 transfers, credit-drained and ack notifications, credit_flow
+%% bumps, and exit/down signals.
+handle_info(#'basic.consume_ok'{}, State) ->
+    %% Handled above
+    {noreply, State};
+
+handle_info({#'basic.deliver'{ consumer_tag = ConsumerTag,
+                               delivery_tag = DeliveryTag } = Deliver, Msg},
+            State = #state{frame_max = FrameMax,
+                           buffer = Buffer,
+                           session = Session}) ->
+    Handle = ctag_to_handle(ConsumerTag),
+    case get({out, Handle}) of
+        undefined ->
+            %% TODO handle missing link -- why does the queue think it's there?
+            rabbit_log:warning("Delivery to non-existent consumer ~p",
+                               [ConsumerTag]),
+            {noreply, State};
+        Link ->
+            %% Encode the delivery into 1.0 transfer frames, queue them,
+            %% and drain as much of the buffer as the windows allow.
+            {ok, Frames, Session1} =
+                rabbit_amqp1_0_outgoing_link:delivery(
+                  Deliver, Msg, FrameMax, Handle, Session, Link),
+            Pending = #pending{ delivery_tag = DeliveryTag,
+                                frames = Frames,
+                                link_handle = Handle },
+            Buffer1 = queue:in(Pending, Buffer),
+            {noreply, run_buffer(
+                        state(Session1, State#state{ buffer = Buffer1 }))}
+    end;
+
+%% A message from the queue saying that there are no more messages
+handle_info(#'basic.credit_drained'{consumer_tag = CTag} = CreditDrained,
+            State = #state{writer_pid = WriterPid}) ->
+    Handle = ctag_to_handle(CTag),
+    Link = get({out, Handle}),
+    Link1 = rabbit_amqp1_0_outgoing_link:credit_drained(
+              CreditDrained, Handle, Link, WriterPid),
+    put({out, Handle}, Link1),
+    {noreply, State};
+
+%% Broker confirm: relay back to the publisher as a disposition.
+handle_info(#'basic.ack'{} = Ack, State = #state{writer_pid = WriterPid,
+                                                 session = Session}) ->
+    {Reply, Session1} = rabbit_amqp1_0_session:ack(Ack, Session),
+    [rabbit_amqp1_0_writer:send_command(WriterPid, F) ||
+        F <- rabbit_amqp1_0_session:flow_fields(Reply, Session)],
+    {noreply, state(Session1, State)};
+
+handle_info({bump_credit, Msg}, State) ->
+    credit_flow:handle_bump_msg(Msg),
+    {noreply, State};
+
+%% TODO these pretty much copied wholesale from rabbit_channel
+handle_info({'EXIT', WriterPid, Reason = {writer, send_failed, _Error}},
+            State = #state{writer_pid = WriterPid}) ->
+    %% Tell the reader which channel died before stopping quietly.
+    State#state.reader_pid !
+        {channel_exit, rabbit_amqp1_0_session:channel(session(State)), Reason},
+    {stop, normal, State};
+handle_info({'EXIT', _Pid, Reason}, State) ->
+    {stop, Reason, State};
+handle_info({'DOWN', _MRef, process, _QPid, _Reason}, State) ->
+    %% TODO do we care any more since we're using direct client?
+    {noreply, State}. % TODO rabbit_channel uses queue_blocked?
+
+%% A frame relayed from the reader (see process_frame/2): ack the
+%% credit_flow grant, run the control logic, and send any replies.  A
+%% #'v1_0.error'{} exit becomes an end frame closing just this session;
+%% anything else takes the process down.
+handle_cast({frame, Frame, FlowPid},
+            State = #state{ reader_pid = ReaderPid,
+                            writer_pid = Sock }) ->
+    credit_flow:ack(FlowPid),
+    try handle_control(Frame, State) of
+        {reply, Replies, NewState} when is_list(Replies) ->
+            lists:foreach(fun (Reply) ->
+                                  rabbit_amqp1_0_writer:send_command(Sock, Reply)
+                          end, Replies),
+            noreply(NewState);
+        {reply, Reply, NewState} ->
+            rabbit_amqp1_0_writer:send_command(Sock, Reply),
+            noreply(NewState);
+        {noreply, NewState} ->
+            noreply(NewState);
+        stop ->
+            {stop, normal, State}
+    catch exit:Reason = #'v1_0.error'{} ->
+            %% TODO shut down nicely like rabbit_channel
+            End = #'v1_0.end'{ error = Reason },
+            rabbit_log:warning("Closing session for connection ~p: ~p~n",
+                               [ReaderPid, Reason]),
+            ok = rabbit_amqp1_0_writer:send_command_sync(Sock, End),
+            {stop, normal, State};
+          exit:normal ->
+            {stop, normal, State};
+          _:Reason ->
+            {stop, {Reason, erlang:get_stacktrace()}, State}
+    end.
+
+%% TODO rabbit_channel returns {noreply, State, hibernate}, but that
+%% appears to break things here (it stops the session responding to
+%% frames).
+%% TODO rabbit_channel returns {noreply, State, hibernate}, but that
+%% appears to break things here (it stops the session responding to
+%% frames).
+noreply(State) ->
+    {noreply, State}.
+
+%% ------
+
+%% The per-performative control logic for this session.  Returns
+%% {reply, ReplyOrReplies, State} | {noreply, State} | stop, consumed by
+%% handle_cast above; protocol errors exit with a #'v1_0.error'{}.
+
+%% begin: negotiate windows, cap "at risk" messages via basic.qos.
+handle_control(#'v1_0.begin'{} = Begin,
+               State = #state{backing_channel = Ch,
+                              session         = Session}) ->
+    {ok, Reply, Session1, Prefetch} =
+        rabbit_amqp1_0_session:begin_(Begin, Session),
+    %% Attempt to limit the number of "at risk" messages we can have.
+    rabbit_amqp1_0_channel:cast(Ch, #'basic.qos'{prefetch_count = Prefetch}),
+    reply(Reply, state(Session1, State));
+
+%% attach (client is sender): set up an incoming link; links live in the
+%% process dictionary keyed {in | out, Handle}.
+handle_control(#'v1_0.attach'{handle = Handle,
+                              role   = ?SEND_ROLE} = Attach,
+               State = #state{backing_channel    = BCh,
+                              backing_connection = Conn}) ->
+    ok = rabbit_amqp1_0_session:validate_attach(Attach),
+    {ok, Reply, Link, Confirm} =
+        with_disposable_channel(
+          Conn, fun (DCh) ->
+                        rabbit_amqp1_0_incoming_link:attach(Attach, BCh, DCh)
+                end),
+    put({in, Handle}, Link),
+    reply(Reply, state(rabbit_amqp1_0_session:maybe_init_publish_id(
+                         Confirm, session(State)), State));
+
+%% attach (client is receiver): set up an outgoing link.
+handle_control(#'v1_0.attach'{handle = Handle,
+                              role   = ?RECV_ROLE} = Attach,
+               State = #state{backing_channel    = BCh,
+                              backing_connection = Conn}) ->
+    ok = rabbit_amqp1_0_session:validate_attach(Attach),
+    {ok, Reply, Link} =
+        with_disposable_channel(
+          Conn, fun (DCh) ->
+                        rabbit_amqp1_0_outgoing_link:attach(Attach, BCh, DCh)
+                end),
+    put({out, Handle}, Link),
+    reply(Reply, State);
+
+%% transfer: incoming message content on a previously attached link.
+handle_control({Txfr = #'v1_0.transfer'{handle = Handle},
+                MsgPart},
+               State = #state{backing_channel = BCh,
+                              session         = Session}) ->
+    case get({in, Handle}) of
+        undefined ->
+            protocol_error(?V_1_0_AMQP_ERROR_ILLEGAL_STATE,
+                           "Unknown link handle ~p", [Handle]);
+        Link ->
+            {Flows, Session1} = rabbit_amqp1_0_session:incr_incoming_id(Session),
+            case rabbit_amqp1_0_incoming_link:transfer(
+                   Txfr, MsgPart, Link, BCh) of
+                {message, Reply, Link1, DeliveryId, Settled} ->
+                    %% A complete message was published; remember it if
+                    %% unsettled so confirms can be relayed back.
+                    put({in, Handle}, Link1),
+                    Session2 = rabbit_amqp1_0_session:record_delivery(
+                                 DeliveryId, Settled, Session1),
+                    reply(Reply ++ Flows, state(Session2, State));
+                {ok, Link1} ->
+                    %% Partial (multi-transfer) message; keep accumulating.
+                    put({in, Handle}, Link1),
+                    reply(Flows, state(Session1, State))
+            end
+    end;
+
+%% Disposition: multiple deliveries may be settled at a time.
+%% TODO: should we send a flow after this, to indicate the state
+%% of the session window?
+handle_control(#'v1_0.disposition'{state = Outcome,
+                                   role = ?RECV_ROLE} = Disp,
+               State = #state{backing_channel = Ch}) ->
+    AckFun =
+        fun (DeliveryTag) ->
+                %% Map the 1.0 outcome onto the 0-9-1 ack/reject calls.
+                ok = rabbit_amqp1_0_channel:call(
+                       Ch, case Outcome of
+                               #'v1_0.accepted'{} ->
+                                   #'basic.ack'{delivery_tag = DeliveryTag,
+                                                multiple     = false};
+                               #'v1_0.rejected'{} ->
+                                   #'basic.reject'{delivery_tag = DeliveryTag,
+                                                   requeue      = false};
+                               #'v1_0.released'{} ->
+                                   #'basic.reject'{delivery_tag = DeliveryTag,
+                                                   requeue      = true}
+                           end)
+        end,
+    case rabbit_amqp1_0_session:settle(Disp, session(State), AckFun) of
+        {none,  Session1} -> {noreply, state(Session1, State)};
+        {Reply, Session1} -> {reply, Reply, state(Session1, State)}
+    end;
+
+handle_control(#'v1_0.detach'{ handle = Handle }, State) ->
+    %% TODO keep the state around depending on the lifetime
+    %% TODO outgoing links?
+    erase({in, Handle}),
+    {reply, #'v1_0.detach'{ handle = Handle }, State};
+
+%% end: confirm and stop the session process.
+handle_control(#'v1_0.end'{}, _State = #state{ writer_pid = Sock }) ->
+    ok = rabbit_amqp1_0_writer:send_command(Sock, #'v1_0.end'{}),
+    stop;
+
+%% Flow control. These frames come with two pieces of information:
+%% the session window, and optionally, credit for a particular link.
+%% We'll deal with each of them separately.
+handle_control(Flow = #'v1_0.flow'{},
+               State = #state{backing_channel = BCh,
+                              session         = Session}) ->
+    State1 = state(rabbit_amqp1_0_session:flow(Flow, Session), State),
+    State2 = run_buffer(State1),
+    case Flow#'v1_0.flow'.handle of
+        undefined ->
+            {noreply, State2};
+        Handle ->
+            case get({in, Handle}) of
+                undefined ->
+                    case get({out, Handle}) of
+                        undefined ->
+                            rabbit_log:warning("Flow for unknown link handle ~p", [Flow]),
+                            protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
+                                           "Unattached handle: ~p", [Handle]);
+                        Out ->
+                            {ok, Reply} = rabbit_amqp1_0_outgoing_link:flow(
+                                            Out, Flow, BCh),
+                            reply(Reply, State2)
+                    end;
+                _In ->
+                    %% We're being told about available messages at
+                    %% the sender. Yawn.
+                    %% TODO at least check transfer-count?
+                    {noreply, State2}
+            end
+    end;
+
+handle_control(Frame, _State) ->
+    protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+                   "Unexpected frame ~p",
+                   [rabbit_amqp1_0_framing:pprint(Frame)]).
+
+%% Drain as much of the pending-transfer buffer as the current session
+%% windows allow, storing the updated session and buffer in the state.
+run_buffer(State = #state{ writer_pid = WriterPid,
+                           session = Session,
+                           backing_channel = BCh,
+                           buffer = Buffer }) ->
+    {NewSession, NewBuffer} = run_buffer1(WriterPid, BCh, Session, Buffer),
+    State#state{ buffer = NewBuffer, session = NewSession }.
+
+%% Pump buffered outgoing transfers while the session windows allow.
+%% Three cases on transfers_left/1:
+%%   1. Both local and remote space available: send deliveries from the
+%%      head of the buffer, limited by the smaller window.
+%%   2. Only remote space: try to widen our outgoing window, announce it
+%%      with a flow frame, and retry.
+%%   3. No remote space: nothing can be sent; stop.
+%% Returns the updated {Session, Buffer}.
+run_buffer1(WriterPid, BCh, Session, Buffer) ->
+    case rabbit_amqp1_0_session:transfers_left(Session) of
+        {LocalSpace, RemoteSpace} when RemoteSpace > 0 andalso LocalSpace > 0 ->
+            Space = erlang:min(LocalSpace, RemoteSpace),
+            case queue:out(Buffer) of
+                %% queue:out/1 hands back the queue unchanged when it is
+                %% empty, so reusing the bound variable matches only then.
+                {empty, Buffer} ->
+                    {Session, Buffer};
+                {{value, #pending{ delivery_tag = DeliveryTag,
+                                   frames = Frames,
+                                   link_handle = Handle } = Pending},
+                 BufferTail} ->
+                    Link = get({out, Handle}),
+                    case send_frames(WriterPid, Frames, Space) of
+                        {all, SpaceLeft} ->
+                            %% Whole delivery sent: tell the outgoing link
+                            %% and account only for frames actually used.
+                            NewLink =
+                                rabbit_amqp1_0_outgoing_link:transferred(
+                                  DeliveryTag, BCh, Link),
+                            put({out, Handle}, NewLink),
+                            Session1 = rabbit_amqp1_0_session:record_transfers(
+                                         Space - SpaceLeft, Session),
+                            run_buffer1(WriterPid, BCh, Session1, BufferTail);
+                        {some, Rest} ->
+                            %% Credit exhausted mid-delivery: push the
+                            %% remaining frames back on the front and loop
+                            %% (case 2 may still widen the window).
+                            Session1 = rabbit_amqp1_0_session:record_transfers(
+                                         Space, Session),
+                            Buffer1 = queue:in_r(Pending#pending{ frames = Rest },
+                                                 BufferTail),
+                            run_buffer1(WriterPid, BCh, Session1, Buffer1)
+                    end
+            end;
+        {_, RemoteSpace} when RemoteSpace > 0 ->
+            case rabbit_amqp1_0_session:bump_outgoing_window(Session) of
+                {Flow = #'v1_0.flow'{}, Session1} ->
+                    rabbit_amqp1_0_writer:send_command(
+                      WriterPid,
+                      rabbit_amqp1_0_session:flow_fields(Flow, Session1)),
+                    run_buffer1(WriterPid, BCh, Session1, Buffer);
+                {none, Session1} ->
+                    {Session1, Buffer}
+            end;
+        _ ->
+            {Session, Buffer}
+    end.
+
+%% Write transfer frames until either every frame is sent, yielding
+%% {all, CreditLeft}, or the transfer credit runs out, yielding
+%% {some, RemainingFrames}. NB: clause order matters -- an empty frame
+%% list with zero credit still counts as 'all' sent.
+send_frames(_WriterPid, [], CreditLeft) ->
+    {all, CreditLeft};
+send_frames(_WriterPid, Remaining, 0) ->
+    {some, Remaining};
+send_frames(WriterPid, [[Transfer, Content] | Remaining], CreditLeft) ->
+    rabbit_amqp1_0_writer:send_command(WriterPid, Transfer, Content),
+    send_frames(WriterPid, Remaining, CreditLeft - 1).
+
+%% ------
+
+%% Turn a list of reply frames into a gen_server return value, stamping
+%% the frames with the current session flow-control fields. An empty
+%% list means there is nothing to send back.
+reply([], State) ->
+    {noreply, State};
+reply(Frames, State) ->
+    {reply, rabbit_amqp1_0_session:flow_fields(Frames, session(State)), State}.
+
+%% Accessors for the session kept inside the process state.
+session(#state{session = Session}) -> Session.
+state(Session, State) -> State#state{session = Session}.
+
+%% Run Fun with a freshly opened channel on Conn, closing the channel
+%% afterwards whatever happens. The close is best-effort on purpose:
+%% 'catch' swallows errors from a channel that already died.
+with_disposable_channel(Conn, Fun) ->
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    try
+        Fun(Ch)
+    after
+        catch amqp_channel:close(Ch)
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+
+-export([init/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([start_link_args/0]).
+
+-type(start_link_args() ::
+ {rabbit_types:protocol(), rabbit_net:socket(),
+ rabbit_channel:channel_number(), non_neg_integer(), pid(),
+ rabbit_access_control:username(), rabbit_types:vhost(), pid()}).
+
+-spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), pid()}).
+
+-endif.
+
+
+%%----------------------------------------------------------------------------
+%% Start a per-session supervisor with two intrinsic workers: the frame
+%% writer and the session process itself. Returns {ok, SupPid, SessionPid}
+%% so the caller can address the session process directly.
+start_link({rabbit_amqp1_0_framing, Sock, Channel, FrameMax, ReaderPid,
+            Username, VHost, Collector}) ->
+    {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+    %% The writer must exist first: its pid is part of the session's
+    %% start arguments below.
+    {ok, WriterPid} =
+        supervisor2:start_child(
+          SupPid,
+          {writer, {rabbit_amqp1_0_writer, start_link,
+                    [Sock, Channel, FrameMax, rabbit_amqp1_0_framing,
+                     ReaderPid]},
+           intrinsic, ?MAX_WAIT, worker, [rabbit_amqp1_0_writer]}),
+    {ok, ChannelPid} =
+        supervisor2:start_child(
+          SupPid,
+          {channel, {rabbit_amqp1_0_session_process, start_link,
+                     [{Channel, ReaderPid, WriterPid, Username, VHost, FrameMax,
+                       adapter_info(Sock), Collector}]},
+           intrinsic, ?MAX_WAIT, worker, [rabbit_amqp1_0_session_process]}),
+    {ok, SupPid, ChannelPid}.
+
+%%----------------------------------------------------------------------------
+
+%% one_for_all with zero allowed restarts: if either the writer or the
+%% session process dies, the whole pair is taken down.
+init([]) ->
+    {ok, {{one_for_all, 0, 1}, []}}.
+
+%% Describe the socket for management purposes, tagged as AMQP 1.0.
+adapter_info(Sock) ->
+    amqp_connection:socket_adapter_info(Sock, {'AMQP', "1.0"}).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_session/2]).
+
+-export([init/1]).
+
+%% It would be much nicer if rabbit_channel_sup_sup was parameterised
+%% on the module.
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_session/2 :: (pid(), rabbit_amqp1_0_session_sup:start_link_args()) ->
+ {'ok', pid(), pid()}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    supervisor2:start_link(?MODULE, []).
+
+%% Start one session supervisor (rabbit_amqp1_0_session_sup) as a child
+%% of the simple_one_for_one supervisor Pid.
+start_session(Pid, Args) ->
+    supervisor2:start_child(Pid, [Args]).
+
+%%----------------------------------------------------------------------------
+
+%% Sessions are temporary: a crashed session supervisor is not restarted
+%% here (presumably the reader handles session failure at the protocol
+%% level -- TODO confirm against the reader module, not in view).
+init([]) ->
+    {ok, {{simple_one_for_one, 0, 1},
+          [{session_sup, {rabbit_amqp1_0_session_sup, start_link, []},
+            temporary, infinity, supervisor, [rabbit_amqp1_0_session_sup]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_util).
+
+-include("rabbit_amqp1_0.hrl").
+
+-export([protocol_error/3]).
+-export([serial_add/2, serial_compare/2, serial_diff/2]).
+
+-ifdef(use_specs).
+
+-export_type([serial_number/0]).
+-type(serial_number() :: non_neg_integer()).
+-type(serial_compare_result() :: 'equal' | 'less' | 'greater').
+
+-spec(serial_add/2 :: (serial_number(), non_neg_integer()) ->
+ serial_number()).
+-spec(serial_compare/2 :: (serial_number(), serial_number()) ->
+ serial_compare_result()).
+-spec(serial_diff/2 :: (serial_number(), serial_number()) ->
+ integer()).
+
+-endif.
+
+
+%% Abort the calling process with an AMQP 1.0 error record carrying
+%% Condition and a human-readable description built from Msg/Args.
+%% The exit is expected to be caught further up and translated into a
+%% protocol-level error frame.
+protocol_error(Condition, Msg, Args) ->
+    %% io_lib:format/2 returns an iolist; iolist_to_binary/1 accepts it
+    %% directly, so there is no need to flatten to a list first.
+    Description = iolist_to_binary(io_lib:format(Msg, Args)),
+    exit(#'v1_0.error'{
+            condition = Condition,
+            description = {utf8, Description}
+           }).
+
+%% Serial arithmetic for unsigned ints.
+%% http://www.faqs.org/rfcs/rfc1982.html
+%% SERIAL_BITS = 32
+
+%% 2 ^ SERIAL_BITS
+-define(SERIAL_MAX, 16#100000000).
+%% 2 ^ (SERIAL_BITS - 1) - 1
+-define(SERIAL_MAX_ADDEND, 16#7fffffff).
+
+%% RFC 1982 addition: the addend may be at most 2^31 - 1 and the sum
+%% wraps around modulo 2^32. A larger addend is a caller error and
+%% terminates the process.
+serial_add(Serial, Addend) when Addend =< ?SERIAL_MAX_ADDEND ->
+    Sum = Serial + Addend,
+    Sum rem ?SERIAL_MAX;
+serial_add(Serial, Addend) ->
+    exit({out_of_bound_serial_addition, Serial, Addend}).
+
+%% RFC 1982 comparison of two serial numbers: equal / less / greater,
+%% with wrap-around taken into account; ambiguous distances exit.
+%% NB(review): the second 'greater' disjunct tests B - A, which is
+%% always negative (hence < ?SERIAL_MAX_ADDEND) when A > B, so any
+%% A > B not already classified 'less' compares 'greater'. Possibly it
+%% was meant to read A - B -- confirm before changing; kept as-is.
+serial_compare(A, B) ->
+    if A =:= B ->
+            equal;
+       (A < B andalso B - A < ?SERIAL_MAX_ADDEND) orelse
+       (A > B andalso A - B > ?SERIAL_MAX_ADDEND) ->
+            less;
+       (A < B andalso B - A > ?SERIAL_MAX_ADDEND) orelse
+       (A > B andalso B - A < ?SERIAL_MAX_ADDEND) ->
+            greater;
+       true -> exit({indeterminate_serial_comparison, A, B})
+    end.
+
+%% 2 ^ (SERIAL_BITS - 1)
+-define(SERIAL_DIFF_BOUND, 16#80000000).
+
+%% RFC 1982 difference: the signed distance from B to A, accounting for
+%% wrap-around. A distance of exactly 2^31 is ambiguous and exits.
+serial_diff(A, B) ->
+    signed_diff(A - B, A, B).
+
+%% Classify the raw difference; A and B are kept only for the error term.
+signed_diff(Diff, _A, _B) when Diff > ?SERIAL_DIFF_BOUND ->
+    %% B is actually greater than A
+    Diff - ?SERIAL_MAX;
+signed_diff(Diff, _A, _B) when Diff < -?SERIAL_DIFF_BOUND ->
+    Diff + ?SERIAL_MAX;
+signed_diff(Diff, _A, _B) when Diff < ?SERIAL_DIFF_BOUND,
+                               Diff > -?SERIAL_DIFF_BOUND ->
+    Diff;
+signed_diff(_Diff, A, B) ->
+    exit({indeterminate_serial_diff, A, B}).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_writer).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([start/5, start_link/5, start/6, start_link/6]).
+-export([send_command/2, send_command/3,
+ send_command_sync/2, send_command_sync/3,
+ send_command_and_notify/4, send_command_and_notify/5]).
+-export([internal_send_command/4, internal_send_command/6]).
+
+%% internal
+-export([mainloop/1, mainloop1/1]).
+
+-record(wstate, {sock, channel, frame_max, protocol, reader,
+ stats_timer, pending}).
+
+-define(HIBERNATE_AFTER, 5000).
+-define(AMQP_SASL_FRAME_TYPE, 1).
+
+%%---------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start/5 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid())
+ -> rabbit_types:ok(pid())).
+-spec(start_link/5 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid())
+ -> rabbit_types:ok(pid())).
+-spec(start/6 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
+ -> rabbit_types:ok(pid())).
+-spec(start_link/6 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
+ -> rabbit_types:ok(pid())).
+-spec(send_command/2 ::
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(send_command/3 ::
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok').
+-spec(send_command_sync/2 ::
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(send_command_sync/3 ::
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok').
+-spec(send_command_and_notify/4 ::
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
+ -> 'ok').
+-spec(send_command_and_notify/5 ::
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:content())
+ -> 'ok').
+-spec(internal_send_command/4 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:protocol())
+ -> 'ok').
+-spec(internal_send_command/6 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol())
+ -> 'ok').
+
+-endif.
+
+%%---------------------------------------------------------------------------
+
+%% Convenience starters: default to no stats emission for the reader.
+start(Sock, Channel, FrameMax, Protocol, ReaderPid) ->
+    start(Sock, Channel, FrameMax, Protocol, ReaderPid, false).
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid) ->
+    start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, false).
+
+%% Spawn the writer process (unlinked / linked variants) running
+%% mainloop/1 on the freshly built state.
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
+    State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+                          ReaderWantsStats),
+    {ok, proc_lib:spawn(?MODULE, mainloop, [State])}.
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
+    State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+                          ReaderWantsStats),
+    {ok, proc_lib:spawn_link(?MODULE, mainloop, [State])}.
+
+%% Build the initial writer state. The stats timer is only armed when
+%% the reader asked for stats; otherwise a disabled placeholder is used.
+initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
+    State = #wstate{sock      = Sock,
+                    channel   = Channel,
+                    frame_max = FrameMax,
+                    protocol  = Protocol,
+                    reader    = ReaderPid,
+                    pending   = []},
+    case ReaderWantsStats of
+        true  -> rabbit_event:init_stats_timer(State, #wstate.stats_timer);
+        false -> rabbit_event:init_disabled_stats_timer(State, #wstate.stats_timer)
+    end.
+
+%% Writer process entry point. Any exit escaping the loop is reported
+%% to the reader as a channel_exit so it can react; the writer process
+%% itself then terminates normally, returning 'done'.
+mainloop(State) ->
+    try
+        mainloop1(State)
+    catch
+        exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
+                      ReaderPid ! {channel_exit, Channel, Error}
+    end,
+    done.
+
+%% Main receive loop. With nothing pending we can afford to block,
+%% hibernating after ?HIBERNATE_AFTER ms of inactivity to reclaim heap.
+%% With output pending we poll (after 0) and flush to the socket as
+%% soon as the mailbox runs dry.
+mainloop1(State = #wstate{pending = []}) ->
+    receive
+        Message -> ?MODULE:mainloop1(handle_message(Message, State))
+    after ?HIBERNATE_AFTER ->
+            erlang:hibernate(?MODULE, mainloop, [State])
+    end;
+mainloop1(State) ->
+    receive
+        Message -> ?MODULE:mainloop1(handle_message(Message, State))
+    after 0 ->
+            ?MODULE:mainloop1(flush(State))
+    end.
+
+%% Dispatch one mailbox message; each clause returns the new #wstate{}.
+%% Asynchronous sends: buffer the frame(s), flushing when large enough.
+handle_message({send_command, MethodRecord}, State) ->
+    internal_send_command_async(MethodRecord, State);
+handle_message({send_command, MethodRecord, Content}, State) ->
+    internal_send_command_async(MethodRecord, Content, State);
+%% Synchronous sends: flush before replying, so the caller knows the
+%% bytes have been handed to the port.
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
+    State1 = flush(internal_send_command_async(MethodRecord, State)),
+    gen_server:reply(From, ok),
+    State1;
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
+               State) ->
+    State1 = flush(internal_send_command_async(MethodRecord, Content, State)),
+    gen_server:reply(From, ok),
+    State1;
+%% Send, then inform the originating queue via rabbit_amqqueue:notify_sent.
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
+    State1 = internal_send_command_async(MethodRecord, State),
+    rabbit_amqqueue:notify_sent(QPid, ChPid),
+    State1;
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
+               State) ->
+    State1 = internal_send_command_async(MethodRecord, Content, State),
+    rabbit_amqqueue:notify_sent(QPid, ChPid),
+    State1;
+%% A monitored queue process died: update rabbit_amqqueue's bookkeeping.
+handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
+    rabbit_amqqueue:notify_sent_queue_down(QPid),
+    State;
+%% Deferred result of an earlier port_cmd/2: success (re)arms the stats
+%% timer, failure kills the writer.
+handle_message({inet_reply, _, ok}, State) ->
+    rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
+handle_message({inet_reply, _, Status}, _State) ->
+    exit({writer, send_failed, Status});
+handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
+    ReaderPid ! ensure_stats,
+    rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
+handle_message(Message, _State) ->
+    exit({writer, message_not_understood, Message}).
+
+%%---------------------------------------------------------------------------
+
+%% Fire-and-forget API: post the command into the writer's mailbox.
+send_command(W, MethodRecord) ->
+    W ! {send_command, MethodRecord},
+    ok.
+
+send_command(W, MethodRecord, Content) ->
+    W ! {send_command, MethodRecord, Content},
+    ok.
+
+%% Synchronous API: returns only after the writer has flushed.
+send_command_sync(W, MethodRecord) ->
+    call(W, {send_command_sync, MethodRecord}).
+
+send_command_sync(W, MethodRecord, Content) ->
+    call(W, {send_command_sync, MethodRecord, Content}).
+
+%% Send, then have the writer notify queue process Q it was sent
+%% (see the send_command_and_notify clauses of handle_message/2).
+send_command_and_notify(W, Q, ChPid, MethodRecord) ->
+    W ! {send_command_and_notify, Q, ChPid, MethodRecord},
+    ok.
+
+send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
+    W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
+    ok.
+
+%%---------------------------------------------------------------------------
+
+%% gen_server-style synchronous call without the writer being a
+%% gen_server: relies on the '$gen_call' message convention.
+call(Pid, Msg) ->
+    {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
+    Res.
+
+%%---------------------------------------------------------------------------
+
+%% Begin 1-0
+
+%% Encode a single performative into a complete wire frame. The third
+%% argument selects the frame type: plain AMQP frames for
+%% rabbit_amqp1_0_framing, SASL-typed frames for rabbit_amqp1_0_sasl.
+assemble_frame(Channel, Performative, rabbit_amqp1_0_framing) ->
+    ?DEBUG("Channel ~p <-~n~p~n~n",
+           [Channel, rabbit_amqp1_0_framing:pprint(Performative)]),
+    PerfBin = rabbit_amqp1_0_framing:encode_bin(Performative),
+    rabbit_amqp1_0_binary_generator:build_frame(Channel, PerfBin);
+
+assemble_frame(Channel, Performative, rabbit_amqp1_0_sasl) ->
+    ?DEBUG("Channel ~p <-~n~p~n~n",
+           [Channel, rabbit_amqp1_0_framing:pprint(Performative)]),
+    PerfBin = rabbit_amqp1_0_framing:encode_bin(Performative),
+    rabbit_amqp1_0_binary_generator:build_frame(Channel,
+                                                ?AMQP_SASL_FRAME_TYPE, PerfBin).
+
+%% Note: a transfer record can be followed by a number of other
+%% records to make a complete frame but unlike 0-9-1 we may have many
+%% content records. However, that's already been handled for us, we're
+%% just sending a chunk, so from this perspective it's just a binary.
+
+%% Encode a transfer performative plus its (pre-chunked) content into a
+%% single frame. The frame-max argument is unused here, since per the
+%% note above the content is already sized to fit; it is kept (with an
+%% underscore prefix to silence the unused-variable warning) so the
+%% arity and argument order match the 0-9-1-shaped callers.
+assemble_frames(Channel, Performative, Content, _FrameMax,
+                rabbit_amqp1_0_framing) ->
+    ?DEBUG("Channel ~p <-~n~p~n followed by ~p bytes of content~n~n",
+           [Channel, rabbit_amqp1_0_framing:pprint(Performative),
+            iolist_size(Content)]),
+    PerfBin = rabbit_amqp1_0_framing:encode_bin(Performative),
+    rabbit_amqp1_0_binary_generator:build_frame(Channel, [PerfBin, Content]).
+
+%% End 1-0
+
+%% Send raw bytes on the socket, converting a send error into a thrown
+%% {inet_error, Reason}.
+tcp_send(Sock, Data) ->
+    rabbit_misc:throw_on_error(inet_error,
+                               fun () -> rabbit_net:send(Sock, Data) end).
+
+%% Synchronous send of a single method frame straight on the socket,
+%% bypassing the writer process's buffer.
+internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
+    ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).
+
+%% Synchronous send of a method plus content. After the first send
+%% error the fold stops sending and just carries the error through
+%% (foldl cannot abort early); the final ok match asserts success.
+internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
+                      Protocol) ->
+    ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
+                         (_Frame, Other) -> Other
+                     end, ok, assemble_frames(Channel, MethodRecord,
+                                              Content, FrameMax, Protocol)).
+
+%% Queue an encoded method frame for asynchronous sending; flush if the
+%% pending buffer has grown past the MSS threshold.
+internal_send_command_async(MethodRecord,
+                            State = #wstate{channel = Channel,
+                                            protocol = Protocol,
+                                            pending = Pending}) ->
+    Encoded = assemble_frame(Channel, MethodRecord, Protocol),
+    maybe_flush(State#wstate{pending = [Encoded | Pending]}).
+
+%% As above, but for a method plus content payload.
+internal_send_command_async(MethodRecord, Content,
+                            State = #wstate{channel = Channel,
+                                            frame_max = FrameMax,
+                                            protocol = Protocol,
+                                            pending = Pending}) ->
+    Encoded = assemble_frames(Channel, MethodRecord, Content, FrameMax,
+                              Protocol),
+    maybe_flush(State#wstate{pending = [Encoded | Pending]}).
+
+%% This magic number is the tcp-over-ethernet MSS (1460) minus the
+%% minimum size of a AMQP basic.deliver method frame (24) plus basic
+%% content header (22). The idea is that we want to flush just before
+%% exceeding the MSS.
+-define(FLUSH_THRESHOLD, 1414).
+
+%% Flush once the pending buffer is likely to fill a TCP segment;
+%% below the threshold, keep accumulating.
+maybe_flush(State = #wstate{pending = Pending}) ->
+    case iolist_size(Pending) of
+        Size when Size >= ?FLUSH_THRESHOLD -> flush(State);
+        _                                  -> State
+    end.
+
+%% Write all pending frames to the socket in FIFO order (they were
+%% accumulated in reverse) and clear the buffer; a no-op when empty.
+flush(State = #wstate{sock = Sock, pending = Pending}) ->
+    case Pending of
+        [] -> State;
+        _  -> ok = port_cmd(Sock, lists:reverse(Pending)),
+              State#wstate{pending = []}
+    end.
+
+%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
+%% Status} to obtain the result. That is bad when it is called from
+%% the writer since it requires scanning of the writers possibly quite
+%% large message queue.
+%%
+%% So instead we lift the code from prim_inet:send/2, which is what
+%% gen_tcp:send/2 calls, do the first half here and then just process
+%% the result code in handle_message/2 as and when it arrives.
+%%
+%% This means we may end up happily sending data down a closed/broken
+%% socket, but that's ok since a) data in the buffers will be lost in
+%% any case (so qualitatively we are no worse off than if we used
+%% gen_tcp:send/2), and b) we do detect the changed socket status
+%% eventually, i.e. when we get round to handling the result code.
+%%
+%% Also note that the port has bounded buffers and port_command blocks
+%% when these are full. So the fact that we process the result
+%% asynchronously does not impact flow control.
+%% Asynchronous socket write, per the rationale above: issue the port
+%% command now; the {inet_reply, _, Status} result is handled later in
+%% handle_message/2. A port_command failure (e.g. closed port) is
+%% converted into a writer exit immediately.
+port_cmd(Sock, Data) ->
+    true = try rabbit_net:port_command(Sock, Data)
+           catch error:Error -> exit({writer, send_failed, Error})
+           end,
+    ok.
--- /dev/null
+%% Application resource file for the AMQP 1.0 protocol plugin.
+{application, rabbitmq_amqp1_0,
+ [{description, "AMQP 1.0 support for RabbitMQ"},
+  %% Substituted with the real version by the build system.
+  {vsn, "%%VSN%%"},
+  %% Module list is filled in at build time.
+  {modules, []},
+  {registered, []},
+  %% default_user/default_vhost: identity applied to 1.0 connections;
+  %% protocol_strict_mode: presumably toggles tolerance of spec
+  %% deviations -- TODO confirm exact semantics against the reader.
+  {env, [{default_user, "guest"},
+         {default_vhost, <<"/">>},
+         {protocol_strict_mode, false}]},
+  {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
--- /dev/null
+PROTON_VER=0.7
+PROTON_DIR=qpid-proton-$(PROTON_VER)
+PROTON_TARBALL=$(PROTON_DIR).tar.gz
+PROTON_URL=http://www.mirrorservice.org/sites/ftp.apache.org/qpid/proton/$(PROTON_VER)/$(PROTON_TARBALL)
+
+# Both targets are housekeeping, not files; declaring them phony stops
+# a stray file named 'test' or 'clean' from masking them.
+.PHONY: test clean
+
+# Run the JUnit suite via ant once the client jars are in place.
+test: build/lib
+	ant test
+
+# Fetch and build the Proton Java client, then collect its jar and the
+# shared test-support jars into build/lib for the ant classpath.
+build/lib: $(PROTON_TARBALL)
+	mkdir -p build/tmp
+	tar xvz -C build/tmp -f $(PROTON_TARBALL)
+	cd build/tmp/$(PROTON_DIR)/proton-j && mvn package
+	mkdir -p build/lib
+	cp build/tmp/$(PROTON_DIR)/proton-j/target/proton-j-$(PROTON_VER).jar build/lib
+	cp ../lib-java/*.jar build/lib
+
+clean:
+	rm -rf build $(PROTON_TARBALL)
+
+$(PROTON_TARBALL):
+	wget $(PROTON_URL)
--- /dev/null
+<?xml version="1.0"?>
+<!-- Ant build for the Proton-based interoperability tests. Client and
+     test-support jars are expected in build/lib (see the Makefile). -->
+<project name="RabbitMQ AMQP 1.0 tests using Proton client" default="test">
+  <!-- Compile the JUnit sources against everything in build/lib. -->
+  <target name="test-build">
+    <mkdir dir="build/classes"/>
+
+    <javac srcdir="test" destdir="build/classes" debug="true">
+      <classpath>
+        <fileset dir="build/lib">
+          <include name="**/*.jar"/>
+        </fileset>
+      </classpath>
+    </javac>
+  </target>
+
+  <!-- Run the suite in a forked JVM; fail the build afterwards (so the
+       reports are still written) if any test failed. -->
+  <target name="test" depends="test-build">
+    <mkdir dir="build/test-output"/>
+
+    <junit printSummary="withOutAndErr" fork="yes" failureproperty="test.failed">
+      <classpath>
+        <fileset dir="build/lib">
+          <include name="**/*.jar"/>
+        </fileset>
+        <pathelement location="build/classes"/>
+      </classpath>
+      <formatter type="plain"/>
+      <test todir="build/test-output" name="com.rabbitmq.amqp1_0.tests.proton.ProtonTests"/>
+    </junit>
+    <fail message="Tests failed" if="test.failed" />
+  </target>
+</project>
--- /dev/null
+package com.rabbitmq.amqp1_0.tests.proton;
+
+import junit.framework.TestCase;
+import org.apache.qpid.proton.amqp.Binary;
+import org.apache.qpid.proton.amqp.messaging.Data;
+import org.apache.qpid.proton.message.Message;
+import org.apache.qpid.proton.message.impl.MessageImpl;
+import org.apache.qpid.proton.messenger.Messenger;
+import org.apache.qpid.proton.messenger.impl.MessengerImpl;
+
+/**
+ * Round-trip test against a local broker using the Proton Messenger
+ * API. This uses classes deprecated upstream, on purpose: they match
+ * the examples shipped with Proton.
+ */
+public class ProtonTests extends TestCase {
+    public static final String ADDRESS = "amqp://localhost/amqp-1.0-test";
+
+    public void testRoundTrip() throws Exception {
+        Messenger messenger = new MessengerImpl();
+        messenger.start();
+
+        // Publish a message carrying a subject, content type and binary body.
+        Message outbound = new MessageImpl();
+        outbound.setAddress(ADDRESS);
+        outbound.setSubject("hello");
+        outbound.setContentType("application/octet-stream");
+        outbound.setBody(new Data(new Binary("hello world".getBytes())));
+        messenger.put(outbound);
+        messenger.send();
+
+        // Consume it back and check the interesting fields survived.
+        messenger.subscribe(ADDRESS);
+        messenger.recv();
+        Message inbound = messenger.get();
+        assertEquals(outbound.getSubject(), inbound.getSubject());
+        assertEquals(outbound.getContentType(), inbound.getContentType());
+        assertEquals(outbound.getBody().toString(), inbound.getBody().toString());
+        messenger.stop();
+    }
+}
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_test).
+
+-include("rabbit_amqp1_0.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(rabbit_amqp1_0_util, [serial_add/2, serial_diff/2, serial_compare/2]).
+
+%% Pin down the RFC 1982 wrap-around behaviour of rabbit_amqp1_0_util.
+serial_arithmetic_test() ->
+    ?assertEqual(1, serial_add(0, 1)),
+    ?assertEqual(16#7fffffff, serial_add(0, 16#7fffffff)),
+    %% Addition wraps at 2^32.
+    ?assertEqual(0, serial_add(16#ffffffff, 1)),
+    %% Cannot add more than 2 ^ 31 - 1
+    ?assertExit({out_of_bound_serial_addition, _, _},
+                serial_add(200, 16#80000000)),
+    ?assertEqual(1, serial_diff(1, 0)),
+    %% Differences wrap too, in both directions.
+    ?assertEqual(2, serial_diff(1, 16#ffffffff)),
+    ?assertEqual(-2, serial_diff(16#ffffffff, 1)),
+    %% A distance of exactly 2^31 is ambiguous by definition.
+    ?assertExit({indeterminate_serial_diff, _, _},
+                serial_diff(0, 16#80000000)),
+    ?assertExit({indeterminate_serial_diff, _, _},
+                serial_diff(16#ffffffff, 16#7fffffff)),
+    passed.
--- /dev/null
+CLIENT_DIR=swiftmq_9_2_5_client
+CLIENT_PKG=$(CLIENT_DIR).zip
+
+# Both targets are housekeeping, not files; declaring them phony stops
+# a stray file named 'test' or 'clean' from masking them.
+.PHONY: test clean
+
+# Run the JUnit suite via ant once the client jars are in place.
+test: build/lib
+	ant test
+
+# Unpack the (manually obtained) SwiftMQ client and collect its jars,
+# plus the shared test-support jars, into build/lib for ant.
+build/lib: $(CLIENT_PKG)
+	mkdir -p build/tmp
+	unzip -d build/tmp $(CLIENT_PKG)
+	mkdir -p build/lib
+	mv build/tmp/$(CLIENT_DIR)/jars/*.jar build/lib
+	rm -rf build/tmp
+	cp ../lib-java/*.jar build/lib
+
+# The client cannot be redistributed; explain how to obtain it and fail.
+$(CLIENT_PKG):
+	@echo
+	@echo You need $(CLIENT_PKG) to run these tests. Unfortunately we can\'t
+	@echo redistribute it. Obtain it from the SwiftMQ website and place it
+	@echo in $(shell pwd).
+	@echo
+	@false
+
+clean:
+	rm -rf build
--- /dev/null
+<?xml version="1.0"?>
+<!-- Ant build for the SwiftMQ-based interoperability tests. Client and
+     test-support jars are expected in build/lib (see the Makefile). -->
+<project name="RabbitMQ AMQP 1.0 tests using SwiftMQ client" default="test">
+  <!-- Compile the JUnit sources against everything in build/lib. -->
+  <target name="test-build">
+    <mkdir dir="build/classes"/>
+
+    <javac srcdir="test" destdir="build/classes" debug="true">
+      <classpath>
+        <fileset dir="build/lib">
+          <include name="**/*.jar"/>
+        </fileset>
+      </classpath>
+    </javac>
+  </target>
+
+  <!-- Run the suite in a forked JVM; fail the build afterwards (so the
+       reports are still written) if any test failed. -->
+  <target name="test" depends="test-build">
+    <mkdir dir="build/test-output"/>
+
+    <junit printSummary="withOutAndErr" fork="yes" failureproperty="test.failed">
+      <classpath>
+        <fileset dir="build/lib">
+          <include name="**/*.jar"/>
+        </fileset>
+        <pathelement location="build/classes"/>
+      </classpath>
+      <formatter type="plain"/>
+      <test todir="build/test-output" name="com.rabbitmq.amqp1_0.tests.swiftmq.SwiftMQTests"/>
+    </junit>
+    <fail message="Tests failed" if="test.failed" />
+  </target>
+</project>
--- /dev/null
+#!/bin/sh -e
+# Run the test suite from the directory containing this script, no
+# matter where it is invoked from. Quoting dirname and $0 keeps paths
+# containing spaces intact.
+make -C "$(dirname "$0")" test
--- /dev/null
+package com.rabbitmq.amqp1_0.tests.swiftmq;
+
+import com.swiftmq.amqp.AMQPContext;
+import com.swiftmq.amqp.v100.client.*;
+import com.swiftmq.amqp.v100.generated.messaging.message_format.*;
+import com.swiftmq.amqp.v100.generated.messaging.message_format.Properties;
+import com.swiftmq.amqp.v100.messaging.AMQPMessage;
+import com.swiftmq.amqp.v100.types.*;
+import junit.framework.TestCase;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.*;
+
+public class SwiftMQTests extends TestCase {
+ private static final String host = "localhost";
+ private static final int port = 5672;
+ private static final int INBOUND_WINDOW = 100;
+ private static final int OUTBOUND_WINDOW = 100;
+ private static final int CONSUMER_LINK_CREDIT = 200;
+ private static final String QUEUE = "/queue/test";
+
+    /** Build a fresh test message whose only section is the canonical payload. */
+    private AMQPMessage msg() {
+        AMQPMessage message = new AMQPMessage();
+        message.addData(data());
+        return message;
+    }
+
+    /** The canonical payload used by the round-trip assertions. */
+    private Data data() {
+        return new Data("Hello World".getBytes());
+    }
+
+    /** Publish one message at-least-once and consume it back unchanged. */
+    public void testRoundTrip() throws Exception {
+        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+        Connection conn = new Connection(ctx, host, port, false);
+        conn.connect();
+
+        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
+        p.send(msg());
+        p.close(); // Settlement happens here
+        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+        AMQPMessage m = c.receive();
+        m.accept();
+        // Exactly the one data section we sent, with the same bytes.
+        assertEquals(1, m.getData().size());
+        assertEquals(data(), m.getData().get(0));
+        conn.close();
+    }
+
+    /** Exercise payload sizes at, near and above the negotiated frame size. */
+    public void testMessageFragmentation()
+            throws UnsupportedProtocolVersionException, AMQPException, AuthenticationException, IOException {
+        fragmentation(512L, 512);
+        fragmentation(512L, 600);
+        fragmentation(512L, 1024);
+        fragmentation(1024L, 1024);
+    }
+
+    /**
+     * Send a PayloadSize-byte message over a connection limited to
+     * FrameSize-byte frames and check the payload arrives intact, i.e.
+     * transfer fragmentation/reassembly works.
+     */
+    public void fragmentation(long FrameSize, int PayloadSize)
+            throws UnsupportedProtocolVersionException, AMQPException, AuthenticationException, IOException {
+        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+        Connection conn = new Connection(ctx, host, port, false);
+        conn.setMaxFrameSize(FrameSize);
+        conn.connect();
+        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+
+        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
+        AMQPMessage msg = new AMQPMessage();
+        msg.addData(new Data(new byte [PayloadSize]));
+        p.send(msg);
+        p.close();
+
+        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+        AMQPMessage m = c.receive();
+        m.accept();
+        c.close();
+        assertEquals(PayloadSize, m.getData().get(0).getValue().length);
+        conn.close();
+    }
+
+    /** Message-annotations section must survive a broker round trip. */
+    public void testMessageAnnotations() throws Exception {
+        decorationTest(new DecorationProtocol() {
+            @Override
+            public void decorateMessage(AMQPMessage msg, Map<AMQPString, AMQPType> m) throws IOException {
+                msg.setMessageAnnotations(new MessageAnnotations(m));
+            }
+            @Override
+            public Map<AMQPType, AMQPType> getDecoration(AMQPMessage msg) throws IOException {
+                return msg.getMessageAnnotations().getValue();
+            }
+        }, annotationMap());
+    }
+
+    /** Footer section must survive a broker round trip. */
+    public void testFooter() throws Exception {
+        decorationTest(new DecorationProtocol() {
+            @Override
+            public void decorateMessage(AMQPMessage msg, Map<AMQPString, AMQPType> m) throws IOException {
+                msg.setFooter(new Footer(m));
+            }
+            @Override
+            public Map<AMQPType, AMQPType> getDecoration(AMQPMessage msg) throws IOException {
+                return msg.getFooter().getValue();
+            }
+        }, annotationMap());
+    }
+
+    /**
+     * Round-trip an amqp-value body built from a sequence covering a wide
+     * spread of 1.0 types (including boundary values such as NaN, MIN/MAX
+     * integers and a non-BMP-ish string), comparing string renderings.
+     */
+    public void testDataTypes() throws Exception {
+        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+        Connection conn = new Connection(ctx, host, port, false);
+        conn.connect();
+
+        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
+        AMQPMessage msg = new AMQPMessage();
+
+        List<AMQPType> al = new ArrayList<AMQPType>();
+        al.add(new AMQPBoolean(true));
+        al.add(new AMQPByte(Byte.MAX_VALUE));
+        al.add(new AMQPChar(Character.CURRENCY_SYMBOL));
+        al.add(new AMQPDecimal64(BigDecimal.TEN));
+        al.add(new AMQPDouble(Double.NaN));
+        al.add(new AMQPInt(Integer.MIN_VALUE));
+        al.add(new AMQPNull());
+        al.add(new AMQPString("\uFFF9"));
+        al.add(new AMQPSymbol(new String(new char[256])));
+        al.add(new AMQPTimestamp(Long.MAX_VALUE));
+        al.add(new AMQPUuid(System.currentTimeMillis(), Long.MIN_VALUE));
+        al.add(new AMQPUnsignedShort(0));
+        al.add(new AMQPArray(AMQPBoolean.FALSE.getCode(), new AMQPBoolean[]{}));
+        al.add(new AmqpSequence(new ArrayList<AMQPType>()));
+        AmqpSequence seq = new AmqpSequence(al);
+        AmqpValue val = new AmqpValue(seq);
+        msg.setAmqpValue(val);
+
+        p.send(msg);
+        p.close();
+        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+        AMQPMessage recvMsg = c.receive();
+        recvMsg.accept();
+
+        assertEquals(val.getValue().getValueString(), recvMsg.getAmqpValue().getValue().getValueString());
+        conn.close();
+    }
+
+ // At-most-once QoS: a received message arrives already settled, so after
+ // dropping the session without accepting it the broker must NOT redeliver.
+ public void testAtMostOnce() throws Exception {
+ AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+ Connection conn = new Connection(ctx, host, port, false);
+ conn.connect();
+
+ Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ Producer p = s.createProducer(QUEUE, QoS.AT_MOST_ONCE);
+ p.send(msg());
+ p.close();
+
+ Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_MOST_ONCE, false, null);
+ AMQPMessage m = c.receive();
+ assertTrue(m.isSettled());
+
+ // Re-open a fresh session/consumer: the message must be gone.
+ s.close();
+ s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_MOST_ONCE, false, null);
+ assertNull(get(c));
+ conn.close();
+ }
+
+ // Rejecting a delivery must discard the message: a subsequent receive
+ // attempt on the same queue returns nothing.
+ public void testReject() throws Exception {
+ AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+ Connection conn = new Connection(ctx, host, port, false);
+ conn.connect();
+
+ Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
+ p.send(msg());
+ p.close();
+
+ Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+ AMQPMessage m = c.receive();
+ m.reject();
+ assertNull(get(c));
+ conn.close();
+ }
+
+ // At-least-once QoS: an unsettled, unaccepted message must be redelivered
+ // on a new session, with identical payload but first-acquirer cleared on
+ // the second delivery.
+ public void testRedelivery() throws Exception {
+ AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+ Connection conn = new Connection(ctx, host, port, false);
+ conn.connect();
+
+ Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ Producer p = s.createProducer(QUEUE, QoS.AT_MOST_ONCE);
+ p.send(msg());
+ p.close();
+
+ Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+ AMQPMessage m1 = c.receive();
+ assertTrue(m1.getHeader().getFirstAcquirer().getValue());
+ assertFalse(m1.isSettled());
+
+ // Drop the session without accepting; the broker should redeliver.
+ s.close();
+ s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+ AMQPMessage m2 = c.receive();
+ m2.accept();
+
+ assertTrue(compareMessageData(m1, m2));
+ assertFalse(m2.getHeader().getFirstAcquirer().getValue());
+ assertNull(get(c));
+ conn.close();
+ }
+
+ // Exercises the address forms understood by the broker's AMQP 1.0
+ // adapter: bare queue names, /exchange/<x>[/<key>], /topic/<pattern> and
+ // /amq/queue/<q>. Each route() call asserts whether a message sent to
+ // the producer target reaches the consumer source.
+ public void testRouting() throws Exception {
+ // queue <-> queue combinations
+ route("test", QUEUE, "", true);
+ route(QUEUE, "test", "", true);
+ route("test", "test", "", true);
+
+ // missing exchanges must fail, as producer target or consumer source
+ try {
+ route(QUEUE, "/exchange/missing", "", false);
+ fail("Missing exchange should fail");
+ } catch (Exception e) { }
+
+ try {
+ route("/exchange/missing/", QUEUE, "", false);
+ fail("Missing exchange should fail");
+ } catch (Exception e) { }
+
+ // topic wildcard bindings, via both /topic/ and /exchange/amq.topic
+ route("/topic/#.c.*", "/topic/a.b.c.d", "", true);
+ route("/topic/#.c.*", "/exchange/amq.topic", "a.b.c.d", true);
+ route("/exchange/amq.topic/#.y.*", "/topic/w.x.y.z", "", true);
+ route("/exchange/amq.topic/#.y.*", "/exchange/amq.topic", "w.x.y.z", true);
+
+ // fanout and direct exchanges with matching keys
+ route("/exchange/amq.fanout/", "/exchange/amq.fanout", "", true);
+ route("/exchange/amq.direct/", "/exchange/amq.direct", "", true);
+ route("/exchange/amq.direct/a", "/exchange/amq.direct", "a", true);
+
+ route("/amq/queue/test", QUEUE, "", true);
+ route(QUEUE, "/amq/queue/test", "", true);
+ route("/amq/queue/test", "/amq/queue/test", "", true);
+
+ // negative cases: key mismatch, and exchanges a plain consumer source
+ // cannot bind to meaningfully
+ route("/exchange/amq.direct/b", "/exchange/amq.direct", "a", false);
+ route(QUEUE, "/exchange/amq.fanout", "", false);
+ route(QUEUE, "/exchange/amq.headers", "", false);
+ emptyQueue(QUEUE);
+ }
+
+ // Drains queue q by consuming at-most-once until get() times out,
+ // leaving the queue empty for subsequent tests.
+ private void emptyQueue(String q) throws Exception {
+ AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+ Connection conn = new Connection(ctx, host, port, false);
+ conn.connect();
+ Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ Consumer c = s.createConsumer(q, CONSUMER_LINK_CREDIT, QoS.AT_MOST_ONCE, false, null);
+ AMQPMessage m;
+ while ((m = get(c)) != null);
+ conn.close();
+ }
+
+ // Whatever Consumer.receiveNoWait() does, it does not involve the drain
+ // flag, so it's clearly more a case of "have any messages arrived?" rather
+ // than "has the queue got any messages?" Therefore we have an icky timeout
+ // to give the server time to deliver messages. Really we want a way to use
+ // drain...
+ // Returns the next message within 100ms, or null if none arrives.
+ private AMQPMessage get(Consumer c) {
+ return c.receive(100);
+ }
+
+ // Sends one uniquely-tagged message to producerTarget (with routingKey as
+ // the message subject) and asserts that it does (succeed=true) or does
+ // not (succeed=false) arrive at consumerSource. A random-double sentinel
+ // distinguishes this message from any leftovers in the queue.
+ private void route(String consumerSource, String producerTarget, String routingKey, boolean succeed) throws Exception {
+ AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+ Connection conn = new Connection(ctx, host, port, false);
+ conn.connect();
+ Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+
+ Consumer c = s.createConsumer(consumerSource, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+ Producer p = s.createProducer(producerTarget, QoS.AT_LEAST_ONCE);
+ AMQPMessage msg = msg();
+ AmqpValue sentinel = new AmqpValue(new AMQPDouble(Math.random()));
+ msg.setAmqpValue(sentinel);
+ Properties props = new Properties();
+ // The message subject carries the AMQP 0-9-1 routing key.
+ props.setSubject(new AMQPString(routingKey));
+ msg.setProperties(props);
+ p.send(msg);
+
+ if (succeed) {
+ AMQPMessage m = c.receive();
+ assertNotNull(m);
+ assertEquals(sentinel.getValue().getValueString(), m.getAmqpValue().getValue().getValueString());
+ m.accept();
+ } else {
+ assertNull(get(c));
+ }
+ c.close();
+ p.close();
+ conn.close();
+ }
+
+ // TODO: generalise to a comparison of all immutable parts of messages
+ // Compares only the first data section of each message, byte for byte.
+ private boolean compareMessageData(AMQPMessage m1, AMQPMessage m2) throws IOException {
+ ByteArrayOutputStream b1 = new ByteArrayOutputStream();
+ ByteArrayOutputStream b2 = new ByteArrayOutputStream();
+
+ m1.getData().get(0).writeContent(new DataOutputStream(b1));
+ m2.getData().get(0).writeContent(new DataOutputStream(b2));
+ return Arrays.equals(b1.toByteArray(), b2.toByteArray());
+ }
+
+ // Shared round-trip harness for the decoration tests: applies the map to
+ // an outgoing message via d, sends it, receives it back, and checks the
+ // decoration read via d matches the original map.
+ private void decorationTest(DecorationProtocol d, Map<AMQPString, AMQPType> map) throws Exception {
+ AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+ Connection conn = new Connection(ctx, host, port, false);
+ conn.connect();
+ Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+ Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
+ AMQPMessage msg = msg();
+
+ d.decorateMessage(msg, map);
+ p.send(msg);
+ p.close();
+ Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+ AMQPMessage recvMsg = c.receive();
+ recvMsg.accept();
+
+ compareMaps(map, d.getDecoration(recvMsg));
+ conn.close();
+ }
+
+ // Asserts set-equality of the two maps' entry sets (mutual containsAll).
+ // NOTE(review): raw Set types are used because the key static types
+ // differ (AMQPString vs AMQPType); entry equality is delegated to the
+ // AMQP type implementations — confirm they define equals/hashCode.
+ private void compareMaps(Map<AMQPString, AMQPType> m1, Map<AMQPType, AMQPType> m2){
+ Set e1 = m1.entrySet();
+ Set e2 = m2.entrySet();
+ assertTrue(e1.containsAll(e2));
+ assertTrue(e2.containsAll(e1));
+ }
+
+ // Builds the two-entry string map used by all decoration tests.
+ private Map<AMQPString, AMQPType> annotationMap() throws IOException {
+ Map<AMQPString, AMQPType> annotations = new HashMap<AMQPString, AMQPType>();
+ // the spec allows keys to be symbol or ulong only, but the library only allows string
+ annotations.put(new AMQPString("key1"), new AMQPString("value1"));
+ annotations.put(new AMQPString("key2"), new AMQPString("value2"));
+ return annotations;
+ }
+
+ // Strategy for writing and reading back one "decoration" section of an
+ // AMQP message (message annotations, footer, ...), used by decorationTest.
+ private interface DecorationProtocol {
+ // Attach map m to msg as this protocol's section.
+ void decorateMessage(AMQPMessage msg, Map<AMQPString, AMQPType> m) throws IOException;
+ // Read this protocol's section back from a received message.
+ // (Parameter renamed from "_": underscore is a reserved identifier
+ // from Java 9 onwards and no longer compiles as a parameter name.)
+ Map<AMQPType, AMQPType> getDecoration(AMQPMessage msg) throws IOException;
+ }
+
+}
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Requirements
+============
+
+You can build and install it like any other plugin (see
+http://www.rabbitmq.com/plugin-development.html).
+
+Documentation
+=============
+
+See http://www.rabbitmq.com/ldap.html
+
+Limitations
+===========
+
+Currently this plugin is rather chatty with LDAP connections when
+doing authorisation over LDAP - every time RabbitMQ needs to do an
+authorisation query it starts a new LDAP connection. However, RabbitMQ
+does have a per-channel authorisation cache, so this is not too awful.
+
+Additional types of authorisation queries may need to be added in future.
--- /dev/null
+See http://www.rabbitmq.com/ldap.html
--- /dev/null
+The tests *require* a locally installed LDAP server with some
+predefined objects inside. If there's no LDAP server running on port
+389, they will be skipped.
+
+On Debian / Ubuntu you can just:
+
+$ ./example/setup.sh
+$ make test
+
+ - but be aware that this will wipe out your local OpenLDAP installation.
+
+If you are using any other distro, poke around in example/ — you can
+probably adapt it to work.
--- /dev/null
+%% -*- erlang -*-
+[{rabbit, [{auth_backends, [rabbit_auth_backend_ldap]},
+ {default_vhost, <<"test">>}]},
+ {rabbitmq_auth_backend_ldap,
+ [ {servers, ["localhost"]},
+ {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
+ {other_bind, anon},
+ {use_ssl, false},
+ {port, 389},
+ {log, true},
+ {tag_queries, [{administrator, {constant, false}}]},
+ {vhost_access_query, {exists, "ou=${vhost},ou=vhosts,dc=example,dc=com"}},
+ {resource_access_query,
+ {for, [{resource, exchange,
+ {for, [{permission, configure,
+ {in_group, "cn=wheel,ou=groups,dc=example,dc=com"}
+ },
+ {permission, write, {constant, true}},
+ {permission, read,
+ {match, {string, "${name}"},
+ {string, "^xch-${username}-.*"}}
+ }
+ ]}},
+ {resource, queue,
+ {for, [{permission, configure,
+ {match, {attribute, "${user_dn}", "description"},
+ {string, "can-declare-queues"}}
+ },
+ {permission, write, {constant, true}},
+ {permission, read,
+ {'or',
+ [{'and',
+ [{equals, "${name}", "test1"},
+ {equals, "${username}", "Simon MacMullen"}]},
+ {'and',
+ [{equals, "${name}", "test2"},
+ {'not', {equals, "${username}", "Mike Bridgen"}}]}
+ ]}}
+ ]}}
+ ]}}
+ ]}
+].
--- /dev/null
+This is a very simple example, designed to be set up with the modern
+Debian / Ubuntu packaging of OpenLDAP. Running setup.sh after "apt-get
+install slapd" will wipe out any existing LDAP database and get you:
+
+* A domain
+* An admin user
+* A couple of normal users
+* A group containing the users
+* An OU representing a vhost
+
+These correspond to the examples mentioned in the documentation.
--- /dev/null
+# Load modules for database type
+dn: cn=module,cn=config
+objectclass: olcModuleList
+cn: module
+olcModuleLoad: back_bdb.la
+
+# Create directory database
+dn: olcDatabase=bdb,cn=config
+objectClass: olcDatabaseConfig
+objectClass: olcBdbConfig
+olcDatabase: bdb
+# Domain name (e.g. example.com)
+olcSuffix: dc=example,dc=com
+# Location on system where database is stored
+olcDbDirectory: /var/lib/ldap
+# Manager of the database
+olcRootDN: cn=admin,dc=example,dc=com
+olcRootPW: admin
+olcAccess: to attrs=userPassword
+ by self write
+ by anonymous auth
+ by dn.base="cn=admin,dc=example,dc=com" write
+ by * none
+olcAccess: to *
+ by self write
+ by dn.base="cn=admin,dc=example,dc=com" write
+ by * read
--- /dev/null
+dn: ou=groups,dc=example,dc=com
+objectclass:organizationalunit
+ou: groups
+
+dn: cn=wheel,ou=groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: wheel
+member: cn=Simon MacMullen,ou=people,dc=example,dc=com
--- /dev/null
+dn: dc=example,dc=com
+objectClass: top
+objectClass: dcObject
+objectclass: organization
+o: example.com
+dc: example
+description: Example
+
+dn: ou=people,dc=example,dc=com
+objectClass: organizationalUnit
+ou: people
+
+dn: cn=Simon MacMullen,ou=people,dc=example,dc=com
+objectClass: person
+cn: Simon MacMullen
+sn: MacMullen
+userPassword: password
+description: can-declare-queues
+
+dn: cn=Mike Bridgen,ou=people,dc=example,dc=com
+objectClass: person
+cn: Mike Bridgen
+sn: Bridgen
+userPassword: password
--- /dev/null
+dn: ou=vhosts,dc=example,dc=com
+objectClass: organizationalUnit
+ou: vhosts
+
+dn: ou=test,ou=vhosts,dc=example,dc=com
+objectClass: top
+objectClass: organizationalUnit
+ou: test
--- /dev/null
+#!/bin/sh -e
+
+# Based on instructions found at
+# http://ubuntuforums.org/showthread.php?p=8161118#post8161118
+# - yes that does seem to be the most authoritative place.
+
+# Reinstall slapd from scratch: purge removes the config and we wipe the
+# data directory explicitly, so the LDIFs below load into an empty DIT.
+sudo apt-get --yes purge slapd
+sudo rm -rf /var/lib/ldap
+sudo apt-get --yes install slapd
+# Give the freshly installed slapd a moment to start listening.
+sleep 1
+
+# Quote $0 and the expansions below so the script also works when run
+# from a path containing spaces.
+DIR="$(dirname "$0")"
+
+# cn=config changes need the EXTERNAL SASL mechanism over the local
+# socket; the entry LDIFs bind as the admin user defined in global.ldif.
+sudo ldapadd -Y EXTERNAL -H ldapi:/// -f "${DIR}"/global.ldif
+ldapadd -x -D cn=admin,dc=example,dc=com -w admin -f "${DIR}"/people.ldif
+ldapadd -x -D cn=admin,dc=example,dc=com -w admin -f "${DIR}"/groups.ldif
+ldapadd -x -D cn=admin,dc=example,dc=com -w admin -f "${DIR}"/rabbit.ldif
--- /dev/null
+RELEASABLE:=true
+DEPS:=rabbitmq-server rabbitmq-erlang-client eldap-wrapper
+
+ifeq ($(shell nc -z localhost 389 && echo true),true)
+WITH_BROKER_TEST_COMMANDS:=eunit:test(rabbit_auth_backend_ldap_test,[verbose])
+WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/etc/rabbit-test
+endif
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap).
+
+%% Connect to an LDAP server for authentication and authorisation
+
+-include_lib("eldap/include/eldap.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_auth_backend).
+
+-export([description/0]).
+-export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
+
+-define(L(F, A), log("LDAP " ++ F, A)).
+-define(L1(F, A), log(" LDAP " ++ F, A)).
+-define(L2(F, A), log(" LDAP " ++ F, A)).
+
+-import(rabbit_misc, [pget/2]).
+
+-record(impl, { user_dn, password }).
+
+%%--------------------------------------------------------------------
+
+%% rabbit_auth_backend callback: human-readable backend metadata.
+description() ->
+    [{name, <<"LDAP">>},
+     {description, <<"LDAP authentication / authorisation">>}].
+
+%%--------------------------------------------------------------------
+
+%% rabbit_auth_backend callback. Three cases:
+%%  - no password (e.g. SASL EXTERNAL): bind with other_bind creds only;
+%%  - empty password: refused outright (an empty password would otherwise
+%%    perform an LDAP unauthenticated bind, which always "succeeds");
+%%  - password given: bind as the user (optionally resolving the DN first
+%%    when dn_lookup_when() is prebind).
+check_user_login(Username, []) ->
+    %% Without password, e.g. EXTERNAL
+    ?L("CHECK: passwordless login for ~s", [Username]),
+    R = with_ldap(creds(none),
+                  fun(LDAP) -> do_login(Username, unknown, none, LDAP) end),
+    ?L("DECISION: passwordless login for ~s: ~p",
+       [Username, log_result(R)]),
+    R;
+
+check_user_login(Username, [{password, <<>>}]) ->
+    %% Password "" is special in LDAP, see
+    %% https://tools.ietf.org/html/rfc4513#section-5.1.2
+    ?L("CHECK: unauthenticated login for ~s", [Username]),
+    ?L("DECISION: unauthenticated login for ~s: denied", [Username]),
+    {refused, "user '~s' - unauthenticated bind not allowed", [Username]};
+
+check_user_login(User, [{password, PW}]) ->
+    ?L("CHECK: login for ~s", [User]),
+    R = case dn_lookup_when() of
+            prebind -> UserDN = username_to_dn_prebind(User),
+                       with_ldap({ok, {UserDN, PW}},
+                                 fun(L) -> do_login(User, UserDN, PW, L) end);
+            _       -> with_ldap({ok, {fill_user_dn_pattern(User), PW}},
+                                 fun(L) -> do_login(User, unknown, PW, L) end)
+        end,
+    ?L("DECISION: login for ~s: ~p", [User, log_result(R)]),
+    R;
+
+%% Any other auth-props shape is a programming error upstream.
+check_user_login(Username, AuthProps) ->
+    exit({unknown_auth_props, Username, AuthProps}).
+
+%% rabbit_auth_backend callback: evaluate the configured
+%% vhost_access_query with username/user_dn/vhost available for
+%% ${...} template substitution.
+check_vhost_access(User = #user{username = Username,
+                                impl = #impl{user_dn = UserDN}}, VHost) ->
+    Args = [{username, Username},
+            {user_dn,  UserDN},
+            {vhost,    VHost}],
+    ?L("CHECK: ~s for ~s", [log_vhost(Args), log_user(User)]),
+    R = evaluate_ldap(env(vhost_access_query), Args, User),
+    ?L("DECISION: ~s for ~s: ~p",
+       [log_vhost(Args), log_user(User), log_result(R)]),
+    R.
+
+%% rabbit_auth_backend callback: evaluate the configured
+%% resource_access_query; additionally exposes resource kind, name and
+%% the requested permission to the query templates.
+check_resource_access(User = #user{username = Username,
+                                   impl = #impl{user_dn = UserDN}},
+                      #resource{virtual_host = VHost, kind = Type, name = Name},
+                      Permission) ->
+    Args = [{username,   Username},
+            {user_dn,    UserDN},
+            {vhost,      VHost},
+            {resource,   Type},
+            {name,       Name},
+            {permission, Permission}],
+    ?L("CHECK: ~s for ~s", [log_resource(Args), log_user(User)]),
+    R = evaluate_ldap(env(resource_access_query), Args, User),
+    ?L("DECISION: ~s for ~s: ~p",
+       [log_resource(Args), log_user(User), log_result(R)]),
+    R.
+
+%%--------------------------------------------------------------------
+
+%% Interpreter for the query mini-language used by the *_query config
+%% options. Each clause of evaluate0/4 handles one query constructor;
+%% queries return a boolean, a string, or {error, Reason}.
+evaluate(Query, Args, User, LDAP) ->
+    ?L1("evaluating query: ~p", [Query]),
+    evaluate0(Query, Args, User, LDAP).
+
+%% {constant, Bool}: fixed result, no LDAP traffic.
+evaluate0({constant, Bool}, _Args, _User, _LDAP) ->
+    ?L1("evaluated constant: ~p", [Bool]),
+    Bool;
+
+%% {for, [{Type, Value, SubQuery} | ...]}: select the subquery whose
+%% Value matches Args' entry for Type (e.g. dispatch on resource kind
+%% or permission). Falls through the list until a Value matches.
+evaluate0({for, [{Type, Value, SubQuery}|Rest]}, Args, User, LDAP) ->
+    case pget(Type, Args) of
+        undefined -> {error, {args_do_not_contain, Type, Args}};
+        Value     -> ?L1("selecting subquery ~s = ~s", [Type, Value]),
+                     evaluate(SubQuery, Args, User, LDAP);
+        _         -> evaluate0({for, Rest}, Args, User, LDAP)
+    end;
+
+%% NOTE(review): one-element tuple; a bare atom for_query_incomplete
+%% would be more conventional as an error reason.
+evaluate0({for, []}, _Args, _User, _LDAP) ->
+    {error, {for_query_incomplete}};
+
+%% {exists, DNPattern}: true iff the filled DN names an existing object.
+evaluate0({exists, DNPattern}, Args, _User, LDAP) ->
+    %% eldap forces us to have a filter. objectClass should always be there.
+    Filter = eldap:present("objectClass"),
+    DN = fill(DNPattern, Args),
+    R = object_exists(DN, Filter, LDAP),
+    ?L1("evaluated exists for \"~s\": ~p", [DN, R]),
+    R;
+
+%% {in_group, DNPattern}: membership test using the default "member"
+%% attribute; delegates to the 3-tuple form.
+evaluate0({in_group, DNPattern}, Args, User, LDAP) ->
+    evaluate({in_group, DNPattern, "member"}, Args, User, LDAP);
+
+%% {in_group, DNPattern, Attr}: true iff the group object at the filled
+%% DN has an Attr value equal to the logged-in user's DN.
+evaluate0({in_group, DNPattern, Desc}, Args,
+          #user{impl = #impl{user_dn = UserDN}}, LDAP) ->
+    Filter = eldap:equalityMatch(Desc, UserDN),
+    DN = fill(DNPattern, Args),
+    R = object_exists(DN, Filter, LDAP),
+    ?L1("evaluated in_group for \"~s\": ~p", [DN, R]),
+    R;
+
+%% NOTE(review): if SubQuery evaluates to {error, _}, 'not R' raises
+%% badarg rather than propagating the error — confirm intended.
+evaluate0({'not', SubQuery}, Args, User, LDAP) ->
+    R = evaluate(SubQuery, Args, User, LDAP),
+    ?L1("negated result to ~s", [R]),
+    not R;
+
+%% {'and', Qs}: short-circuit conjunction; later queries are skipped
+%% once any result is false.
+evaluate0({'and', Queries}, Args, User, LDAP) when is_list(Queries) ->
+    R = lists:foldl(fun (Q,  true)  -> evaluate(Q, Args, User, LDAP);
+                        (_Q, false) -> false
+                    end, true, Queries),
+    ?L1("'and' result: ~s", [R]),
+    R;
+
+%% {'or', Qs}: short-circuit disjunction.
+evaluate0({'or', Queries}, Args, User, LDAP) when is_list(Queries) ->
+    R = lists:foldl(fun (_Q, true)  -> true;
+                        (Q,  false) -> evaluate(Q, Args, User, LDAP)
+                    end, false, Queries),
+    ?L1("'or' result: ~s", [R]),
+    R;
+
+%% {equals, Q1, Q2}: exact string equality of two sub-query results;
+%% an error from either side yields false via safe_eval/3.
+evaluate0({equals, StringQuery1, StringQuery2}, Args, User, LDAP) ->
+    safe_eval(fun (String1, String2) ->
+                      R = String1 =:= String2,
+                      ?L1("evaluated equals \"~s\", \"~s\": ~s",
+                          [String1, String2, R]),
+                      R
+              end,
+              evaluate(StringQuery1, Args, User, LDAP),
+              evaluate(StringQuery2, Args, User, LDAP));
+
+%% {match, StringQ, REQ}: regular-expression match via re:run/2.
+evaluate0({match, StringQuery, REQuery}, Args, User, LDAP) ->
+    safe_eval(fun (String, RE) ->
+                      R = case re:run(String, RE) of
+                              {match, _} -> true;
+                              nomatch    -> false
+                          end,
+                      ?L1("evaluated match \"~s\" against RE \"~s\": ~s",
+                          [String, RE, R]),
+                      R
+              end,
+              evaluate(StringQuery, Args, User, LDAP),
+              evaluate(REQuery, Args, User, LDAP));
+
+%% A bare string is shorthand for {string, Pattern}.
+evaluate0(StringPattern, Args, User, LDAP) when is_list(StringPattern) ->
+    evaluate0({string, StringPattern}, Args, User, LDAP);
+
+%% {string, Pattern}: template substitution of ${...} variables.
+evaluate0({string, StringPattern}, Args, _User, _LDAP) ->
+    R = fill(StringPattern, Args),
+    ?L1("evaluated string for \"~s\"", [R]),
+    R;
+
+%% {attribute, DNPattern, Attr}: fetch a single attribute value from the
+%% object at the filled DN.
+evaluate0({attribute, DNPattern, AttributeName}, Args, _User, LDAP) ->
+    DN = fill(DNPattern, Args),
+    R = attribute(DN, AttributeName, LDAP),
+    ?L1("evaluated attribute \"~s\" for \"~s\": ~p",
+        [AttributeName, DN, R]),
+    R;
+
+%% Catch-all: malformed query term.
+evaluate0(Q, Args, _User, _LDAP) ->
+    {error, {unrecognised_query, Q, Args}}.
+
+%% Apply F(V1, V2) but treat an {error, _} on either side as false, so
+%% LDAP lookup failures deny access instead of crashing the evaluator.
+safe_eval(_F, {error, _}, _)         -> false;
+safe_eval(_F, _,          {error, _}) -> false;
+safe_eval(F,  V1,         V2)         -> F(V1, V2).
+
+%% True iff a base-scope search at DN with Filter returns any entry;
+%% propagates eldap search errors as {error, _}.
+object_exists(DN, Filter, LDAP) ->
+    case eldap:search(LDAP,
+                      [{base, DN},
+                       {filter, Filter},
+                       {attributes, ["objectClass"]}, %% Reduce verbiage
+                       {scope, eldap:baseObject()}]) of
+        {ok, #eldap_search_result{entries = Entries}} ->
+            length(Entries) > 0;
+        {error, _} = E ->
+            E
+    end.
+
+%% Fetch the single value of AttributeName on the object at DN.
+%% Returns {error, not_found} for zero entries, multiple entries, or an
+%% attribute with other than exactly one value.
+attribute(DN, AttributeName, LDAP) ->
+    case eldap:search(LDAP,
+                      [{base, DN},
+                       {filter, eldap:present("objectClass")},
+                       {attributes, [AttributeName]}]) of
+        {ok, #eldap_search_result{entries = [#eldap_entry{attributes = A}]}} ->
+            case pget(AttributeName, A) of
+                [Attr] -> Attr;
+                _      -> {error, not_found}
+            end;
+        {ok, #eldap_search_result{entries = _}} ->
+            {error, not_found};
+        {error, _} = E ->
+            E
+    end.
+
+%% Run a query inside a fresh LDAP connection bound per creds/1.
+evaluate_ldap(Q, Args, User) ->
+    with_ldap(creds(User), fun(LDAP) -> evaluate(Q, Args, User, LDAP) end).
+
+%%--------------------------------------------------------------------
+
+%% Open a connection to the configured LDAP servers, bind with Creds
+%% (anon | {DN, Password}), run Fun(LDAP), and always close the
+%% connection afterwards. Creds may also be {error, _}, which is
+%% passed straight through without connecting.
+with_ldap(Creds, Fun) -> with_ldap(Creds, Fun, env(servers)).
+
+with_ldap(_Creds, _Fun, undefined) ->
+    {error, no_ldap_servers_defined};
+
+with_ldap({error, _} = E, _Fun, _State) ->
+    E;
+%% TODO - ATM we create and destroy a new LDAP connection on every
+%% call. This could almost certainly be more efficient.
+with_ldap({ok, Creds}, Fun, Servers) ->
+    Opts0 = [{ssl, env(use_ssl)}, {port, env(port)}],
+    SSLOpts = env(ssl_options),
+    %% We can't just pass through [] as sslopts in the old case, eldap
+    %% exit()s when you do that.
+    %% NOTE(review): exit reason below is a one-element tuple; a bare
+    %% atom would be more conventional.
+    Opts1 = case {SSLOpts, rabbit_misc:version_compare(
+                             erlang:system_info(version), "5.10")} of %% R16A
+                {[], _}  -> Opts0;
+                {_,  lt} -> exit({ssl_options_requires_min_r16a});
+                {_,  _}  -> [{sslopts, SSLOpts} | Opts0]
+            end,
+    %% log=network additionally mirrors eldap's wire traffic into the
+    %% broker log at warning/info level.
+    Opts2 = case env(log) of
+                network ->
+                    Pre = "    LDAP network traffic: ",
+                    rabbit_log:info(
+                      "    LDAP connecting to servers: ~p~n", [Servers]),
+                    [{log, fun(1, S, A) -> rabbit_log:warning(Pre ++ S, A);
+                              (2, S, A) -> rabbit_log:info   (Pre ++ S, A)
+                           end} | Opts1];
+                _ ->
+                    Opts1
+            end,
+    %% eldap defaults to 'infinity' but doesn't allow you to set that. Harrumph.
+    Opts = case env(timeout) of
+               infinity -> Opts2;
+               MS       -> [{timeout, MS} | Opts2]
+           end,
+    case eldap:open(Servers, Opts) of
+        {ok, LDAP} ->
+            %% try/after guarantees the connection is closed whatever
+            %% Fun or the bind does.
+            try Creds of
+                anon ->
+                    ?L1("anonymous bind", []),
+                    Fun(LDAP);
+                {UserDN, Password} ->
+                    case eldap:simple_bind(LDAP, UserDN, Password) of
+                        ok ->
+                            ?L1("bind succeeded: ~s", [UserDN]),
+                            Fun(LDAP);
+                        {error, invalidCredentials} ->
+                            ?L1("bind returned \"invalid credentials\": ~s",
+                                [UserDN]),
+                            {refused, UserDN, []};
+                        {error, E} ->
+                            ?L1("bind error: ~s ~p", [UserDN, E]),
+                            {error, E}
+                    end
+            after
+                eldap:close(LDAP)
+            end;
+        Error ->
+            ?L1("connect error: ~p", [Error]),
+            Error
+    end.
+
+%% Read a key from this plugin's application environment; crashes
+%% (badmatch) if the key is unset — all keys have defaults in the .app.
+env(F) ->
+    {ok, V} = application:get_env(rabbitmq_auth_backend_ldap, F),
+    V.
+
+%% Build the #user{} for a successfully bound user: resolve the DN if it
+%% was not already resolved at prebind, then run every configured tag
+%% query. Any tag query error fails the whole login.
+do_login(Username, PrebindUserDN, Password, LDAP) ->
+    UserDN = case PrebindUserDN of
+                 unknown -> username_to_dn(Username, LDAP, dn_lookup_when());
+                 _       -> PrebindUserDN
+             end,
+    User = #user{username     = Username,
+                 auth_backend = ?MODULE,
+                 impl         = #impl{user_dn  = UserDN,
+                                      password = Password}},
+    TagRes = [begin
+                  ?L1("CHECK: does ~s have tag ~s?", [Username, Tag]),
+                  R = evaluate(Q, [{username, Username},
+                                   {user_dn,  UserDN}], User, LDAP),
+                  ?L1("DECISION: does ~s have tag ~s? ~p",
+                      [Username, Tag, R]),
+                  {Tag, R}
+              end || {Tag, Q} <- env(tag_queries)],
+    case [E || {_, E = {error, _}} <- TagRes] of
+        []      -> {ok, User#user{tags = [Tag || {Tag, true} <- TagRes]}};
+        [E | _] -> E
+    end.
+
+%% When to resolve username -> DN: never (no lookup attribute
+%% configured), after binding as the user (postbind), or before binding
+%% using separate lookup credentials (prebind).
+dn_lookup_when() -> case {env(dn_lookup_attribute), env(dn_lookup_bind)} of
+                        {none, _}       -> never;
+                        {_,    as_user} -> postbind;
+                        {_,    _}       -> prebind
+                    end.
+
+%% Resolve the DN using the dedicated dn_lookup_bind credentials.
+username_to_dn_prebind(Username) ->
+    with_ldap({ok, env(dn_lookup_bind)},
+              fun (LDAP) -> dn_lookup(Username, LDAP) end).
+
+%% Resolve the DN on an already-bound connection (postbind), or fall
+%% back to filling user_dn_pattern.
+username_to_dn(Username, LDAP,  postbind) -> dn_lookup(Username, LDAP);
+username_to_dn(Username, _LDAP, _When)    -> fill_user_dn_pattern(Username).
+
+%% Search dn_lookup_base for an entry whose dn_lookup_attribute equals
+%% the filled pattern; on zero or multiple matches, warn and fall back
+%% to the filled pattern itself. A search error aborts the login.
+dn_lookup(Username, LDAP) ->
+    Filled = fill_user_dn_pattern(Username),
+    case eldap:search(LDAP,
+                      [{base, env(dn_lookup_base)},
+                       {filter, eldap:equalityMatch(
+                                  env(dn_lookup_attribute), Filled)},
+                       {attributes, ["distinguishedName"]}]) of
+        {ok, #eldap_search_result{entries = [#eldap_entry{object_name = DN}]}}->
+            ?L1("DN lookup: ~s -> ~s", [Username, DN]),
+            DN;
+        {ok, #eldap_search_result{entries = Entries}} ->
+            rabbit_log:warning("Searching for DN for ~s, got back ~p~n",
+                               [Filled, Entries]),
+            Filled;
+        {error, _} = E ->
+            exit(E)
+    end.
+
+%% Substitute ${username} into the configured user_dn_pattern.
+fill_user_dn_pattern(Username) ->
+    fill(env(user_dn_pattern), [{username, Username}]).
+
+%% Credentials for authorisation connections, per the other_bind config:
+%% as_user reuses the logging-in user's DN/password (so is unavailable
+%% for passwordless checks), otherwise the configured value (anon or
+%% {DN, Password}) is used as-is.
+creds(User) -> creds(User, env(other_bind)).
+
+creds(none, as_user) ->
+    {error, "'other_bind' set to 'as_user' but no password supplied"};
+creds(#user{impl = #impl{user_dn = UserDN, password = Password}}, as_user) ->
+    {ok, {UserDN, Password}};
+creds(_, Creds) ->
+    {ok, Creds}.
+
+%% Emit an info log line unless logging is disabled (log = false).
+log(Fmt, Args) -> case env(log) of
+                      false -> ok;
+                      _     -> rabbit_log:info(Fmt ++ "~n", Args)
+                  end.
+
+%% Template substitution with trace logging; the actual work is done by
+%% rabbit_auth_backend_ldap_util:fill/2.
+fill(Fmt, Args) ->
+    ?L2("filling template \"~s\" with~n            ~p", [Fmt, Args]),
+    R = rabbit_auth_backend_ldap_util:fill(Fmt, Args),
+    ?L2("template result: \"~s\"", [R]),
+    R.
+
+%% Condense a check's outcome for the DECISION log lines.
+log_result({ok, #user{}})   -> ok;
+log_result(true)            -> ok;
+log_result(false)           -> denied;
+log_result({refused, _, _}) -> denied;
+log_result(E)               -> E.
+
+log_user(#user{username = U}) -> rabbit_misc:format("\"~s\"", [U]).
+
+log_vhost(Args) ->
+    rabbit_misc:format("access to vhost \"~s\"", [pget(vhost, Args)]).
+
+log_resource(Args) ->
+    rabbit_misc:format("~s permission for ~s \"~s\" in \"~s\"",
+                       [pget(permission, Args), pget(resource, Args),
+                        pget(name, Args), pget(vhost, Args)]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Dummy supervisor to get this application behaviour working
+-behaviour(supervisor).
+-export([init/1]).
+
+%% application callback: warn if the backend is not actually enabled in
+%% rabbit's auth_backends, ensure the ssl app is running when use_ssl is
+%% set, then start the (empty) supervisor required by the application
+%% behaviour.
+start(_Type, _StartArgs) ->
+    {ok, Backends} = application:get_env(rabbit, auth_backends),
+    case configured(rabbit_auth_backend_ldap, Backends) of
+        true  -> ok;
+        false -> rabbit_log:warning(
+                   "LDAP plugin loaded, but rabbit_auth_backend_ldap is not "
+                   "in the list of auth_backends. LDAP auth will not work.~n")
+    end,
+    {ok, SSL} = application:get_env(rabbitmq_auth_backend_ldap, use_ssl),
+    case SSL of
+        true  -> rabbit_networking:ensure_ssl();
+        false -> ok
+    end,
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+stop(_State) ->
+    ok.
+
+%% True iff module M appears in the auth_backends list, either bare or
+%% in a {AuthnMod, AuthzMod} pair (in either position).
+configured(_M, [])        -> false;
+configured(M,  [M    |_]) -> true;
+configured(M,  [{M,_}|_]) -> true;
+configured(M,  [{_,M}|_]) -> true;
+configured(M,  [_    |T]) -> configured(M, T).
+
+%%----------------------------------------------------------------------------
+
+%% Dummy supervisor with no children.
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap_util).
+
+-export([fill/2]).
+
+%% Fill a DN/query template: replace each ${key} placeholder with the
+%% corresponding value from the args proplist and return a flat string.
+fill(Fmt, []) ->
+    binary_to_list(iolist_to_binary(Fmt));
+
+fill(Fmt, [{K, V} | T]) ->
+    Var = [[$\\, $$, ${] ++ atom_to_list(K) ++ [$}]],
+    %% Escape the replacement so it is inserted literally: "&" and "\"
+    %% are special in re:replace/4 replacement strings, so previously a
+    %% value containing them (e.g. a username with "&") was expanded by
+    %% the regex engine and corrupted the resulting DN.
+    fill(re:replace(Fmt, Var, [escape_repl(to_repl(V))], [global]), T).
+
+%% Convert a substitution value to string form.
+to_repl(V) when is_atom(V) ->
+    atom_to_list(V);
+to_repl(V) ->
+    V.
+
+%% Backslash-escape characters that re:replace/4 treats specially in a
+%% replacement string; accepts any iodata.
+escape_repl(S) ->
+    lists:flatmap(fun ($&)  -> "\\&";
+                      ($\\) -> "\\\\";
+                      (C)   -> [C]
+                  end,
+                  binary_to_list(iolist_to_binary(S))).
--- /dev/null
+%% -*- erlang -*-
+{application, rabbitmq_auth_backend_ldap,
+ [{description, "RabbitMQ LDAP Authentication Backend"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_auth_backend_ldap_app, []}},
+ {env, [ {servers, undefined},
+ {user_dn_pattern, "${username}"},
+ {dn_lookup_attribute, none},
+ {dn_lookup_base, none},
+ {dn_lookup_bind, as_user},
+ {other_bind, as_user},
+ {vhost_access_query, {constant, true}},
+ {resource_access_query, {constant, true}},
+ {tag_queries, [{administrator, {constant, false}}]},
+ {use_ssl, false},
+ {ssl_options, []},
+ {port, 389},
+ {timeout, infinity},
+ {log, false} ] },
+ {applications, [kernel, stdlib, eldap, rabbit]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(SIMON, #amqp_params_network{username = <<"Simon MacMullen">>,
+ password = <<"password">>,
+ virtual_host = <<"test">>}).
+
+-define(MIKEB, #amqp_params_network{username = <<"Mike Bridgen">>,
+ password = <<"password">>,
+ virtual_host = <<"test">>}).
+
+%%--------------------------------------------------------------------
+
+%% Cross product of credentials x plugin configurations: a login must
+%% succeed only when both the credentials and the configuration are good.
+login_test_() ->
+    [test_login(Env, L, case {LGood, EnvGood} of
+                            {good, good} -> fun succ/1;
+                            _            -> fun fail/1
+                        end) || {LGood, L}     <- logins(),
+                                {EnvGood, Env} <- login_envs()].
+
+%% Credential fixtures, tagged good/bad.
+logins() ->
+    [{bad, #amqp_params_network{}},
+     {bad, #amqp_params_network{username = <<"Simon MacMullen">>}},
+     {bad, #amqp_params_network{username = <<"Simon MacMullen">>,
+                                password = <<"password">>}},
+     {good, ?SIMON},
+     {good, ?MIKEB}].
+
+%% Plugin-configuration fixtures, tagged good/bad.
+login_envs() ->
+    [{good, base_login_env()},
+     {good, dn_lookup_pre_bind_env()},
+     {good, other_bind_admin_env()},
+     {good, other_bind_anon_env()},
+     {bad,  other_bind_broken_env()}].
+
+base_login_env() ->
+    [{user_dn_pattern,     "cn=${username},ou=People,dc=example,dc=com"},
+     {dn_lookup_attribute, none},
+     {dn_lookup_base,      none},
+     {dn_lookup_bind,      as_user},
+     {other_bind,          as_user}].
+
+%% TODO configure OpenLDAP to allow a dn_lookup_post_bind_env()
+dn_lookup_pre_bind_env() ->
+    [{user_dn_pattern,     "${username}"},
+     {dn_lookup_attribute, "cn"},
+     {dn_lookup_base,      "OU=People,DC=example,DC=com"},
+     {dn_lookup_bind,      {"cn=admin,dc=example,dc=com", "admin"}}].
+
+other_bind_admin_env() ->
+    [{other_bind, {"cn=admin,dc=example,dc=com", "admin"}}].
+
+other_bind_anon_env() ->
+    [{other_bind, anon}].
+
+%% Deliberately wrong admin password ("admi").
+other_bind_broken_env() ->
+    [{other_bind, {"cn=admin,dc=example,dc=com", "admi"}}].
+
+%% Run one login attempt under Env, always restoring the base config.
+test_login(Env, Login, ResultFun) ->
+    ?_test(try
+               set_env(Env),
+               ResultFun(Login)
+           after
+               set_env(base_login_env())
+           end).
+
+set_env(Env) ->
+    [application:set_env(rabbitmq_auth_backend_ldap, K, V) || {K, V} <- Env].
+
+succ(Login) -> ?assertMatch({ok, _}, amqp_connection:start(Login)).
+fail(Login) -> ?assertMatch({error, _}, amqp_connection:start(Login)).
+
+%%--------------------------------------------------------------------
+
+in_group_test_() ->
+ X = [#'exchange.declare'{exchange = <<"test">>}],
+ test_resource_funs([{?SIMON, X, ok},
+ {?MIKEB, X, fail}]).
+
+const_test_() ->
+ Q = [#'queue.declare'{queue = <<"test">>}],
+ test_resource_funs([{?SIMON, Q, ok},
+ {?MIKEB, Q, fail}]).
+
+string_match_test_() ->
+ B = fun(N) ->
+ [#'exchange.declare'{exchange = N},
+ #'queue.declare'{queue = <<"test">>},
+ #'queue.bind'{exchange = N, queue = <<"test">>}]
+ end,
+ test_resource_funs([{?SIMON, B(<<"xch-Simon MacMullen-abc123">>), ok},
+ {?SIMON, B(<<"abc123">>), fail},
+ {?SIMON, B(<<"xch-Someone Else-abc123">>), fail}]).
+
%% ?SIMON may declare and consume from both queues; ?MIKEB may do
%% neither.
boolean_logic_test_() ->
    Q1 = [#'queue.declare'{queue = <<"test1">>},
          #'basic.consume'{queue = <<"test1">>}],
    Q2 = [#'queue.declare'{queue = <<"test2">>},
          #'basic.consume'{queue = <<"test2">>}],
    %% Consistency fix: use test_resource_funs/1 like the other
    %% *_test_() generators rather than inlining the equivalent
    %% comprehension over test_resource_fun/1.
    test_resource_funs([{?SIMON, Q1, ok},
                        {?SIMON, Q2, ok},
                        {?MIKEB, Q1, fail},
                        {?MIKEB, Q2, fail}]).
+
%% Build one resource-access test per {Person, Things, Result} triple.
test_resource_funs(PTRs) ->
    lists:map(fun test_resource_fun/1, PTRs).
+
%% Return a 0-arity fun which connects and opens a channel as Person,
%% issues each AMQP method in Things, and asserts that the overall
%% outcome matches Result: 'ok' if everything succeeded, 'fail' if a
%% channel/connection exit was raised along the way.
test_resource_fun({Person, Things, Result}) ->
    fun() ->
            {ok, Connection} = amqp_connection:start(Person),
            {ok, Channel} = amqp_connection:open_channel(Connection),
            Outcome =
                try
                    lists:foreach(
                      fun(Thing) -> amqp_channel:call(Channel, Thing) end,
                      Things),
                    amqp_connection:close(Connection),
                    ok
                catch exit:_ -> fail
                end,
            ?assertEqual(Result, Outcome)
    end.
+
+%%--------------------------------------------------------------------
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Authenticates the user, obtaining the username from the client's
+SSL certificate. The user's password is not checked.
+
+In order to use this mechanism the client must connect over SSL, and
+present a client certificate.
+
+The mechanism must also be enabled in RabbitMQ's configuration file -
+see http://www.rabbitmq.com/authentication.html for more details, or
+in short, ensure that the 'rabbit' section of your configuration
+contains:
+
+{auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'EXTERNAL']}
+
+to allow this mechanism in addition to the defaults, or:
+
+{auth_mechanisms, ['EXTERNAL']}
+
+to allow only this mechanism.
+
+For safety the server must be configured with the SSL option 'verify'
+set to 'verify_peer', to ensure that if an SSL client presents a
+certificate, it gets verified.
+
+By default this will set the username to an RFC4514-ish string form of
+the certificate's subject's Distinguished Name, similar to that
+produced by OpenSSL's "-nameopt RFC2253" option.
+
+You can obtain this string form from a certificate with a command like:
+
+$ openssl x509 -in path/to/cert.pem -nameopt RFC2253 -subject -noout
+
+or from an existing amqps connection with commands like:
+
+$ rabbitmqctl list_connections peer_cert_subject
+
+To use the Common Name instead, ensure that the 'rabbit' section of
+your configuration contains:
+
+{ssl_cert_login_from, common_name}
+
+Note that the authenticated user will then be looked up in the
+configured authentication / authorisation backend(s) - this will be
+the mnesia-based user database by default, but could include other
+backends if so configured.
--- /dev/null
+RELEASABLE:=true
+DEPS:=rabbitmq-server rabbitmq-erlang-client
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+
+-module(rabbit_auth_mechanism_ssl).
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("public_key/include/public_key.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism external"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"EXTERNAL">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-record(state, {username = undefined}).
+
+%% SASL EXTERNAL. SASL says EXTERNAL means "use credentials
+%% established by means external to the mechanism". We define that to
+%% mean the peer certificate's subject's DN or CN.
+
%% rabbit_auth_mechanism callback: human-readable description of this
%% mechanism.
description() ->
 [{description, <<"SSL authentication mechanism using SASL EXTERNAL">>}].
+
%% rabbit_auth_mechanism callback: offer EXTERNAL on any SSL
%% connection, even when no peer certificate was presented -- see the
%% comment below for why.
should_offer(Sock) ->
    case rabbit_net:peercert(Sock) of
        {ok, _}              -> true;
        {error, no_peercert} -> true;
        nossl                -> false
    end.
+%% We offer EXTERNAL even if there is no peercert since that leads to
+%% a more comprehensible error message - authentication is refused
+%% below with "no peer certificate" rather than have the client fail
+%% to negotiate an authentication mechanism.
+
%% rabbit_auth_mechanism callback: capture the username (or the reason
%% it cannot be determined) from the socket's peer certificate.
init(Sock) ->
    #state{username = peer_username(rabbit_net:peercert(Sock))}.

%% Map the result of rabbit_net:peercert/1 to either a username or a
%% {refused, Format, Args} tuple explaining the refusal.
peer_username({ok, Cert}) ->
    case rabbit_ssl:peer_cert_auth_name(Cert) of
        unsafe    -> {refused, "configuration unsafe", []};
        not_found -> {refused, "no name found", []};
        Name      -> Name
    end;
peer_username({error, no_peercert}) ->
    {refused, "no peer certificate", []};
peer_username(nossl) ->
    {refused, "not SSL connection", []}.
+
%% rabbit_auth_mechanism callback: if init/1 recorded a refusal,
%% report it now; otherwise hand the username to the configured auth
%% backends (no password is checked for EXTERNAL).
handle_response(_Response, #state{username = {refused, _, _} = Refusal}) ->
    Refusal;
handle_response(_Response, #state{username = Username}) ->
    rabbit_access_control:check_user_login(Username, []).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_ssl_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+
+-behaviour(supervisor).
+-export([init/1]).
+
%% application callback: start the (childless) top-level supervisor.
start(normal, []) ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
%% application callback: nothing to clean up on shutdown.
stop(_State) ->
 ok.
+
%% supervisor callback: no children; allow at most 3 restarts within
%% 10 seconds.
init([]) ->
    RestartStrategy = {one_for_one, 3, 10},
    {ok, {RestartStrategy, []}}.
--- /dev/null
+%% -*- erlang -*-
+{application, rabbitmq_auth_mechanism_ssl,
+ [{description, "RabbitMQ SSL authentication (SASL EXTERNAL)"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_auth_mechanism_ssl_app, []}},
+ {env, [{name_from, distinguished_name}] },
+ {applications, [kernel, stdlib]}]}.
--- /dev/null
+This package, the RabbitMQ Consistent Hash Exchange is licensed under
+the MPL. For the MPL, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ Consistent Hash Exchange.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+# RabbitMQ Consistent Hash Exchange Type
+
+This plugin adds a consistent-hash exchange type to RabbitMQ.
+
+In various scenarios, you may wish to ensure that messages sent to an
+exchange are consistently and equally distributed across a number of
+different queues based on the routing key of the message (or a
+nominated header, see "Routing on a header" below). You could arrange
+for this to occur yourself by using a direct or topic exchange,
+binding queues to that exchange and then publishing messages to that
+exchange that match the various binding keys.
+
+However, arranging things this way can be problematic:
+
+1. It is difficult to ensure that all queues bound to the exchange
+will receive a (roughly) equal number of messages without baking
+quite a lot of knowledge about the number of queues and their
+bindings into the publishers.
+
+2. If the number of queues changes, it is not easy to ensure that the
+new topology still distributes messages between the different queues
+evenly.
+
+[Consistent Hashing](http://en.wikipedia.org/wiki/Consistent_hashing)
+is a hashing technique whereby each bucket appears at multiple points
+throughout the hash space, and the bucket selected is the nearest
+higher (or lower, it doesn't matter, provided it's consistent) bucket
+to the computed hash (and the hash space wraps around). The effect of
+this is that when a new bucket is added or an existing bucket removed,
+only a very few hashes change which bucket they are routed to.
+
+In the case of Consistent Hashing as an exchange type, the hash is
+calculated from the hash of the routing key of each message
+received. Thus messages that have the same routing key will have the
+same hash computed, and thus will be routed to the same queue,
+assuming no bindings have changed.
+
+When you bind a queue to a consistent-hash exchange, the binding key
+is a number-as-a-string which indicates the number of points in the
+hash space at which you wish the queue to appear. The actual points
+are generated randomly.
+
+So, if you wish for queue A to receive twice as many messages as queue
+B, then you bind the queue A with a binding key of twice the number
+(as a string -- binding keys are always strings) of the binding key of
+the binding to queue B.
+
+Each message gets delivered to at most one queue. Normally, each
+message gets delivered to exactly one queue, but there is a race
+between the determination of which queue to send a message to, and the
+deletion/death of that queue that does permit the possibility of the
+message being sent to a queue which then disappears before the message
+is processed. Hence in general, at most one queue.
+
+The exchange type is "x-consistent-hash".
+
+Here is an example using the Erlang client:
+
+ -include_lib("amqp_client/include/amqp_client.hrl").
+
+ test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queues = [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>],
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = <<"e">>, type = <<"x-consistent-hash">>
+ }),
+ [amqp_channel:call(Chan, #'queue.declare' { queue = Q }) || Q <- Queues],
+ [amqp_channel:call(Chan, #'queue.bind' { queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"10">> })
+ || Q <- [<<"q0">>, <<"q1">>]],
+ [amqp_channel:call(Chan, #'queue.bind' { queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"20">> })
+ || Q <- [<<"q2">>, <<"q3">>]],
+ Msg = #amqp_msg { props = #'P_basic'{}, payload = <<>> },
+ [amqp_channel:call(Chan,
+ #'basic.publish'{
+ exchange = <<"e">>,
+ routing_key = list_to_binary(
+ integer_to_list(
+ random:uniform(1000000)))
+ }, Msg) || _ <- lists:seq(1,100000)],
+ amqp_connection:close(Conn),
+ ok.
+
+As you can see, the queues `q0` and `q1` get bound each with 10 points
+in the hash space to the exchange `e` which means they'll each get
+roughly the same number of messages. The queues `q2` and `q3` however,
+get 20 points each which means they'll each get roughly the same
+number of messages too, but that will be approximately twice as many
+as `q0` and `q1`. We then publish 100,000 messages to our exchange
+with random routing keys. After this has completed, running
+`rabbitmqctl list_queues` should show that the messages have been
+distributed approximately as desired.
+
+Note the `routing_key`s in the bindings are numbers-as-strings. This
+is because AMQP specifies the routing_key must be a string.
+
+The more points in the hash space each binding has, the closer the
+actual distribution will be to the desired distribution (as indicated
+by the ratio of points by binding). However, large numbers of points
+(many thousands) will substantially decrease performance of the
+exchange type.
+
+Equally, it is important to ensure that the messages being published
+to the exchange have a range of different `routing_key`s: if a very
+small set of routing keys are being used then there's a possibility of
+messages not being evenly distributed between the various queues. If
+the routing key is a pseudo-random session ID or such, then good
+results should follow.
+
+## Routing on a header
+
+Under most circumstances the routing key is a good choice for something to
+hash. However, in some cases you need to use the routing key for some other
+purpose (for example with more complex routing involving exchange to
+exchange bindings). In this case you can configure the consistent hash
+exchange to route based on a named header instead. To do this, declare the
+exchange with a string argument called "hash-header" naming the header to
+be used. For example using the Erlang client as above:
+
+ amqp_channel:call(
+ Chan, #'exchange.declare' {
+ exchange = <<"e">>,
+ type = <<"x-consistent-hash">>,
+ arguments = [{<<"hash-header">>, longstr, <<"hash-me">>}]
+ }).
+
+If you specify "hash-header" and then publish messages without the named
+header, they will all get routed to the same (arbitrarily-chosen) queue.
+
+Any comments or feedback welcome, to the
+[rabbitmq-discuss mailing list](https://lists.rabbitmq.com/cgi-bin/mailman/listinfo/rabbitmq-discuss)
+or info@rabbitmq.com.
--- /dev/null
+# Build-system metadata -- NOTE(review): the consumer of these
+# variables (presumably the umbrella build) is not visible here.
+RELEASABLE:=true
+# Sibling packages that must be built before this plugin.
+DEPS:=rabbitmq-server rabbitmq-erlang-client
+# Expression run inside a started broker when testing (see the test
+# module's test/0).
+WITH_BROKER_TEST_COMMANDS:=rabbit_exchange_type_consistent_hash_test:test()
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Consistent Hash Exchange.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_consistent_hash).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, assert_args_equivalence/2]).
+-export([init/0]).
+
+%% One row per point in the hash space: the key {ExchangeName, Point}
+%% (source_number) maps to the destination fed by that point, plus the
+%% binding that created it (so bindings can be removed precisely).
+%% Stored in an ordered_set table (see init/0) so route/2 can walk the
+%% points in ascending order.
+-record(bucket, {source_number, destination, binding}).
+
+%% Boot step: register the "x-consistent-hash" type with the broker's
+%% exchange-type registry.
+-rabbit_boot_step(
+  {rabbit_exchange_type_consistent_hash_registry,
+   [{description, "exchange type x-consistent-hash: registry"},
+    {mfa, {rabbit_registry, register,
+           [exchange, <<"x-consistent-hash">>, ?MODULE]}},
+    {requires, rabbit_registry},
+    {enables, kernel_ready}]}).
+
+%% Boot step: create our mnesia table once the database is available.
+-rabbit_boot_step(
+  {rabbit_exchange_type_consistent_hash_mnesia,
+   [{description, "exchange type x-consistent-hash: mnesia"},
+    {mfa, {?MODULE, init, []}},
+    {requires, database},
+    {enables, external_infrastructure}]}).
+
+-define(TABLE, ?MODULE).
+%% Size of the hash space: hashes and bucket points lie in [0, 2^27).
+-define(PHASH2_RANGE, 134217728). %% 2^27
+
+%% rabbit_exchange_type callback: human-readable description of this
+%% exchange type, as surfaced by management tooling.
+description() ->
+    [{description, <<"Consistent Hashing Exchange">>}].
+
+%% rabbit_exchange_type callback: binding events need not be serialised
+%% for us; add_binding/3 and remove_bindings/3 take their own table
+%% locks via mnesia:write_lock_table/1.
+serialise_events() -> false.
+
+%% rabbit_exchange_type callback: pick the destination(s) for a
+%% delivery.  The routing key (or the configured "hash-header" header;
+%% see hash/2) is hashed into [0, ?PHASH2_RANGE), and the message goes
+%% to the bucket at the nearest point >= that hash, wrapping around to
+%% the exchange's lowest point when no higher one exists.  Returns a
+%% list of zero or one destinations.  Relies on ?TABLE being an
+%% ordered_set (created in init/0) so the select below visits points in
+%% ascending order.
+route(#exchange { name = Name,
+                  arguments = Args },
+      #delivery { message = Msg }) ->
+    %% Yes, we're being exceptionally naughty here, by using ets on an
+    %% mnesia table. However, RabbitMQ-server itself is just as
+    %% naughty, and for good reasons.
+
+    %% Note that given the nature of this select, it will force mnesia
+    %% to do a linear scan of the entries in the table that have the
+    %% correct exchange name. More sophisticated solutions include,
+    %% for example, having some sort of tree as the value of a single
+    %% mnesia entry for each exchange. However, such values tend to
+    %% end up as relatively deep data structures which cost a lot to
+    %% continually copy to the process heap. Consequently, such
+    %% approaches have not been found to be much faster, if at all.
+    HashOn = rabbit_misc:table_lookup(Args, <<"hash-header">>),
+    H = erlang:phash2(hash(HashOn, Msg), ?PHASH2_RANGE),
+    case ets:select(?TABLE, [{#bucket { source_number = {Name, '$2'},
+                                        destination = '$1',
+                                        _ = '_' },
+                              [{'>=', '$2', H}],
+                              ['$1']}], 1) of
+        %% No point at or above H: wrap around the ring and take the
+        %% bucket with the lowest point for this exchange (if any).
+        '$end_of_table' ->
+            case ets:match_object(?TABLE, #bucket { source_number = {Name, '_'},
+                                                    _ = '_' }, 1) of
+                {[Bucket], _Cont} -> [Bucket#bucket.destination];
+                _ -> []
+            end;
+        {Destinations, _Continuation} ->
+            Destinations
+    end.
+
+%% No exchange-level arguments are validated at declare time.
+validate(_X) -> ok.
+
+%% NOTE(review): binding keys are parsed as integers in add_binding/3;
+%% a non-numeric key is not rejected here and will instead crash the
+%% binding transaction.
+validate_binding(_X, _B) -> ok.
+
+%% Nothing to set up at exchange creation; buckets are written lazily
+%% as bindings are added.
+create(_Tx, _X) -> ok.
+
+%% On exchange deletion (transaction phase), remove every bucket keyed
+%% by this exchange's name.  The non-transactional phase is a no-op.
+delete(transaction, #exchange { name = Name }, _Bs) ->
+    ok = mnesia:write_lock_table(?TABLE),
+    [ok = mnesia:delete_object(?TABLE, R, write) ||
+        R <- mnesia:match_object(
+               ?TABLE, #bucket{source_number = {Name, '_'}, _ = '_'}, write)],
+    ok;
+delete(_Tx, _X, _Bs) -> ok.
+
+%% Routing here does not read any policy, so policy changes are no-ops.
+policy_changed(_X1, _X2) -> ok.
+
+%% Record a binding (transaction phase only): write K buckets for
+%% exchange S into the ring, each at a random free point, all pointing
+%% at destination D, where K is the binding key interpreted as an
+%% integer weight.  Re-adding an already-known binding is a no-op,
+%% hence the existence check first.  The 'none' phase does nothing.
+add_binding(transaction, _X,
+            #binding { source = S, destination = D, key = K } = B) ->
+    %% Use :select rather than :match_object so that we can limit the
+    %% number of results and not bother copying results over to this
+    %% process.
+    case mnesia:select(?TABLE,
+                       [{#bucket { binding = B, _ = '_' }, [], [ok]}],
+                       1, read) of
+        '$end_of_table' ->
+            ok = mnesia:write_lock_table(?TABLE),
+            %% Clamp to [0, ?PHASH2_RANGE]: binary_to_integer/1 crashes
+            %% the transaction on non-numeric keys (as before), and
+            %% flooring at zero keeps find_numbers/3 from being handed
+            %% a negative count.
+            BucketCount = max(0, min(binary_to_integer(K), ?PHASH2_RANGE)),
+            [ok = mnesia:write(?TABLE,
+                               #bucket { source_number = {S, N},
+                                         destination = D,
+                                         binding = B },
+                               write) || N <- find_numbers(S, BucketCount, [])],
+            ok;
+        _ ->
+            ok
+    end;
+add_binding(none, _X, _B) ->
+    ok.
+
+%% Remove every bucket belonging to each of Bindings (transaction
+%% phase).  The select returns bucket keys ({Source, Point} pairs,
+%% bound to '$1') rather than whole records, so we can delete by key.
+remove_bindings(transaction, _X, Bindings) ->
+    ok = mnesia:write_lock_table(?TABLE),
+    [ok = mnesia:delete(?TABLE, Key, write) ||
+        Binding <- Bindings,
+        Key <- mnesia:select(?TABLE,
+                             [{#bucket { source_number = '$1',
+                                         binding = Binding,
+                                         _ = '_' }, [], ['$1']}],
+                             write)],
+    ok;
+%% Nothing to do outside the transaction phase.
+remove_bindings(none, _X, _Bs) ->
+    ok.
+
+%% Delegate argument-equivalence checking (exchange redeclaration) to
+%% the broker's standard implementation.
+assert_args_equivalence(X, Args) ->
+    rabbit_exchange:assert_args_equivalence(X, Args).
+
+%% Boot-step target (see the mnesia boot step above): ensure the bucket
+%% table exists on this node.  ordered_set is required by route/2,
+%% which relies on ascending traversal of {Exchange, Point} keys.
+init() ->
+    %% Return values are deliberately unmatched: on restart the table
+    %% typically already exists, in which case create_table /
+    %% add_table_copy return errors we intend to ignore -- TODO confirm
+    %% a wait_for_tables timeout should also be non-fatal here.
+    mnesia:create_table(?TABLE, [{record_name, bucket},
+                                 {attributes, record_info(fields, bucket)},
+                                 {type, ordered_set}]),
+    mnesia:add_table_copy(?TABLE, node(), ram_copies),
+    mnesia:wait_for_tables([?TABLE], 30000),
+    ok.
+
+%% Pick N distinct random points in the hash space for exchange Source,
+%% skipping points already occupied by one of its buckets.  Must run
+%% inside an mnesia transaction: the read takes a write lock so chosen
+%% points stay free until the caller writes them.
+%%
+%% The guard 'N =< 0' (rather than matching 0 exactly) makes a
+%% non-positive request terminate immediately instead of recursing
+%% forever past zero.
+find_numbers(_Source, N, Acc) when N =< 0 ->
+    Acc;
+find_numbers(Source, N, Acc) ->
+    %% NOTE(review): the 'random' module is deprecated (removed in
+    %% modern OTP); 'rand' is the replacement but is unavailable on the
+    %% OTP releases this plugin targets, so 'random' is kept.
+    Number = random:uniform(?PHASH2_RANGE) - 1,
+    case mnesia:read(?TABLE, {Source, Number}, write) of
+        [] -> find_numbers(Source, N-1, [Number | Acc]);
+        [_] -> find_numbers(Source, N, Acc)
+    end.
+
+%% The term to be hashed for Msg: the message's routing keys when no
+%% "hash-header" was configured, otherwise the value of that header.
+%% Yields 'undefined' when the message carries no headers at all.
+hash(undefined, #basic_message { routing_keys = Routes }) ->
+    Routes;
+hash({longstr, Header}, #basic_message { content = Content }) ->
+    case rabbit_basic:extract_headers(Content) of
+        undefined -> undefined;
+        Headers   -> rabbit_misc:table_lookup(Headers, Header)
+    end.
--- /dev/null
+%% Application resource file for the plugin.  %%VSN%% is substituted
+%% with the real version at build time (see the sed rule in the
+%% package Makefile); the generate_app script fills in 'modules'.
+{application, rabbitmq_consistent_hash_exchange,
+ [{description, "Consistent Hash Exchange Type"},
+  {vsn, "%%VSN%%"},
+  {modules, []},
+  {registered, []},
+  {env, []},
+  {applications, [rabbit]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Consistent Hash Exchange.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_consistent_hash_test).
+-export([test/0]).
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% Because the routing is probabilistic, we can't really test a great
+%% deal here.
+
+%% Entry point invoked inside a running broker by the build system
+%% (see WITH_BROKER_TEST_COMMANDS in the package Makefile).
+test() ->
+    %% Run the test twice to test we clean up correctly
+    t([<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>]),
+    t([<<"q4">>, <<"q5">>, <<"q6">>, <<"q7">>]).
+
+%% Exercise both hashing modes (routing key and named header) against
+%% the same four queues.
+t(Qs) ->
+    ok = test_with_rk(Qs),
+    ok = test_with_header(Qs),
+    ok.
+
+%% Hash on the routing key (the default): publish with random routing
+%% keys and no extra declare arguments.
+test_with_rk(Qs) ->
+    test0(fun () ->
+                  #'basic.publish'{exchange = <<"e">>, routing_key = rnd()}
+          end,
+          fun() ->
+                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+          end, [], Qs).
+
+%% Hash on a named header: declare the exchange with "hash-header" set
+%% to "hashme" and publish messages carrying random values in it.
+test_with_header(Qs) ->
+    test0(fun () ->
+                  #'basic.publish'{exchange = <<"e">>}
+          end,
+          fun() ->
+                  H = [{<<"hashme">>, longstr, rnd()}],
+                  #amqp_msg{props = #'P_basic'{headers = H}, payload = <<>>}
+          end, [{<<"hash-header">>, longstr, <<"hashme">>}], Qs).
+
+%% A random routing-key / header value: the textual form of an integer
+%% drawn uniformly from 1..1000000, as a binary.
+rnd() ->
+    integer_to_binary(random:uniform(1000000)).
+
+%% Core test: declare an auto-delete consistent-hash exchange with
+%% DeclareArgs, bind Q1/Q2 with weight 10 and Q3/Q4 with weight 20,
+%% publish Count messages built by MakeMethod/MakeMsg, then check that
+%% (a) every message was routed somewhere and (b) no queue received
+%% less than 1% of the total.
+test0(MakeMethod, MakeMsg, DeclareArgs, [Q1, Q2, Q3, Q4] = Queues) ->
+    Count = 10000,
+
+    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+    {ok, Chan} = amqp_connection:open_channel(Conn),
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(Chan,
+                          #'exchange.declare' {
+                            exchange = <<"e">>,
+                            type = <<"x-consistent-hash">>,
+                            auto_delete = true,
+                            arguments = DeclareArgs
+                          }),
+    [#'queue.declare_ok'{} =
+         amqp_channel:call(Chan, #'queue.declare' {
+                             queue = Q, exclusive = true }) || Q <- Queues],
+    [#'queue.bind_ok'{} =
+         amqp_channel:call(Chan, #'queue.bind' { queue = Q,
+                                                 exchange = <<"e">>,
+                                                 routing_key = <<"10">> })
+     || Q <- [Q1, Q2]],
+    [#'queue.bind_ok'{} =
+         amqp_channel:call(Chan, #'queue.bind' { queue = Q,
+                                                 exchange = <<"e">>,
+                                                 routing_key = <<"20">> })
+     || Q <- [Q3, Q4]],
+    %% Publish inside a transaction so every message is routed before
+    %% we go on to count queue depths.
+    #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}),
+    [amqp_channel:call(Chan,
+                       MakeMethod(),
+                       MakeMsg()) || _ <- lists:duplicate(Count, const)],
+    amqp_channel:call(Chan, #'tx.commit'{}),
+    %% Re-declaring each queue returns its current message count.
+    Counts =
+        [begin
+             #'queue.declare_ok'{message_count = M} =
+                 amqp_channel:call(Chan, #'queue.declare' {queue = Q,
+                                                           exclusive = true }),
+             M
+         end || Q <- Queues],
+    Count = lists:sum(Counts), %% All messages got routed
+    [true = C > 0.01 * Count || C <- Counts], %% We are not *grossly* unfair
+    amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e">> }),
+    [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
+    amqp_channel:close(Chan),
+    amqp_connection:close(Conn),
+    ok.
--- /dev/null
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+#
+
+# Development default; overridden when the packaging step stamps a
+# real version.
+VERSION=0.0.0
+
+SOURCE_PACKAGE_DIR=$(PACKAGE)-$(VERSION)-src
+SOURCE_PACKAGE_TAR_GZ=$(SOURCE_PACKAGE_DIR).tar.gz
+
+# Rebuild-triggering inputs taken from the broker checkout.
+BROKER_HEADERS=$(wildcard $(BROKER_DIR)/$(INCLUDE_DIR)/*.hrl)
+BROKER_SOURCES=$(wildcard $(BROKER_DIR)/$(SOURCE_DIR)/*.erl)
+BROKER_DEPS=$(BROKER_HEADERS) $(BROKER_SOURCES)
+
+# Every .app.in template is instantiated to the matching .app file.
+INFILES=$(shell find . -name '*.app.in')
+INTARGETS=$(patsubst %.in, %, $(INFILES))
+
+WEB_URL=http://www.rabbitmq.com/
+
+include common.mk
+
+# Start a broker from $(BROKER_DIR) with this package on its code path.
+run_in_broker: compile $(BROKER_DEPS) $(EBIN_DIR)/$(PACKAGE).app
+	$(MAKE) RABBITMQ_SERVER_START_ARGS='$(PA_LOAD_PATH)' -C $(BROKER_DIR) run
+
+clean: common_clean
+	rm -f $(INTARGETS)
+	rm -rf $(DIST_DIR)
+
+distribution: documentation source_tarball package
+
+# Generate the .app from its .in template, then stamp in the version.
+%.app: %.app.in $(SOURCES) $(BROKER_DIR)/generate_app
+	escript $(BROKER_DIR)/generate_app $< $@ $(SOURCE_DIR)
+	sed 's/%%VSN%%/$(VERSION)/' $@ > $@.tmp && mv $@.tmp $@
+
+###############################################################################
+##  Dialyzer
+###############################################################################
+
+RABBIT_PLT=$(BROKER_DIR)/rabbit.plt
+
+dialyze: $(RABBIT_PLT) $(TARGETS)
+	dialyzer --plt $(RABBIT_PLT) --no_native -Wrace_conditions $(TARGETS)
+
+.PHONY: $(RABBIT_PLT)
+$(RABBIT_PLT):
+	$(MAKE) -C $(BROKER_DIR) create-plt
+
+###############################################################################
+## Documentation
+###############################################################################
+
+documentation: $(DOC_DIR)/index.html
+
+$(DOC_DIR)/overview.edoc: $(SOURCE_DIR)/overview.edoc.in
+	mkdir -p $(DOC_DIR)
+	sed -e 's:%%VERSION%%:$(VERSION):g' < $< > $@
+
+# Note: defines the 'edoc' macro, which the .hrl files use to hide
+# spec attributes from the documentation build.
+$(DOC_DIR)/index.html: $(DEPS_DIR)/$(COMMON_PACKAGE_DIR) $(DOC_DIR)/overview.edoc $(SOURCES)
+	$(LIBS_PATH) erl -noshell -eval 'edoc:application(amqp_client, ".", [{preprocess, true}, {macros, [{edoc, true}]}])' -run init stop
+
+###############################################################################
+## Testing
+###############################################################################
+
+include test.mk
+
+compile_tests: $(TEST_TARGETS) $(EBIN_DIR)/$(PACKAGE).app
+
+$(TEST_TARGETS): $(TEST_DIR)
+
+.PHONY: $(TEST_DIR)
+$(TEST_DIR): $(DEPS_DIR)/$(COMMON_PACKAGE_DIR)
+	$(MAKE) -C $(TEST_DIR)
+
+###############################################################################
+## Packaging
+###############################################################################
+
+COPY=cp -pR
+
+# Assemble the rabbit_common .ez: broker beams listed in $(DEPS) plus
+# the broker headers, zipped into an Erlang archive.
+$(DIST_DIR)/$(COMMON_PACKAGE_EZ): $(BROKER_DEPS) $(COMMON_PACKAGE).app | $(DIST_DIR)
+	rm -f $@
+	$(MAKE) -C $(BROKER_DIR)
+	rm -rf $(DIST_DIR)/$(COMMON_PACKAGE_DIR)
+	mkdir -p $(DIST_DIR)/$(COMMON_PACKAGE_DIR)/$(INCLUDE_DIR)
+	mkdir -p $(DIST_DIR)/$(COMMON_PACKAGE_DIR)/$(EBIN_DIR)
+	cp $(COMMON_PACKAGE).app $(DIST_DIR)/$(COMMON_PACKAGE_DIR)/$(EBIN_DIR)/
+	$(foreach DEP, $(DEPS), \
+	    ( cp $(BROKER_DIR)/ebin/$(DEP).beam $(DIST_DIR)/$(COMMON_PACKAGE_DIR)/$(EBIN_DIR)/ \
+	    );)
+	cp $(BROKER_DIR)/include/*.hrl $(DIST_DIR)/$(COMMON_PACKAGE_DIR)/$(INCLUDE_DIR)/
+	(cd $(DIST_DIR); zip -q -r $(COMMON_PACKAGE_EZ) $(COMMON_PACKAGE_DIR))
+
+# Self-contained source tarball: sources, headers, tests, the makefiles
+# and a README with the build instructions appended from the website.
+source_tarball: $(DIST_DIR)/$(COMMON_PACKAGE_EZ) $(EBIN_DIR)/$(PACKAGE).app | $(DIST_DIR)
+	mkdir -p $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(DIST_DIR)
+	$(COPY) $(DIST_DIR)/$(COMMON_PACKAGE_EZ) $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(DIST_DIR)/
+	$(COPY) README.in $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/README
+	elinks -dump -no-references -no-numbering $(WEB_URL)build-erlang-client.html >> $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/README
+	$(COPY) common.mk $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/
+	$(COPY) test.mk $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/
+	sed 's/%%VSN%%/$(VERSION)/' Makefile.in > $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/Makefile
+	mkdir -p $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(SOURCE_DIR)
+	$(COPY) $(SOURCE_DIR)/*.erl $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(SOURCE_DIR)/
+	mkdir -p $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(EBIN_DIR)
+	$(COPY) $(EBIN_DIR)/*.app $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(EBIN_DIR)/
+	mkdir -p $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(INCLUDE_DIR)
+	$(COPY) $(INCLUDE_DIR)/*.hrl $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(INCLUDE_DIR)/
+	mkdir -p $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(TEST_DIR)
+	$(COPY) $(TEST_DIR)/*.erl $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(TEST_DIR)/
+	$(COPY) $(TEST_DIR)/Makefile $(DIST_DIR)/$(SOURCE_PACKAGE_DIR)/$(TEST_DIR)/
+	cd $(DIST_DIR) ; tar czf $(SOURCE_PACKAGE_TAR_GZ) $(SOURCE_PACKAGE_DIR)
+
+$(DIST_DIR):
+	mkdir -p $@
--- /dev/null
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+#
+
+# Stamped in by the packaging step when this template is instantiated
+# as the source-tarball Makefile (see the sed rule in package.mk).
+VERSION=%%VSN%%
+
+include common.mk
+include test.mk
+
+clean: common_clean
+
+# Delegate test compilation to the test subdirectory, passing the
+# stamped version through.
+compile_tests:
+	$(MAKE) -C test VERSION=$(VERSION)
+
--- /dev/null
+README.in:
+
+Please see http://www.rabbitmq.com/build-erlang-client.html for build
+instructions.
+
+For your convenience, a text copy of these instructions is available
+below. Please be aware that the instructions here may not be as up to
+date as those at the above URL.
+
+===========================================================================
--- /dev/null
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+#
+
+# The client library can either be built from source control or by downloading
+# a source tarball from the RabbitMQ site. The intention behind the source tarball is
+# to be able to unpack this anywhere and just run a simple test, under the
+# assumption that you have a running broker. This provides the simplest
+# possible way of building and running the client.
+#
+# The source control version, on the other hand, contains far more infrastructure
+# to start and stop brokers, package modules from the server, run embedded tests
+# and so forth.
+#
+# This means that the Makefile of the source control version contains a lot of
+# functionality that just wouldn't work with the source tarball version.
+#
+# The purpose of this common Makefile is to define as many commonalities
+# between the build requirements of the source control version and the source
+# tarball version. This avoids duplicating make definitions and rules and
+# helps keep the Makefile maintenance well factored.
+
+ifndef TMPDIR
+TMPDIR := /tmp
+endif
+
+# Standard directory layout shared by both build flavours.
+EBIN_DIR=ebin
+BROKER_DIR=../rabbitmq-server
+export INCLUDE_DIR=include
+TEST_DIR=test
+SOURCE_DIR=src
+DIST_DIR=dist
+DEPS_DIR=deps
+DOC_DIR=doc
+DEPS_FILE=deps.mk
+
+# Prefix any pre-existing ERL_LIBS with ':' so it can be appended to
+# our own library path below.
+ifeq ("$(ERL_LIBS)", "")
+  ERL_LIBS :=
+else
+  ERL_LIBS := :$(ERL_LIBS)
+endif
+
+ERL_PATH ?=
+
+PACKAGE=amqp_client
+PACKAGE_DIR=$(PACKAGE)-$(VERSION)
+PACKAGE_NAME_EZ=$(PACKAGE_DIR).ez
+COMMON_PACKAGE=rabbit_common
+export COMMON_PACKAGE_DIR=$(COMMON_PACKAGE)-$(VERSION)
+COMMON_PACKAGE_EZ=$(COMMON_PACKAGE_DIR).ez
+NODE_NAME=amqp_client
+
+# Module list read from the rabbit_common .app.in; drives which broker
+# .beam files get packaged into the common .ez (see package.mk).
+DEPS=$(shell erl -noshell -eval '{ok,[{_,_,[_,_,{modules, Mods},_,_,_]}]} = \
+                  file:consult("$(COMMON_PACKAGE).app.in"), \
+                  [io:format("~p ",[M]) || M <- Mods], halt().')
+
+INCLUDES=$(wildcard $(INCLUDE_DIR)/*.hrl)
+SOURCES=$(wildcard $(SOURCE_DIR)/*.erl)
+TARGETS=$(patsubst $(SOURCE_DIR)/%.erl, $(EBIN_DIR)/%.beam, $(SOURCES))
+TEST_SOURCES=$(wildcard $(TEST_DIR)/*.erl)
+TEST_TARGETS=$(patsubst $(TEST_DIR)/%.erl, $(TEST_DIR)/%.beam, $(TEST_SOURCES))
+
+# On Cygwin, ERL_LIBS must be a Windows-style path for erl to parse it.
+LIBS_PATH_UNIX=$(DEPS_DIR):$(DIST_DIR)$(ERL_LIBS)
+IS_CYGWIN=$(shell if [ $(shell expr "$(shell uname -s)" : 'CYGWIN_NT') -gt 0 ]; then echo "true"; else echo "false"; fi)
+ifeq ($(IS_CYGWIN),true)
+  LIBS_PATH=ERL_LIBS="$(shell cygpath -wp $(LIBS_PATH_UNIX))"
+else
+  LIBS_PATH=ERL_LIBS=$(LIBS_PATH_UNIX)
+endif
+
+LOAD_PATH=$(EBIN_DIR) $(TEST_DIR) $(ERL_PATH)
+
+RUN:=$(LIBS_PATH) erl -pa $(LOAD_PATH) -sname $(NODE_NAME)
+
+MKTEMP=$$(mktemp $(TMPDIR)/tmp.XXXXXXXXXX)
+
+ifndef USE_SPECS
+# our type specs rely on features / bug fixes in dialyzer that are
+# only available in R13B01 upwards (R13B is eshell 5.7.2)
+#
+# NB: do not mark this variable for export, otherwise it will
+# override the test in rabbitmq-server's Makefile when it does the
+# make -C, which causes problems whenever the test here and the test
+# there compare system_info(version) against *different* eshell
+# version numbers.
+USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,7,2]), halt().')
+endif
+
+ERLC_OPTS=-I $(INCLUDE_DIR) -pa $(EBIN_DIR) -o $(EBIN_DIR) -Wall -v +debug_info $(if $(filter true,$(USE_SPECS)),-Duse_specs)
+
+RABBITMQ_NODENAME=rabbit
+PA_LOAD_PATH=-pa $(realpath $(LOAD_PATH))
+RABBITMQCTL=$(BROKER_DIR)/scripts/rabbitmqctl
+
+# SSL test knobs: enabled only when the caller points SSL_CERTS_DIR at
+# a generated certificate tree.
+ifdef SSL_CERTS_DIR
+SSL := true
+ALL_SSL := { $(MAKE) test_ssl || OK=false; }
+ALL_SSL_COVERAGE := { $(MAKE) test_ssl_coverage || OK=false; }
+SSL_BROKER_ARGS := -rabbit ssl_listeners [{\\\"0.0.0.0\\\",5671},{\\\"::1\\\",5671}] \
+                   -rabbit ssl_options [{cacertfile,\\\"$(SSL_CERTS_DIR)/testca/cacert.pem\\\"},{certfile,\\\"$(SSL_CERTS_DIR)/server/cert.pem\\\"},{keyfile,\\\"$(SSL_CERTS_DIR)/server/key.pem\\\"},{verify,verify_peer},{fail_if_no_peer_cert,true}]
+SSL_CLIENT_ARGS := -erlang_client_ssl_dir $(SSL_CERTS_DIR)
+else
+SSL := @echo No SSL_CERTS_DIR defined. && false
+ALL_SSL := true
+ALL_SSL_COVERAGE := true
+SSL_BROKER_ARGS :=
+SSL_CLIENT_ARGS :=
+endif
+
+# Versions prior to this are not supported
+NEED_MAKE := 3.80
+ifneq "$(NEED_MAKE)" "$(firstword $(sort $(NEED_MAKE) $(MAKE_VERSION)))"
+$(error Versions of make prior to $(NEED_MAKE) are not supported)
+endif
+
+# .DEFAULT_GOAL introduced in 3.81
+DEFAULT_GOAL_MAKE := 3.81
+ifneq "$(DEFAULT_GOAL_MAKE)" "$(firstword $(sort $(DEFAULT_GOAL_MAKE) $(MAKE_VERSION)))"
+.DEFAULT_GOAL=all
+endif
+
+all: package
+
+common_clean:
+	rm -f $(EBIN_DIR)/*.beam
+	rm -f erl_crash.dump
+	rm -rf $(DEPS_DIR)
+	rm -rf $(DOC_DIR)
+	rm -f $(DEPS_FILE)
+	$(MAKE) -C $(TEST_DIR) clean
+
+compile: $(TARGETS) $(EBIN_DIR)/$(PACKAGE).app
+
+run: compile
+	$(RUN)
+
+###############################################################################
+##  Packaging
+###############################################################################
+
+# Build the amqp_client .ez archive from compiled beams, the .app file
+# and the public headers.
+$(DIST_DIR)/$(PACKAGE_NAME_EZ): $(TARGETS) $(EBIN_DIR)/$(PACKAGE).app | $(DIST_DIR)
+	rm -f $@
+	rm -rf $(DIST_DIR)/$(PACKAGE_DIR)
+	mkdir -p $(DIST_DIR)/$(PACKAGE_DIR)/$(EBIN_DIR)
+	mkdir -p $(DIST_DIR)/$(PACKAGE_DIR)/$(INCLUDE_DIR)
+	cp -r $(EBIN_DIR)/*.beam $(DIST_DIR)/$(PACKAGE_DIR)/$(EBIN_DIR)
+	cp -r $(EBIN_DIR)/*.app $(DIST_DIR)/$(PACKAGE_DIR)/$(EBIN_DIR)
+	mkdir -p $(DIST_DIR)/$(PACKAGE_DIR)/$(INCLUDE_DIR)
+	cp -r $(INCLUDE_DIR)/* $(DIST_DIR)/$(PACKAGE_DIR)/$(INCLUDE_DIR)
+	(cd $(DIST_DIR); zip -q -r $(PACKAGE_NAME_EZ) $(PACKAGE_DIR))
+
+package: $(DIST_DIR)/$(PACKAGE_NAME_EZ)
+
+###############################################################################
+## Internal targets
+###############################################################################
+
+$(DEPS_DIR)/$(COMMON_PACKAGE_DIR): $(DIST_DIR)/$(COMMON_PACKAGE_EZ) | $(DEPS_DIR)
+	rm -rf $(DEPS_DIR)/$(COMMON_PACKAGE_DIR)
+	mkdir -p $(DEPS_DIR)/$(COMMON_PACKAGE_DIR)
+	unzip -q -o $< -d $(DEPS_DIR)
+
+$(DEPS_FILE): $(SOURCES) $(INCLUDES)
+	rm -f $@
+	echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript $(BROKER_DIR)/generate_deps $@ $(EBIN_DIR)
+
+$(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl $(INCLUDES) $(DEPS_DIR)/$(COMMON_PACKAGE_DIR) | $(DEPS_FILE)
+	$(LIBS_PATH) erlc $(ERLC_OPTS) $<
+
+$(DEPS_DIR):
+	mkdir -p $@
+
+# Note that all targets which depend on clean must have clean in their
+# name. Also any target that doesn't depend on clean should not have
+# clean in its name, unless you know that you don't need any of the
+# automatic dependency generation for that target.
+
+# We want to load the dep file if *any* target *doesn't* contain
+# "clean" - i.e. if removing all clean-like targets leaves something
+
+ifeq "$(MAKECMDGOALS)" ""
+TESTABLEGOALS:=$(.DEFAULT_GOAL)
+else
+TESTABLEGOALS:=$(MAKECMDGOALS)
+endif
+
+ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" ""
+-include $(DEPS_FILE)
+endif
--- /dev/null
+%% Application resource file for the Erlang AMQP client.  %%VSN%% is
+%% substituted at build time; 'modules' is filled in by the app
+%% generation step.
+{application, amqp_client,
+  [{description, "RabbitMQ AMQP Client"},
+   {vsn, "%%VSN%%"},
+   {modules, []},
+   {registered, [amqp_sup]},
+   {env, [{prefer_ipv6, false}]},
+   {mod, {amqp_client, []}},
+   {applications, [kernel, stdlib, xmerl]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-ifndef(AMQP_CLIENT_HRL).
+-define(AMQP_CLIENT_HRL, true).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+%% A message as published or consumed via the client: AMQP 0-9-1
+%% properties plus the raw payload binary.
+-record(amqp_msg, {props = #'P_basic'{}, payload = <<>>}).
+
+%% Parameters for a network (socket) connection to a broker.
+%% NOTE(review): port 'undefined' presumably selects the protocol
+%% default (5672, or 5671 with ssl_options set) -- confirm in
+%% amqp_connection.
+-record(amqp_params_network, {username           = <<"guest">>,
+                              password           = <<"guest">>,
+                              virtual_host       = <<"/">>,
+                              host               = "localhost",
+                              port               = undefined,
+                              channel_max        = 0,
+                              frame_max          = 0,
+                              heartbeat          = 0,
+                              connection_timeout = infinity,
+                              ssl_options        = none,
+                              auth_mechanisms    =
+                                  [fun amqp_auth_mechanisms:plain/3,
+                                   fun amqp_auth_mechanisms:amqplain/3],
+                              client_properties  = [],
+                              socket_options     = []}).
+
+%% Parameters for a direct connection -- NOTE(review): presumably an
+%% in-cluster (distribution-based) connection to the given node;
+%% confirm against amqp_connection documentation.
+-record(amqp_params_direct, {username          = none,
+                             password          = none,
+                             virtual_host      = <<"/">>,
+                             node              = node(),
+                             adapter_info      = none,
+                             client_properties = []}).
+
+%% Peer/connection metadata -- NOTE(review): presumably populated by
+%% protocol adapters embedding the direct client; the producer is not
+%% visible in this file.
+-record(amqp_adapter_info, {host            = unknown,
+                            port            = unknown,
+                            peer_host       = unknown,
+                            peer_port       = unknown,
+                            name            = unknown,
+                            protocol        = unknown,
+                            additional_info = []}).
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-include("amqp_client.hrl").
+
+%% AMQP 0-9-1 protocol constants.
+-define(PROTOCOL_VERSION_MAJOR, 0).
+-define(PROTOCOL_VERSION_MINOR, 9).
+%% Wire-level handshake preamble: the literal bytes "AMQP" 0 0 9 1.
+-define(PROTOCOL_HEADER, <<"AMQP", 0, 0, 9, 1>>).
+-define(PROTOCOL, rabbit_framing_amqp_0_9_1).
+
+%% Channel numbers are 16 bits on the wire.
+-define(MAX_CHANNEL_NUMBER, 65535).
+
+%% Logging shims over error_logger.
+-define(LOG_DEBUG(Format), error_logger:info_msg(Format)).
+-define(LOG_INFO(Format, Args), error_logger:info_msg(Format, Args)).
+-define(LOG_WARN(Format, Args), error_logger:warning_msg(Format, Args)).
+-define(LOG_ERR(Format, Args), error_logger:error_msg(Format, Args)).
+
+%% Capabilities advertised to the broker -- NOTE(review): presumably
+%% sent in the client_properties of connection.start-ok; confirm in the
+%% connection-establishment code.
+-define(CLIENT_CAPABILITIES,
+        [{<<"publisher_confirms">>,           bool, true},
+         {<<"exchange_exchange_bindings">>,   bool, true},
+         {<<"basic.nack">>,                   bool, true},
+         {<<"consumer_cancel_notify">>,       bool, true},
+         {<<"connection.blocked">>,           bool, true},
+         {<<"authentication_failure_close">>, bool, true}]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-include("amqp_client.hrl").
+
+%% Shared types and callback specs for consumer callback modules.
+%% Hidden from the documentation build: the doc target defines the
+%% 'edoc' macro (see the edoc:application invocation in the package
+%% Makefile), presumably because edoc cannot process these spec
+%% attributes -- confirm before changing.
+-ifndef(edoc).
+-type(state() :: any()).
+-type(consume() :: #'basic.consume'{}).
+-type(consume_ok() :: #'basic.consume_ok'{}).
+-type(cancel() :: #'basic.cancel'{}).
+-type(cancel_ok() :: #'basic.cancel_ok'{}).
+-type(deliver() :: #'basic.deliver'{}).
+-type(from() :: any()).
+-type(reason() :: any()).
+-type(ok_error() :: {ok, state()} | {error, reason(), state()}).
+
+-spec(init/1 :: ([any()]) -> {ok, state()}).
+-spec(handle_consume/3 :: (consume(), pid(), state()) -> ok_error()).
+-spec(handle_consume_ok/3 :: (consume_ok(), consume(), state()) ->
+                                  ok_error()).
+-spec(handle_cancel/2 :: (cancel(), state()) -> ok_error()).
+-spec(handle_server_cancel/2 :: (cancel(), state()) -> ok_error()).
+-spec(handle_cancel_ok/3 :: (cancel_ok(), cancel(), state()) -> ok_error()).
+-spec(handle_deliver/3 :: (deliver(), #amqp_msg{}, state()) -> ok_error()).
+-spec(handle_info/2 :: (any(), state()) -> ok_error()).
+-spec(handle_call/3 :: (any(), from(), state()) ->
+                           {reply, any(), state()} | {noreply, state()} |
+                           {error, reason(), state()}).
+-spec(terminate/2 :: (any(), state()) -> state()).
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Destination-string path prefixes; each prefix selects how the rest
+%% of the destination is interpreted (presumably by a destination
+%% parser elsewhere in this application -- not visible here).
+-define(QUEUE_PREFIX, "/queue").
+-define(TOPIC_PREFIX, "/topic").
+-define(EXCHANGE_PREFIX, "/exchange").
+-define(AMQQUEUE_PREFIX, "/amq/queue").
+-define(TEMP_QUEUE_PREFIX, "/temp-queue").
+%% reply queues names can have slashes in the content so no further
+%% parsing happens.
+-define(REPLY_QUEUE_PREFIX, "/reply-queue/").
--- /dev/null
+%% Application resource file for the rabbit_common library application.
+%% %%VSN%% is substituted with the real version string at build time.
+%% No registered processes and no application env: this is a pure
+%% library app (kernel/stdlib are its only runtime dependencies).
+{application, rabbit_common,
+ [{description, "RabbitMQ Common Libraries"},
+ {vsn, "%%VSN%%"},
+ {modules, [
+ app_utils,
+ credit_flow,
+ pmon,
+ gen_server2,
+ mirrored_supervisor,
+ mochijson2,
+ priority_queue,
+ rabbit_backing_queue,
+ rabbit_basic,
+ rabbit_binary_generator,
+ rabbit_binary_parser,
+ rabbit_channel,
+ rabbit_channel_interceptor,
+ rabbit_runtime_parameter,
+ rabbit_command_assembler,
+ rabbit_exchange_type,
+ rabbit_exchange_decorator,
+ rabbit_auth_backend,
+ rabbit_auth_mechanism,
+ rabbit_framing_amqp_0_8,
+ rabbit_framing_amqp_0_9_1,
+ rabbit_heartbeat,
+ rabbit_misc,
+ rabbit_msg_store_index,
+ rabbit_net,
+ rabbit_nodes,
+ rabbit_policy_validator,
+ rabbit_reader,
+ rabbit_writer,
+ rabbit_event,
+ rabbit_queue_collector,
+ rabbit_queue_decorator,
+ rabbit_amqqueue,
+ supervisor2
+ ]},
+ {registered, []},
+ {env, []},
+ {applications, [kernel, stdlib]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_auth_mechanisms).
+
+-include("amqp_client.hrl").
+
+-export([plain/3, amqplain/3, external/3, crdemo/3]).
+
+%%---------------------------------------------------------------------------
+
+%% SASL PLAIN: on the 'init' step announce the mechanism name; on the
+%% challenge step respond with <<0, Username, 0, Password>> (the
+%% RFC 4616 message format, with an empty authorization identity).
+%% Fix: the final state was bound to the underscore-prefixed variable
+%% '_State' and then used; underscore-prefixed variables signal
+%% "unused" and must not be read, so it is renamed to 'State'.
+plain(none, _, init) ->
+    {<<"PLAIN">>, []};
+plain(none, #amqp_params_network{username = Username,
+                                 password = Password}, State) ->
+    {<<0, Username/binary, 0, Password/binary>>, State}.
+
+%% AMQPLAIN: announce the mechanism on 'init'; then respond with an
+%% AMQP-encoded field table carrying LOGIN and PASSWORD longstr
+%% entries (encoded by rabbit_binary_generator:generate_table/1).
+%% Fix: renamed the misleading underscore-prefixed '_State' (which was
+%% being read) to 'State'.
+amqplain(none, _, init) ->
+    {<<"AMQPLAIN">>, []};
+amqplain(none, #amqp_params_network{username = Username,
+                                    password = Password}, State) ->
+    LoginTable = [{<<"LOGIN">>,    longstr, Username},
+                  {<<"PASSWORD">>, longstr, Password}],
+    {rabbit_binary_generator:generate_table(LoginTable), State}.
+
+%% EXTERNAL: the credentials come from outside the AMQP protocol
+%% (e.g. the transport layer), so the response after 'init' is empty.
+%% Fix: renamed the misleading underscore-prefixed '_State' (which was
+%% being read) to 'State'.
+external(none, _, init) ->
+    {<<"EXTERNAL">>, []};
+external(none, _, State) ->
+    {<<"">>, State}.
+
+%% RABBIT-CR-DEMO: demonstration challenge-response mechanism. The
+%% integer state counts protocol steps: 0 = announce mechanism,
+%% 1 = send username, 2 = password sent in response to the server's
+%% fixed challenge string.
+crdemo(none, _, init) ->
+ {<<"RABBIT-CR-DEMO">>, 0};
+crdemo(none, #amqp_params_network{username = Username}, 0) ->
+ {Username, 1};
+%% Deliberately matches the server's exact challenge text; anything
+%% else crashes with function_clause (let-it-crash on protocol errors).
+crdemo(<<"Please tell me your password">>,
+ #amqp_params_network{password = Password}, 1) ->
+ {<<"My password is ", Password/binary>>, 2}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
+%% @type amqp_reason(Type) = {Type, Code, Text}
+%% Code = non_neg_integer()
+%% Text = binary().
+%% @doc This module encapsulates the client's view of an AMQP
+%% channel. Each server side channel is represented by an amqp_channel
+%% process on the client side. Channel processes are created using the
+%% {@link amqp_connection} module. Channel processes are supervised
+%% under amqp_client's supervision tree.<br/>
+%% <br/>
+%% In case of a failure or an AMQP error, the channel process exits with a
+%% meaningful exit reason:<br/>
+%% <br/>
+%% <table>
+%% <tr>
+%% <td><strong>Cause</strong></td>
+%% <td><strong>Exit reason</strong></td>
+%% </tr>
+%% <tr>
+%% <td>Any reason, where Code would have been 200 otherwise</td>
+%% <td>```normal'''</td>
+%% </tr>
+%% <tr>
+%% <td>User application calls amqp_channel:close/3</td>
+%% <td>```close_reason(app_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server closes channel (soft error)</td>
+%% <td>```close_reason(server_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server misbehaved (did not follow protocol)</td>
+%% <td>```close_reason(server_misbehaved)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Connection is closing (causing all channels to cleanup and
+%% close)</td>
+%% <td>```{shutdown, {connection_closing, amqp_reason(atom())}}'''</td>
+%% </tr>
+%% <tr>
+%% <td>Other error</td>
+%% <td>(various error reasons, causing more detailed logging)</td>
+%% </tr>
+%% </table>
+%% <br/>
+%% See type definitions below.
+-module(amqp_channel).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([call/2, call/3, cast/2, cast/3, cast_flow/3]).
+-export([close/1, close/3]).
+-export([register_return_handler/2, unregister_return_handler/1,
+ register_flow_handler/2, unregister_flow_handler/1,
+ register_confirm_handler/2, unregister_confirm_handler/1]).
+-export([call_consumer/2, subscribe/3]).
+-export([next_publish_seqno/1, wait_for_confirms/1, wait_for_confirms/2,
+ wait_for_confirms_or_die/1, wait_for_confirms_or_die/2]).
+-export([start_link/5, set_writer/2, connection_closing/3, open/1]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+%% How long (ms) to wait for the RPC queue to drain on connection
+%% close before giving up with 'timed_out_flushing_channel'.
+-define(TIMEOUT_FLUSH, 60000).
+
+-record(state, {number, %% channel number
+ connection, %% owning connection process
+ consumer, %% consumer process (see amqp_gen_consumer:call_consumer/2)
+ driver, %% network | direct -- selects the writer API in do/4
+ rpc_requests = queue:new(), %% queued {From, Sender, Method, Content, Flow}
+ closing = false, %% false |
+ %% {just_channel, Reason} |
+ %% {connection, Reason}
+ writer, %% rabbit_writer (network) or rabbit_channel (direct) pid
+ return_handler = none, %% none | {Pid, MonitorRef} for basic.return
+ confirm_handler = none, %% none | {Pid, MonitorRef} for basic.ack/nack
+ next_pub_seqno = 0, %% 0 = not in confirm mode, else next publish seqno
+ flow_active = true, %% last channel.flow 'active' value from server
+ flow_handler = none, %% none | {Pid, MonitorRef} for channel.flow
+ unconfirmed_set = gb_sets:new(), %% seqnos published but not yet confirmed
+ waiting_set = gb_trees:empty(), %% callers waiting in wait_for_confirms
+ only_acks_received = true
+ }).
+
+%%---------------------------------------------------------------------------
+%% Type Definitions
+%%---------------------------------------------------------------------------
+
+%% @type amqp_method().
+%% This abstract datatype represents the set of methods that comprise
+%% the AMQP execution model. As indicated in the overview, the
+%% attributes of each method in the execution model are described in
+%% the protocol documentation. The Erlang record definitions are
+%% autogenerated from a parseable version of the specification. Most
+%% fields in the generated records have sensible default values that
+%% you need not worry in the case of a simple usage of the client
+%% library.
+
+%% @type amqp_msg() = #amqp_msg{}.
+%% This is the content encapsulated in content-bearing AMQP methods. It
+%% contains the following fields:
+%% <ul>
+%% <li>props :: class_property() - A class property record, defaults to
+%% #'P_basic'{}</li>
+%% <li>payload :: binary() - The arbitrary data payload</li>
+%% </ul>
+
+%%---------------------------------------------------------------------------
+%% AMQP Channel API methods
+%%---------------------------------------------------------------------------
+
+%% @spec (Channel, Method) -> Result
+%% @doc Convenience wrapper: identical to
+%% amqp_channel:call(Channel, Method, none).
+call(Ch, Method) ->
+    gen_server:call(Ch, {call, Method, none, self()}, infinity).
+
+%% @spec (Channel, Method, Content) -> Result
+%% where
+%%      Channel = pid()
+%%      Method = amqp_method()
+%%      Content = amqp_msg() | none
+%%      Result = amqp_method() | ok | blocked | closing
+%% @doc Sends an AMQP method on the channel. Content must be an
+%% amqp_msg() for content-bearing methods and the atom 'none'
+%% otherwise.<br/>
+%% Synchronous methods block until the server's reply arrives and
+%% return it; asynchronous methods block until the method is on the
+%% wire and return 'ok'.<br/>
+%% Returns 'blocked' when the server has throttled the client for flow
+%% control, and 'closing' while the channel is shutting down.<br/>
+%% Note that for asynchronous methods the synchronicity of 'call' only
+%% means the method was handed to the broker -- not that the broker
+%% has accepted responsibility for the message.
+call(Ch, Method, Content) ->
+    Request = {call, Method, Content, self()},
+    gen_server:call(Ch, Request, infinity).
+
+%% @spec (Channel, Method) -> ok
+%% @doc Convenience wrapper: identical to
+%% amqp_channel:cast(Channel, Method, none).
+cast(Ch, Method) ->
+    Request = {cast, Method, none, self(), noflow},
+    gen_server:cast(Ch, Request).
+
+%% @spec (Channel, Method, Content) -> ok
+%% where
+%%      Channel = pid()
+%%      Method = amqp_method()
+%%      Content = amqp_msg() | none
+%% @doc Same as {@link call/3} except it returns 'ok' immediately
+%% without blocking the caller. Not recommended for synchronous
+%% methods, since there is then no way to know whether the server
+%% received the method.
+cast(Ch, Method, Content) ->
+    Request = {cast, Method, Content, self(), noflow},
+    gen_server:cast(Ch, Request).
+
+%% @spec (Channel, Method, Content) -> ok
+%% where
+%%      Channel = pid()
+%%      Method = amqp_method()
+%%      Content = amqp_msg() | none
+%% @doc Like cast/3, but participates in credit flow control: the
+%% caller registers with credit_flow before the cast, and the channel
+%% acks the credit when it processes the message (see handle_cast).
+cast_flow(Ch, Method, Content) ->
+    credit_flow:send(Ch),
+    gen_server:cast(Ch, {cast, Method, Content, self(), flow}).
+
+%% @spec (Channel) -> ok | closing
+%% where
+%%      Channel = pid()
+%% @doc Closes the channel with the default reply
+%% (code 200, text <<"Goodbye">>).
+close(Ch) ->
+    close(Ch, 200, <<"Goodbye">>).
+
+%% @spec (Channel, Code, Text) -> ok | closing
+%% where
+%%      Channel = pid()
+%%      Code = integer()
+%%      Text = binary()
+%% @doc Closes the channel with the supplied reply code and text.
+%% Returns the atom 'closing' if the channel is already closing.
+close(Ch, Code, Text) ->
+    gen_server:call(Ch, {close, Code, Text}, infinity).
+
+%% @spec (Channel) -> integer()
+%% where
+%%      Channel = pid()
+%% @doc In confirm mode, returns the sequence number that will be
+%% assigned to the next published message.
+next_publish_seqno(Ch) ->
+    gen_server:call(Ch, next_publish_seqno, infinity).
+
+%% @spec (Channel) -> boolean() | 'timeout'
+%% where
+%%      Channel = pid()
+%% @doc Waits (without limit) until every message published since the
+%% last call has been either ack'd or nack'd by the broker. Throws an
+%% exception when called on a non-confirm channel.
+wait_for_confirms(Ch) ->
+    wait_for_confirms(Ch, infinity).
+
+%% @spec (Channel, Timeout) -> boolean() | 'timeout'
+%% where
+%%      Channel = pid()
+%%      Timeout = non_neg_integer() | 'infinity'
+%% @doc Waits until every message published since the last call has
+%% been either ack'd or nack'd by the broker, or the timeout expires.
+%% Throws an exception when called on a non-confirm channel.
+wait_for_confirms(Ch, Timeout) ->
+    Reply = gen_server:call(Ch, {wait_for_confirms, Timeout}, infinity),
+    case Reply of
+        {error, Reason} -> throw(Reason);
+        Result          -> Result
+    end.
+
+%% @spec (Channel) -> true
+%% where
+%%      Channel = pid()
+%% @doc Behaves the same as wait_for_confirms/1, but if a nack is
+%% received, the calling process is immediately sent an
+%% exit(nacks_received).
+wait_for_confirms_or_die(Channel) ->
+ wait_for_confirms_or_die(Channel, infinity).
+
+%% @spec (Channel, Timeout) -> true
+%% where
+%%      Channel = pid()
+%%      Timeout = non_neg_integer() | 'infinity'
+%% @doc Behaves the same as wait_for_confirms/1, but if a nack is
+%% received, the calling process is immediately sent an
+%% exit(nacks_received). If the timeout expires, the calling process is
+%% sent an exit(timeout).
+wait_for_confirms_or_die(Channel, Timeout) ->
+ case wait_for_confirms(Channel, Timeout) of
+ %% In both failure cases the channel is closed (code 200) before
+ %% this process exits, so the channel does not leak.
+ timeout -> close(Channel, 200, <<"Confirm Timeout">>),
+ exit(timeout);
+ false -> close(Channel, 200, <<"Nacks Received">>),
+ exit(nacks_received);
+ true -> true
+ end.
+
+%% @spec (Channel, ReturnHandler) -> ok
+%% where
+%%      Channel = pid()
+%%      ReturnHandler = pid()
+%% @doc Registers a handler for returned messages. The registered
+%% process will receive #basic.return{} records.
+register_return_handler(Ch, Handler) ->
+    gen_server:cast(Ch, {register_return_handler, Handler}).
+
+%% @spec (Channel) -> ok
+%% where
+%%      Channel = pid()
+%% @doc Removes the return handler. A no-op when none is registered.
+unregister_return_handler(Ch) ->
+    gen_server:cast(Ch, unregister_return_handler).
+
+%% @spec (Channel, ConfirmHandler) -> ok
+%% where
+%%      Channel = pid()
+%%      ConfirmHandler = pid()
+%% @doc Registers a handler for confirm-related messages. The
+%% registered process will receive #basic.ack{} and #basic.nack{}
+%% commands.
+register_confirm_handler(Ch, Handler) ->
+    gen_server:cast(Ch, {register_confirm_handler, Handler}).
+
+%% @spec (Channel) -> ok
+%% where
+%%      Channel = pid()
+%% @doc Removes the confirm handler. A no-op when none is registered.
+unregister_confirm_handler(Ch) ->
+    gen_server:cast(Ch, unregister_confirm_handler).
+
+%% @spec (Channel, FlowHandler) -> ok
+%% where
+%%      Channel = pid()
+%%      FlowHandler = pid()
+%% @doc Registers a handler for channel flow notifications. The
+%% registered process will receive #channel.flow{} records.
+register_flow_handler(Ch, Handler) ->
+    gen_server:cast(Ch, {register_flow_handler, Handler}).
+
+%% @spec (Channel) -> ok
+%% where
+%%      Channel = pid()
+%% @doc Removes the flow handler. A no-op when none is registered.
+unregister_flow_handler(Ch) ->
+    gen_server:cast(Ch, unregister_flow_handler).
+
+%% @spec (Channel, Msg) -> ok
+%% where
+%%      Channel = pid()
+%%      Msg = any()
+%% @doc Invokes Consumer:handle_call/2 on the amqp_gen_consumer
+%% implementation registered with the channel, returning its reply.
+call_consumer(Ch, Msg) ->
+    gen_server:call(Ch, {call_consumer, Msg}, infinity).
+
+%% @spec (Channel, BasicConsume, Subscriber) -> ok
+%% where
+%%      Channel = pid()
+%%      BasicConsume = amqp_method()
+%%      Subscriber = pid()
+%% @doc Subscribes Subscriber to a queue using the given basic.consume
+%% method. The guard pattern rejects any other method record.
+subscribe(Ch, #'basic.consume'{} = Method, Subscriber) ->
+    gen_server:call(Ch, {subscribe, Method, Subscriber}, infinity).
+
+%%---------------------------------------------------------------------------
+%% Internal interface
+%%---------------------------------------------------------------------------
+
+%% @private Starts the channel gen_server; Identity is stored for
+%% diagnostics in init/1.
+start_link(Driver, Connection, ChannelNumber, Consumer, Identity) ->
+    Args = [Driver, Connection, ChannelNumber, Consumer, Identity],
+    gen_server:start_link(?MODULE, Args, []).
+
+%% @private Installs the writer process once it is known.
+set_writer(Pid, Writer) ->
+    gen_server:cast(Pid, {set_writer, Writer}).
+
+%% @private Notifies the channel that its connection is closing.
+connection_closing(Pid, ChannelCloseType, Reason) ->
+    gen_server:cast(Pid, {connection_closing, ChannelCloseType, Reason}).
+
+%% @private Sends channel.open and waits for channel.open_ok.
+open(Pid) ->
+    gen_server:call(Pid, open, infinity).
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+%% @private
+%% Stores the process name for diagnostics and seeds the state from
+%% the start_link/5 arguments. The writer is not known yet; it arrives
+%% later via set_writer/2.
+init([Driver, Connection, ChannelNumber, Consumer, Identity]) ->
+ ?store_proc_name(Identity),
+ {ok, #state{connection = Connection,
+ driver = Driver,
+ number = ChannelNumber,
+ consumer = Consumer}}.
+
+%% @private
+%% channel.open is queued via rpc_top_half; the reply to From is
+%% deferred until channel.open_ok arrives (rpc_bottom_half).
+handle_call(open, From, State) ->
+ {noreply, rpc_top_half(#'channel.open'{}, none, From, none, noflow, State)};
+%% @private
+handle_call({close, Code, Text}, From, State) ->
+ handle_close(Code, Text, From, State);
+%% @private
+handle_call({call, Method, AmqpMsg, Sender}, From, State) ->
+ handle_method_to_server(Method, AmqpMsg, From, Sender, noflow, State);
+%% Handles the delivery of messages from a direct channel
+%% @private
+%% Note: 'ok' is replied immediately so the sender is not blocked
+%% while the method is processed.
+handle_call({send_command_sync, Method, Content}, From, State) ->
+ Ret = handle_method_from_server(Method, Content, State),
+ gen_server:reply(From, ok),
+ Ret;
+%% Handles the delivery of messages from a direct channel
+%% @private
+handle_call({send_command_sync, Method}, From, State) ->
+ Ret = handle_method_from_server(Method, none, State),
+ gen_server:reply(From, ok),
+ Ret;
+%% @private
+handle_call(next_publish_seqno, _From,
+ State = #state{next_pub_seqno = SeqNo}) ->
+ {reply, SeqNo, State};
+%% The reply to From is deferred (see handle_wait_for_confirms).
+handle_call({wait_for_confirms, Timeout}, From, State) ->
+ handle_wait_for_confirms(From, Timeout, State);
+%% @private
+handle_call({call_consumer, Msg}, _From,
+ State = #state{consumer = Consumer}) ->
+ {reply, amqp_gen_consumer:call_consumer(Consumer, Msg), State};
+%% @private
+handle_call({subscribe, BasicConsume, Subscriber}, From, State) ->
+ handle_method_to_server(BasicConsume, none, From, Subscriber, noflow,
+ State).
+
+%% @private
+handle_cast({set_writer, Writer}, State) ->
+ {noreply, State#state{writer = Writer}};
+%% @private
+handle_cast({cast, Method, AmqpMsg, Sender, noflow}, State) ->
+ handle_method_to_server(Method, AmqpMsg, none, Sender, noflow, State);
+%% Acks the credit granted to Sender in cast_flow/3 before processing.
+handle_cast({cast, Method, AmqpMsg, Sender, flow}, State) ->
+ credit_flow:ack(Sender),
+ handle_method_to_server(Method, AmqpMsg, none, Sender, flow, State);
+%% Each handler is monitored so it can be auto-unregistered if it dies
+%% (see the 'DOWN' clauses in handle_info).
+%% @private
+handle_cast({register_return_handler, ReturnHandler}, State) ->
+ Ref = erlang:monitor(process, ReturnHandler),
+ {noreply, State#state{return_handler = {ReturnHandler, Ref}}};
+%% @private
+handle_cast(unregister_return_handler,
+ State = #state{return_handler = {_ReturnHandler, Ref}}) ->
+ erlang:demonitor(Ref),
+ {noreply, State#state{return_handler = none}};
+%% @private
+handle_cast({register_confirm_handler, ConfirmHandler}, State) ->
+ Ref = erlang:monitor(process, ConfirmHandler),
+ {noreply, State#state{confirm_handler = {ConfirmHandler, Ref}}};
+%% @private
+handle_cast(unregister_confirm_handler,
+ State = #state{confirm_handler = {_ConfirmHandler, Ref}}) ->
+ erlang:demonitor(Ref),
+ {noreply, State#state{confirm_handler = none}};
+%% @private
+handle_cast({register_flow_handler, FlowHandler}, State) ->
+ Ref = erlang:monitor(process, FlowHandler),
+ {noreply, State#state{flow_handler = {FlowHandler, Ref}}};
+%% @private
+handle_cast(unregister_flow_handler,
+ State = #state{flow_handler = {_FlowHandler, Ref}}) ->
+ erlang:demonitor(Ref),
+ {noreply, State#state{flow_handler = none}};
+%% Received from channels manager
+%% @private
+handle_cast({method, Method, Content, noflow}, State) ->
+ handle_method_from_server(Method, Content, State);
+%% Handles the situation when the connection closes without closing the channel
+%% beforehand. The channel must block all further RPCs,
+%% flush the RPC queue (optional), and terminate
+%% @private
+handle_cast({connection_closing, CloseType, Reason}, State) ->
+ handle_connection_closing(CloseType, Reason, State);
+%% @private
+handle_cast({shutdown, Shutdown}, State) ->
+ handle_shutdown(Shutdown, State).
+
+%% Received from rabbit_channel in the direct case
+%% @private
+handle_info({send_command, Method}, State) ->
+ handle_method_from_server(Method, none, State);
+%% Received from rabbit_channel in the direct case
+%% @private
+handle_info({send_command, Method, Content}, State) ->
+ handle_method_from_server(Method, Content, State);
+%% Received from rabbit_channel in the direct case
+%% @private
+handle_info({send_command_and_notify, Q, ChPid, Method, Content}, State) ->
+ handle_method_from_server(Method, Content, State),
+ rabbit_amqqueue:notify_sent(Q, ChPid),
+ {noreply, State};
+%% This comes from the writer or rabbit_channel
+%% @private
+handle_info({channel_exit, _ChNumber, Reason}, State) ->
+ handle_channel_exit(Reason, State);
+%% This comes from rabbit_channel in the direct case
+handle_info({channel_closing, ChPid}, State) ->
+ ok = rabbit_channel:ready_for_close(ChPid),
+ {noreply, State};
+%% @private
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ {noreply, State};
+%% Timer set in handle_connection_closing when the RPC queue had to be
+%% flushed; if it fires, the channel gives up and stops.
+%% @private
+handle_info(timed_out_flushing_channel, State) ->
+ ?LOG_WARN("Channel (~p) closing: timed out flushing while "
+ "connection closing~n", [self()]),
+ {stop, timed_out_flushing_channel, State};
+%% The next three clauses match a 'DOWN' from a registered handler
+%% (the pid in the message is matched against the stored handler pid)
+%% and unregister it.
+%% @private
+handle_info({'DOWN', _, process, ReturnHandler, Reason},
+ State = #state{return_handler = {ReturnHandler, _Ref}}) ->
+ ?LOG_WARN("Channel (~p): Unregistering return handler ~p because it died. "
+ "Reason: ~p~n", [self(), ReturnHandler, Reason]),
+ {noreply, State#state{return_handler = none}};
+%% @private
+handle_info({'DOWN', _, process, ConfirmHandler, Reason},
+ State = #state{confirm_handler = {ConfirmHandler, _Ref}}) ->
+ ?LOG_WARN("Channel (~p): Unregistering confirm handler ~p because it died. "
+ "Reason: ~p~n", [self(), ConfirmHandler, Reason]),
+ {noreply, State#state{confirm_handler = none}};
+%% @private
+handle_info({'DOWN', _, process, FlowHandler, Reason},
+ State = #state{flow_handler = {FlowHandler, _Ref}}) ->
+ ?LOG_WARN("Channel (~p): Unregistering flow handler ~p because it died. "
+ "Reason: ~p~n", [self(), FlowHandler, Reason]),
+ {noreply, State#state{flow_handler = none}};
+%% Any other 'DOWN' is treated as a queue process going away.
+handle_info({'DOWN', _, process, QPid, _Reason}, State) ->
+ rabbit_amqqueue:notify_sent_queue_down(QPid),
+ {noreply, State};
+%% A waiter's confirm timeout fired: reply 'timeout' if it is still
+%% waiting, otherwise (already answered and removed) ignore.
+handle_info({confirm_timeout, From}, State = #state{waiting_set = WSet}) ->
+ case gb_trees:lookup(From, WSet) of
+ none ->
+ {noreply, State};
+ {value, _} ->
+ gen_server:reply(From, timeout),
+ {noreply, State#state{waiting_set = gb_trees:delete(From, WSet)}}
+ end.
+
+%% @private
+%% Flushes the writer before the process exits. gen_server ignores the
+%% return value of terminate/2, so returning State is harmless.
+terminate(_Reason, State) ->
+ flush_writer(State),
+ State.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%---------------------------------------------------------------------------
+%% RPC mechanism
+%%---------------------------------------------------------------------------
+
+%% Central outbound-method gate. Three checks run together: is the
+%% method valid for clients to send, was it a call (From =/= none) or a
+%% cast (From =:= none), and is the channel currently blocked/closing.
+%% On success, confirm-mode bookkeeping is applied before the method
+%% is queued via rpc_top_half.
+handle_method_to_server(Method, AmqpMsg, From, Sender, Flow,
+ State = #state{unconfirmed_set = USet}) ->
+ case {check_invalid_method(Method), From,
+ check_block(Method, AmqpMsg, State)} of
+ {ok, _, ok} ->
+ %% confirm.select enters confirm mode (seqno 1). While in
+ %% confirm mode (seqno > 0) each basic.publish consumes a
+ %% seqno and is tracked in the unconfirmed set.
+ State1 = case {Method, State#state.next_pub_seqno} of
+ {#'confirm.select'{}, _} ->
+ State#state{next_pub_seqno = 1};
+ {#'basic.publish'{}, 0} ->
+ State;
+ {#'basic.publish'{}, SeqNo} ->
+ State#state{unconfirmed_set =
+ gb_sets:add(SeqNo, USet),
+ next_pub_seqno = SeqNo + 1};
+ _ ->
+ State
+ end,
+ {noreply, rpc_top_half(Method, build_content(AmqpMsg),
+ From, Sender, Flow, State1)};
+ %% Blocked cast: nobody to reply to, so log and drop.
+ {ok, none, BlockReply} ->
+ ?LOG_WARN("Channel (~p): discarding method ~p in cast.~n"
+ "Reason: ~p~n", [self(), Method, BlockReply]),
+ {noreply, State};
+ %% Blocked call: tell the caller why.
+ {ok, _, BlockReply} ->
+ {reply, BlockReply, State};
+ %% Invalid method in a cast: log and drop.
+ {{_, InvalidMethodMessage}, none, _} ->
+ ?LOG_WARN("Channel (~p): ignoring cast of ~p method. " ++
+ InvalidMethodMessage ++ "~n", [self(), Method]),
+ {noreply, State};
+ %% Invalid method in a call: return the error to the caller.
+ {{InvalidMethodReply, _}, _, _} ->
+ {reply, {error, InvalidMethodReply}, State}
+ end.
+
+%% Builds and queues a channel.close. If the channel is blocked or
+%% already closing, the block reason is returned to the caller instead
+%% of queuing the close.
+handle_close(Code, Text, From, State) ->
+ Close = #'channel.close'{reply_code = Code,
+ reply_text = Text,
+ class_id = 0,
+ method_id = 0},
+ case check_block(Close, none, State) of
+ ok -> {noreply, rpc_top_half(Close, none, From, none, noflow,
+ State)};
+ BlockReply -> {reply, BlockReply, State}
+ end.
+
+%% Enqueue an outbound RPC. When the queue was empty before this
+%% request, no earlier request is in flight, so it is sent right away
+%% via do_rpc; otherwise it waits its turn (rpc_bottom_half advances
+%% the queue when a reply arrives).
+rpc_top_half(Method, Content, From, Sender, Flow,
+             State0 = #state{rpc_requests = Pending}) ->
+    Request = {From, Sender, Method, Content, Flow},
+    State1 = State0#state{rpc_requests = queue:in(Request, Pending)},
+    case queue:is_empty(Pending) of
+        true  -> do_rpc(State1);
+        false -> State1
+    end.
+
+%% Completes the RPC at the head of the queue: replies to the caller
+%% (if it was a call rather than a cast) and kicks off the next queued
+%% request via do_rpc.
+rpc_bottom_half(Reply, State = #state{rpc_requests = RequestQueue}) ->
+ {{value, {From, _Sender, _Method, _Content, _Flow}}, RequestQueue1} =
+ queue:out(RequestQueue),
+ case From of
+ none -> ok;
+ _ -> gen_server:reply(From, Reply)
+ end,
+ do_rpc(State#state{rpc_requests = RequestQueue1}).
+
+%% Sends the request at the head of the queue. Synchronous methods
+%% stay in the queue until the server's reply triggers
+%% rpc_bottom_half; asynchronous methods are replied to (if a call)
+%% and popped immediately, recursing to the next request. When the
+%% queue drains while the connection is closing, the deferred shutdown
+%% is finally initiated.
+do_rpc(State = #state{rpc_requests = Q,
+ closing = Closing}) ->
+ case queue:out(Q) of
+ {{value, {From, Sender, Method, Content, Flow}}, NewQ} ->
+ State1 = pre_do(Method, Content, Sender, State),
+ DoRet = do(Method, Content, Flow, State1),
+ case ?PROTOCOL:is_method_synchronous(Method) of
+ true -> State1;
+ false -> case {From, DoRet} of
+ {none, _} -> ok;
+ {_, ok} -> gen_server:reply(From, ok);
+ _ -> ok
+ %% Do not reply if error in do. Expecting
+ %% {channel_exit, _, _}
+ end,
+ do_rpc(State1#state{rpc_requests = NewQ})
+ end;
+ {empty, NewQ} ->
+ case Closing of
+ {connection, Reason} ->
+ gen_server:cast(self(),
+ {shutdown, {connection_closing, Reason}});
+ _ ->
+ ok
+ end,
+ State#state{rpc_requests = NewQ}
+ end.
+
+%% Returns the method of the in-flight RPC (head of the queue).
+%% Crashes (badmatch) if the queue is empty -- callers only use this
+%% when a server reply implies an outstanding request.
+pending_rpc_method(#state{rpc_requests = Q}) ->
+ {value, {_From, _Sender, Method, _Content, _Flow}} = queue:peek(Q),
+ Method.
+
+%% Per-method state adjustments made just before a method is sent:
+%% channel.close flips the channel into the closing state, and
+%% consume/cancel are announced to the consumer process first.
+pre_do(#'channel.close'{reply_code = Code, reply_text = Text}, none,
+ _Sender, State) ->
+ State#state{closing = {just_channel, {app_initiated_close, Code, Text}}};
+pre_do(#'basic.consume'{} = Method, none, Sender, State) ->
+ ok = call_to_consumer(Method, Sender, State),
+ State;
+pre_do(#'basic.cancel'{} = Method, none, Sender, State) ->
+ ok = call_to_consumer(Method, Sender, State),
+ State;
+pre_do(_, _, _, State) ->
+ State.
+
+%%---------------------------------------------------------------------------
+%% Handling of methods from the server
+%%---------------------------------------------------------------------------
+
+%% Inbound-method gate. Connection-class methods must never arrive on
+%% a non-zero channel (protocol violation -> server_misbehaved). While
+%% the channel itself is closing, everything except channel.close /
+%% channel.close_ok is dropped.
+handle_method_from_server(Method, Content, State = #state{closing = Closing}) ->
+ case is_connection_method(Method) of
+ true -> server_misbehaved(
+ #amqp_error{name = command_invalid,
+ explanation = "connection method on "
+ "non-zero channel",
+ method = element(1, Method)},
+ State);
+ false -> Drop = case {Closing, Method} of
+ {{just_channel, _}, #'channel.close'{}} -> false;
+ {{just_channel, _}, #'channel.close_ok'{}} -> false;
+ {{just_channel, _}, _} -> true;
+ _ -> false
+ end,
+ if Drop -> ?LOG_INFO("Channel (~p): dropping method ~p from "
+ "server because channel is closing~n",
+ [self(), {Method, Content}]),
+ {noreply, State};
+ true -> handle_method_from_server1(Method,
+ amqp_msg(Content), State)
+ end
+ end.
+
+%% Dispatches a (non-dropped) inbound method. Replies that complete a
+%% pending RPC go through rpc_bottom_half; consumer-facing methods are
+%% forwarded to the consumer process; confirms update the confirm
+%% bookkeeping and optionally notify the registered confirm handler.
+handle_method_from_server1(#'channel.open_ok'{}, none, State) ->
+ {noreply, rpc_bottom_half(ok, State)};
+handle_method_from_server1(#'channel.close'{reply_code = Code,
+ reply_text = Text},
+ none,
+ State = #state{closing = {just_channel, _}}) ->
+ %% Both client and server sent close at the same time. Don't shutdown yet,
+ %% wait for close_ok.
+ do(#'channel.close_ok'{}, none, noflow, State),
+ {noreply,
+ State#state{
+ closing = {just_channel, {server_initiated_close, Code, Text}}}};
+handle_method_from_server1(#'channel.close'{reply_code = Code,
+ reply_text = Text}, none, State) ->
+ do(#'channel.close_ok'{}, none, noflow, State),
+ handle_shutdown({server_initiated_close, Code, Text}, State);
+%% close_ok: how we shut down depends on who initiated the close.
+handle_method_from_server1(#'channel.close_ok'{}, none,
+ State = #state{closing = Closing}) ->
+ case Closing of
+ {just_channel, {app_initiated_close, _, _} = Reason} ->
+ handle_shutdown(Reason, rpc_bottom_half(ok, State));
+ {just_channel, {server_initiated_close, _, _} = Reason} ->
+ handle_shutdown(Reason,
+ rpc_bottom_half(closing, State));
+ {connection, Reason} ->
+ handle_shutdown({connection_closing, Reason}, State)
+ end;
+%% The pending RPC must be the matching consume/cancel; the assertive
+%% match crashes otherwise.
+handle_method_from_server1(#'basic.consume_ok'{} = ConsumeOk, none, State) ->
+ Consume = #'basic.consume'{} = pending_rpc_method(State),
+ ok = call_to_consumer(ConsumeOk, Consume, State),
+ {noreply, rpc_bottom_half(ConsumeOk, State)};
+handle_method_from_server1(#'basic.cancel_ok'{} = CancelOk, none, State) ->
+ Cancel = #'basic.cancel'{} = pending_rpc_method(State),
+ ok = call_to_consumer(CancelOk, Cancel, State),
+ {noreply, rpc_bottom_half(CancelOk, State)};
+handle_method_from_server1(#'basic.cancel'{} = Cancel, none, State) ->
+ ok = call_to_consumer(Cancel, none, State),
+ {noreply, State};
+handle_method_from_server1(#'basic.deliver'{} = Deliver, AmqpMsg, State) ->
+ ok = call_to_consumer(Deliver, AmqpMsg, State),
+ {noreply, State};
+handle_method_from_server1(#'channel.flow'{active = Active} = Flow, none,
+ State = #state{flow_handler = FlowHandler}) ->
+ case FlowHandler of none -> ok;
+ {Pid, _Ref} -> Pid ! Flow
+ end,
+ %% Putting the flow_ok in the queue so that the RPC queue can be
+ %% flushed beforehand. Methods that made it to the queue are not
+ %% blocked in any circumstance.
+ {noreply, rpc_top_half(#'channel.flow_ok'{active = Active}, none, none,
+ none, noflow, State#state{flow_active = Active})};
+handle_method_from_server1(
+ #'basic.return'{} = BasicReturn, AmqpMsg,
+ State = #state{return_handler = ReturnHandler}) ->
+ case ReturnHandler of
+ none -> ?LOG_WARN("Channel (~p): received {~p, ~p} but there is "
+ "no return handler registered~n",
+ [self(), BasicReturn, AmqpMsg]);
+ {Pid, _Ref} -> Pid ! {BasicReturn, AmqpMsg}
+ end,
+ {noreply, State};
+handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
+ #state{confirm_handler = none} = State) ->
+ {noreply, update_confirm_set(BasicAck, State)};
+handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
+ #state{confirm_handler = {CH, _Ref}} = State) ->
+ CH ! BasicAck,
+ {noreply, update_confirm_set(BasicAck, State)};
+%% A nack without a confirm handler is still applied to the confirm
+%% set, but is worth a warning -- acks are silently absorbed above.
+handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
+ #state{confirm_handler = none} = State) ->
+ ?LOG_WARN("Channel (~p): received ~p but there is no "
+ "confirm handler registered~n", [self(), BasicNack]),
+ {noreply, update_confirm_set(BasicNack, State)};
+handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
+ #state{confirm_handler = {CH, _Ref}} = State) ->
+ CH ! BasicNack,
+ {noreply, update_confirm_set(BasicNack, State)};
+
+%% Any other method is assumed to be the reply to the pending RPC.
+handle_method_from_server1(Method, none, State) ->
+ {noreply, rpc_bottom_half(Method, State)};
+handle_method_from_server1(Method, Content, State) ->
+ {noreply, rpc_bottom_half({Method, Content}, State)}.
+
+%%---------------------------------------------------------------------------
+%% Other handle_* functions
+%%---------------------------------------------------------------------------
+
+%% The connection is going down. If a flush was requested and RPCs are
+%% still pending (and the channel itself is not already closing), keep
+%% running until the queue drains (do_rpc triggers the shutdown then),
+%% guarded by a ?TIMEOUT_FLUSH timer. Otherwise shut down immediately.
+handle_connection_closing(CloseType, Reason,
+ State = #state{rpc_requests = RpcQueue,
+ closing = Closing}) ->
+ NewState = State#state{closing = {connection, Reason}},
+ case {CloseType, Closing, queue:is_empty(RpcQueue)} of
+ {flush, false, false} ->
+ erlang:send_after(?TIMEOUT_FLUSH, self(),
+ timed_out_flushing_channel),
+ {noreply, NewState};
+ {flush, {just_channel, _}, false} ->
+ {noreply, NewState},
+ _ ->
+ handle_shutdown({connection_closing, Reason}, NewState)
+ end.
+
+%% Handle an exit reported by the channel infrastructure.
+%% First clause: a hard AMQP error (direct driver) - log it, convert to
+%% a server_initiated_close reason, escalate to the connection and shut
+%% this channel down.  Second clause: any other exit means an
+%% infrastructure process died unexpectedly, so stop abnormally.
+handle_channel_exit(Reason = #amqp_error{name = ErrorName, explanation = Expl},
+ State = #state{connection = Connection, number = Number}) ->
+ %% Sent by rabbit_channel for hard errors in the direct case
+ ?LOG_ERR("connection ~p, channel ~p - error:~n~p~n",
+ [Connection, Number, Reason]),
+ {true, Code, _} = ?PROTOCOL:lookup_amqp_exception(ErrorName),
+ ReportedReason = {server_initiated_close, Code, Expl},
+ amqp_gen_connection:hard_error_in_channel(
+ Connection, self(), ReportedReason),
+ handle_shutdown({connection_closing, ReportedReason}, State);
+handle_channel_exit(Reason, State) ->
+ %% Unexpected death of a channel infrastructure process
+ {stop, {infrastructure_died, Reason}, State}.
+
+%% Map a shutdown reason onto a gen_server stop tuple.  A reply code of
+%% 200 (success) - bare or wrapped in connection_closing - and a normal
+%% connection close all terminate with reason 'normal'; everything else
+%% stops with {shutdown, Reason}.  The clauses are pairwise disjoint,
+%% so their relative order does not matter.
+handle_shutdown({connection_closing, normal}, State) ->
+    {stop, normal, State};
+handle_shutdown({connection_closing, {_, 200, _}}, State) ->
+    {stop, normal, State};
+handle_shutdown({_, 200, _}, State) ->
+    {stop, normal, State};
+handle_shutdown(Reason, State) ->
+    {stop, {shutdown, Reason}, State}.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+%% Send a method (plus optional content) to the broker via the
+%% driver-specific writer.  Content =:= none selects the method-only
+%% send; Flow =:= flow selects the credit-flow variant when content is
+%% present.  The old-style catch is deliberate: a dead writer is
+%% ignored here because the channel learns of the failure through the
+%% {channel_exit, _, _} message instead.
+do(Method, Content, Flow, #state{driver = network, writer = W}) ->
+ %% Catching because it expects the {channel_exit, _, _} message on error
+ catch case {Content, Flow} of
+ {none, _} -> rabbit_writer:send_command(W, Method);
+ {_, flow} -> rabbit_writer:send_command_flow(W, Method,
+ Content);
+ {_, noflow} -> rabbit_writer:send_command(W, Method, Content)
+ end;
+do(Method, Content, Flow, #state{driver = direct, writer = W}) ->
+ %% ditto catching because...
+ catch case {Content, Flow} of
+ {none, _} -> rabbit_channel:do(W, Method);
+ {_, flow} -> rabbit_channel:do_flow(W, Method, Content);
+ {_, noflow} -> rabbit_channel:do(W, Method, Content)
+ end.
+
+
+%% Synchronously flush any buffered outgoing data.  Only the network
+%% driver buffers writes; a writer that has already exited counts as
+%% flushed (exit:noproc is swallowed).  Clauses are disjoint on the
+%% driver atom, so their order is irrelevant.
+flush_writer(#state{driver = direct}) ->
+    ok;
+flush_writer(#state{driver = network, writer = WriterPid}) ->
+    try rabbit_writer:flush(WriterPid)
+    catch exit:noproc -> ok
+    end.
+%% Convert broker content into the #amqp_msg{} record handed to user
+%% code; 'none' (no content) passes through unchanged.
+amqp_msg(none) ->
+    none;
+amqp_msg(Content) ->
+    {Properties, Payload} = rabbit_basic:from_content(Content),
+    #amqp_msg{props = Properties, payload = Payload}.
+
+%% Inverse of amqp_msg/1: turn a user-supplied #amqp_msg{} into broker
+%% content, passing 'none' through untouched.
+build_content(none) ->
+    none;
+build_content(#amqp_msg{props = Properties, payload = Body}) ->
+    rabbit_basic:build_content(Properties, Body).
+
+%% Decide whether an outbound method may be sent right now.  Any close
+%% in progress rejects with 'closing' (the closing field is either
+%% 'false', {just_channel, _} or {connection, _}, so a {_, _} pattern
+%% covers both close variants); publishes carrying content are also
+%% rejected with 'blocked' while broker flow control is active.  Plain
+%% methods are never flow-blocked.
+check_block(_Method, _AmqpMsg, #state{closing = {_, _}}) ->
+    closing;
+check_block(_Method, none, #state{}) ->
+    ok;
+check_block(_Method, #amqp_msg{}, #state{flow_active = false}) ->
+    blocked;
+check_block(_Method, _AmqpMsg, #state{}) ->
+    ok.
+
+%% Reject methods that user code must not send directly on a channel:
+%% channel.open/close have dedicated API functions, and connection-class
+%% methods belong on the connection, not on a channel.  Returns ok or
+%% {ErrorAtom, Explanation}.
+check_invalid_method(#'channel.open'{}) ->
+    {use_amqp_connection_module,
+     "Use amqp_connection:open_channel/{1,2} instead"};
+check_invalid_method(#'channel.close'{}) ->
+    {use_close_function, "Use close/{1,3} instead"};
+check_invalid_method(Method) ->
+    case is_connection_method(Method) of
+        false -> ok;
+        true  -> {connection_methods_not_allowed,
+                  "Sending connection methods is not allowed"}
+    end.
+
+%% True when Method (a method record) belongs to the AMQP connection
+%% class according to the protocol tables.
+is_connection_method(Method) ->
+    {ClassId, _MethodId} = ?PROTOCOL:method_id(element(1, Method)),
+    connection =:= ?PROTOCOL:lookup_class_name(ClassId).
+
+%% React to a protocol violation by the server.  map_exception/3 tells
+%% us the severity: channel 0 means a hard (connection-level) error, so
+%% shut down; otherwise issue a channel.close from a throwaway process
+%% (call/2 is synchronous and a gen_server must not call itself) and
+%% carry on.
+server_misbehaved(#amqp_error{} = AmqpError, State = #state{number = Number}) ->
+ case rabbit_binary_generator:map_exception(Number, AmqpError, ?PROTOCOL) of
+ {0, _} ->
+ handle_shutdown({server_misbehaved, AmqpError}, State);
+ {_, Close} ->
+ ?LOG_WARN("Channel (~p) flushing and closing due to soft "
+ "error caused by the server ~p~n", [self(), AmqpError]),
+ Self = self(),
+ %% Spawned to avoid deadlocking this process on its own call.
+ spawn(fun () -> call(Self, Close) end),
+ {noreply, State}
+ end.
+
+%% Fold a broker basic.ack/basic.nack into the set of unconfirmed
+%% publish sequence numbers.  A nack additionally records that not all
+%% outstanding publishes were positively confirmed.  May wake processes
+%% blocked in wait_for_confirms when the set drains.
+update_confirm_set(#'basic.ack'{delivery_tag = SeqNo, multiple = Multiple},
+                   State = #state{unconfirmed_set = Unconfirmed}) ->
+    Remaining = update_unconfirmed(SeqNo, Multiple, Unconfirmed),
+    maybe_notify_waiters(State#state{unconfirmed_set = Remaining});
+update_confirm_set(#'basic.nack'{delivery_tag = SeqNo, multiple = Multiple},
+                   State = #state{unconfirmed_set = Unconfirmed}) ->
+    Remaining = update_unconfirmed(SeqNo, Multiple, Unconfirmed),
+    maybe_notify_waiters(State#state{unconfirmed_set = Remaining,
+                                     only_acks_received = false}).
+
+%% Remove confirmed sequence numbers from the gb_set.  multiple = false
+%% removes exactly SeqNo; multiple = true removes every element that is
+%% =< SeqNo (AMQP "multiple" confirm semantics), by repeatedly peeling
+%% off the smallest element until it exceeds SeqNo.
+update_unconfirmed(SeqNo, false, Set) ->
+    gb_sets:del_element(SeqNo, Set);
+update_unconfirmed(SeqNo, true, Set) ->
+    case gb_sets:is_empty(Set) of
+        true ->
+            Set;
+        false ->
+            {Smallest, Rest} = gb_sets:take_smallest(Set),
+            if
+                Smallest > SeqNo -> Set;
+                true             -> update_unconfirmed(SeqNo, true, Rest)
+            end
+    end.
+
+%% If the unconfirmed set has just drained, wake everyone blocked in
+%% wait_for_confirms; otherwise return the state unchanged.
+maybe_notify_waiters(State = #state{unconfirmed_set = Unconfirmed}) ->
+    case gb_sets:is_empty(Unconfirmed) of
+        true  -> notify_confirm_waiters(State);
+        false -> State
+    end.
+
+%% Reply to every process waiting for publisher confirms: 'true' if
+%% only acks were seen, 'false' if any nack arrived.  Each waiter's
+%% timeout timer (if armed) is cancelled, then the waiting set and the
+%% only_acks_received flag are reset for the next round.
+notify_confirm_waiters(State = #state{waiting_set = Waiters,
+                                      only_acks_received = OnlyAcks}) ->
+    lists:foreach(fun ({From, TRef}) ->
+                          safe_cancel_timer(TRef),
+                          gen_server:reply(From, OnlyAcks)
+                  end, gb_trees:to_list(Waiters)),
+    State#state{waiting_set = gb_trees:empty(),
+                only_acks_received = true}.
+
+%% Implement wait_for_confirms.  Fails fast when the channel is not in
+%% confirm mode (next_pub_seqno still 0).  If nothing is unconfirmed,
+%% reply 'true' immediately; otherwise park the caller in waiting_set
+%% (answered later by notify_confirm_waiters/1), optionally arming a
+%% timeout timer.  Note Timeout is in seconds and is scaled to
+%% milliseconds for erlang:send_after/3.
+handle_wait_for_confirms(_From, _Timeout, State = #state{next_pub_seqno = 0}) ->
+ {reply, {error, not_in_confirm_mode}, State};
+handle_wait_for_confirms(From, Timeout,
+ State = #state{unconfirmed_set = USet,
+ waiting_set = WSet}) ->
+ case gb_sets:is_empty(USet) of
+ true -> {reply, true, State};
+ false -> TRef = case Timeout of
+ infinity -> undefined;
+ _ -> erlang:send_after(
+ Timeout * 1000, self(),
+ {confirm_timeout, From})
+ end,
+ {noreply,
+ State#state{waiting_set = gb_trees:insert(From, TRef, WSet)}}
+ end.
+
+%% Forward a received method (plus arguments) to this channel's
+%% consumer process.
+call_to_consumer(Method, Args, #state{consumer = ConsumerPid}) ->
+    amqp_gen_consumer:call_consumer(ConsumerPid, Method, Args).
+
+%% Cancel a timer reference, treating 'undefined' (no timer armed) as a
+%% no-op.
+safe_cancel_timer(undefined) ->
+    ok;
+safe_cancel_timer(TRef) ->
+    erlang:cancel_timer(TRef).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_channel_sup).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/6]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Start the per-channel supervision tree: the gen_consumer child comes
+%% up first (via init/1), then the amqp_channel gen_server, then the
+%% driver-specific writer.  The writer is handed to the channel
+%% afterwards with set_writer/2 because it cannot exist before the
+%% channel pid does.  Returns {ok, Sup, {ChannelPid, AssemblerState}}.
+start_link(Type, Connection, ConnName, InfraArgs, ChNumber,
+ Consumer = {_, _}) ->
+ Identity = {ConnName, ChNumber},
+ {ok, Sup} = supervisor2:start_link(?MODULE, [Consumer, Identity]),
+ [{gen_consumer, ConsumerPid, _, _}] = supervisor2:which_children(Sup),
+ {ok, ChPid} = supervisor2:start_child(
+ Sup, {channel,
+ {amqp_channel, start_link,
+ [Type, Connection, ChNumber, ConsumerPid, Identity]},
+ intrinsic, ?MAX_WAIT, worker, [amqp_channel]}),
+ Writer = start_writer(Sup, Type, InfraArgs, ConnName, ChNumber, ChPid),
+ amqp_channel:set_writer(ChPid, Writer),
+ {ok, AState} = init_command_assembler(Type),
+ {ok, Sup, {ChPid, AState}}.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+%% Create the process that carries this channel's outbound traffic.
+%% direct: a rabbit_channel started on the broker node via rpc and
+%% linked to the caller (it lives in the server's tree, not under Sup).
+%% network: a rabbit_writer child supervised under Sup, sharing the
+%% connection socket.
+start_writer(_Sup, direct, [ConnPid, Node, User, VHost, Collector],
+ ConnName, ChNumber, ChPid) ->
+ {ok, RabbitCh} =
+ rpc:call(Node, rabbit_direct, start_channel,
+ [ChNumber, ChPid, ConnPid, ConnName, ?PROTOCOL, User,
+ VHost, ?CLIENT_CAPABILITIES, Collector]),
+ link(RabbitCh),
+ RabbitCh;
+start_writer(Sup, network, [Sock, FrameMax], ConnName, ChNumber, ChPid) ->
+ {ok, Writer} = supervisor2:start_child(
+ Sup,
+ {writer, {rabbit_writer, start_link,
+ [Sock, ChNumber, FrameMax, ?PROTOCOL, ChPid,
+ {ConnName, ChNumber}]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_writer]}),
+ Writer.
+
+%% Only the network driver parses raw wire frames, so only it needs a
+%% command-assembler state; the direct driver gets 'none'.
+init_command_assembler(network) -> rabbit_command_assembler:init(?PROTOCOL);
+init_command_assembler(direct)  -> {ok, none}.
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+%% supervisor2 callback: start with just the consumer child; the
+%% channel and writer are added dynamically by start_link/6.
+%% one_for_all with zero allowed restarts: if any child dies the whole
+%% channel tree is torn down rather than restarted.
+init([{ConsumerModule, ConsumerArgs}, Identity]) ->
+ {ok, {{one_for_all, 0, 1},
+ [{gen_consumer, {amqp_gen_consumer, start_link,
+ [ConsumerModule, ConsumerArgs, Identity]},
+ intrinsic, ?MAX_WAIT, worker, [amqp_gen_consumer]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_channel_sup_sup).
+
+-include("amqp_client.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/3, start_channel_sup/4]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Start the supervisor that owns one amqp_channel_sup tree per channel
+%% of this connection.
+start_link(Type, Connection, ConnName) ->
+ supervisor2:start_link(?MODULE, [Type, Connection, ConnName]).
+
+%% Launch one channel supervision tree under Sup.  With the
+%% simple_one_for_one strategy from init/1, this argument list is
+%% appended to the template's [Type, Connection, ConnName], yielding
+%% the full amqp_channel_sup:start_link/6 argument list.
+start_channel_sup(Sup, InfraArgs, ChannelNumber, Consumer) ->
+ supervisor2:start_child(Sup, [InfraArgs, ChannelNumber, Consumer]).
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+%% supervisor2 callback: a simple_one_for_one factory of
+%% amqp_channel_sup trees.  Children are temporary (channels are never
+%% restarted by the tree) and killed brutally on shutdown.
+init([Type, Connection, ConnName]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{channel_sup,
+ {amqp_channel_sup, start_link, [Type, Connection, ConnName]},
+ temporary, brutal_kill, supervisor, [amqp_channel_sup]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_channels_manager).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([start_link/3, open_channel/4, set_channel_max/2, is_empty/1,
+ num_channels/1, pass_frame/3, signal_connection_closing/3,
+ process_channel_frame/4]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-record(state, {connection,
+ channel_sup_sup,
+ map_num_pa = gb_trees:empty(), %% Number -> {Pid, AState}
+ map_pid_num = dict:new(), %% Pid -> Number
+ channel_max = ?MAX_CHANNEL_NUMBER,
+ closing = false}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Start the channels manager for a connection.  ChSupSup is the
+%% amqp_channel_sup_sup under which new channels will be created.
+start_link(Connection, ConnName, ChSupSup) ->
+ gen_server:start_link(?MODULE, [Connection, ConnName, ChSupSup], []).
+
+%% Synchronously allocate a channel number (ProposedNumber if valid, or
+%% 'none' to let the manager pick) and start the channel's supervision
+%% tree.  Returns {ok, ChannelPid} | {error, Reason}.
+open_channel(ChMgr, ProposedNumber, Consumer, InfraArgs) ->
+ gen_server:call(ChMgr, {open_channel, ProposedNumber, Consumer, InfraArgs},
+ infinity).
+
+%% Record the negotiated channel_max; used as the upper bound when
+%% allocating channel numbers.
+set_channel_max(ChMgr, ChannelMax) ->
+ gen_server:cast(ChMgr, {set_channel_max, ChannelMax}).
+
+%% True when no channels are currently registered with the manager.
+is_empty(ChMgr) ->
+ gen_server:call(ChMgr, is_empty, infinity).
+
+%% Number of currently registered channels.
+num_channels(ChMgr) ->
+ gen_server:call(ChMgr, num_channels, infinity).
+
+%% Asynchronously route an inbound frame to the channel with the given
+%% number.
+pass_frame(ChMgr, ChNumber, Frame) ->
+ gen_server:cast(ChMgr, {pass_frame, ChNumber, Frame}).
+
+%% Tell the manager (and through it every channel) that the connection
+%% is going down; once all channels have terminated, the manager calls
+%% amqp_gen_connection:channels_terminated/1.
+signal_connection_closing(ChMgr, ChannelCloseType, Reason) ->
+ gen_server:cast(ChMgr, {connection_closing, ChannelCloseType, Reason}).
+
+%% Feed one frame through a channel's command assembler.  Complete
+%% methods (with or without content) are dispatched to the channel
+%% process via rabbit_channel:do; an assembler error is injected into
+%% the channel as a {channel_exit, Channel, Reason} message.  Returns
+%% the new assembler state (unchanged on error).
+process_channel_frame(Frame, Channel, ChPid, AState) ->
+ case rabbit_command_assembler:process(Frame, AState) of
+ {ok, NewAState} -> NewAState;
+ {ok, Method, NewAState} -> rabbit_channel:do(ChPid, Method),
+ NewAState;
+ {ok, Method, Content, NewAState} -> rabbit_channel:do(ChPid, Method,
+ Content),
+ NewAState;
+ {error, Reason} -> ChPid ! {channel_exit, Channel,
+ Reason},
+ AState
+ end.
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+%% gen_server callback: label the process with the connection name (for
+%% crash logs / observer) and start with an empty channel registry.
+init([Connection, ConnName, ChSupSup]) ->
+ ?store_proc_name(ConnName),
+ {ok, #state{connection = Connection, channel_sup_sup = ChSupSup}}.
+
+%% gen_server callback; the manager holds no external resources, so
+%% there is nothing to release on shutdown.
+terminate(_Reason, _State) -> ok.
+
+%% gen_server callback: no state migration is needed between code
+%% versions.
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%% gen_server call handler.  NOTE(review): the open_channel clause only
+%% matches while closing =:= false, so an open_channel call racing a
+%% connection shutdown crashes this (already doomed) process instead of
+%% being answered - this looks deliberate ("let it crash"), but confirm
+%% before adding a clause.
+handle_call({open_channel, ProposedNumber, Consumer, InfraArgs}, _,
+ State = #state{closing = false}) ->
+ handle_open_channel(ProposedNumber, Consumer, InfraArgs, State);
+handle_call(is_empty, _, State) ->
+ {reply, internal_is_empty(State), State};
+handle_call(num_channels, _, State) ->
+ {reply, internal_num_channels(State), State}.
+
+%% gen_server cast handler: channel_max updates, inbound frame routing,
+%% and connection-shutdown notification.
+handle_cast({set_channel_max, ChannelMax}, State) ->
+ {noreply, State#state{channel_max = ChannelMax}};
+handle_cast({pass_frame, ChNumber, Frame}, State) ->
+ {noreply, internal_pass_frame(ChNumber, Frame, State)};
+handle_cast({connection_closing, ChannelCloseType, Reason}, State) ->
+ handle_connection_closing(ChannelCloseType, Reason, State).
+
+%% The only expected raw message is a 'DOWN' from the monitor set up in
+%% handle_open_channel/4; any other message crashes the manager.
+handle_info({'DOWN', _, process, Pid, Reason}, State) ->
+ handle_down(Pid, Reason, State).
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+%% Allocate a channel number, start the channel's supervision tree,
+%% register {Pid, AssemblerState} under the number, and monitor the
+%% channel pid so handle_down/3 can unregister it later.
+handle_open_channel(ProposedNumber, Consumer, InfraArgs,
+ State = #state{channel_sup_sup = ChSupSup}) ->
+ case new_number(ProposedNumber, State) of
+ {ok, Number} ->
+ {ok, _ChSup, {Ch, AState}} =
+ amqp_channel_sup_sup:start_channel_sup(ChSupSup, InfraArgs,
+ Number, Consumer),
+ NewState = internal_register(Number, Ch, AState, State),
+ erlang:monitor(process, Ch),
+ {reply, {ok, Ch}, NewState};
+ {error, _} = Error ->
+ {reply, Error, State}
+ end.
+
+%% Choose a channel number.  With 'none', cheap options are tried
+%% first: 1 on an empty registry, one below the smallest allocated
+%% number, one above the largest (while under channel_max), and only
+%% then a linear scan for a gap (find_free/1).  An explicit proposal is
+%% honoured when it is in range and unallocated; otherwise fall back to
+%% automatic choice.
+new_number(none, #state{channel_max = Max, map_num_pa = Allocated}) ->
+    case gb_trees:is_empty(Allocated) of
+        true ->
+            {ok, 1};
+        false ->
+            {Lowest, _} = gb_trees:smallest(Allocated),
+            case Lowest > 1 of
+                true ->
+                    {ok, Lowest - 1};
+                false ->
+                    {Highest, _} = gb_trees:largest(Allocated),
+                    case Highest < Max of
+                        true  -> {ok, Highest + 1};
+                        false -> find_free(Allocated)
+                    end
+            end
+    end;
+new_number(Proposed, State = #state{channel_max = Max,
+                                    map_num_pa = Allocated}) ->
+    Usable = Proposed > 0 andalso Proposed =< Max andalso
+                 not gb_trees:is_defined(Proposed, Allocated),
+    case Usable of
+        true  -> {ok, Proposed};
+        false -> new_number(none, State)
+    end.
+
+%% Linear scan for the first gap in the allocated channel-number
+%% sequence, starting at 1.  Returns {ok, N} for the free number just
+%% below the first key that jumps past the running candidate, or
+%% {error, out_of_channel_numbers} when the sequence is fully packed.
+find_free(MapNPA) ->
+    find_free(gb_trees:iterator(MapNPA), 1).
+
+find_free(Iter, Candidate) ->
+    case gb_trees:next(Iter) of
+        none ->
+            {error, out_of_channel_numbers};
+        {Candidate, _PA, Rest} ->
+            %% Candidate is taken; try the next number.
+            find_free(Rest, Candidate + 1);
+        {Higher, _PA, _Rest} when Higher > Candidate ->
+            {ok, Higher - 1}
+    end.
+
+%% A monitored process died.  An unknown pid is fatal (only registered
+%% channels are monitored); a known one is handed to
+%% handle_channel_down/4 for reporting and unregistration.
+handle_down(Pid, Reason, State) ->
+ case internal_lookup_pn(Pid, State) of
+ undefined -> {stop, {error, unexpected_down}, State};
+ Number -> handle_channel_down(Pid, Number, Reason, State)
+ end.
+
+%% Unregister a dead channel.  {shutdown, R} wrappers are unwrapped
+%% before deciding whether the reason needs reporting to the
+%% connection; if this was the last channel of a closing connection,
+%% check_all_channels_terminated/1 notifies the connection.
+handle_channel_down(Pid, Number, Reason, State) ->
+    Actual = case Reason of
+                 {shutdown, Wrapped} -> Wrapped;
+                 _                   -> Reason
+             end,
+    maybe_report_down(Pid, Actual, State),
+    NewState = internal_unregister(Number, Pid, State),
+    check_all_channels_terminated(NewState),
+    {noreply, NewState}.
+
+%% Decide whether a channel's exit reason must be escalated to the
+%% connection.  Orderly lifecycle reasons (clean exits and deliberate
+%% closes) are ignored; a misbehaving server is reported as such; any
+%% other reason counts as an internal channel error.
+maybe_report_down(_Pid, normal, _State) ->
+ ok;
+maybe_report_down(_Pid, shutdown, _State) ->
+ ok;
+maybe_report_down(_Pid, {app_initiated_close, _, _}, _State) ->
+ ok;
+maybe_report_down(_Pid, {server_initiated_close, _, _}, _State) ->
+ ok;
+maybe_report_down(_Pid, {connection_closing, _}, _State) ->
+ ok;
+maybe_report_down(_Pid, {server_misbehaved, AmqpError},
+ #state{connection = Connection}) ->
+ amqp_gen_connection:server_misbehaved(Connection, AmqpError);
+maybe_report_down(Pid, Other, #state{connection = Connection}) ->
+ amqp_gen_connection:channel_internal_error(Connection, Pid, Other).
+
+%% While the connection is closing, tell it once the last channel has
+%% gone away; a no-op otherwise.
+check_all_channels_terminated(#state{closing = false}) ->
+    ok;
+check_all_channels_terminated(State = #state{closing = true,
+                                             connection = Connection}) ->
+    case internal_is_empty(State) of
+        false -> ok;
+        true  -> amqp_gen_connection:channels_terminated(Connection)
+    end.
+
+%% The connection is shutting down: either confirm immediately (no
+%% channels left) or fan the notification out to every channel.  Either
+%% way, remember that we are closing so the final 'DOWN' triggers
+%% channels_terminated/1.
+handle_connection_closing(ChannelCloseType, Reason,
+ State = #state{connection = Connection}) ->
+ case internal_is_empty(State) of
+ true -> amqp_gen_connection:channels_terminated(Connection);
+ false -> signal_channels_connection_closing(ChannelCloseType, Reason,
+ State)
+ end,
+ {noreply, State#state{closing = true}}.
+
+%%---------------------------------------------------------------------------
+
+%% Look up the target channel and run the frame through its assembler,
+%% storing the updated assembler state.  Frames for unknown channel
+%% numbers (e.g. late frames for a just-closed channel) are logged and
+%% dropped.
+internal_pass_frame(Number, Frame, State) ->
+ case internal_lookup_npa(Number, State) of
+ undefined ->
+ ?LOG_INFO("Dropping frame ~p for invalid or closed "
+ "channel number ~p~n", [Frame, Number]),
+ State;
+ {ChPid, AState} ->
+ NewAState = process_channel_frame(Frame, Number, ChPid, AState),
+ internal_update_npa(Number, ChPid, NewAState, State)
+ end.
+
+%% Add a channel to both lookup maps: number -> {Pid, AssemblerState}
+%% and pid -> number.
+internal_register(Number, Pid, AState,
+                  State = #state{map_num_pa = ByNum, map_pid_num = ByPid}) ->
+    State#state{map_num_pa  = gb_trees:enter(Number, {Pid, AState}, ByNum),
+                map_pid_num = dict:store(Pid, Number, ByPid)}.
+
+%% Remove a channel from both lookup maps.  gb_trees:delete/2 is
+%% strict, so an unknown number crashes - registration and
+%% unregistration are expected to be balanced.
+internal_unregister(Number, Pid,
+                    State = #state{map_num_pa = ByNum, map_pid_num = ByPid}) ->
+    State#state{map_num_pa  = gb_trees:delete(Number, ByNum),
+                map_pid_num = dict:erase(Pid, ByPid)}.
+
+%% True when the number -> {pid, assembler state} registry is empty.
+internal_is_empty(#state{map_num_pa = MapNPA}) ->
+ gb_trees:is_empty(MapNPA).
+
+%% Count of registered channels.
+internal_num_channels(#state{map_num_pa = MapNPA}) ->
+ gb_trees:size(MapNPA).
+
+%% Fetch {Pid, AssemblerState} for a channel number, or 'undefined' if
+%% the number is not registered.
+internal_lookup_npa(Number, #state{map_num_pa = ByNum}) ->
+    case gb_trees:lookup(Number, ByNum) of
+        {value, PidAState} -> PidAState;
+        none               -> undefined
+    end.
+
+%% Fetch the channel number for a pid, or 'undefined' if the pid is not
+%% registered.
+internal_lookup_pn(Pid, #state{map_pid_num = ByPid}) ->
+    case dict:find(Pid, ByPid) of
+        {ok, Number} -> Number;
+        error        -> undefined
+    end.
+
+%% Replace the stored assembler state for an existing channel number
+%% (gb_trees:update/3 requires the key to be present).
+internal_update_npa(Number, Pid, AState, State = #state{map_num_pa = MapNPA}) ->
+ State#state{map_num_pa = gb_trees:update(Number, {Pid, AState}, MapNPA)}.
+
+%% Broadcast the connection-closing notification to every registered
+%% channel process.  The comprehension's result list is ignored by the
+%% caller.
+signal_channels_connection_closing(ChannelCloseType, Reason,
+                                   #state{map_pid_num = ByPid}) ->
+    [amqp_channel:connection_closing(ChPid, ChannelCloseType, Reason)
+     || ChPid <- dict:fetch_keys(ByPid)].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_client).
+
+-behaviour(application).
+
+-export([start/0]).
+-export([start/2, stop/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Convenience starter for interactive use; returns whatever
+%% application:start/1 returns, including
+%% {error, {already_started, amqp_client}} on a repeat call.
+start() ->
+ application:start(amqp_client).
+
+%%---------------------------------------------------------------------------
+%% application callbacks
+%%---------------------------------------------------------------------------
+
+%% application callback: boot the top-level supervision tree.
+start(_StartType, _StartArgs) ->
+ amqp_sup:start_link().
+
+%% application callback: supervision-tree teardown needs no extra
+%% cleanup.
+stop(_State) -> ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
+%% @type amqp_reason(Type) = {Type, Code, Text}
+%% Code = non_neg_integer()
+%% Text = binary().
+%% @doc This module is responsible for maintaining a connection to an AMQP
+%% broker and manages channels within the connection. This module is used to
+%% open and close connections to the broker as well as creating new channels
+%% within a connection.<br/>
+%% The connections and channels created by this module are supervised under
+%% amqp_client's supervision tree. Please note that connections and channels
+%% do not get restarted automatically by the supervision tree in the case of a
+%% failure. If you need robust connections and channels, we recommend you use
+%% Erlang monitors on the returned connection and channel PIDs.<br/>
+%% <br/>
+%% In case of a failure or an AMQP error, the connection process exits with a
+%% meaningful exit reason:<br/>
+%% <br/>
+%% <table>
+%% <tr>
+%% <td><strong>Cause</strong></td>
+%% <td><strong>Exit reason</strong></td>
+%% </tr>
+%% <tr>
+%% <td>Any reason, where Code would have been 200 otherwise</td>
+%% <td>```normal'''</td>
+%% </tr>
+%% <tr>
+%% <td>User application calls amqp_connection:close/3</td>
+%% <td>```close_reason(app_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server closes connection (hard error)</td>
+%% <td>```close_reason(server_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server misbehaved (did not follow protocol)</td>
+%% <td>```close_reason(server_misbehaved)'''</td>
+%% </tr>
+%% <tr>
+%% <td>AMQP client internal error - usually caused by a channel exiting
+%% with an unusual reason. This is usually accompanied by a more
+%% detailed error log from the channel</td>
+%% <td>```close_reason(internal_error)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Other error</td>
+%% <td>(various error reasons, causing more detailed logging)</td>
+%% </tr>
+%% </table>
+%% <br/>
+%% See type definitions below.
+-module(amqp_connection).
+
+-include("amqp_client_internal.hrl").
+
+-export([open_channel/1, open_channel/2, open_channel/3, register_blocked_handler/2]).
+-export([start/1, close/1, close/2, close/3]).
+-export([error_atom/1]).
+-export([info/2, info_keys/1, info_keys/0]).
+-export([socket_adapter_info/2]).
+
+-define(DEFAULT_CONSUMER, {amqp_selective_consumer, []}).
+
+-define(PROTOCOL_SSL_PORT, (?PROTOCOL_PORT - 1)).
+
+%%---------------------------------------------------------------------------
+%% Type Definitions
+%%---------------------------------------------------------------------------
+
+%% @type amqp_adapter_info() = #amqp_adapter_info{}.
+%% @type amqp_params_direct() = #amqp_params_direct{}.
+%% As defined in amqp_client.hrl. It contains the following fields:
+%% <ul>
+%% <li>username :: binary() - The name of a user registered with the broker,
+%% defaults to <<"guest">></li>
+%% <li>password :: binary() - The user's password, defaults to 'none'</li>
+%% <li>virtual_host :: binary() - The name of a virtual host in the broker,
+%% defaults to <<"/">></li>
+%% <li>node :: atom() - The node the broker runs on (direct only)</li>
+%% <li>adapter_info :: amqp_adapter_info() - Extra management information for if
+%% this connection represents a non-AMQP network connection.</li>
+%% <li>client_properties :: [{binary(), atom(), binary()}] - A list of extra
+%% client properties to be sent to the server, defaults to []</li>
+%% </ul>
+%%
+%% @type amqp_params_network() = #amqp_params_network{}.
+%% As defined in amqp_client.hrl. It contains the following fields:
+%% <ul>
+%% <li>username :: binary() - The name of a user registered with the broker,
+%% defaults to <<"guest">></li>
+%% <li>password :: binary() - The user's password, defaults to
+%% <<"guest">></li>
+%% <li>virtual_host :: binary() - The name of a virtual host in the broker,
+%% defaults to <<"/">></li>
+%% <li>host :: string() - The hostname of the broker,
+%% defaults to "localhost" (network only)</li>
+%% <li>port :: integer() - The port the broker is listening on,
+%% defaults to 5672 (network only)</li>
+%% <li>channel_max :: non_neg_integer() - The channel_max handshake parameter,
+%% defaults to 0</li>
+%% <li>frame_max :: non_neg_integer() - The frame_max handshake parameter,
+%% defaults to 0 (network only)</li>
+%% <li>heartbeat :: non_neg_integer() - The heartbeat interval in seconds,
+%% defaults to 0 (turned off) (network only)</li>
+%% <li>connection_timeout :: non_neg_integer() | 'infinity'
+%% - The connection timeout in milliseconds,
+%% defaults to 'infinity' (network only)</li>
+%% <li>ssl_options :: term() - The second parameter to be used with the
+%% ssl:connect/2 function, defaults to 'none' (network only)</li>
+%% <li>client_properties :: [{binary(), atom(), binary()}] - A list of extra
+%% client properties to be sent to the server, defaults to []</li>
+%% <li>socket_options :: [any()] - Extra socket options. These are
+%% appended to the default options. See
+%% <a href="http://www.erlang.org/doc/man/inet.html#setopts-2">inet:setopts/2</a>
+%% and <a href="http://www.erlang.org/doc/man/gen_tcp.html#connect-4">
+%% gen_tcp:connect/4</a> for descriptions of the available options.</li>
+%% </ul>
+
+
+%%---------------------------------------------------------------------------
+%% Starting a connection
+%%---------------------------------------------------------------------------
+
+%% @spec (Params) -> {ok, Connection} | {error, Error}
+%% where
+%% Params = amqp_params_network() | amqp_params_direct()
+%% Connection = pid()
+%% @doc Starts a connection to an AMQP server. Use network params to
+%% connect to a remote AMQP server or direct params for a direct
+%% connection to a RabbitMQ server, assuming that the server is
+%% running in the same process space. If the port is set to 'undefined',
+%% the default ports will be selected depending on whether this is a
+%% normal or an SSL connection.
+start(AmqpParams) ->
+ ensure_started(),
+ %% Fill in a default port only when the caller left it 'undefined':
+ %% the plain AMQP port normally, the SSL port when ssl_options are
+ %% set.  Direct params carry no port and pass through untouched.
+ AmqpParams1 =
+ case AmqpParams of
+ #amqp_params_network{port = undefined, ssl_options = none} ->
+ AmqpParams#amqp_params_network{port = ?PROTOCOL_PORT};
+ #amqp_params_network{port = undefined, ssl_options = _} ->
+ AmqpParams#amqp_params_network{port = ?PROTOCOL_SSL_PORT};
+ _ ->
+ AmqpParams
+ end,
+ {ok, _Sup, Connection} = amqp_sup:start_connection_sup(AmqpParams1),
+ amqp_gen_connection:connect(Connection).
+
+%% Usually the amqp_client application will already be running. We
+%% check whether that is the case by invoking an undocumented function
+%% which does not require a synchronous call to the application
+%% controller. That way we don't risk a dead-lock if, say, the
+%% application controller is in the process of shutting down the very
+%% application which is making this call.
+%% Make sure xmerl and amqp_client are running, probing via
+%% application_controller:get_master/1 to avoid a synchronous call to
+%% the application controller (see the comment above).
+ensure_started() ->
+    [ensure_started(App) || App <- [xmerl, amqp_client]].
+
+%% Start App unless a master process for it already exists.  Losing an
+%% {already_started, App} race is fine; any other start failure is
+%% thrown to the caller.
+ensure_started(App) ->
+    case application_controller:get_master(App) of
+        undefined ->
+            case application:start(App) of
+                ok                              -> ok;
+                {error, {already_started, App}} -> ok;
+                {error, _} = Error              -> throw(Error)
+            end;
+        _Master ->
+            ok
+    end.
+
+%%---------------------------------------------------------------------------
+%% Commands
+%%---------------------------------------------------------------------------
+
+%% @doc Invokes open_channel(ConnectionPid, none,
+%% {amqp_selective_consumer, []}).  Opens a channel without having to
+%% specify a channel number, using the default (selective) consumer
+%% implementation.
+open_channel(ConnectionPid) ->
+ open_channel(ConnectionPid, none, ?DEFAULT_CONSUMER).
+
+%% The two clauses of open_channel/2 dispatch on the second argument's
+%% shape: a {Module, Args} consumer tuple selects the first, a channel
+%% number (or 'none') selects the second.
+%%
+%% @doc Invokes open_channel(ConnectionPid, none, Consumer).
+%% Opens a channel without having to specify a channel number.
+open_channel(ConnectionPid, {_, _} = Consumer) ->
+ open_channel(ConnectionPid, none, Consumer);
+
+%% @doc Invokes open_channel(ConnectionPid, ChannelNumber,
+%% {amqp_selective_consumer, []}). Opens a channel, using the default
+%% consumer implementation.
+open_channel(ConnectionPid, ChannelNumber)
+ when is_number(ChannelNumber) orelse ChannelNumber =:= none ->
+ open_channel(ConnectionPid, ChannelNumber, ?DEFAULT_CONSUMER).
+
+%% @spec (ConnectionPid, ChannelNumber, Consumer) -> Result
+%% where
+%% ConnectionPid = pid()
+%% ChannelNumber = pos_integer() | 'none'
+%% Consumer = {ConsumerModule, ConsumerArgs}
+%% ConsumerModule = atom()
+%% ConsumerArgs = [any()]
+%% Result = {ok, ChannelPid} | {error, Error}
+%% ChannelPid = pid()
+%% @doc Opens an AMQP channel.<br/>
+%% Opens a channel, using a proposed channel number and a specific consumer
+%% implementation.<br/>
+%% ConsumerModule must implement the amqp_gen_consumer behaviour. ConsumerArgs
+%% is passed as parameter to ConsumerModule:init/1.<br/>
+%% This function assumes that an AMQP connection (networked or direct)
+%% has already been successfully established.<br/>
+%% ChannelNumber must be less than or equal to the negotiated
+%% max_channel value, or less than or equal to ?MAX_CHANNEL_NUMBER
+%% (65535) if the negotiated max_channel value is 0.<br/>
+%% In the direct connection, max_channel is always 0.
+%% The consumer tuple's shape is asserted here; the actual open is
+%% delegated to the connection process.
+open_channel(ConnectionPid, ChannelNumber,
+ {_ConsumerModule, _ConsumerArgs} = Consumer) ->
+ amqp_gen_connection:open_channel(ConnectionPid, ChannelNumber, Consumer).
+
+%% @spec (ConnectionPid) -> ok | Error
+%% where
+%% ConnectionPid = pid()
+%% @doc Closes the connection, invoking
+%% close(ConnectionPid, 200, <<"Goodbye">>).
+close(ConnectionPid) ->
+ close(ConnectionPid, 200, <<"Goodbye">>).
+
+%% @spec (ConnectionPid, Timeout) -> ok | Error
+%% where
+%% ConnectionPid = pid()
+%% Timeout = integer()
+%% @doc Closes the connection with reply code 200 and text
+%% <<"Goodbye">>, using the supplied Timeout value.
+close(ConnectionPid, Timeout) ->
+ close(ConnectionPid, 200, <<"Goodbye">>, Timeout).
+
+%% @spec (ConnectionPid, Code, Text) -> ok | closing
+%% where
+%% ConnectionPid = pid()
+%% Code = integer()
+%% Text = binary()
+%% @doc Closes the AMQP connection, allowing the caller to set the reply
+%% code and text.  Equivalent to close/4 with an infinite timeout.
+close(ConnectionPid, Code, Text) ->
+ close(ConnectionPid, Code, Text, infinity).
+
+%% @spec (ConnectionPid, Code, Text, Timeout) -> ok | closing
+%% where
+%% ConnectionPid = pid()
+%% Code = integer()
+%% Text = binary()
+%% Timeout = integer()
+%% @doc Closes the AMQP connection, allowing the caller to set the reply
+%% code and text, as well as a timeout for the operation, after which the
+%% connection will be abruptly terminated.
+close(ConnectionPid, Code, Text, Timeout) ->
+ %% class_id/method_id 0 mean "not closing due to a failing method".
+ Close = #'connection.close'{reply_text = Text,
+ reply_code = Code,
+ class_id = 0,
+ method_id = 0},
+ amqp_gen_connection:close(ConnectionPid, Close, Timeout).
+
+%% @doc Registers a handler process to be notified when the broker
+%% blocks/unblocks the connection.  Delegated to amqp_gen_connection;
+%% see there for the exact message format delivered to BlockHandler.
+register_blocked_handler(ConnectionPid, BlockHandler) ->
+ amqp_gen_connection:register_blocked_handler(ConnectionPid, BlockHandler).
+
+%%---------------------------------------------------------------------------
+%% Other functions
+%%---------------------------------------------------------------------------
+
+%% @spec (Code) -> atom()
+%% where
+%%      Code = integer()
+%% @doc Returns a descriptive atom corresponding to the given AMQP
+%% error code. The mapping is delegated to the protocol module
+%% selected by the ?PROTOCOL macro.
+error_atom(Code) -> ?PROTOCOL:amqp_exception(Code).
+
+%% @spec (ConnectionPid, Items) -> ResultList
+%% where
+%%      ConnectionPid = pid()
+%%      Items = [Item]
+%%      ResultList = [{Item, Result}]
+%%      Item = atom()
+%%      Result = term()
+%% @doc Returns information about the connection, as specified by the Items
+%% list. Item may be any atom returned by info_keys/1:
+%%<ul>
+%%<li>type - returns the type of the connection (network or direct)</li>
+%%<li>server_properties - returns the server_properties fields sent by the
+%%    server while establishing the connection</li>
+%%<li>is_closing - returns true if the connection is in the process of closing
+%%    and false otherwise</li>
+%%<li>amqp_params - returns the #amqp_params{} structure used to start the
+%%    connection</li>
+%%<li>num_channels - returns the number of channels currently open under the
+%%    connection (excluding channel 0)</li>
+%%<li>channel_max - returns the channel_max value negotiated with the
+%%    server</li>
+%%<li>heartbeat - returns the heartbeat value negotiated with the server
+%%    (only for the network connection)</li>
+%%<li>frame_max - returns the frame_max value negotiated with the
+%%    server (only for the network connection)</li>
+%%<li>sock - returns the socket for the network connection (for use with
+%%    e.g. inet:sockname/1) (only for the network connection)</li>
+%%<li>any other value - throws an exception</li>
+%%</ul>
+%% The call is synchronous and is served by the connection process.
+info(ConnectionPid, Items) ->
+    amqp_gen_connection:info(ConnectionPid, Items).
+
+%% @spec (ConnectionPid) -> Items
+%% where
+%%      ConnectionPid = pid()
+%%      Items = [Item]
+%%      Item = atom()
+%% @doc Returns a list of atoms that can be used in conjunction with info/2.
+%% Note that the list differs from one type of connection to another (network
+%% vs. direct). Use info_keys/0 to get a list of info keys that can be used
+%% for any connection.
+info_keys(ConnectionPid) ->
+    amqp_gen_connection:info_keys(ConnectionPid).
+
+%% @spec () -> Items
+%% where
+%%      Items = [Item]
+%%      Item = atom()
+%% @doc Returns a list of atoms that can be used in conjunction with info/2.
+%% These are general info keys, which can be used in any type of connection.
+%% Other info keys may exist for a specific type. To get the full list of
+%% atoms that can be used for a certain connection, use info_keys/1.
+%% This call does not require a running connection process.
+info_keys() ->
+    amqp_gen_connection:info_keys().
+
+%% @doc Builds an #amqp_adapter_info{} record describing the given socket
+%% for the given protocol. Delegates to amqp_direct_connection.
+socket_adapter_info(Socket, Proto) ->
+    amqp_direct_connection:socket_adapter_info(Socket, Proto).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_connection_sup).
+
+-include("amqp_client.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Starts the per-connection supervision tree: an empty one_for_all
+%% supervisor to which a type supervisor (infrastructure processes) and
+%% the connection process itself are attached dynamically. The type
+%% supervisor is started first so its pid can be handed to the connection.
+start_link(AMQPParams) ->
+    {ok, Sup} = supervisor2:start_link(?MODULE, []),
+    %% transient: restarted only on abnormal exit; infinity shutdown as it
+    %% is itself a supervisor.
+    {ok, TypeSup} = supervisor2:start_child(
+                      Sup, {connection_type_sup,
+                            {amqp_connection_type_sup, start_link, []},
+                            transient, infinity, supervisor,
+                            [amqp_connection_type_sup]}),
+    %% intrinsic: if the connection terminates normally the whole tree
+    %% shuts down with it; brutal_kill on supervisor shutdown.
+    {ok, Connection} = supervisor2:start_child(
+                         Sup, {connection, {amqp_gen_connection, start_link,
+                                            [TypeSup, AMQPParams]},
+                               intrinsic, brutal_kill, worker,
+                               [amqp_gen_connection]}),
+    {ok, Sup, Connection}.
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([]) ->
+    %% No static children: both the connection-type supervisor and the
+    %% connection process are attached dynamically by start_link/1.
+    Strategy = {one_for_all, 0, 1},
+    {ok, {Strategy, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_connection_type_sup).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_infrastructure_fun/3, type_module/1]).
+
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Starts the connection-type supervisor with no static children; the
+%% infrastructure processes are added later via start_infrastructure_fun/3.
+start_link() ->
+    supervisor2:start_link(?MODULE, []).
+
+%% Maps the AMQP parameter record to the connection type tag and the
+%% module implementing the amqp_gen_connection behaviour for that type.
+type_module(#amqp_params_direct{})  -> {direct, amqp_direct_connection};
+type_module(#amqp_params_network{}) -> {network, amqp_network_connection}.
+
+%%---------------------------------------------------------------------------
+
+%% Starts the channel supervision tree: first the channel sup-sup
+%% (supervisor of per-channel supervisors), then the channels manager
+%% which tracks channel numbers and hands out channel processes.
+%% Returns the {ok, ChannelsManagerPid} result of the second start_child.
+start_channels_manager(Sup, Conn, ConnName, Type) ->
+    {ok, ChSupSup} = supervisor2:start_child(
+                       Sup,
+                       {channel_sup_sup, {amqp_channel_sup_sup, start_link,
+                                          [Type, Conn, ConnName]},
+                        intrinsic, infinity, supervisor,
+                        [amqp_channel_sup_sup]}),
+    {ok, _} = supervisor2:start_child(
+                Sup,
+                {channels_manager, {amqp_channels_manager, start_link,
+                                    [Conn, ConnName, ChSupSup]},
+                 transient, ?MAX_WAIT, worker, [amqp_channels_manager]}).
+
+%% Returns a closure that, when invoked by the connection module during
+%% connect, starts the type-specific infrastructure under Sup.
+%% Network: channels manager, frame assembler state, writer and main
+%% reader (started in that order; the reader needs the assembler state).
+%% Direct: channels manager and a queue collector.
+start_infrastructure_fun(Sup, Conn, network) ->
+    fun (Sock, ConnName) ->
+            {ok, ChMgr} = start_channels_manager(Sup, Conn, ConnName, network),
+            {ok, AState} = rabbit_command_assembler:init(?PROTOCOL),
+            {ok, Writer} =
+                supervisor2:start_child(
+                  Sup,
+                  {writer,
+                   {rabbit_writer, start_link,
+                    [Sock, 0, ?FRAME_MIN_SIZE, ?PROTOCOL, Conn, ConnName]},
+                   transient, ?MAX_WAIT, worker, [rabbit_writer]}),
+            {ok, _Reader} =
+                supervisor2:start_child(
+                  Sup,
+                  {main_reader, {amqp_main_reader, start_link,
+                                 [Sock, Conn, ChMgr, AState, ConnName]},
+                   transient, ?MAX_WAIT, worker, [amqp_main_reader]}),
+            {ok, ChMgr, Writer}
+    end;
+start_infrastructure_fun(Sup, Conn, direct) ->
+    fun (ConnName) ->
+            {ok, ChMgr} = start_channels_manager(Sup, Conn, ConnName, direct),
+            {ok, Collector} =
+                supervisor2:start_child(
+                  Sup,
+                  {collector, {rabbit_queue_collector, start_link, [ConnName]},
+                   transient, ?MAX_WAIT, worker, [rabbit_queue_collector]}),
+            {ok, ChMgr, Collector}
+    end.
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([]) ->
+    %% Children are added dynamically via start_infrastructure_fun/3 and
+    %% start_channels_manager/4; the supervisor itself starts empty.
+    Strategy = {one_for_all, 0, 1},
+    {ok, {Strategy, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_direct_connection).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(amqp_gen_connection).
+
+-export([server_close/3]).
+
+-export([init/0, terminate/2, connect/4, do/2, open_channel_args/1, i/2,
+ info_keys/0, handle_message/2, closing/3, channels_terminated/1]).
+
+-export([socket_adapter_info/2]).
+
+-record(state, {node,
+ user,
+ vhost,
+ params,
+ adapter_info,
+ collector,
+ closing_reason %% undefined | Reason
+ }).
+
+-define(INFO_KEYS, [type]).
+
+-define(CREATION_EVENT_KEYS, [pid, protocol, host, port, name,
+ peer_host, peer_port,
+ user, vhost, client_properties, type]).
+
+%%---------------------------------------------------------------------------
+
+%% amqp_connection:close/1,3 closes from the client end; this entry point
+%% lets the broker side request closure of a direct connection instead.
+server_close(ConnectionPid, ReplyCode, ReplyText) ->
+    CloseMethod = #'connection.close'{reply_code = ReplyCode,
+                                      reply_text = ReplyText,
+                                      class_id   = 0,
+                                      method_id  = 0},
+    amqp_gen_connection:server_close(ConnectionPid, CloseMethod).
+
+%% amqp_gen_connection behaviour callback: initial (empty) module state;
+%% the fields are populated later by connect/4.
+init() ->
+    {ok, #state{}}.
+
+%% amqp_gen_connection behaviour callback: the extra arguments a direct
+%% channel process needs (this connection, broker node, user, vhost and
+%% the queue collector).
+open_channel_args(#state{node      = Node,
+                         user      = User,
+                         vhost     = VHost,
+                         collector = Collector}) ->
+    [self(), Node, User, VHost, Collector].
+
+%% amqp_gen_connection behaviour callback. A direct connection has no
+%% socket, so outgoing connection-level methods are deliberately dropped.
+do(_Method, _State) ->
+    ok.
+
+%% amqp_gen_connection behaviour callback for out-of-band messages.
+%% force_event_refresh: re-emit the connection_created event on the broker
+%% node (best effort via rpc). closing_timeout: the close handshake took
+%% too long, stop. Anything else is a protocol violation of our own.
+handle_message({force_event_refresh, Ref}, State = #state{node = Node}) ->
+    rpc:call(Node, rabbit_event, notify,
+             [connection_created, connection_info(State), Ref]),
+    {ok, State};
+handle_message(closing_timeout, State = #state{closing_reason = Reason}) ->
+    {stop, {closing_timeout, Reason}, State};
+handle_message(Msg, State) ->
+    {stop, {unexpected_msg, Msg}, State}.
+
+%% amqp_gen_connection behaviour callback: remember why we are closing so
+%% channels_terminated/1 can report it once all channels are gone.
+closing(_ChannelCloseType, Reason, State) ->
+    {ok, State#state{closing_reason = Reason}}.
+
+%% amqp_gen_connection behaviour callback: all channels are gone; delete
+%% any exclusive queues held by the collector, then stop with the
+%% recorded closing reason.
+channels_terminated(State = #state{closing_reason = Reason,
+                                   collector = Collector}) ->
+    rabbit_queue_collector:delete_all(Collector),
+    {stop, {shutdown, Reason}, State}.
+
+%% amqp_gen_connection behaviour callback: tell the broker node this
+%% direct connection is gone (best effort rpc; result is ignored).
+terminate(_Reason, #state{node = Node}) ->
+    rpc:call(Node, rabbit_direct, disconnect, [self(), [{pid, self()}]]),
+    ok.
+
+%% amqp_gen_connection behaviour callback: per-item info lookup. The
+%% catch-all clause must stay last; unknown items raise bad_argument.
+i(type,     _State) -> direct;
+i(pid,      _State) -> self();
+%% AMQP Params
+i(user,              #state{params = P}) -> P#amqp_params_direct.username;
+i(vhost,             #state{params = P}) -> P#amqp_params_direct.virtual_host;
+i(client_properties, #state{params = P}) ->
+    P#amqp_params_direct.client_properties;
+%% Optional adapter info
+i(protocol,  #state{adapter_info = I}) -> I#amqp_adapter_info.protocol;
+i(host,      #state{adapter_info = I}) -> I#amqp_adapter_info.host;
+i(port,      #state{adapter_info = I}) -> I#amqp_adapter_info.port;
+i(peer_host, #state{adapter_info = I}) -> I#amqp_adapter_info.peer_host;
+i(peer_port, #state{adapter_info = I}) -> I#amqp_adapter_info.peer_port;
+i(name,      #state{adapter_info = I}) -> I#amqp_adapter_info.name;
+
+i(Item, _State) -> throw({bad_argument, Item}).
+
+%% amqp_gen_connection behaviour callback: info keys specific to the
+%% direct connection type.
+info_keys() ->
+    ?INFO_KEYS.
+
+%% Look up each requested item via i/2, pairing it with its value.
+infos(Items, State) ->
+    lists:map(fun (Item) -> {Item, i(Item, State)} end, Items).
+
+%% Event payload for connection_created: the creation-event items plus
+%% whatever extra info the adapter supplied (e.g. SSL details).
+connection_info(State = #state{adapter_info = I}) ->
+    infos(?CREATION_EVENT_KEYS, State) ++ I#amqp_adapter_info.additional_info.
+
+%% amqp_gen_connection behaviour callback: authenticate against the broker
+%% node over rpc and start the direct infrastructure (SIF) on success.
+%% Returns {ok, {ServerProperties, ChannelMax = 0, ChMgr, State}};
+%% channel_max is always 0 (unlimited) for direct connections.
+connect(Params = #amqp_params_direct{username     = Username,
+                                     password     = Password,
+                                     node         = Node,
+                                     adapter_info = Info,
+                                     virtual_host = VHost},
+        SIF, _TypeSup, State) ->
+    State1 = State#state{node         = Node,
+                         vhost        = VHost,
+                         params       = Params,
+                         adapter_info = ensure_adapter_info(Info)},
+    case rpc:call(Node, rabbit_direct, connect,
+                  [{Username, Password}, VHost, ?PROTOCOL, self(),
+                   connection_info(State1)]) of
+        {ok, {User, ServerProperties}} ->
+            %% SIF takes the connection name and yields the channels
+            %% manager and the queue collector.
+            {ok, ChMgr, Collector} = SIF(i(name, State1)),
+            State2 = State1#state{user      = User,
+                                  collector = Collector},
+            {ok, {ServerProperties, 0, ChMgr, State2}};
+        {error, _} = E ->
+            E;
+        {badrpc, nodedown} ->
+            {error, {nodedown, Node}}
+    end.
+
+%% Fills in defaults for a missing or partially-populated adapter info
+%% record. Clause order matters: each clause fixes one field and recurses
+%% until the final catch-all returns the completed record.
+ensure_adapter_info(none) ->
+    ensure_adapter_info(#amqp_adapter_info{});
+
+ensure_adapter_info(A = #amqp_adapter_info{protocol = unknown}) ->
+    ensure_adapter_info(A#amqp_adapter_info{
+                          protocol = {'Direct', ?PROTOCOL:version()}});
+
+ensure_adapter_info(A = #amqp_adapter_info{name = unknown}) ->
+    %% Default connection name: this process's pid rendered as a binary.
+    Name = list_to_binary(rabbit_misc:pid_to_string(self())),
+    ensure_adapter_info(A#amqp_adapter_info{name = Name});
+
+ensure_adapter_info(Info) -> Info.
+
+%% Build an #amqp_adapter_info{} describing Sock for the given protocol.
+%% Address and name lookups are best-effort: failures yield placeholder
+%% values rather than an error.
+socket_adapter_info(Sock, Protocol) ->
+    {PeerHost, PeerPort, Host, Port} =
+        case rabbit_net:socket_ends(Sock, inbound) of
+            {ok, Ends} -> Ends;
+            _          -> {unknown, unknown, unknown, unknown}
+        end,
+    NameStr = case rabbit_net:connection_string(Sock, inbound) of
+                  {ok, Str} -> Str;
+                  _Error    -> "(unknown)"
+              end,
+    #amqp_adapter_info{protocol        = Protocol,
+                       name            = list_to_binary(NameStr),
+                       host            = Host,
+                       port            = Port,
+                       peer_host       = PeerHost,
+                       peer_port       = PeerPort,
+                       additional_info = maybe_ssl_info(Sock)}.
+
+%% SSL-related additional info: a bare flag for plain sockets, the flag
+%% plus session and certificate details for TLS sockets.
+maybe_ssl_info(Sock) ->
+    case rabbit_net:is_ssl(Sock) of
+        false -> [{ssl, false}];
+        true  -> [{ssl, true} | ssl_info(Sock) ++ ssl_cert_info(Sock)]
+    end.
+
+%% TLS session parameters as a proplist. Two tuple shapes are accepted
+%% from rabbit_net:ssl_info/1 (the cipher-suite tuple gained a field in
+%% later OTP releases); anything else degrades to 'unknown' placeholders.
+ssl_info(Sock) ->
+    {Protocol, KeyExchange, Cipher, Hash} =
+        case rabbit_net:ssl_info(Sock) of
+            {ok, {P, {K, C, H}}}    -> {P, K, C, H};
+            {ok, {P, {K, C, H, _}}} -> {P, K, C, H};
+            _                       -> {unknown, unknown, unknown, unknown}
+        end,
+    [{ssl_protocol,     Protocol},
+     {ssl_key_exchange, KeyExchange},
+     {ssl_cipher,       Cipher},
+     {ssl_hash,         Hash}].
+
+%% Peer-certificate details as a proplist; empty if no certificate is
+%% available (or the lookup fails).
+ssl_cert_info(Sock) ->
+    case rabbit_net:peercert(Sock) of
+        {ok, Cert} ->
+            Fields = [{peer_cert_issuer,   fun rabbit_ssl:peer_cert_issuer/1},
+                      {peer_cert_subject,  fun rabbit_ssl:peer_cert_subject/1},
+                      {peer_cert_validity, fun rabbit_ssl:peer_cert_validity/1}],
+            [{Key, list_to_binary(Extract(Cert))} || {Key, Extract} <- Fields];
+        _ ->
+            []
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @doc This module is an implementation of the amqp_gen_consumer
+%% behaviour and can be used as part of the Consumer parameter when
+%% opening AMQP channels.
+%% <br/>
+%% <br/>
+%% The Consumer parameter for this implementation is {{@module},
+%% [ConsumerPid]@}, where ConsumerPid is a process that will receive
+%% queue subscription-related messages.<br/>
+%% <br/>
+%% This consumer implementation causes the channel to send to the
+%% ConsumerPid all basic.consume, basic.consume_ok, basic.cancel,
+%% basic.cancel_ok and basic.deliver messages received from the
+%% server.
+%% <br/>
+%% <br/>
+%% In addition, this consumer implementation monitors the ConsumerPid
+%% and exits with the same shutdown reason when it dies. 'DOWN'
+%% messages from other sources are passed to ConsumerPid.
+%% <br/>
+%% Warning! It is not recommended to rely on a consumer to kill off the
+%% channel (through the exit signal). That may cause messages to get lost.
+%% Always use amqp_channel:close/{1,3} for a clean shut down.<br/>
+%% <br/>
+%% This module has no public functions.
+-module(amqp_direct_consumer).
+
+-include("amqp_gen_consumer_spec.hrl").
+
+-behaviour(amqp_gen_consumer).
+
+-export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3,
+ handle_cancel/2, handle_server_cancel/2, handle_deliver/3,
+ handle_info/2, handle_call/3, terminate/2]).
+
+%%---------------------------------------------------------------------------
+%% amqp_gen_consumer callbacks
+%%---------------------------------------------------------------------------
+
+%% @private
+%% Monitor the consumer so we terminate with it if it dies; the consumer
+%% pid itself is the whole callback state.
+init([ConsumerPid]) ->
+    _MRef = erlang:monitor(process, ConsumerPid),
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward the basic.consume method together with its sender.
+handle_consume(Method, Sender, ConsumerPid) ->
+    ConsumerPid ! {Method, Sender},
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward the basic.consume_ok method alone.
+handle_consume_ok(Method, _Args, ConsumerPid) ->
+    ConsumerPid ! Method,
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward the basic.cancel method alone.
+handle_cancel(Method, ConsumerPid) ->
+    ConsumerPid ! Method,
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward the basic.cancel_ok method alone.
+handle_cancel_ok(Method, _Args, ConsumerPid) ->
+    ConsumerPid ! Method,
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward a server-initiated cancel, tagged so the consumer can tell it
+%% apart from a client-requested one.
+handle_server_cancel(Method, ConsumerPid) ->
+    ConsumerPid ! {server_cancel, Method},
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward a basic.deliver together with its content.
+handle_deliver(Method, Content, ConsumerPid) ->
+    ConsumerPid ! {Method, Content},
+    {ok, ConsumerPid}.
+
+%% @private
+%% The first clause fires when the monitored consumer itself has died:
+%% terminate with the same reason. Clause order matters — any other
+%% 'DOWN' message is forwarded to the consumer untouched.
+handle_info({'DOWN', _MRef, process, ConsumerPid, Reason}, ConsumerPid) ->
+    {error, {consumer_died, Reason}, ConsumerPid};
+handle_info({'DOWN', MRef, process, Pid, Reason}, ConsumerPid) ->
+    ConsumerPid ! {'DOWN', MRef, process, Pid, Reason},
+    {ok, ConsumerPid}.
+
+%% @private
+%% Forward arbitrary calls to the consumer and acknowledge immediately.
+handle_call(Msg, Sender, ConsumerPid) ->
+    ConsumerPid ! {Msg, Sender},
+    {reply, ok, ConsumerPid}.
+
+%% @private
+%% Nothing to clean up; return the consumer pid as the final state.
+terminate(_Reason, C) ->
+    C.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_gen_connection).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([start_link/2, connect/1, open_channel/3, hard_error_in_channel/3,
+ channel_internal_error/3, server_misbehaved/2, channels_terminated/1,
+ close/3, server_close/2, info/2, info_keys/0, info_keys/1,
+ register_blocked_handler/2]).
+-export([behaviour_info/1]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-define(INFO_KEYS, [server_properties, is_closing, amqp_params, num_channels,
+ channel_max]).
+
+-record(state, {module,
+ module_state,
+ channels_manager,
+ amqp_params,
+ channel_max,
+ server_properties,
+ %% connection.block, connection.unblock handler
+ block_handler,
+ closing = false %% #closing{} | false
+ }).
+
+-record(closing, {reason,
+ close,
+ from = none}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Start the connection process. connect/1 must be called on the result
+%% before the connection is usable.
+start_link(TypeSup, Params) ->
+    gen_server:start_link(?MODULE, {TypeSup, Params}, []).
+
+%% Trigger the actual broker handshake; blocks until it completes.
+connect(Conn) ->
+    gen_server:call(Conn, connect, infinity).
+
+%% Ask the connection to allocate a channel process, then complete the
+%% channel.open handshake on it before handing it to the caller.
+open_channel(Conn, ProposedNumber, Consumer) ->
+    Command = {command, {open_channel, ProposedNumber, Consumer}},
+    case gen_server:call(Conn, Command, infinity) of
+        {ok, ChannelPid} ->
+            ok = amqp_channel:open(ChannelPid),
+            {ok, ChannelPid};
+        Error ->
+            Error
+    end.
+
+%% Notify the connection that a channel hit a connection-level error.
+hard_error_in_channel(Conn, ChannelPid, Reason) ->
+    gen_server:cast(Conn, {hard_error_in_channel, ChannelPid, Reason}).
+
+%% Notify the connection of an internal (client-side) channel error.
+channel_internal_error(Conn, ChannelPid, Reason) ->
+    gen_server:cast(Conn, {channel_internal_error, ChannelPid, Reason}).
+
+%% Notify the connection that the broker violated the protocol.
+server_misbehaved(Conn, AmqpError) ->
+    gen_server:cast(Conn, {server_misbehaved, AmqpError}).
+
+%% Signal that every channel of this connection has terminated.
+channels_terminated(Conn) ->
+    gen_server:cast(Conn, channels_terminated).
+
+%% Client-initiated close with a pre-built connection.close method.
+close(Conn, Close, Timeout) ->
+    gen_server:call(Conn, {command, {close, Close, Timeout}}, infinity).
+
+%% Broker-initiated close (used by the direct connection).
+server_close(Conn, Close) ->
+    gen_server:cast(Conn, {server_close, Close}).
+
+%% Synchronous per-item info query, served by the connection process.
+info(Conn, Items) ->
+    gen_server:call(Conn, {info, Items}, infinity).
+
+%% Info keys common to every connection type; type-specific keys are
+%% obtained via info_keys/1.
+info_keys() ->
+    ?INFO_KEYS.
+
+%% Full key list for a live connection: common keys plus the ones the
+%% type module contributes.
+info_keys(Conn) ->
+    gen_server:call(Conn, info_keys, infinity).
+
+%%---------------------------------------------------------------------------
+%% Behaviour
+%%---------------------------------------------------------------------------
+
+%% Callback contract for connection type modules (pre -callback attribute
+%% style): each entry is {FunctionName, Arity}.
+behaviour_info(callbacks) ->
+    [
+     %% init() -> {ok, InitialState}
+     {init, 0},
+
+     %% terminate(Reason, FinalState) -> Ignored
+     {terminate, 2},
+
+     %% connect(AmqpParams, SIF, TypeSup, State) ->
+     %%     {ok, ConnectParams} | {closing, ConnectParams, AmqpError, Reply} |
+     %%     {error, Error}
+     %% where
+     %%     ConnectParams = {ServerProperties, ChannelMax, ChMgr, NewState}
+     {connect, 4},
+
+     %% do(Method, State) -> Ignored
+     {do, 2},
+
+     %% open_channel_args(State) -> OpenChannelArgs
+     {open_channel_args, 1},
+
+     %% i(InfoItem, State) -> Info
+     {i, 2},
+
+     %% info_keys() -> [InfoItem]
+     {info_keys, 0},
+
+     %% CallbackReply = {ok, NewState} | {stop, Reason, FinalState}
+
+     %% handle_message(Message, State) -> CallbackReply
+     {handle_message, 2},
+
+     %% closing(flush|abrupt, Reason, State) -> CallbackReply
+     {closing, 3},
+
+     %% channels_terminated(State) -> CallbackReply
+     {channels_terminated, 1}
+    ];
+behaviour_info(_Other) ->
+    undefined.
+
+%% Dispatch a behaviour callback on the type module and translate its
+%% {ok, _} / {stop, _, _} reply into the corresponding gen_server return,
+%% threading the updated module state back into our own.
+callback(Function, Params, State = #state{module       = Mod,
+                                          module_state = MState}) ->
+    case erlang:apply(Mod, Function, Params ++ [MState]) of
+        {ok, NewMState}           -> {noreply,
+                                      State#state{module_state = NewMState}};
+        {stop, Reason, NewMState} -> {stop, Reason,
+                                      State#state{module_state = NewMState}}
+    end.
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+%% gen_server callback. The real initialisation happens in the 'connect'
+%% call; until then the state is just the start arguments.
+init({TypeSup, AMQPParams}) ->
+    %% Trapping exits since we need to make sure that the `terminate/2' is
+    %% called in the case of direct connection (it does not matter for a network
+    %% connection). See bug25116.
+    process_flag(trap_exit, true),
+    %% connect() has to be called first, so we can use a special state here
+    {ok, {TypeSup, AMQPParams}}.
+
+%% gen_server callback. The 'connect' clause transforms the bootstrap
+%% state into a full #state{} and drives the type module's handshake;
+%% it must run before any other call. Commands are rejected with
+%% 'closing' once a close is in progress.
+handle_call(connect, _From, {TypeSup, AMQPParams}) ->
+    {Type, Mod} = amqp_connection_type_sup:type_module(AMQPParams),
+    {ok, MState} = Mod:init(),
+    SIF = amqp_connection_type_sup:start_infrastructure_fun(
+            TypeSup, self(), Type),
+    State = #state{module        = Mod,
+                   module_state  = MState,
+                   amqp_params   = AMQPParams,
+                   block_handler = none},
+    case Mod:connect(AMQPParams, SIF, TypeSup, MState) of
+        {ok, Params} ->
+            {reply, {ok, self()}, after_connect(Params, State)};
+        {closing, #amqp_error{name = access_refused} = AmqpError, Error} ->
+            %% Auth failure: stop immediately, no close handshake.
+            {stop, {shutdown, AmqpError}, Error, State};
+        {closing, Params, #amqp_error{} = AmqpError, Error} ->
+            %% Broker misbehaved during connect: report it and keep the
+            %% connection around to run the close protocol.
+            server_misbehaved(self(), AmqpError),
+            {reply, Error, after_connect(Params, State)};
+        {error, _} = Error ->
+            {stop, {shutdown, Error}, Error, State}
+    end;
+handle_call({command, Command}, From, State = #state{closing = false}) ->
+    handle_command(Command, From, State);
+handle_call({command, _Command}, _From, State) ->
+    {reply, closing, State};
+handle_call({info, Items}, _From, State) ->
+    {reply, [{Item, i(Item, State)} || Item <- Items], State};
+handle_call(info_keys, _From, State = #state{module = Mod}) ->
+    {reply, ?INFO_KEYS ++ Mod:info_keys(), State}.
+
+%% Finish initialisation once the handshake has succeeded: propagate the
+%% negotiated channel_max to the channels manager (0 means unlimited, so
+%% nothing to set), record the negotiated values and register a readable
+%% process name.
+after_connect({ServerProperties, ChannelMax, ChMgr, NewMState}, State) ->
+    case ChannelMax of
+        0 -> ok;
+        _ -> amqp_channels_manager:set_channel_max(ChMgr, ChannelMax)
+    end,
+    State1 = State#state{server_properties = ServerProperties,
+                         channel_max       = ChannelMax,
+                         channels_manager  = ChMgr,
+                         module_state      = NewMState},
+    rabbit_misc:store_proc_name(?MODULE, i(name, State1)),
+    State1.
+
+%% gen_server callback: asynchronous notifications from channels, the
+%% reader/writer infrastructure and the public API wrappers above.
+handle_cast({method, Method, none, noflow}, State) ->
+    %% Channel-0 method received from the broker.
+    handle_method(Method, State);
+handle_cast(channels_terminated, State) ->
+    handle_channels_terminated(State);
+handle_cast({hard_error_in_channel, _Pid, Reason}, State) ->
+    server_initiated_close(Reason, State);
+handle_cast({channel_internal_error, Pid, Reason}, State) ->
+    ?LOG_WARN("Connection (~p) closing: internal error in channel (~p): ~p~n",
+              [self(), Pid, Reason]),
+    internal_error(Pid, Reason, State);
+handle_cast({server_misbehaved, AmqpError}, State) ->
+    server_misbehaved_close(AmqpError, State);
+handle_cast({server_close, #'connection.close'{} = Close}, State) ->
+    server_initiated_close(Close, State);
+handle_cast({register_blocked_handler, HandlerPid}, State) ->
+    %% Monitor the handler so we can deregister it if it dies.
+    Ref = erlang:monitor(process, HandlerPid),
+    {noreply, State#state{block_handler = {HandlerPid, Ref}}}.
+
+%% @private
+%% A dead block handler is silently deregistered; every other raw message
+%% is handed to the type module's handle_message/2 callback.
+handle_info({'DOWN', _, process, BlockHandler, Reason},
+            State = #state{block_handler = {BlockHandler, _Ref}}) ->
+    ?LOG_WARN("Connection (~p): Unregistering block handler ~p because it died. "
+              "Reason: ~p~n", [self(), BlockHandler, Reason]),
+    {noreply, State#state{block_handler = none}};
+handle_info(Info, State) ->
+    callback(handle_message, [Info], State).
+
+%% gen_server callback: give the type module a chance to clean up.
+terminate(Reason, #state{module = Mod, module_state = MState}) ->
+    Mod:terminate(Reason, MState).
+
+%% gen_server callback: no state migration needed across upgrades.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%---------------------------------------------------------------------------
+%% Infos
+%%---------------------------------------------------------------------------
+
+%% Per-item info lookup: generic keys are answered here, everything else
+%% is delegated to the type module (which throws on unknown items). The
+%% delegating clause must stay last.
+i(server_properties, State) -> State#state.server_properties;
+i(is_closing,        State) -> State#state.closing =/= false;
+i(amqp_params,       State) -> State#state.amqp_params;
+i(channel_max,       State) -> State#state.channel_max;
+i(num_channels,      State) -> amqp_channels_manager:num_channels(
+                                 State#state.channels_manager);
+i(Item, #state{module = Mod, module_state = MState}) -> Mod:i(Item, MState).
+
+%%---------------------------------------------------------------------------
+%% connection.blocked, connection.unblocked
+%%---------------------------------------------------------------------------
+
+%% Register a process to receive connection.blocked / connection.unblocked
+%% notifications forwarded by handle_method/2.
+register_blocked_handler(Conn, HandlerPid) ->
+    gen_server:cast(Conn, {register_blocked_handler, HandlerPid}).
+
+%%---------------------------------------------------------------------------
+%% Command handling
+%%---------------------------------------------------------------------------
+
+%% Commands accepted while the connection is not closing (guarded by the
+%% caller in handle_call/3).
+handle_command({open_channel, ProposedNumber, Consumer}, _From,
+               State = #state{channels_manager = ChMgr,
+                              module           = Mod,
+                              module_state     = MState}) ->
+    %% The type module supplies the extra args the channel process needs.
+    {reply, amqp_channels_manager:open_channel(ChMgr, ProposedNumber, Consumer,
+                                               Mod:open_channel_args(MState)),
+     State};
+handle_command({close, #'connection.close'{} = Close, Timeout}, From, State) ->
+    app_initiated_close(Close, From, Timeout, State).
+
+%%---------------------------------------------------------------------------
+%% Handling methods from broker
+%%---------------------------------------------------------------------------
+
+%% Channel-0 methods arriving from the broker. close: broker wants the
+%% connection gone. close_ok: our own close completed — reply to the
+%% waiting caller (if any) and stop. blocked/unblocked: forwarded to the
+%% registered handler, if one exists. Anything else is a protocol
+%% violation by the broker.
+handle_method(#'connection.close'{} = Close, State) ->
+    server_initiated_close(Close, State);
+handle_method(#'connection.close_ok'{}, State = #state{closing = Closing}) ->
+    case Closing of #closing{from = none} -> ok;
+                    #closing{from = From} -> gen_server:reply(From, ok)
+    end,
+    {stop, {shutdown, closing_to_reason(Closing)}, State};
+handle_method(#'connection.blocked'{} = Blocked, State = #state{block_handler = BlockHandler}) ->
+    case BlockHandler of none        -> ok;
+                         {Pid, _Ref} -> Pid ! Blocked
+    end,
+    {noreply, State};
+handle_method(#'connection.unblocked'{} = Unblocked, State = #state{block_handler = BlockHandler}) ->
+    case BlockHandler of none        -> ok;
+                         {Pid, _Ref} -> Pid ! Unblocked
+    end,
+    {noreply, State};
+handle_method(Other, State) ->
+    server_misbehaved_close(#amqp_error{name        = command_invalid,
+                                        explanation = "unexpected method on "
+                                                      "channel 0",
+                                        method      = element(1, Other)},
+                            State).
+
+%%---------------------------------------------------------------------------
+%% Closing
+%%---------------------------------------------------------------------------
+
+%% Application-requested close: optionally schedule a closing_timeout
+%% message (handled by the type module) so a stuck handshake cannot hang
+%% the caller forever, then enter the closing state with a graceful
+%% ('flush') channel shutdown.
+app_initiated_close(Close, From, Timeout, State) ->
+    case Timeout of
+        infinity -> ok;
+        _        -> erlang:send_after(Timeout, self(), closing_timeout)
+    end,
+    set_closing_state(flush, #closing{reason = app_initiated_close,
+                                      close = Close,
+                                      from = From}, State).
+
+%% Client-side channel failure: abort the connection abruptly, reporting
+%% the offending pid and reason in the close method's reply text.
+internal_error(Pid, Reason, State) ->
+    ReplyText = list_to_binary(rabbit_misc:format("~p:~p", [Pid, Reason])),
+    CloseMethod = #'connection.close'{reply_text = ReplyText,
+                                      reply_code = ?INTERNAL_ERROR,
+                                      class_id   = 0,
+                                      method_id  = 0},
+    set_closing_state(abrupt, #closing{reason = internal_error,
+                                       close  = CloseMethod},
+                      State).
+
+%% Broker-requested close (or a hard channel error): log it and shut
+%% channels down abruptly.
+server_initiated_close(Close, State) ->
+    ?LOG_WARN("Connection (~p) closing: received hard error ~p "
+              "from server~n", [self(), Close]),
+    set_closing_state(abrupt, #closing{reason = server_initiated_close,
+                                       close = Close}, State).
+
+%% Broker protocol violation: translate the #amqp_error{} into a channel-0
+%% connection.close method and shut down abruptly.
+server_misbehaved_close(AmqpError, State) ->
+    ?LOG_WARN("Connection (~p) closing: server misbehaved: ~p~n",
+              [self(), AmqpError]),
+    {0, Close} = rabbit_binary_generator:map_exception(0, AmqpError, ?PROTOCOL),
+    set_closing_state(abrupt, #closing{reason = server_misbehaved,
+                                       close = Close}, State).
+
+%% Enter (or escalate) the closing state. If a close is already in
+%% progress, the close with the smaller priority value wins (see
+%% closing_priority/1 — lower number = higher precedence), so a more
+%% serious reason can override a pending friendly close but not vice
+%% versa. Channels are told to shut down and the type module is notified.
+set_closing_state(ChannelCloseType, NewClosing,
+                  State = #state{channels_manager = ChMgr,
+                                 closing = CurClosing}) ->
+    ResClosing =
+        case closing_priority(NewClosing) =< closing_priority(CurClosing) of
+            true  -> NewClosing;
+            false -> CurClosing
+        end,
+    ClosingReason = closing_to_reason(ResClosing),
+    amqp_channels_manager:signal_connection_closing(ChMgr, ChannelCloseType,
+                                                    ClosingReason),
+    callback(closing, [ChannelCloseType, ClosingReason],
+             State#state{closing = ResClosing}).
+
+%% Precedence of competing close reasons: lower value wins in
+%% set_closing_state/3. 'false' (not closing) has the weakest priority so
+%% any real close overrides it.
+closing_priority(false)                                     -> 99;
+closing_priority(#closing{reason = app_initiated_close})    -> 4;
+closing_priority(#closing{reason = internal_error})         -> 3;
+closing_priority(#closing{reason = server_misbehaved})      -> 2;
+closing_priority(#closing{reason = server_initiated_close}) -> 1.
+
+%% Turn a #closing{} into a termination reason. Reply code 200 means a
+%% clean close, reported as 'normal'. The last clause handles a close
+%% field that already holds a converted {Reason, Code, Text} triple
+%% (rather than a connection.close record) and returns it unchanged.
+closing_to_reason(#closing{close = #'connection.close'{reply_code = 200}}) ->
+    normal;
+closing_to_reason(#closing{reason = Reason,
+                           close = #'connection.close'{reply_code = Code,
+                                                       reply_text = Text}}) ->
+    {Reason, Code, Text};
+closing_to_reason(#closing{reason = Reason,
+                           close = {Reason, _Code, _Text} = Close}) ->
+    Close.
+
+%% All channels have gone away while closing. If the broker initiated the
+%% close we owe it a close_ok; otherwise we now send our own close
+%% method. Finally run the type module's channels_terminated callback
+%% and, if it stops us, release any caller still waiting in close/3.
+handle_channels_terminated(State = #state{closing = Closing,
+                                          module = Mod,
+                                          module_state = MState}) ->
+    #closing{reason = Reason, close = Close, from = From} = Closing,
+    case Reason of
+        server_initiated_close ->
+            Mod:do(#'connection.close_ok'{}, MState);
+        _ ->
+            Mod:do(Close, MState)
+    end,
+    case callback(channels_terminated, [], State) of
+        {stop, _, _} = Stop -> case From of none -> ok;
+                                            _    -> gen_server:reply(From, ok)
+                               end,
+                               Stop;
+        Other               -> Other
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @doc A behaviour module for implementing consumers for
+%% amqp_channel. To specify a consumer implementation for a channel,
+%% use amqp_connection:open_channel/{2,3}.
+%% <br/>
+%% All callbacks are called within the gen_consumer process. <br/>
+%% <br/>
+%% See comments in amqp_gen_consumer.erl source file for documentation
+%% on the callback functions.
+%% <br/>
+%% Note that making calls to the channel from the callback module will
+%% result in deadlock.
+-module(amqp_gen_consumer).
+
+-include("amqp_client.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/3, call_consumer/2, call_consumer/3]).
+-export([behaviour_info/1]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, prioritise_info/3]).
+
+-record(state, {module,
+ module_state}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% @type ok_error() = {ok, state()} | {error, reason(), state()}.
+%% Denotes a successful or an error return from a consumer module call.
+
+%% Starts a consumer process running ConsumerModule's callbacks.
+%% ExtraParams is handed verbatim to the module's init/1; Identity is
+%% recorded as the process name for diagnostics (see init/1 below).
+start_link(ConsumerModule, ExtraParams, Identity) ->
+    InitArgs = [ConsumerModule, ExtraParams, Identity],
+    gen_server2:start_link(?MODULE, InitArgs, []).
+
+%% @spec (Consumer, Msg) -> ok
+%% where
+%%      Consumer = pid()
+%%      Msg = any()
+%%
+%% @doc Performs an arbitrary call into the consumer module. The message
+%% is wrapped as {consumer_call, Msg} and dispatched to the module's
+%% handle_call/3 callback; the call blocks until the consumer replies.
+call_consumer(ConsumerPid, Request) ->
+    gen_server2:call(ConsumerPid, {consumer_call, Request}, infinity).
+
+%% @spec (Consumer, Method, Args) -> ok
+%% where
+%%      Consumer = pid()
+%%      Method = amqp_method()
+%%      Args = any()
+%%
+%% @doc Used by amqp_channel to forward received methods and deliveries
+%% to the consumer module.
+call_consumer(ConsumerPid, Method, MethodArgs) ->
+    gen_server2:call(ConsumerPid, {consumer_call, Method, MethodArgs},
+                     infinity).
+
+%%---------------------------------------------------------------------------
+%% Behaviour
+%%---------------------------------------------------------------------------
+
+%% @private
+%% Enumerates the callbacks a consumer implementation module must
+%% export; the inline notes below document each callback's contract.
+behaviour_info(callbacks) ->
+ [
+ %% init(Args) -> {ok, InitialState} | {stop, Reason} | ignore
+ %% where
+ %% Args = [any()]
+ %% InitialState = state()
+ %% Reason = term()
+ %%
+ %% This callback is invoked by the channel, when it starts
+ %% up. Use it to initialize the state of the consumer. In case of
+ %% an error, return {stop, Reason} or ignore.
+ {init, 1},
+
+ %% handle_consume(Consume, Sender, State) -> ok_error()
+ %% where
+ %% Consume = #'basic.consume'{}
+ %% Sender = pid()
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel before a basic.consume
+ %% is sent to the server.
+ {handle_consume, 3},
+
+ %% handle_consume_ok(ConsumeOk, Consume, State) -> ok_error()
+ %% where
+ %% ConsumeOk = #'basic.consume_ok'{}
+ %% Consume = #'basic.consume'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a
+ %% basic.consume_ok is received from the server. Consume is the original
+ %% method sent out to the server - it can be used to associate the
+ %% call with the response.
+ {handle_consume_ok, 3},
+
+ %% handle_cancel(Cancel, State) -> ok_error()
+ %% where
+ %% Cancel = #'basic.cancel'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.cancel
+ %% is sent to the server.
+ {handle_cancel, 2},
+
+ %% handle_cancel_ok(CancelOk, Cancel, State) -> ok_error()
+ %% where
+ %% CancelOk = #'basic.cancel_ok'{}
+ %% Cancel = #'basic.cancel'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.cancel_ok
+ %% is received from the server.
+ {handle_cancel_ok, 3},
+
+ %% handle_server_cancel(Cancel, State) -> ok_error()
+ %% where
+ %% Cancel = #'basic.cancel'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.cancel
+ %% is received from the server.
+ {handle_server_cancel, 2},
+
+ %% handle_deliver(Deliver, Message, State) -> ok_error()
+ %% where
+ %% Deliver = #'basic.deliver'{}
+ %% Message = #amqp_msg{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.deliver
+ %% is received from the server.
+ {handle_deliver, 3},
+
+ %% handle_info(Info, State) -> ok_error()
+ %% where
+ %% Info = any()
+ %% State = state()
+ %%
+ %% This callback is invoked when the consumer process receives a
+ %% message.
+ {handle_info, 2},
+
+ %% handle_call(Msg, From, State) -> {reply, Reply, NewState} |
+ %% {noreply, NewState} |
+ %% {error, Reason, NewState}
+ %% where
+ %% Msg = any()
+ %% From = any()
+ %% Reply = any()
+ %% State = state()
+ %% NewState = state()
+ %%
+ %% This callback is invoked by the channel when calling
+ %% amqp_channel:call_consumer/2. Reply is the term that
+ %% amqp_channel:call_consumer/2 will return. If the callback
+ %% returns {noreply, _}, then the caller to
+ %% amqp_channel:call_consumer/2 and the channel remain blocked
+ %% until gen_server2:reply/2 is used with the provided From as
+ %% the first argument.
+ {handle_call, 3},
+
+ %% terminate(Reason, State) -> any()
+ %% where
+ %% Reason = any()
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel after it has shut down and
+ %% just before its process exits.
+ {terminate, 2}
+ ];
+%% Any other query (e.g. 'optional_callbacks') is not supported.
+behaviour_info(_Other) ->
+ undefined.
+
+%%---------------------------------------------------------------------------
+%% gen_server2 callbacks
+%%---------------------------------------------------------------------------
+
+%% @private
+%% Records the process name for diagnostics, then delegates to the
+%% consumer module's init/1 and stores the returned consumer state.
+%% {stop, Reason} and ignore pass straight through to gen_server2.
+init([ConsumerModule, ExtraParams, Identity]) ->
+ ?store_proc_name(Identity),
+ case ConsumerModule:init(ExtraParams) of
+ {ok, MState} ->
+ {ok, #state{module = ConsumerModule, module_state = MState}};
+ {stop, Reason} ->
+ {stop, Reason};
+ ignore ->
+ ignore
+ end.
+
+%% @private
+%% gen_server2 priority hook: 'DOWN' monitor notifications (priority 1)
+%% are processed ahead of all other messages (priority 0).
+prioritise_info({'DOWN', _MRef, process, _Pid, _Info}, _Len, _State) -> 1;
+prioritise_info(_, _Len, _State) -> 0.
+
+%% @private
+%% Arbitrary call from call_consumer/2: delegated to the consumer
+%% module's handle_call/3. An {error, _, _} return stops this process
+%% and the same {error, Reason} tuple is delivered to the caller.
+handle_call({consumer_call, Msg}, From,
+ State = #state{module = ConsumerModule,
+ module_state = MState}) ->
+ case ConsumerModule:handle_call(Msg, From, MState) of
+ {noreply, NewMState} ->
+ {noreply, State#state{module_state = NewMState}};
+ {reply, Reply, NewMState} ->
+ {reply, Reply, State#state{module_state = NewMState}};
+ {error, Reason, NewMState} ->
+ {stop, {error, Reason}, {error, Reason},
+ State#state{module_state = NewMState}}
+ end;
+%% Method dispatch from call_consumer/3: routes each AMQP method to the
+%% corresponding behaviour callback. For basic.cancel, Args
+%% distinguishes a server-sent cancel (none) from a client-sent one
+%% (the sender's pid).
+handle_call({consumer_call, Method, Args}, _From,
+ State = #state{module = ConsumerModule,
+ module_state = MState}) ->
+ Return =
+ case Method of
+ #'basic.consume'{} ->
+ ConsumerModule:handle_consume(Method, Args, MState);
+ #'basic.consume_ok'{} ->
+ ConsumerModule:handle_consume_ok(Method, Args, MState);
+ #'basic.cancel'{} ->
+ case Args of
+ none -> %% server-sent
+ ConsumerModule:handle_server_cancel(Method, MState);
+ Pid when is_pid(Pid) -> %% client-sent
+ ConsumerModule:handle_cancel(Method, MState)
+ end;
+ #'basic.cancel_ok'{} ->
+ ConsumerModule:handle_cancel_ok(Method, Args, MState);
+ #'basic.deliver'{} ->
+ ConsumerModule:handle_deliver(Method, Args, MState)
+ end,
+ case Return of
+ {ok, NewMState} ->
+ {reply, ok, State#state{module_state = NewMState}};
+ {error, Reason, NewMState} ->
+ {stop, {error, Reason}, {error, Reason},
+ State#state{module_state = NewMState}}
+ end.
+
+%% @private
+%% Casts are not part of the consumer protocol; ignore them.
+handle_cast(_What, State) ->
+ {noreply, State}.
+
+%% @private
+%% Any other message lands in the consumer module's handle_info/2.
+handle_info(Info, State = #state{module_state = MState,
+ module = ConsumerModule}) ->
+ case ConsumerModule:handle_info(Info, MState) of
+ {ok, NewMState} ->
+ {noreply, State#state{module_state = NewMState}};
+ {error, Reason, NewMState} ->
+ {stop, {error, Reason}, State#state{module_state = NewMState}}
+ end.
+
+%% @private
+%% Forwards shutdown to the consumer implementation so it can release
+%% any resources it owns.
+terminate(Reason, #state{module = Mod, module_state = ModState}) ->
+    Mod:terminate(Reason, ModState).
+
+%% @private
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_main_reader).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([start_link/5]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-record(state, {sock,
+ connection,
+ channels_manager,
+ astate,
+ message = none %% none | {Type, Channel, Length}
+ }).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% Starts the socket-reader process and links it to the caller. Note
+%% that init/1 receives the arguments in a different order:
+%% [Sock, Connection, ConnName, ChMgr, AState].
+start_link(Sock, Connection, ChMgr, AState, ConnName) ->
+    InitArgs = [Sock, Connection, ConnName, ChMgr, AState],
+    gen_server:start_link(?MODULE, InitArgs, []).
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+%% Initialises the reader and requests the first asynchronous receive;
+%% all subsequent reads are requested from handle_info/2.
+%% NOTE(review): handle_error/2 returns {noreply, _} for the reasons
+%% closed / {refused, _} / {malformed_header, _}, so the {stop, _, _}
+%% match below would badmatch for those reasons — presumably
+%% rabbit_net:async_recv/3 can only fail here with posix-style reasons
+%% that hit handle_error's final clause; confirm.
+init([Sock, Connection, ConnName, ChMgr, AState]) ->
+ ?store_proc_name(ConnName),
+ State = #state{sock = Sock,
+ connection = Connection,
+ channels_manager = ChMgr,
+ astate = AState,
+ message = none},
+ case rabbit_net:async_recv(Sock, 0, infinity) of
+ {ok, _} -> {ok, State};
+ {error, Reason} -> {stop, Reason, _} = handle_error(Reason, State),
+ {stop, Reason}
+ end.
+
+%% Nothing to clean up here: the socket is owned and closed elsewhere.
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+%% The reader speaks no call/cast protocol; any such message is a
+%% programming error, so stop (without replying) and surface the
+%% offending message in the stop reason.
+handle_call(Request, From, St) ->
+    {stop, {unexpected_call, Request, From}, St}.
+
+handle_cast(Request, St) ->
+    {stop, {unexpected_cast, Request}, St}.
+
+%% Completion of an asynchronous receive on our socket: queue the next
+%% receive before parsing, then feed the bytes to the frame parser.
+handle_info({inet_async, Sock, _, {ok, Data}},
+ State = #state {sock = Sock}) ->
+ %% Latency hiding: Request next packet first, then process data
+ case rabbit_net:async_recv(Sock, 0, infinity) of
+ {ok, _} -> handle_data(Data, State);
+ {error, Reason} -> handle_error(Reason, State)
+ end;
+%% The receive itself failed; let handle_error/2 decide whether to
+%% notify the connection and keep going or stop the reader.
+handle_info({inet_async, Sock, _, {error, Reason}},
+ State = #state{sock = Sock}) ->
+ handle_error(Reason, State).
+
+%% Incremental AMQP frame parser. #state.message tracks the parse
+%% position across socket reads:
+%%   none                          - at a frame boundary
+%%   {Type, Channel, Length, Acc}  - header read, accumulating payload
+%%   {expecting_header, Acc}       - fewer than 7 header bytes so far
+%% Each complete frame is handed to process_frame/4 and parsing
+%% continues on the remaining bytes.
+handle_data(<<Type:8, Channel:16, Length:32, Payload:Length/binary, ?FRAME_END,
+ More/binary>>,
+ #state{message = none} = State) when
+ Type =:= ?FRAME_METHOD; Type =:= ?FRAME_HEADER;
+ Type =:= ?FRAME_BODY; Type =:= ?FRAME_HEARTBEAT ->
+ %% Optimisation for the direct match
+ handle_data(
+ More, process_frame(Type, Channel, Payload, State#state{message = none}));
+%% Complete 7-byte header but incomplete payload: remember where we are.
+handle_data(<<Type:8, Channel:16, Length:32, Data/binary>>,
+ #state{message = none} = State) when
+ Type =:= ?FRAME_METHOD; Type =:= ?FRAME_HEADER;
+ Type =:= ?FRAME_BODY; Type =:= ?FRAME_HEARTBEAT ->
+ {noreply, State#state{message = {Type, Channel, Length, Data}}};
+%% "AMQP" + 3 bytes at a frame boundary is the server refusing our
+%% protocol header; read the one remaining version byte synchronously.
+handle_data(<<"AMQP", A, B, C>>, #state{sock = Sock, message = none} = State) ->
+ {ok, <<D>>} = rabbit_net:sync_recv(Sock, 1),
+ handle_error({refused, {A, B, C, D}}, State);
+%% 7+ bytes that matched neither a known frame type nor "AMQP".
+handle_data(<<Malformed:7/binary, _Rest/binary>>,
+ #state{message = none} = State) ->
+ handle_error({malformed_header, Malformed}, State);
+%% Fewer than 7 bytes: wait for the rest of the header.
+handle_data(<<Data/binary>>, #state{message = none} = State) ->
+ {noreply, State#state{message = {expecting_header, Data}}};
+%% Mid-payload: append and retry the complete-frame match.
+handle_data(Data, #state{message = {Type, Channel, L, OldData}} = State) ->
+ case <<OldData/binary, Data/binary>> of
+ <<Payload:L/binary, ?FRAME_END, More/binary>> ->
+ handle_data(More,
+ process_frame(Type, Channel, Payload,
+ State#state{message = none}));
+ NotEnough ->
+ %% Read in more data from the socket
+ {noreply, State#state{message = {Type, Channel, L, NotEnough}}}
+ end;
+%% Mid-header: append and restart parsing from the boundary state.
+handle_data(Data,
+ #state{message = {expecting_header, Old}} = State) ->
+ handle_data(<<Old/binary, Data/binary>>, State#state{message = none});
+handle_data(<<>>, State) ->
+ {noreply, State}.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+%% Routes one complete frame. Heartbeats are only legal on channel 0;
+%% channel-0 method frames are processed inline (updating the assembler
+%% state), all other channels' frames are forwarded to the channels
+%% manager. Returns the (possibly updated) reader state.
+process_frame(Type, ChNumber, Payload,
+ State = #state{connection = Connection,
+ channels_manager = ChMgr,
+ astate = AState}) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, ?PROTOCOL) of
+ heartbeat when ChNumber /= 0 ->
+ amqp_gen_connection:server_misbehaved(
+ Connection,
+ #amqp_error{name = command_invalid,
+ explanation = "heartbeat on non-zero channel"}),
+ State;
+ %% Match heartbeats but don't do anything with them
+ heartbeat ->
+ State;
+ AnalyzedFrame when ChNumber /= 0 ->
+ amqp_channels_manager:pass_frame(ChMgr, ChNumber, AnalyzedFrame),
+ State;
+ AnalyzedFrame ->
+ State#state{astate = amqp_channels_manager:process_channel_frame(
+ AnalyzedFrame, 0, Connection, AState)}
+ end.
+
+%% Error policy: the three protocol-level conditions (clean close,
+%% server refusal, malformed header) are reported to the connection
+%% process, which owns the shutdown decision, and the reader stays
+%% alive. Any other socket error is reported and stops the reader.
+handle_error(closed, State = #state{connection = Conn}) ->
+ Conn ! socket_closed,
+ {noreply, State};
+handle_error({refused, Version}, State = #state{connection = Conn}) ->
+ Conn ! {refused, Version},
+ {noreply, State};
+handle_error({malformed_header, Version}, State = #state{connection = Conn}) ->
+ Conn ! {malformed_header, Version},
+ {noreply, State};
+handle_error(Reason, State = #state{connection = Conn}) ->
+ Conn ! {socket_error, Reason},
+ {stop, {socket_error, Reason}, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_network_connection).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(amqp_gen_connection).
+-export([init/0, terminate/2, connect/4, do/2, open_channel_args/1, i/2,
+ info_keys/0, handle_message/2, closing/3, channels_terminated/1]).
+
+-define(RABBIT_TCP_OPTS, [binary, {packet, 0}, {active,false}, {nodelay, true}]).
+-define(SOCKET_CLOSING_TIMEOUT, 1000).
+-define(HANDSHAKE_RECEIVE_TIMEOUT, 60000).
+-define(TCP_MAX_PACKET_SIZE, (16#4000000 + ?EMPTY_FRAME_SIZE - 1)).
+
+-record(state, {sock,
+ name,
+ heartbeat,
+ writer0,
+ frame_max,
+ type_sup,
+ closing_reason, %% undefined | Reason
+ waiting_socket_close = false}).
+
+-define(INFO_KEYS, [type, heartbeat, frame_max, sock, name]).
+
+%%---------------------------------------------------------------------------
+
+%% amqp_gen_connection callback: start with an empty state; the fields
+%% are filled in during connect/handshake.
+init() ->
+ {ok, #state{}}.
+
+%% Arguments handed to newly opened channels.
+open_channel_args(#state{sock = Sock, frame_max = FrameMax}) ->
+ [Sock, FrameMax].
+
+%% Sends a method on channel 0. After connection.close_ok the server is
+%% expected to drop the socket, so arm a timer that fires
+%% socket_closing_timeout in case it never does (see handle_message/2).
+do(#'connection.close_ok'{} = CloseOk, State) ->
+ erlang:send_after(?SOCKET_CLOSING_TIMEOUT, self(), socket_closing_timeout),
+ do2(CloseOk, State);
+do(Method, State) ->
+ do2(Method, State).
+
+%% The old-style catch is deliberate: on writer failure the error is
+%% surfaced asynchronously instead, as a {channel_exit, _, _} message.
+do2(Method, #state{writer0 = Writer}) ->
+ %% Catching because it expects the {channel_exit, _, _} message on error
+ catch rabbit_writer:send_command_sync(Writer, Method).
+
+%% Messages from the reader, heartbeater and timers. A socket_closed is
+%% a clean shutdown only when we were waiting for it (i.e. after a
+%% server-initiated close ran to completion); otherwise it is an error.
+handle_message(socket_closing_timeout,
+ State = #state{closing_reason = Reason}) ->
+ {stop, {socket_closing_timeout, Reason}, State};
+handle_message(socket_closed, State = #state{waiting_socket_close = true,
+ closing_reason = Reason}) ->
+ {stop, {shutdown, Reason}, State};
+handle_message(socket_closed, State = #state{waiting_socket_close = false}) ->
+ {stop, socket_closed_unexpectedly, State};
+handle_message({socket_error, _} = SocketError, State) ->
+ {stop, SocketError, State};
+handle_message({channel_exit, 0, Reason}, State) ->
+ {stop, {channel0_died, Reason}, State};
+handle_message(heartbeat_timeout, State) ->
+ {stop, heartbeat_timeout, State};
+handle_message(closing_timeout, State = #state{closing_reason = Reason}) ->
+ {stop, Reason, State};
+%% A late {Ref, {error, _}} reply from prim_inet can arrive after the
+%% socket is closed; treat it like the corresponding socket condition.
+%% see http://erlang.org/pipermail/erlang-bugs/2012-June/002933.html
+handle_message({Ref, {error, Reason}},
+ State = #state{waiting_socket_close = Waiting,
+ closing_reason = CloseReason})
+ when is_reference(Ref) ->
+ {stop, case {Reason, Waiting} of
+ {closed, true} -> {shutdown, CloseReason};
+ {closed, false} -> socket_closed_unexpectedly;
+ {_, _} -> {socket_error, Reason}
+ end, State}.
+
+%% amqp_gen_connection callback: remember why we are closing; the
+%% reason is consulted once all channels have terminated.
+closing(_ChannelCloseType, Reason, State) ->
+ {ok, State#state{closing_reason = Reason}}.
+
+%% After a server-initiated close we reply with close_ok and then expect
+%% the server to drop the socket, so flag that a socket_closed message
+%% is now legitimate (see handle_message/2).
+channels_terminated(State = #state{closing_reason =
+ {server_initiated_close, _, _}}) ->
+ {ok, State#state{waiting_socket_close = true}};
+channels_terminated(State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+%% Returns the value of a single info item; throws {bad_argument, Item}
+%% for anything not listed in ?INFO_KEYS.
+i(Item, St) ->
+    case Item of
+        type      -> network;
+        heartbeat -> St#state.heartbeat;
+        frame_max -> St#state.frame_max;
+        sock      -> St#state.sock;
+        name      -> St#state.name;
+        _Unknown  -> throw({bad_argument, Item})
+    end.
+
+%% The info items i/2 understands.
+info_keys() ->
+    ?INFO_KEYS.
+
+%%---------------------------------------------------------------------------
+%% Handshake
+%%---------------------------------------------------------------------------
+
+%% amqp_gen_connection callback: resolve the host and attempt the
+%% connection with the first address/family returned (gethostaddr/1
+%% orders results by the configured family preference).
+connect(AmqpParams = #amqp_params_network{host = Host}, SIF, TypeSup, State) ->
+ case gethostaddr(Host) of
+ [] -> {error, unknown_host};
+ [AF|_] -> do_connect(
+ AF, AmqpParams, SIF, State#state{type_sup = TypeSup})
+ end.
+
+%% Plain TCP connect (ssl_options = none): take a file-handle slot,
+%% connect, and run the handshake on the raw socket.
+do_connect({Addr, Family},
+ AmqpParams = #amqp_params_network{ssl_options = none,
+ port = Port,
+ connection_timeout = Timeout,
+ socket_options = ExtraOpts},
+ SIF, State) ->
+ obtain(),
+ case gen_tcp:connect(Addr, Port,
+ [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts,
+ Timeout) of
+ {ok, Sock} -> try_handshake(AmqpParams, SIF,
+ State#state{sock = Sock});
+ {error, _} = E -> E
+ end;
+%% SSL connect: ensure the SSL application stack is running, connect
+%% over TCP, then upgrade the socket; the handshake runs over a
+%% #ssl_socket{} wrapper carrying both the SSL and TCP sockets.
+do_connect({Addr, Family},
+ AmqpParams = #amqp_params_network{ssl_options = SslOpts,
+ port = Port,
+ connection_timeout = Timeout,
+ socket_options = ExtraOpts},
+ SIF, State) ->
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ obtain(),
+ case gen_tcp:connect(Addr, Port,
+ [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts,
+ Timeout) of
+ {ok, Sock} ->
+ case ssl:connect(Sock, SslOpts) of
+ {ok, SslSock} ->
+ RabbitSslSock = #ssl_socket{ssl = SslSock, tcp = Sock},
+ try_handshake(AmqpParams, SIF,
+ State#state{sock = RabbitSslSock});
+ {error, _} = E ->
+ E
+ end;
+ {error, _} = E ->
+ E
+ end.
+
+%% Returns the address families to try, in preference order, governed
+%% by the amqp_client 'prefer_ipv6' application environment flag.
+%% Previously this crashed with case_clause when the flag was unset
+%% (application:get_env/2 returns 'undefined' if the application
+%% environment is not loaded); now it falls back to preferring IPv4,
+%% matching the {ok, false} behaviour.
+inet_address_preference() ->
+    case application:get_env(amqp_client, prefer_ipv6) of
+        {ok, true} -> [inet6, inet];
+        _          -> [inet, inet6]
+    end.
+
+%% Resolves Host once per candidate address family, in preference
+%% order, and returns the successful resolutions as {IP, Family} pairs
+%% (failures are silently dropped).
+gethostaddr(Host) ->
+    Families = inet_address_preference(),
+    Attempts = [{Family, inet:getaddr(Host, Family)} || Family <- Families],
+    [{IP, Family} || {Family, {ok, IP}} <- Attempts].
+
+%% Labels the connection (for process naming / diagnostics) and runs
+%% the handshake, converting any exit raised along the way (see the
+%% exit calls in check_version, login and handshake_recv) into an
+%% {error, Reason} return.
+try_handshake(AmqpParams, SIF, State = #state{sock = Sock}) ->
+ Name = case rabbit_net:connection_string(Sock, outbound) of
+ {ok, Str} -> list_to_binary(Str);
+ {error, _} -> <<"unknown">>
+ end,
+ try handshake(AmqpParams, SIF,
+ State#state{name = <<"client ", Name/binary>>}) of
+ Return -> Return
+ catch exit:Reason -> {error, Reason}
+ end.
+
+%% Sends the AMQP protocol header, brings up writer/channel-manager
+%% infrastructure, then performs the method-level handshake.
+handshake(AmqpParams, SIF, State0 = #state{sock = Sock}) ->
+ ok = rabbit_net:send(Sock, ?PROTOCOL_HEADER),
+ network_handshake(AmqpParams, start_infrastructure(SIF, State0)).
+
+%% SIF is a start-infrastructure fun supplied by the caller; it yields
+%% the channels manager and the channel-0 writer.
+start_infrastructure(SIF, State = #state{sock = Sock, name = Name}) ->
+ {ok, ChMgr, Writer} = SIF(Sock, Name),
+ {ChMgr, State#state{writer0 = Writer}}.
+
+%% Runs the connection negotiation: start -> (auth) -> tune ->
+%% tune_ok + open -> open_ok. A failed login or a rejected open yields
+%% a {closing, ...} tuple rather than an exit, so the caller can shut
+%% down gracefully.
+network_handshake(AmqpParams = #amqp_params_network{virtual_host = VHost},
+ {ChMgr, State0}) ->
+ Start = #'connection.start'{server_properties = ServerProperties,
+ mechanisms = Mechanisms} =
+ handshake_recv('connection.start'),
+ ok = check_version(Start),
+ case login(AmqpParams, Mechanisms, State0) of
+ {closing, #amqp_error{}, _Error} = Err ->
+ do(#'connection.close_ok'{}, State0),
+ Err;
+ Tune ->
+ {TuneOk, ChannelMax, State1} = tune(Tune, AmqpParams, State0),
+ do2(TuneOk, State1),
+ do2(#'connection.open'{virtual_host = VHost}, State1),
+ Params = {ServerProperties, ChannelMax, ChMgr, State1},
+ case handshake_recv('connection.open_ok') of
+ #'connection.open_ok'{} -> {ok, Params};
+ {closing, #amqp_error{} = AmqpError, Error} -> {closing, Params,
+ AmqpError, Error}
+ end
+ end.
+
+%% Accepts only the protocol version this client speaks.
+check_version(#'connection.start'{version_major = ?PROTOCOL_VERSION_MAJOR,
+ version_minor = ?PROTOCOL_VERSION_MINOR}) ->
+ ok;
+%% NOTE(review): a server announcing major=8/minor=0 is reported as
+%% mismatch {0, 8} (arguments swapped relative to the clause below) —
+%% presumably deliberate, naming the legacy protocol "0-8"; confirm.
+check_version(#'connection.start'{version_major = 8,
+ version_minor = 0}) ->
+ exit({protocol_version_mismatch, 0, 8});
+check_version(#'connection.start'{version_major = Major,
+ version_minor = Minor}) ->
+ exit({protocol_version_mismatch, Major, Minor}).
+
+%% Negotiates channel_max, heartbeat and frame_max. For each value,
+%% 0 means "no limit", so when either side says 0 the other side's
+%% value (the max) wins; otherwise the smaller of the two is used.
+%% Starts the heartbeater and returns the tune_ok to send, the agreed
+%% channel_max, and the updated state.
+tune(#'connection.tune'{channel_max = ServerChannelMax,
+ frame_max = ServerFrameMax,
+ heartbeat = ServerHeartbeat},
+ #amqp_params_network{channel_max = ClientChannelMax,
+ frame_max = ClientFrameMax,
+ heartbeat = ClientHeartbeat}, State) ->
+ [ChannelMax, Heartbeat, FrameMax] =
+ lists:zipwith(fun (Client, Server) when Client =:= 0; Server =:= 0 ->
+ lists:max([Client, Server]);
+ (Client, Server) ->
+ lists:min([Client, Server])
+ end,
+ [ClientChannelMax, ClientHeartbeat, ClientFrameMax],
+ [ServerChannelMax, ServerHeartbeat, ServerFrameMax]),
+ %% If we attempt to recv > 64Mb, inet_drv will return enomem, so
+ %% we cap the max negotiated frame size accordingly. Note that
+ %% since we receive the frame header separately, we can actually
+ %% cope with frame sizes of 64M + ?EMPTY_FRAME_SIZE - 1.
+ CappedFrameMax = case FrameMax of
+ 0 -> ?TCP_MAX_PACKET_SIZE;
+ _ -> lists:min([FrameMax, ?TCP_MAX_PACKET_SIZE])
+ end,
+ NewState = State#state{heartbeat = Heartbeat, frame_max = CappedFrameMax},
+ start_heartbeat(NewState),
+ {#'connection.tune_ok'{channel_max = ChannelMax,
+ frame_max = CappedFrameMax,
+ heartbeat = Heartbeat}, ChannelMax, NewState}.
+
+%% Starts the heartbeat sender/monitor pair: SendFun emits a heartbeat
+%% frame (best-effort, hence the catch); ReceiveFun notifies this
+%% connection process when the server has gone quiet for too long.
+start_heartbeat(#state{sock = Sock,
+ name = Name,
+ heartbeat = Heartbeat,
+ type_sup = Sup}) ->
+ Frame = rabbit_binary_generator:build_heartbeat_frame(),
+ SendFun = fun () -> catch rabbit_net:send(Sock, Frame) end,
+ Connection = self(),
+ ReceiveFun = fun () -> Connection ! heartbeat_timeout end,
+ rabbit_heartbeat:start(
+ Sup, Sock, Name, Heartbeat, SendFun, Heartbeat, ReceiveFun).
+
+%% Picks the first configured client auth mechanism whose name appears
+%% in the server's space-separated mechanism list, sends start_ok with
+%% the mechanism's initial response, and enters the challenge loop.
+%% Exits if client and server share no mechanism.
+login(Params = #amqp_params_network{auth_mechanisms = ClientMechanisms,
+ client_properties = UserProps},
+ ServerMechanismsStr, State) ->
+ ServerMechanisms = string:tokens(binary_to_list(ServerMechanismsStr), " "),
+ case [{N, S, F} || F <- ClientMechanisms,
+ {N, S} <- [F(none, Params, init)],
+ lists:member(binary_to_list(N), ServerMechanisms)] of
+ [{Name, MState0, Mech}|_] ->
+ {Resp, MState1} = Mech(none, Params, MState0),
+ StartOk = #'connection.start_ok'{
+ client_properties = client_properties(UserProps),
+ mechanism = Name,
+ response = Resp},
+ do2(StartOk, State),
+ login_loop(Mech, MState1, Params, State);
+ [] ->
+ exit({no_suitable_auth_mechanism, ServerMechanisms})
+ end.
+
+%% Answers connection.secure challenges until the server either tunes
+%% (success: the tune method is returned) or closes with ACCESS_REFUSED
+%% (failure: a {closing, ...} tuple is returned).
+login_loop(Mech, MState0, Params, State) ->
+ case handshake_recv('connection.tune') of
+ Tune = #'connection.tune'{} ->
+ Tune;
+ #'connection.secure'{challenge = Challenge} ->
+ {Resp, MState1} = Mech(Challenge, Params, MState0),
+ do2(#'connection.secure_ok'{response = Resp}, State),
+ login_loop(Mech, MState1, Params, State);
+ #'connection.close'{reply_code = ?ACCESS_REFUSED,
+ reply_text = ExplanationBin} ->
+ Explanation = binary_to_list(ExplanationBin),
+ {closing,
+ #amqp_error{name = access_refused,
+ explanation = Explanation},
+ {error, {auth_failure, Explanation}}}
+ end.
+
+%% Builds the client-properties table sent in start_ok: the defaults
+%% below, overridden key-by-key by any user-supplied properties.
+client_properties(UserProperties) ->
+ {ok, Vsn} = application:get_key(amqp_client, vsn),
+ Default = [{<<"product">>, longstr, <<"RabbitMQ">>},
+ {<<"version">>, longstr, list_to_binary(Vsn)},
+ {<<"platform">>, longstr, <<"Erlang">>},
+ {<<"copyright">>, longstr,
+ <<"Copyright (c) 2007-2014 GoPivotal, Inc.">>},
+ {<<"information">>, longstr,
+ <<"Licensed under the MPL. "
+ "See http://www.rabbitmq.com/">>},
+ {<<"capabilities">>, table, ?CLIENT_CAPABILITIES}],
+ lists:foldl(fun({K, _, _} = Tuple, Acc) ->
+ lists:keystore(K, 1, Acc, Tuple)
+ end, Default, UserProperties).
+
+%% Selective receive used only during the handshake, before the normal
+%% gen_server loop takes over. Channel-0 methods arrive here as
+%% '$gen_cast' {method, ...} messages. While expecting connection.tune,
+%% connection.secure (auth challenge) and connection.close (refusal)
+%% are also legitimate. Hard failures exit (converted to {error, _} by
+%% try_handshake/3); an unexpected method while awaiting open_ok is
+%% returned as a {closing, ...} tuple so the caller can close politely.
+handshake_recv(Expecting) ->
+ receive
+ {'$gen_cast', {method, Method, none, noflow}} ->
+ case {Expecting, element(1, Method)} of
+ {E, M} when E =:= M ->
+ Method;
+ {'connection.tune', 'connection.secure'} ->
+ Method;
+ {'connection.tune', 'connection.close'} ->
+ Method;
+ {'connection.open_ok', _} ->
+ {closing,
+ #amqp_error{name = command_invalid,
+ explanation = "was expecting "
+ "connection.open_ok"},
+ {error, {unexpected_method, Method,
+ {expecting, Expecting}}}};
+ _ ->
+ throw({unexpected_method, Method,
+ {expecting, Expecting}})
+ end;
+ socket_closed ->
+ %% A disconnect during auth is interpreted as a refusal.
+ case Expecting of
+ 'connection.tune' -> exit({auth_failure, "Disconnected"});
+ 'connection.open_ok' -> exit(access_refused);
+ _ -> exit({socket_closed_unexpectedly,
+ Expecting})
+ end;
+ {socket_error, _} = SocketError ->
+ exit({SocketError, {expecting, Expecting}});
+ {refused, Version} ->
+ exit({server_refused_connection, Version});
+ {malformed_header, All} ->
+ exit({server_sent_malformed_header, All});
+ heartbeat_timeout ->
+ exit(heartbeat_timeout);
+ Other ->
+ throw({handshake_recv_unexpected_message, Other})
+ after ?HANDSHAKE_RECEIVE_TIMEOUT ->
+ case Expecting of
+ 'connection.open_ok' ->
+ {closing,
+ #amqp_error{name = internal_error,
+ explanation = "handshake timed out waiting "
+ "connection.open_ok"},
+ {error, handshake_receive_timed_out}};
+ _ ->
+ exit(handshake_receive_timed_out)
+ end
+ end.
+
+%% Reserves a file-handle-cache slot when running alongside the broker;
+%% in a standalone client the file_handle_cache module is not loaded
+%% and this is a no-op.
+obtain() ->
+    case code:is_loaded(file_handle_cache) of
+        false     -> ok;
+        {file, _} -> file_handle_cache:obtain()
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @doc This module allows the simple execution of an asynchronous RPC over
+%% AMQP. It frees a client programmer from the necessity of having to deal
+%% with AMQP plumbing. Note that this module does not handle any data
+%% encoding, so it is up to the caller to marshall and unmarshall message
+%% payloads accordingly.
+-module(amqp_rpc_client).
+
+-include("amqp_client.hrl").
+
+-behaviour(gen_server).
+
+-export([start/2, start_link/2, stop/1]).
+-export([call/2]).
+-export([init/1, terminate/2, code_change/3, handle_call/3,
+ handle_cast/2, handle_info/2]).
+
+-record(state, {channel,
+ reply_queue,
+ exchange,
+ routing_key,
+ continuations = dict:new(),
+ correlation_id = 0}).
+
+%%--------------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------------
+
+%% @spec (Connection, Queue) -> RpcClient
+%% where
+%%      Connection = pid()
+%%      Queue = binary()
+%%      RpcClient = pid()
+%% @doc Starts a new RPC client instance that sends requests to a
+%% specified queue. Returns the pid of the RPC client process, which can
+%% be used to invoke RPCs and to stop the client.
+start(Connection, Queue) ->
+    {ok, Client} = gen_server:start(?MODULE, [Connection, Queue], []),
+    Client.
+
+%% @spec (Connection, Queue) -> RpcClient
+%% where
+%%      Connection = pid()
+%%      Queue = binary()
+%%      RpcClient = pid()
+%% @doc Starts, and links to, a new RPC client instance that sends
+%% requests to a specified queue. Returns the pid of the RPC client
+%% process, which can be used to invoke RPCs and to stop the client.
+start_link(Connection, Queue) ->
+    {ok, Client} = gen_server:start_link(?MODULE, [Connection, Queue], []),
+    Client.
+
+%% @spec (RpcClient) -> ok
+%% where
+%%      RpcClient = pid()
+%% @doc Stops an existing RPC client.
+stop(Client) ->
+    gen_server:call(Client, stop, infinity).
+
+%% @spec (RpcClient, Payload) -> ok
+%% where
+%%      RpcClient = pid()
+%%      Payload = binary()
+%% @doc Invokes an RPC. The caller of this function is responsible for
+%% encoding the request and decoding the response.
+call(Client, Payload) ->
+    gen_server:call(Client, {call, Payload}, infinity).
+
+%%--------------------------------------------------------------------------
+%% Plumbing
+%%--------------------------------------------------------------------------
+
+%% Sets up a reply queue for this client to listen on; the broker names
+%% the queue (empty queue.declare) and the name is kept in the state.
+setup_reply_queue(State = #state{channel = Channel}) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ State#state{reply_queue = Q}.
+
+%% Registers this RPC client instance as a consumer to handle rpc responses
+setup_consumer(#state{channel = Channel, reply_queue = Q}) ->
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}).
+
+%% Publishes to the broker, stores the From address against
+%% the correlation id and increments the correlation id for
+%% the next request. The correlation id is base64-encoded so it
+%% round-trips safely through the message properties.
+%% NOTE(review): mandatory = true makes the broker return unroutable
+%% requests, but no basic.return handler is visible in this module —
+%% confirm returns are handled (or intentionally dropped) elsewhere.
+publish(Payload, From,
+ State = #state{channel = Channel,
+ reply_queue = Q,
+ exchange = X,
+ routing_key = RoutingKey,
+ correlation_id = CorrelationId,
+ continuations = Continuations}) ->
+ EncodedCorrelationId = base64:encode(<<CorrelationId:64>>),
+ Props = #'P_basic'{correlation_id = EncodedCorrelationId,
+ content_type = <<"application/octet-stream">>,
+ reply_to = Q},
+ Publish = #'basic.publish'{exchange = X,
+ routing_key = RoutingKey,
+ mandatory = true},
+ amqp_channel:call(Channel, Publish, #amqp_msg{props = Props,
+ payload = Payload}),
+ State#state{correlation_id = CorrelationId + 1,
+ continuations = dict:store(EncodedCorrelationId, From, Continuations)}.
+
+%%--------------------------------------------------------------------------
+%% gen_server callbacks
+%%--------------------------------------------------------------------------
+
+%% Sets up a reply queue and consumer within an existing channel.
+%% Publishing to the nameless default exchange (exchange = <<>>) with
+%% RoutingKey as the routing key addresses the request queue directly.
+%% @private
+init([Connection, RoutingKey]) ->
+ {ok, Channel} = amqp_connection:open_channel(
+ Connection, {amqp_direct_consumer, [self()]}),
+ InitialState = #state{channel = Channel,
+ exchange = <<>>,
+ routing_key = RoutingKey},
+ State = setup_reply_queue(InitialState),
+ setup_consumer(State),
+ {ok, State}.
+
+%% Closes the channel this gen_server instance started
+%% @private
+terminate(_Reason, #state{channel = Channel}) ->
+ amqp_channel:close(Channel),
+ ok.
+
+%% Handle the application initiated stop by just stopping this gen server
+%% @private
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+
+%% @private
+%% An RPC invocation: publish the request and do NOT reply yet — the
+%% reply is sent from handle_info/2 (via gen_server:reply/2) when the
+%% matching response is delivered, keeping the caller blocked meanwhile.
+handle_call({call, Payload}, From, State) ->
+ NewState = publish(Payload, From, State),
+ {noreply, NewState}.
+
+%% @private
+%% Casts are not part of this client's protocol; ignore them.
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%% @private
+%% Consumer bookkeeping methods from the direct consumer are ignored...
+handle_info({#'basic.consume'{}, _Pid}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.cancel'{}, State) ->
+ {noreply, State};
+
+%% @private
+%% ...except cancel_ok: our reply-queue subscription is gone, so stop.
+handle_info(#'basic.cancel_ok'{}, State) ->
+ {stop, normal, State};
+
+%% @private
+%% A response: wake the blocked caller identified by the correlation
+%% id, ack the delivery, and drop the continuation. dict:fetch/2
+%% crashes on an unknown correlation id, which is intentional — such a
+%% delivery would indicate a protocol error.
+handle_info({#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = #'P_basic'{correlation_id = Id},
+ payload = Payload}},
+ State = #state{continuations = Conts, channel = Channel}) ->
+ From = dict:fetch(Id, Conts),
+ gen_server:reply(From, Payload),
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ {noreply, State#state{continuations = dict:erase(Id, Conts) }}.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @doc This is a utility module that is used to expose an arbitrary function
+%% via an asynchronous RPC over AMQP mechanism. It frees the implementor of
+%% a simple function from having to plumb this into AMQP. Note that the
+%% RPC server does not handle any data encoding, so it is up to the callback
+%% function to marshall and unmarshall message payloads accordingly.
+-module(amqp_rpc_server).
+
+-behaviour(gen_server).
+
+-include("amqp_client.hrl").
+
+-export([init/1, terminate/2, code_change/3, handle_call/3,
+ handle_cast/2, handle_info/2]).
+-export([start/3, start_link/3]).
+-export([stop/1]).
+
+-record(state, {channel,
+ handler}).
+
+%%--------------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------------
+
+%% @spec (Connection, Queue, RpcHandler) -> RpcServer
+%% where
+%% Connection = pid()
+%% Queue = binary()
+%% RpcHandler = function()
+%% RpcServer = pid()
+%% @doc Starts a new RPC server instance that receives requests via a
+%% specified queue and dispatches them to a specified handler function. This
+%% function returns the pid of the RPC server that can be used to stop the
+%% server.
start(Connection, Queue, Fun) ->
    %% Crash the caller if the server fails to start.
    StartResult = gen_server:start(?MODULE, [Connection, Queue, Fun], []),
    {ok, ServerPid} = StartResult,
    ServerPid.
+
+%% @spec (Connection, Queue, RpcHandler) -> RpcServer
+%% where
+%% Connection = pid()
+%% Queue = binary()
+%% RpcHandler = function()
+%% RpcServer = pid()
+%% @doc Starts, and links to, a new RPC server instance that receives
+%% requests via a specified queue and dispatches them to a specified
+%% handler function. This function returns the pid of the RPC server that
+%% can be used to stop the server.
start_link(Connection, Queue, Fun) ->
    %% Same as start/3 but links the server to the caller.
    StartResult = gen_server:start_link(?MODULE, [Connection, Queue, Fun], []),
    {ok, ServerPid} = StartResult,
    ServerPid.
+
+%% @spec (RpcServer) -> ok
+%% where
+%% RpcServer = pid()
%% @doc Stops an existing RPC server.
stop(ServerPid) ->
    %% Synchronous: waits indefinitely for the server to shut down.
    gen_server:call(ServerPid, stop, infinity).
+
+%%--------------------------------------------------------------------------
+%% gen_server callbacks
+%%--------------------------------------------------------------------------
+
%% @private
%% Opens a dedicated channel (with this process as the direct
%% consumer), declares the request queue and starts consuming from it.
%% Deliveries then arrive as messages handled in handle_info/2.
init([Connection, Q, Fun]) ->
    {ok, Channel} = amqp_connection:open_channel(
                        Connection, {amqp_direct_consumer, [self()]}),
    amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
    amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
    {ok, #state{channel = Channel, handler = Fun} }.
+
%% @private
%% Explicit shutdown request sent as a plain message.
handle_info(shutdown, State) ->
    {stop, normal, State};

%% @private
handle_info({#'basic.consume'{}, _}, State) ->
    {noreply, State};

%% @private
handle_info(#'basic.consume_ok'{}, State) ->
    {noreply, State};

%% @private
handle_info(#'basic.cancel'{}, State) ->
    {noreply, State};

%% @private
%% Cancellation confirmed: no more requests will arrive, so stop.
handle_info(#'basic.cancel_ok'{}, State) ->
    {stop, normal, State};

%% @private
%% A request was delivered: run the handler on the raw payload,
%% publish its result to the reply_to queue (via the default
%% exchange) tagged with the caller's correlation id, then ack.
%% NOTE(review): a long-running handler blocks this server; requests
%% are processed strictly one at a time. If reply_to is unset, Q is
%% undefined and the publish will presumably fail — confirm callers
%% always set reply_to.
handle_info({#'basic.deliver'{delivery_tag = DeliveryTag},
             #amqp_msg{props = Props, payload = Payload}},
            State = #state{handler = Fun, channel = Channel}) ->
    #'P_basic'{correlation_id = CorrelationId,
               reply_to = Q} = Props,
    Response = Fun(Payload),
    Properties = #'P_basic'{correlation_id = CorrelationId},
    Publish = #'basic.publish'{exchange = <<>>,
                               routing_key = Q,
                               mandatory = true},
    amqp_channel:call(Channel, Publish, #amqp_msg{props = Properties,
                                                  payload = Response}),
    amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
    {noreply, State};

%% @private
%% A monitored process died — presumably the channel/connection via
%% the direct consumer's monitor (confirm); ignored here.
handle_info({'DOWN', _MRef, process, _Pid, _Info}, State) ->
    {noreply, State}.
+
%% @private
%% The only synchronous request is stop/1.
handle_call(stop, _Caller, State) ->
    {stop, normal, ok, State}.
+
+%%--------------------------------------------------------------------------
+%% Rest of the gen_server callbacks
+%%--------------------------------------------------------------------------
+
%% @private
%% No casts are part of this server's protocol; drop them.
handle_cast(_Ignored, State0) ->
    {noreply, State0}.
+
%% Closes the channel this gen_server instance started
%% (opened in init/1; the connection itself belongs to the caller and
%% is left alone).
%% @private
terminate(_Reason, #state{channel = Channel}) ->
    amqp_channel:close(Channel),
    ok.
+
%% @private
%% Hot-upgrade hook: the state layout is unchanged across versions.
code_change(_OldVsn, UnchangedState, _Unused) ->
    {ok, UnchangedState}.
--- /dev/null
%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @doc This module is an implementation of the amqp_gen_consumer
+%% behaviour and can be used as part of the Consumer parameter when
+%% opening AMQP channels. This is the default implementation selected
+%% by channel. <br/>
+%% <br/>
+%% The Consumer parameter for this implementation is {{@module}, []@}<br/>
+%% This consumer implementation keeps track of consumer tags and sends
+%% the subscription-relevant messages to the registered consumers, according
+%% to an internal tag dictionary.<br/>
+%% <br/>
+%% Send a #basic.consume{} message to the channel to subscribe a
+%% consumer to a queue and send a #basic.cancel{} message to cancel a
+%% subscription.<br/>
+%% <br/>
+%% The channel will send to the relevant registered consumers the
+%% basic.consume_ok, basic.cancel_ok, basic.cancel and basic.deliver messages
+%% received from the server.<br/>
+%% <br/>
+%% If a consumer is not registered for a given consumer tag, the message
+%% is sent to the default consumer registered with
+%% {@module}:register_default_consumer. If there is no default consumer
+%% registered in this case, an exception occurs and the channel is abruptly
+%% terminated.<br/>
+-module(amqp_selective_consumer).
+
+-include("amqp_gen_consumer_spec.hrl").
+
+-behaviour(amqp_gen_consumer).
+
+-export([register_default_consumer/2]).
+-export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3,
+ handle_cancel/2, handle_server_cancel/2, handle_deliver/3,
+ handle_info/2, handle_call/3, terminate/2]).
+
+-record(state, {consumers = dict:new(), %% Tag -> ConsumerPid
+ unassigned = undefined, %% Pid
+ monitors = dict:new(), %% Pid -> {Count, MRef}
+ default_consumer = none}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% @spec (ChannelPid, ConsumerPid) -> ok
+%% where
+%% ChannelPid = pid()
+%% ConsumerPid = pid()
+%% @doc This function registers a default consumer with the channel. A
+%% default consumer is used when a subscription is made via
+%% amqp_channel:call(ChannelPid, #'basic.consume'{}) (rather than
+%% {@module}:subscribe/3) and hence there is no consumer pid
+%% registered with the consumer tag. In this case, the relevant
+%% deliveries will be sent to the default consumer.
register_default_consumer(ChannelPid, ConsumerPid) ->
    Request = {register_default_consumer, ConsumerPid},
    amqp_channel:call_consumer(ChannelPid, Request).
+
+%%---------------------------------------------------------------------------
+%% amqp_gen_consumer callbacks
+%%---------------------------------------------------------------------------
+
%% @private
%% Start with empty consumer/monitor tables and no default consumer
%% (the #state{} field defaults).
init([]) ->
    InitialState = #state{},
    {ok, InitialState}.
+
%% @private
%% Handle a subscription request from the channel. A tag may only be
%% pre-assigned if it is an untaken binary; with nowait a non-empty
%% tag is mandatory, since no consume_ok will follow to assign one.
handle_consume(#'basic.consume'{consumer_tag = Tag,
                                nowait = NoWait},
               Pid, State = #state{consumers = Consumers,
                                   monitors = Monitors}) ->
    %% Use byte_size/1 (not the generic size/1) on binaries; the
    %% original "size(Tag) >= 0" check was vacuous for binaries, so
    %% is_binary/1 alone is equivalent.
    Result = case NoWait of
                 true when Tag =:= undefined orelse byte_size(Tag) =:= 0 ->
                     no_consumer_tag_specified;
                 _ when is_binary(Tag) ->
                     case resolve_consumer(Tag, State) of
                         {consumer, _} -> consumer_tag_in_use;
                         _             -> ok
                     end;
                 _ ->
                     ok
             end,
    case {Result, NoWait} of
        {ok, true} ->
            %% No consume_ok will follow: register the consumer now.
            {ok, State#state
             {consumers = dict:store(Tag, Pid, Consumers),
              monitors = add_to_monitor_dict(Pid, Monitors)}};
        {ok, false} ->
            %% Registration completes in handle_consume_ok/3 once the
            %% server assigns the tag.
            {ok, State#state{unassigned = Pid}};
        {Err, true} ->
            {error, Err, State};
        {_Err, false} ->
            %% Don't do anything (don't override existing
            %% consumers), the server will close the channel with an error.
            {ok, State}
    end.
+
%% @private
%% The server confirmed a subscription whose tag it chose: bind the
%% pending pid (stashed in `unassigned' by handle_consume/3) to the
%% server-assigned tag, take a monitor reference on it, and forward
%% the consume_ok to the consumer itself.
handle_consume_ok(BasicConsumeOk, _BasicConsume,
                  State = #state{unassigned = Pid,
                                 consumers = Consumers,
                                 monitors = Monitors})
  when is_pid(Pid) ->
    State1 =
        State#state{
            consumers = dict:store(tag(BasicConsumeOk), Pid, Consumers),
            monitors = add_to_monitor_dict(Pid, Monitors),
            unassigned = undefined},
    deliver(BasicConsumeOk, State1),
    {ok, State1}.
+
%% @private
%% We sent a basic.cancel.
%% With nowait there is no cancel_ok to route later, so a default
%% consumer must exist to receive any in-flight deliveries;
%% deliberately crash otherwise.
handle_cancel(#'basic.cancel'{nowait = true},
              #state{default_consumer = none}) ->
    exit(cancel_nowait_requires_default_consumer);

%% nowait: forget the consumer immediately (no cancel_ok is coming);
%% otherwise the bookkeeping happens in handle_cancel_ok/3.
handle_cancel(Cancel = #'basic.cancel'{nowait = NoWait}, State) ->
    State1 = case NoWait of
                 true -> do_cancel(Cancel, State);
                 false -> State
             end,
    {ok, State1}.
+
%% @private
%% We sent a basic.cancel and now receive the ok: drop the consumer
%% from the registry, but route the cancel_ok using the registry as
%% it was before removal so it still reaches that consumer.
handle_cancel_ok(CancelOk, _Cancel, State) ->
    CleanedState = do_cancel(CancelOk, State),
    deliver(CancelOk, State),
    {ok, CleanedState}.
+
%% @private
%% The broker cancelled the subscription on its own initiative:
%% unregister the consumer, then notify it via the pre-removal state.
handle_server_cancel(Cancel = #'basic.cancel'{nowait = true}, State) ->
    StateAfter = do_cancel(Cancel, State),
    deliver(Cancel, State),
    {ok, StateAfter}.
+
%% @private
%% Route a delivery (method plus content) to the consumer registered
%% for its tag, or to the default consumer.
handle_deliver(Method, Content, State) ->
    deliver(Method, Content, State),
    {ok, State}.
+
%% @private
%% A monitored process died. If it was a registered consumer, purge
%% its monitor entry and every tag pointing at it; if it was the
%% default consumer, unregister that; otherwise it can only be a
%% not-yet-assigned consumer that died before its consume_ok.
handle_info({'DOWN', _MRef, process, Pid, _Info},
            State = #state{monitors = Monitors,
                           consumers = Consumers,
                           default_consumer = DConsumer }) ->
    case dict:find(Pid, Monitors) of
        {ok, _CountMRef} ->
            {ok, State#state{monitors = dict:erase(Pid, Monitors),
                             consumers =
                                 dict:filter(
                                   fun (_, Pid1) when Pid1 =:= Pid -> false;
                                       (_, _) -> true
                                   end, Consumers)}};
        error ->
            case Pid of
                DConsumer -> {ok, State#state{
                                    monitors = dict:erase(Pid, Monitors),
                                    default_consumer = none}};
                _ -> {ok, State} %% unnamed consumer went down
                                 %% before receiving consume_ok
            end
    end.
+
%% @private
%% Replace the default consumer: monitor the new pid and, if another
%% default was registered, release one monitor reference to it.
handle_call({register_default_consumer, Pid}, _From,
            State = #state{default_consumer = PrevPid,
                           monitors = Monitors}) ->
    Monitors1 = if PrevPid =:= none -> Monitors;
                   true -> remove_from_monitor_dict(PrevPid, Monitors)
                end,
    NewState = State#state{default_consumer = Pid,
                           monitors = add_to_monitor_dict(Pid, Monitors1)},
    {reply, ok, NewState}.
+
%% @private
%% NOTE(review): returns State rather than the conventional ok —
%% amqp_gen_consumer appears to use the callback's return value;
%% confirm before changing.
terminate(_Reason, State) ->
    State.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
%% Route Msg (an AMQP method) and optional content to the consumer
%% registered for the method's tag, falling back to the default
%% consumer; crash if neither exists.
deliver(Msg, State) ->
    deliver(Msg, undefined, State).
deliver(Msg, Message, State) ->
    Combined = case Message of
                   undefined -> Msg;
                   _ -> {Msg, Message}
               end,
    case resolve_consumer(tag(Msg), State) of
        error -> exit(unexpected_delivery_and_no_default_consumer);
        {_Kind, Pid} -> Pid ! Combined
    end.
+
%% Remove the consumer registered under Cancel's tag, if any,
%% releasing one monitor reference to its pid. Unknown tags are a
%% no-op.
do_cancel(Cancel, State = #state{consumers = Consumers,
                                 monitors = Monitors}) ->
    ConsumerTag = tag(Cancel),
    case dict:find(ConsumerTag, Consumers) of
        error ->
            State;
        {ok, ConsumerPid} ->
            State#state{
              consumers = dict:erase(ConsumerTag, Consumers),
              monitors = remove_from_monitor_dict(ConsumerPid, Monitors)}
    end.
+
%% Find the pid subscribed under Tag: {consumer, Pid} for an explicit
%% registration, {default, Pid} for the default consumer, or error if
%% neither exists.
resolve_consumer(Tag, #state{consumers = Consumers,
                             default_consumer = Default}) ->
    case dict:find(Tag, Consumers) of
        {ok, Pid} ->
            {consumer, Pid};
        error when Default =:= none ->
            error;
        error ->
            {default, Default}
    end.
+
%% Extract the consumer tag from any of the subscription-related
%% AMQP methods.
tag(#'basic.consume'{consumer_tag = Tag}) -> Tag;
tag(#'basic.consume_ok'{consumer_tag = Tag}) -> Tag;
tag(#'basic.cancel'{consumer_tag = Tag}) -> Tag;
tag(#'basic.cancel_ok'{consumer_tag = Tag}) -> Tag;
tag(#'basic.deliver'{consumer_tag = Tag}) -> Tag.
+
%% Reference-counted monitors: take out a monitor on the first
%% reference to Pid; afterwards just bump the count, reusing the
%% existing monitor reference.
add_to_monitor_dict(Pid, Monitors) ->
    case dict:find(Pid, Monitors) of
        {ok, {Count, MRef}} ->
            dict:store(Pid, {Count + 1, MRef}, Monitors);
        error ->
            MRef = erlang:monitor(process, Pid),
            dict:store(Pid, {1, MRef}, Monitors)
    end.
+
%% Drop one reference to Pid's monitor; demonitor and forget the
%% entry once the count reaches zero. Crashes (badkey in dict:fetch)
%% if Pid is untracked.
remove_from_monitor_dict(Pid, Monitors) ->
    {Count, MRef} = dict:fetch(Pid, Monitors),
    case Count of
        1 -> erlang:demonitor(MRef),
             dict:erase(Pid, Monitors);
        _ -> dict:store(Pid, {Count - 1, MRef}, Monitors)
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% @private
+-module(amqp_sup).
+
+-include("amqp_client.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_connection_sup/1]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
%% Starts the top-level client supervisor, registered locally as
%% amqp_sup.
start_link() ->
    supervisor2:start_link({local, amqp_sup}, ?MODULE, []).

%% Starts one amqp_connection_sup child (simple_one_for_one, see
%% init/1) for the given connection parameters.
start_connection_sup(AmqpParams) ->
    supervisor2:start_child(amqp_sup, [AmqpParams]).
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
%% simple_one_for_one: children are started on demand via
%% start_connection_sup/1. Children are 'temporary' (never restarted)
%% with infinite shutdown, since each child is itself a supervisor.
init([]) ->
    {ok, {{simple_one_for_one, 0, 1},
          [{connection_sup, {amqp_connection_sup, start_link, []},
            temporary, infinity, supervisor, [amqp_connection_sup]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(amqp_uri).
+
+-include("amqp_client.hrl").
+
+-export([parse/1, parse/2, remove_credentials/1]).
+
+%%---------------------------------------------------------------------------
+%% AMQP URI Parsing
+%%---------------------------------------------------------------------------
+
%% Reformat a URI to remove authentication secrets from it (before we
%% log it or display it anywhere). Rebuilds "scheme://host[:port]path"
%% from the parsed URI, dropping the userinfo part entirely.
remove_credentials(URI) ->
    Props = uri_parser:parse(URI,
                             [{host, undefined}, {path, undefined},
                              {port, undefined}, {'query', []}]),
    PortPart = case proplists:get_value(port, Props) of
                   undefined -> "";
                   Port -> rabbit_misc:format(":~B", [Port])
               end,
    %% Missing components render as the empty string.
    PGet = fun(K, P) -> case proplists:get_value(K, P) of
                            undefined -> "";
                            R -> R
                        end
           end,
    rabbit_misc:format(
      "~s://~s~s~s", [proplists:get_value(scheme, Props), PGet(host, Props),
                      PortPart, PGet(path, Props)]).
+
+%% @spec (Uri) -> {ok, #amqp_params_network{} | #amqp_params_direct{}} |
+%% {error, {Info, Uri}}
+%% where
+%% Uri = string()
+%% Info = any()
+%%
+%% @doc Parses an AMQP URI. If any of the URI parts are missing, the
+%% default values are used. If the hostname is zero-length, an
+%% #amqp_params_direct{} record is returned; otherwise, an
+%% #amqp_params_network{} record is returned. Extra parameters may be
+%% specified via the query string
+%% (e.g. "?heartbeat=5&auth_mechanism=external"). In case of failure,
+%% an {error, {Info, Uri}} tuple is returned.
+%%
+%% The extra parameters that may be specified are channel_max,
+%% frame_max, heartbeat and auth_mechanism (the latter can appear more
+%% than once). The extra parameters that may be specified for an SSL
+%% connection are cacertfile, certfile, keyfile, verify, and
+%% fail_if_no_peer_cert.
%% Entry points. Any throw or runtime error raised while parsing is
%% converted into {error, {Reason, Uri}}.
parse(Uri) -> parse(Uri, <<"/">>).

parse(Uri, DefaultVHost) ->
    try
        return(parse1(Uri, DefaultVHost))
    catch
        throw:Reason -> {error, {Reason, Uri}};
        error:Reason -> {error, {Reason, Uri}}
    end.
+
%% Dispatch on the (case-insensitive) scheme: "amqp" builds plain
%% broker params, "amqps" SSL params; anything else fails. Query
%% string parameters are then merged in by broker_add_query/2.
parse1(Uri, DefaultVHost) when is_list(Uri) ->
    case uri_parser:parse(Uri, [{host, undefined}, {path, undefined},
                                {port, undefined}, {'query', []}]) of
        {error, Err} ->
            throw({unable_to_parse_uri, Err});
        Parsed ->
            Endpoint =
                case string:to_lower(proplists:get_value(scheme, Parsed)) of
                    "amqp" -> build_broker(Parsed, DefaultVHost);
                    "amqps" -> build_ssl_broker(Parsed, DefaultVHost);
                    Scheme -> fail({unexpected_uri_scheme, Scheme})
                end,
            return({ok, broker_add_query(Endpoint, Parsed)})
    end;
parse1(_, _DefaultVHost) ->
    fail(expected_string_uri).
+
%% Percent-decode a string ("%2F" -> "/"). Atoms (e.g. 'undefined'
%% for a missing URI component) pass through untouched. Throws if a
%% "%" escape is malformed or truncated.
unescape_string(Atom) when is_atom(Atom) ->
    Atom;
unescape_string([]) ->
    [];
unescape_string([$%, N1, N2 | Rest]) ->
    try
        [erlang:list_to_integer([N1, N2], 16) | unescape_string(Rest)]
    catch
        %% FIX: error-reason atom was misspelled 'invalid_entitiy'.
        error:badarg -> throw({invalid_entity, ['%', N1, N2]})
    end;
unescape_string([$% | Rest]) ->
    fail({unterminated_entity, ['%' | Rest]});
unescape_string([C | Rest]) ->
    [C | unescape_string(Rest)].
+
%% Turn parsed URI components into an #amqp_params_network{} record,
%% or #amqp_params_direct{} when the host part is absent. The path,
%% when present, must be "/" followed by a single (percent-escaped)
%% vhost segment containing no further slashes.
build_broker(ParsedUri, DefaultVHost) ->
    [Host, Port, Path] =
        [proplists:get_value(F, ParsedUri) || F <- [host, port, path]],
    case Port =:= undefined orelse (0 < Port andalso Port =< 65535) of
        true -> ok;
        false -> fail({port_out_of_range, Port})
    end,
    VHost = case Path of
                undefined -> DefaultVHost;
                [$/|Rest] -> case string:chr(Rest, $/) of
                                 0 -> list_to_binary(unescape_string(Rest));
                                 _ -> fail({invalid_vhost, Rest})
                             end
            end,
    UserInfo = proplists:get_value(userinfo, ParsedUri),
    set_user_info(case unescape_string(Host) of
                      undefined -> #amqp_params_direct{virtual_host = VHost};
                      Host1 -> Mech = mechanisms(ParsedUri),
                               #amqp_params_network{host = Host1,
                                                    port = Port,
                                                    virtual_host = VHost,
                                                    auth_mechanisms = Mech}
                  end, UserInfo).
+
%% Overlay username/password from the URI's userinfo (a list of
%% segments split on ":") onto the params record. An empty userinfo
%% leaves the record's defaults in place.
set_user_info(Ps, UserInfo) ->
    ToBin = fun (S) -> list_to_binary(unescape_string(S)) end,
    case UserInfo of
        [User, Pass | _] -> set([{username, ToBin(User)},
                                 {password, ToBin(Pass)}], Ps);
        [User]           -> set([{username, ToBin(User)}], Ps);
        []               -> Ps
    end.
+
%% Apply {FieldName, Value} overrides to either params record,
%% mapping field names to tuple positions via record_info/2.
set(KVs, Ps = #amqp_params_direct{}) ->
    set(KVs, Ps, record_info(fields, amqp_params_direct));
set(KVs, Ps = #amqp_params_network{}) ->
    set(KVs, Ps, record_info(fields, amqp_params_network)).
+
%% Walk Fields (which mirror the record's element order, starting at
%% tuple position 2) and overwrite each element whose field name
%% appears in KVs; unknown keys in KVs are ignored.
set(KVs, Ps, Fields) ->
    ApplyField =
        fun (Field, {Acc, Ix}) ->
                NewAcc = case lists:keyfind(Field, 1, KVs) of
                             false -> Acc;
                             {_, V} -> setelement(Ix, Acc, V)
                         end,
                {NewAcc, Ix + 1}
        end,
    {Ps1, _FinalIx} = lists:foldl(ApplyField, {Ps, 2}, Fields),
    Ps1.
+
%% Like build_broker/2 but additionally extracts SSL options from the
%% query string. Each recognised key is converted/validated by its
%% paired Fun; a throw during conversion is reported as
%% {invalid_ssl_parameter, ...}. Unrecognised query keys are ignored
%% here (broker_add_query/2 handles the non-SSL ones).
build_ssl_broker(ParsedUri, DefaultVHost) ->
    Params = build_broker(ParsedUri, DefaultVHost),
    Query = proplists:get_value('query', ParsedUri),
    SSLOptions =
        run_state_monad(
          [fun (L) -> KeyString = atom_to_list(Key),
                      case lists:keysearch(KeyString, 1, Query) of
                          {value, {_, Value}} ->
                              try return([{Key, unescape_string(Fun(Value))} | L])
                              catch throw:Reason ->
                                      fail({invalid_ssl_parameter,
                                            Key, Value, Query, Reason})
                              end;
                          false ->
                              L
                      end
           end || {Fun, Key} <-
                      [{fun find_path_parameter/1, cacertfile},
                       {fun find_path_parameter/1, certfile},
                       {fun find_path_parameter/1, keyfile},
                       {fun find_atom_parameter/1, verify},
                       {fun find_boolean_parameter/1, fail_if_no_peer_cert}]],
          []),
    Params#amqp_params_network{ssl_options = SSLOptions}.
+
%% Overlay query-string parameters onto the already-built params
%% record, restricted to that record type's fields.
broker_add_query(Params = #amqp_params_direct{}, Uri) ->
    broker_add_query(Params, Uri, record_info(fields, amqp_params_direct));
broker_add_query(Params = #amqp_params_network{}, Uri) ->
    broker_add_query(Params, Uri, record_info(fields, amqp_params_network)).
+
%% For each record field (tuple position Pos, starting at 2), look
%% for a same-named query parameter; if present, parse it with
%% parse_amqp_param/2 and set it. A bare key (proplists short form,
%% value 'true') is ignored.
broker_add_query(Params, ParsedUri, Fields) ->
    Query = proplists:get_value('query', ParsedUri),
    {Params1, _Pos} =
        run_state_monad(
          [fun ({ParamsN, Pos}) ->
                   Pos1 = Pos + 1,
                   KeyString = atom_to_list(Field),
                   case proplists:get_value(KeyString, Query) of
                       undefined ->
                           return({ParamsN, Pos1});
                       true -> %% proplists short form, not permitted
                           return({ParamsN, Pos1});
                       Value ->
                           try
                               ValueParsed = parse_amqp_param(Field, Value),
                               return(
                                 {setelement(Pos, ParamsN, ValueParsed), Pos1})
                           catch throw:Reason ->
                                   fail({invalid_amqp_params_parameter,
                                         Field, Value, Query, Reason})
                           end
                   end
           end || Field <- Fields], {Params, 2}),
    Params1.
+
%% Only the integer-valued connection parameters may be set via the
%% query string; everything else is rejected.
parse_amqp_param(Field, String) ->
    IntegerFields = [channel_max, frame_max, heartbeat, connection_timeout],
    case lists:member(Field, IntegerFields) of
        true ->
            try return(list_to_integer(String))
            catch error:badarg -> fail({not_an_integer, String})
            end;
        false ->
            fail({parameter_unconfigurable_in_query, Field, String})
    end.
+
%% Path-valued SSL options are passed through verbatim.
find_path_parameter(Value) -> return(Value).

%% Boolean-valued options must be exactly "true" or "false".
%% FIX: pattern-match instead of calling list_to_atom/1 on every
%% value — atoms are never garbage collected, so creating them from
%% URI input on the happy path was wasteful and abusable.
find_boolean_parameter("true")  -> return(true);
find_boolean_parameter("false") -> return(false);
find_boolean_parameter(Value) ->
    %% Error path only; preserves the original error term shape.
    fail({require_boolean, list_to_atom(Value)}).

%% NOTE(review): creates atoms from URI-supplied input (needed for
%% ssl 'verify' values); only safe for trusted configuration URIs.
find_atom_parameter(Value) -> return(list_to_atom(Value)).
+
%% Build auth-mechanism funs from "auth_mechanism" query values
%% (default: plain + amqplain). Each value is either a function name
%% in amqp_auth_mechanisms or a "Module:Function" pair.
%% NOTE(review): list_to_atom/1 is applied to URI-supplied tokens;
%% atoms are never garbage collected, so hostile URIs could grow the
%% atom table — acceptable only for trusted configuration input.
mechanisms(ParsedUri) ->
    Query = proplists:get_value('query', ParsedUri),
    Mechanisms = case proplists:get_all_values("auth_mechanism", Query) of
                     [] -> ["plain", "amqplain"];
                     Mechs -> Mechs
                 end,
    [case [list_to_atom(T) || T <- string:tokens(Mech, ":")] of
         [F] -> fun (R, P, S) -> amqp_auth_mechanisms:F(R, P, S) end;
         [M, F] -> fun (R, P, S) -> M:F(R, P, S) end;
         L -> throw({not_mechanism, L})
     end || Mech <- Mechanisms].
+
%% --=: Plain state monad implementation start :=--
%% Threads State through a list of funs; each fun either return/1s
%% the next state or fail/1s (throws) to abort the whole chain, which
%% parse/2 converts into an error tuple.
run_state_monad(FunList, State) ->
    lists:foldl(fun (Fun, StateN) -> Fun(StateN) end, State, FunList).

return(V) -> V.

fail(Reason) -> throw(Reason).
%% --=: end :=--
--- /dev/null
+@title AMQP Client for Erlang
+@author GoPivotal Inc. <support@rabbitmq.com>
+@copyright 2007-2013 GoPivotal, Inc.
+
+@version %%VERSION%%
+
+@reference <a href="http://www.rabbitmq.com/protocol.html" target="_top">AMQP documentation</a> on the RabbitMQ website.
+
+@doc
+
+== Overview ==
+
+This application provides an Erlang library to interact with an AMQP 0-9-1 compliant message broker. The module documentation assumes that the programmer has some basic familiarity with the execution model defined in the AMQP specification.
+
+The main components are {@link amqp_connection} and {@link amqp_channel}. The {@link amqp_connection} module is used to open and close connections to an AMQP broker as well as creating channels. The {@link amqp_channel} module is used to send and receive commands and messages to and from a broker within the context of a channel.
+
+== AMQP Record Definitions ==
+
+Many of the API functions take structured records as arguments. These records represent the commands defined in the AMQP execution model. The definitions for these records are automatically generated by the rabbitmq-codegen project. rabbitmq-codegen parses a machine readable view of the specification and generates a header file that includes the entire command set of AMQP. Each command in AMQP has an identically named record. The protocol documentation serves as a reference for the attributes of each command.
+
+== Programming Model ==
+
+For more information, refer to the Erlang AMQP client <a href="http://www.rabbitmq.com/erlang-client-user-guide.html">developer's guide</a> on the RabbitMQ website.
+
+== RPC Components ==
+
+The {@link amqp_rpc_server} module provides a generic building block to expose Erlang functions via an RPC over AMQP mechanism. The {@link amqp_rpc_client} provides a simple client utility to submit RPC requests to a server via AMQP.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2013-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_routing_util).
+
+-export([init_state/0, dest_prefixes/0, all_dest_prefixes/0]).
+-export([ensure_endpoint/4, ensure_endpoint/5, ensure_binding/3]).
+-export([parse_endpoint/1, parse_endpoint/2]).
+-export([parse_routing/1, dest_temp_queue/1]).
+
+-include("amqp_client.hrl").
+-include("rabbit_routing_prefixes.hrl").
+
+%%----------------------------------------------------------------------------
+
%% Routing state: the set of queues already declared on the current
%% channel (ensure_endpoint/5 uses it to avoid re-declaring).
init_state() -> sets:new().

%% Destination prefixes handled by parse_endpoint/2 (macros from
%% rabbit_routing_prefixes.hrl).
dest_prefixes() -> [?EXCHANGE_PREFIX, ?TOPIC_PREFIX, ?QUEUE_PREFIX,
                    ?AMQQUEUE_PREFIX, ?REPLY_QUEUE_PREFIX].

all_dest_prefixes() -> [?TEMP_QUEUE_PREFIX | dest_prefixes()].
+
+%% --------------------------------------------------------------------------
+
%% Parse a destination string such as "/queue/foo", "/exchange/x/key",
%% "/topic/t", "/temp-queue/q", "/amq/queue/q" or "/reply-queue/id"
%% into a tagged tuple. A string without a recognised "/<type>/"
%% prefix is treated as a plain queue name.
parse_endpoint(Destination) ->
    parse_endpoint(Destination, false).

parse_endpoint(undefined, AllowAnonymousQueue) ->
    parse_endpoint("/queue", AllowAnonymousQueue);

parse_endpoint(Destination, AllowAnonymousQueue) when is_binary(Destination) ->
    parse_endpoint(unicode:characters_to_list(Destination),
                   AllowAnonymousQueue);
parse_endpoint(Destination, AllowAnonymousQueue) when is_list(Destination) ->
    case re:split(Destination, "/", [{return, list}]) of
        [Name] ->
            {ok, {queue, unescape(Name)}};
        ["", Type | Rest]
            when Type =:= "exchange" orelse Type =:= "queue" orelse
                 Type =:= "topic" orelse Type =:= "temp-queue" ->
            parse_endpoint0(atomise(Type), Rest, AllowAnonymousQueue);
        ["", "amq", "queue" | Rest] ->
            parse_endpoint0(amqqueue, Rest, AllowAnonymousQueue);
        ["", "reply-queue" = Prefix | [_|_]] ->
            %% Reply-queue names may themselves contain "/": take the
            %% raw remainder of Destination after "/reply-queue/".
            parse_endpoint0(reply_queue,
                            [lists:nthtail(2 + length(Prefix), Destination)],
                            AllowAnonymousQueue);
        _ ->
            {error, {unknown_destination, Destination}}
    end.
+
%% Validate the remaining path segments for a given destination type.
%% Exchange names may not be empty.
parse_endpoint0(exchange, ["" | _] = Rest, _) ->
    {error, {invalid_destination, exchange, to_url(Rest)}};
parse_endpoint0(exchange, [Name], _) ->
    {ok, {exchange, {unescape(Name), undefined}}};
%% Optional second segment is a binding pattern / routing key.
parse_endpoint0(exchange, [Name, Pattern], _) ->
    {ok, {exchange, {unescape(Name), unescape(Pattern)}}};
parse_endpoint0(queue, [], false) ->
    {error, {invalid_destination, queue, []}};
%% Anonymous (server-named) queue, only when explicitly allowed.
parse_endpoint0(queue, [], true) ->
    {ok, {queue, undefined}};
%% Any other type takes exactly one non-empty segment.
parse_endpoint0(Type, [[_|_]] = [Name], _) ->
    {ok, {Type, unescape(Name)}};
parse_endpoint0(Type, Rest, _) ->
    {error, {invalid_destination, Type, to_url(Rest)}}.
+
+%% --------------------------------------------------------------------------
+
%% Convenience wrapper: ensure_endpoint/5 with no options.
ensure_endpoint(Dir, Channel, Endpoint, State) ->
    ensure_endpoint(Dir, Channel, Endpoint, [], State).
+
%% Consuming from an exchange: optionally verify the exchange exists,
%% then declare a server-named queue to bind to it.
ensure_endpoint(source, Channel, {exchange, {Name, _}}, Params, State) ->
    check_exchange(Name, Channel,
                   proplists:get_value(check_exchange, Params, false)),
    Method = queue_declare_method(#'queue.declare'{}, exchange, Params),
    #'queue.declare_ok'{queue = Queue} = amqp_channel:call(Channel, Method),
    {ok, Queue, State};

%% Consuming from a topic: declare the subscription queue.
ensure_endpoint(source, Channel, {topic, _}, Params, State) ->
    Method = queue_declare_method(#'queue.declare'{}, topic, Params),
    #'queue.declare_ok'{queue = Queue} = amqp_channel:call(Channel, Method),
    {ok, Queue, State};

%% Anonymous queue: nothing to declare yet.
ensure_endpoint(_Dir, _Channel, {queue, undefined}, _Params, State) ->
    {ok, undefined, State};

%% Named queue: declare it (durable, nowait) at most once per
%% channel, tracking declared queues in the State set.
ensure_endpoint(_, Channel, {queue, Name}, Params, State) ->
    Params1 = rabbit_misc:pset(durable, true, Params),
    Queue = list_to_binary(Name),
    State1 = case sets:is_element(Queue, State) of
                 true -> State;
                 _ -> Method = queue_declare_method(
                                 #'queue.declare'{queue = Queue,
                                                  nowait = true},
                                 queue, Params1),
                      amqp_channel:cast(Channel, Method),
                      sets:add_element(Queue, State)
             end,
    {ok, Queue, State1};

%% Publishing to an exchange: optionally verify it exists; no queue.
ensure_endpoint(dest, Channel, {exchange, {Name, _}}, Params, State) ->
    check_exchange(Name, Channel,
                   proplists:get_value(check_exchange, Params, false)),
    {ok, undefined, State};

ensure_endpoint(dest, _Ch, {topic, _}, _Params, State) ->
    {ok, undefined, State};

%% Reply queues and amq.* queues are assumed to already exist.
ensure_endpoint(_, _Ch, {Type, Name}, _Params, State)
  when Type =:= reply_queue orelse Type =:= amqqueue ->
    {ok, list_to_binary(Name), State};

ensure_endpoint(_Direction, _Ch, _Endpoint, _Params, _State) ->
    {error, invalid_endpoint}.
+
+%% --------------------------------------------------------------------------
+
%% The default exchange cannot be bound to explicitly; assert (via
%% the match) that the routing key equals the queue's own name.
ensure_binding(QueueBin, {"", Queue}, _Channel) ->
    %% i.e., we should only be asked to bind to the default exchange a
    %% queue with its own name
    QueueBin = list_to_binary(Queue),
    ok;
%% Otherwise perform a synchronous queue.bind.
ensure_binding(Queue, {Exchange, RoutingKey}, Channel) ->
    #'queue.bind_ok'{} =
        amqp_channel:call(Channel,
                          #'queue.bind'{
                            queue = Queue,
                            exchange = list_to_binary(Exchange),
                            routing_key = list_to_binary(RoutingKey)}),
    ok.
+
+%% --------------------------------------------------------------------------
+
%% Map a parsed endpoint to an {ExchangeName, RoutingKey} pair.
parse_routing({exchange, {Name, undefined}}) -> {Name, ""};
parse_routing({exchange, {Name, Pattern}})   -> {Name, Pattern};
parse_routing({topic, Name})                 -> {"amq.topic", Name};
%% Plain, reply and amq.* queues all route via the default exchange.
parse_routing({queue, Name})                 -> {"", Name};
parse_routing({reply_queue, Name})           -> {"", Name};
parse_routing({amqqueue, Name})              -> {"", Name}.
+
%% The temp-queue name, or none for any other destination kind.
dest_temp_queue(Destination) ->
    case Destination of
        {temp_queue, Name} -> Name;
        _                  -> none
    end.
+
+%% --------------------------------------------------------------------------
+
%% When checking is requested (third argument true), verify the
%% exchange exists via a passive declare; the match on declare_ok
%% crashes if the broker reports otherwise.
check_exchange(_, _, false) ->
    ok;
check_exchange(ExchangeName, Channel, true) ->
    XDecl = #'exchange.declare'{ exchange = list_to_binary(ExchangeName),
                                 passive = true },
    #'exchange.declare_ok'{} = amqp_channel:call(Channel, XDecl),
    ok.
+
%% Refine a queue.declare from Params: durable if requested,
%% otherwise an exclusive auto-delete queue. For topics, an optional
%% subscription_queue_name_gen fun supplies the queue name.
queue_declare_method(#'queue.declare'{} = Method, Type, Params) ->
    Method1 = case proplists:get_value(durable, Params, false) of
                  true -> Method#'queue.declare'{durable = true};
                  false -> Method#'queue.declare'{auto_delete = true,
                                                  exclusive = true}
              end,
    case {Type, proplists:get_value(subscription_queue_name_gen, Params)} of
        {topic, SQNG} when is_function(SQNG) ->
            Method1#'queue.declare'{queue = SQNG()};
        _ ->
            Method1
    end.
+
+%% --------------------------------------------------------------------------
+
%% Rebuild a "/a/b/c"-style path from its segments ([] stays []).
to_url([])       -> [];
to_url(Segments) -> lists:append([[$/ | S] || S <- Segments]).
+
%% Turn a destination type name into an atom, mapping "-" to "_"
%% (e.g. "temp-queue" -> temp_queue). Callers only pass the fixed
%% set of type strings accepted by parse_endpoint/2.
atomise(Name) when is_list(Name) ->
    Underscored = re:replace(Name, "-", "_", [{return, list}, global]),
    list_to_atom(Underscored).
+
%% Decode the one escape used in destinations: "%2F" -> "/".
unescape(Str) -> lists:reverse(unescape(Str, [])).

unescape("%2F" ++ Rest, Acc) -> unescape(Rest, [$/ | Acc]);
unescape([Ch | Rest], Acc)   -> unescape(Rest, [Ch | Acc]);
unescape([], Acc)            -> Acc.
+
--- /dev/null
+%% This file is a copy of http_uri.erl from the R13B-1 Erlang/OTP
+%% distribution with several modifications.
+
+%% All modifications are Copyright (c) 2009-2014 GoPivotal, Ltd.
+
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+
+%% See http://tools.ietf.org/html/rfc3986
+
+-module(uri_parser).
+
+-export([parse/2]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+%% Returns a key list of elements extracted from the URI. Note that
+%% only 'scheme' is guaranteed to exist. Key-Value pairs from the
+%% Defaults list will be used in the absence of a non-empty value extracted
+%% from the URI. The values extracted are strings, except for 'port'
+%% which is an integer, 'userinfo' which is a list of strings (split
+%% on $:), and 'query' which is a list of strings where no $= char
+%% found, or a {key,value} pair where a $= char is found (initial
+%% split on $& and subsequent optional split on $=). Possible keys
+%% are: 'scheme', 'userinfo', 'host', 'port', 'path', 'query',
+%% 'fragment'.
+
+%% Parse AbsURI into the keylist described above, merged over the
+%% Defaults keylist. Returns {error, no_scheme} when there is no ":"
+%% separator, and {error, {malformed_uri, ...}} when the remainder
+%% fails to parse (parse_uri_rest throws on bad input, hence the
+%% catch).
+parse(AbsURI, Defaults) ->
+ case parse_scheme(AbsURI) of
+ {error, Reason} ->
+ {error, Reason};
+ {Scheme, Rest} ->
+ case (catch parse_uri_rest(Rest, true)) of
+ [_|_] = List ->
+ merge_keylists([{scheme, Scheme} | List], Defaults);
+ E ->
+ {error, {malformed_uri, AbsURI, E}}
+ end
+ end.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% Split "scheme:rest" at the first ":"; {error, no_scheme} if none.
+parse_scheme(AbsURI) ->
+ split_uri(AbsURI, ":", {error, no_scheme}).
+
+%% Parse everything after "scheme:". The "//" form has an authority
+%% (userinfo@host:port) before the path; it is split off at the first
+%% "/", "?" or "#" (SkipRight = 0 keeps that delimiter with the path
+%% part) and parsed separately. The second clause handles a bare
+%% path[?query][#fragment].
+parse_uri_rest("//" ++ URIPart, true) ->
+ %% we have an authority
+ {Authority, PathQueryFrag} =
+ split_uri(URIPart, "/|\\?|#", {URIPart, ""}, 1, 0),
+ AuthorityParts = parse_authority(Authority),
+ parse_uri_rest(PathQueryFrag, false) ++ AuthorityParts;
+parse_uri_rest(PathQueryFrag, _Bool) ->
+ %% no authority, just a path and maybe query
+ {PathQuery, Frag} = split_uri(PathQueryFrag, "#", {PathQueryFrag, ""}),
+ {Path, QueryString} = split_uri(PathQuery, "\\?", {PathQuery, ""}),
+ QueryPropList = split_query(QueryString),
+ [{path, Path}, {'query', QueryPropList}, {fragment, Frag}].
+
+%% Split "userinfo@hostport": userinfo becomes a list of strings split
+%% on ":" (empty userinfo -> []); host/port parsing is delegated.
+parse_authority(Authority) ->
+ {UserInfo, HostPort} = split_uri(Authority, "@", {"", Authority}),
+ UserInfoSplit = case re:split(UserInfo, ":", [{return, list}]) of
+ [""] -> [];
+ UIS -> UIS
+ end,
+ [{userinfo, UserInfoSplit} | parse_host_port(HostPort)].
+
+%% Extract host and optional port. The first clause handles an IPv6
+%% literal "[...]": after the closing "]" either nothing follows or a
+%% ":port" must (anything else throws {invalid_port, _}, caught in
+%% parse/2). The second clause splits plain "host[:port]".
+%% list_to_integer/1 errors on a non-numeric port, also caught above.
+parse_host_port("[" ++ HostPort) -> %ipv6
+ {Host, ColonPort} = split_uri(HostPort, "\\]", {HostPort, ""}),
+ [{host, Host} | case split_uri(ColonPort, ":", not_found, 0, 1) of
+ not_found -> case ColonPort of
+ [] -> [];
+ _ -> throw({invalid_port, ColonPort})
+ end;
+ {_, Port} -> [{port, list_to_integer(Port)}]
+ end];
+
+parse_host_port(HostPort) ->
+ {Host, Port} = split_uri(HostPort, ":", {HostPort, not_found}),
+ [{host, Host} | case Port of
+ not_found -> [];
+ _ -> [{port, list_to_integer(Port)}]
+ end].
+
+%% Split a query string on "&"; each parameter becomes {Key, Value}
+%% if it contains "=", otherwise the raw string. "" -> [].
+split_query(Query) ->
+ case re:split(Query, "&", [{return, list}]) of
+ [""] -> [];
+ QParams -> [split_uri(Param, "=", Param) || Param <- QParams]
+ end.
+
+%% Split UriPart at the first regex match of SplitChar into
+%% {Left, Right}; NoMatchResult is returned verbatim when there is no
+%% match. SkipLeft/SkipRight control how many characters adjacent to
+%% the match are dropped from each side (1/1 drops the delimiter
+%% itself; 0 keeps it). Note: re:run/2 returns at most one {match, _}
+%% entry here because no capture groups are used.
+split_uri(UriPart, SplitChar, NoMatchResult) ->
+ split_uri(UriPart, SplitChar, NoMatchResult, 1, 1).
+
+split_uri(UriPart, SplitChar, NoMatchResult, SkipLeft, SkipRight) ->
+ case re:run(UriPart, SplitChar) of
+ {match, [{Match, _}]} ->
+ {string:substr(UriPart, 1, Match + 1 - SkipLeft),
+ string:substr(UriPart, Match + 1 + SkipRight, length(UriPart))};
+ nomatch ->
+ NoMatchResult
+ end.
+
+%% Merge keylist A over defaults B: a key in A with a non-empty value
+%% wins over B; a key in A whose value is [] only survives when
+%% neither B nor a non-empty A entry has that key. The result is
+%% unique and sorted by key (ukeymerge keeps the left operand's entry
+%% on collision).
+merge_keylists(A, B) ->
+ {AEmpty, ANonEmpty} = lists:partition(fun ({_Key, V}) -> V =:= [] end, A),
+ [AEmptyS, ANonEmptyS, BS] =
+ [lists:ukeysort(1, X) || X <- [AEmpty, ANonEmpty, B]],
+ lists:ukeymerge(1, lists:ukeymerge(1, ANonEmptyS, BS), AEmptyS).
--- /dev/null
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+#
+
+# IS_SUCCESS greps captured test output for the eunit/ct success
+# banners; TESTING_MESSAGE silences the tty error logger, redirects it
+# to a log file under $(TMPDIR) and prints a progress notice.
+IS_SUCCESS:=egrep "(All .+ tests (successful|passed).|Test passed.)"
+TESTING_MESSAGE:=-eval "error_logger:tty(false), error_logger:logfile({open, \"$(TMPDIR)/erlang-client-tests.log\"}), io:format(\"~nTesting in progress. Please wait...~n~n\")."
+
+prepare_tests: compile compile_tests
+
+# The OK shell variable accumulates failures so that every sub-target
+# still runs even when an earlier one fails; the trailing $$OK sets
+# the recipe's exit status.
+all_tests: prepare_tests
+ OK=true && \
+ { $(MAKE) test_suites || OK=false; } && \
+ { $(MAKE) test_common_package || OK=false; } && \
+ { $(MAKE) test_direct || OK=false; } && \
+ $$OK
+
+test_suites: prepare_tests
+ OK=true && \
+ { $(MAKE) test_network || OK=false; } && \
+ { $(MAKE) test_remote_direct || OK=false; } && \
+ $(ALL_SSL) && \
+ $$OK
+
+test_suites_coverage: prepare_tests
+ OK=true && \
+ { $(MAKE) test_network_coverage || OK=false; } && \
+ { $(MAKE) test_direct_coverage || OK=false; } && \
+ $(ALL_SSL_COVERAGE) && \
+ $$OK
+
+## Starts a broker, configures users and runs the tests on the same node.
+## Output is tee'd to a temp file so IS_SUCCESS can grep it afterwards;
+## the temp file is only removed on the success path.
+run_test_in_broker:
+ $(MAKE) start_test_broker_node
+ $(MAKE) unboot_broker
+ OK=true && \
+ TMPFILE=$(MKTEMP) && echo "Redirecting output to $$TMPFILE" && \
+ { $(MAKE) -C $(BROKER_DIR) run-node \
+ RABBITMQ_SERVER_START_ARGS="$(PA_LOAD_PATH) $(SSL_BROKER_ARGS) \
+ -noshell -s rabbit $(RUN_TEST_ARGS) -s init stop" 2>&1 | \
+ tee $$TMPFILE || OK=false; } && \
+ { $(IS_SUCCESS) $$TMPFILE || OK=false; } && \
+ rm $$TMPFILE && \
+ $(MAKE) boot_broker && \
+ $(MAKE) stop_test_broker_node && \
+ $$OK
+
+## Starts a broker, configures users and runs the tests from a different node
+run_test_detached: start_test_broker_node
+ OK=true && \
+ TMPFILE=$(MKTEMP) && echo "Redirecting output to $$TMPFILE" && \
+ { $(RUN) -noinput $(TESTING_MESSAGE) \
+ $(SSL_CLIENT_ARGS) $(RUN_TEST_ARGS) \
+ -s init stop 2>&1 | tee $$TMPFILE || OK=false; } && \
+ { $(IS_SUCCESS) $$TMPFILE || OK=false; } && \
+ rm $$TMPFILE && \
+ $(MAKE) stop_test_broker_node && \
+ $$OK
+
+## As run_test_detached but without capturing/grepping the output —
+## success is judged solely by the test run's exit status.
+run_test_foreground: start_test_broker_node
+ OK=true && \
+ { $(RUN) -noinput $(TESTING_MESSAGE) \
+ $(SSL_CLIENT_ARGS) $(RUN_TEST_ARGS) \
+ -s init stop || OK=false; } && \
+ $(MAKE) stop_test_broker_node && \
+ $$OK
+
+# Creates the restricted test user; the leading "-" ignores failure of
+# the initial delete (user may not exist yet).
+start_test_broker_node: boot_broker
+ sleep 1
+ - $(RABBITMQCTL) delete_user test_user_no_perm
+ $(RABBITMQCTL) add_user test_user_no_perm test_user_no_perm
+ sleep 1
+
+stop_test_broker_node:
+ sleep 1
+ $(RABBITMQCTL) delete_user test_user_no_perm
+ $(MAKE) unboot_broker
+
+# Boot/unboot delegate to the broker's own Makefile targets.
+boot_broker:
+ $(MAKE) -C $(BROKER_DIR) start-background-node RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS) $(SSL_BROKER_ARGS)"
+ $(MAKE) -C $(BROKER_DIR) start-rabbit-on-node
+
+unboot_broker:
+ $(MAKE) -C $(BROKER_DIR) stop-rabbit-on-node
+ $(MAKE) -C $(BROKER_DIR) stop-node
+
+ssl:
+ $(SSL)
+
+# Each test_* target picks a connection type (network / network_ssl /
+# direct) and a suite entry point (test or test_coverage), then runs
+# it either detached or inside the broker node.
+test_ssl: prepare_tests ssl
+ $(MAKE) run_test_detached AMQP_CLIENT_TEST_CONNECTION_TYPE="network_ssl" RUN_TEST_ARGS="-s amqp_client_SUITE test"
+
+test_network: prepare_tests
+ $(MAKE) run_test_detached AMQP_CLIENT_TEST_CONNECTION_TYPE="network" RUN_TEST_ARGS="-s amqp_client_SUITE test"
+
+test_direct: prepare_tests
+ $(MAKE) run_test_in_broker AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" RUN_TEST_ARGS="-s amqp_client_SUITE test"
+
+test_remote_direct: prepare_tests
+ $(MAKE) run_test_detached AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" RUN_TEST_ARGS="-s amqp_client_SUITE test"
+
+# Runs the suite against the packaged common .ez, with and without a
+# distributed node name (-sname) for the direct case.
+test_common_package: $(DIST_DIR)/$(COMMON_PACKAGE_EZ) package prepare_tests
+ $(MAKE) run_test_detached RUN="$(LIBS_PATH) erl -pa $(TEST_DIR)" \
+ AMQP_CLIENT_TEST_CONNECTION_TYPE="network" RUN_TEST_ARGS="-s amqp_client_SUITE test"
+ $(MAKE) run_test_detached RUN="$(LIBS_PATH) erl -pa $(TEST_DIR) -sname amqp_client" \
+ AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" RUN_TEST_ARGS="-s amqp_client_SUITE test"
+
+test_ssl_coverage: prepare_tests ssl
+ $(MAKE) run_test_detached AMQP_CLIENT_TEST_CONNECTION_TYPE="network_ssl" RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
+
+test_network_coverage: prepare_tests
+ $(MAKE) run_test_detached AMQP_CLIENT_TEST_CONNECTION_TYPE="network" RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
+
+test_remote_direct_coverage: prepare_tests
+ $(MAKE) run_test_detached AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
+
+test_direct_coverage: prepare_tests
+ $(MAKE) run_test_in_broker AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
--- /dev/null
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+#
+
+# Compiles every .erl in this directory to .beam; a beam is rebuilt
+# when its source, any shared .hrl, or the unpacked common package
+# changes. ERL_LIBS points erlc's code path at ../deps.
+TEST_SOURCES=$(wildcard *.erl)
+TEST_TARGETS=$(patsubst %.erl, %.beam, $(TEST_SOURCES))
+INCLUDES=$(wildcard ../$(INCLUDE_DIR)/*.hrl)
+DEPS_DIR=../deps
+
+ERLC_OPTS=-I ../$(INCLUDE_DIR) -o ./ -Wall -v +debug_info
+LIBS_PATH=ERL_LIBS=$(DEPS_DIR)
+
+all: compile
+
+compile: $(TEST_TARGETS)
+
+%.beam: %.erl $(DEPS_DIR)/$(COMMON_PACKAGE_DIR) $(INCLUDES)
+ $(LIBS_PATH) erlc $(ERLC_OPTS) $<
+
+clean:
+ rm -f *.beam
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(amqp_client_SUITE).
+
+-export([test_coverage/0]).
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% ?FUNCTION evaluates to the name of the enclosing test generator:
+%% it throws and catches to obtain a stacktrace whose head is the
+%% current function, then strips the trailing "_" (foo_test_ ->
+%% foo_test) so ?RUN can dispatch to the matching test function.
+%% NOTE(review): erlang:get_stacktrace/0 was deprecated in OTP 21 and
+%% removed in OTP 24 — this macro only compiles on older releases;
+%% confirm the supported OTP range.
+-define(FUNCTION,
+ begin
+ catch throw(x),
+ Fun = case erlang:get_stacktrace() of
+ [{_, F, _} | _] -> F; %% < R15
+ [{_, F, _, _} | _] -> F %% >= R15
+ end,
+ list_to_atom(string:strip(atom_to_list(Fun), right, $_))
+ end).
+
+%% Wrap run/2 with the current test name derived by ?FUNCTION.
+-define(RUN(Props), run(?FUNCTION, Props)).
+
+%%---------------------------------------------------------------------------
+%% Tests
+%%---------------------------------------------------------------------------
+
+%% Each eunit generator delegates to run/2. Props: [] runs once in
+%% test_util; [repeat] runs 100 iterations; [negative] dispatches to
+%% negative_test_util instead.
+amqp_uri_parse_test_() -> ?RUN([]).
+route_destination_test_() -> ?RUN([]).
+basic_get_test_() -> ?RUN([]).
+basic_get_ipv6_test_() -> ?RUN([]).
+basic_return_test_() -> ?RUN([]).
+simultaneous_close_test_() -> ?RUN([repeat]).
+basic_qos_test_() -> ?RUN([]).
+basic_recover_test_() -> ?RUN([]).
+basic_consume_test_() -> ?RUN([]).
+consume_notification_test_() -> ?RUN([]).
+basic_nack_test_() -> ?RUN([]).
+large_content_test_() -> ?RUN([]).
+lifecycle_test_() -> ?RUN([]).
+direct_no_user_test_() -> ?RUN([]).
+direct_no_password_test_() -> ?RUN([]).
+nowait_exchange_declare_test_() -> ?RUN([]).
+channel_repeat_open_close_test_() -> ?RUN([]).
+channel_multi_open_close_test_() -> ?RUN([]).
+basic_ack_test_() -> ?RUN([]).
+basic_ack_call_test_() -> ?RUN([]).
+channel_lifecycle_test_() -> ?RUN([]).
+queue_unbind_test_() -> ?RUN([]).
+sync_method_serialization_test_() -> ?RUN([]).
+async_sync_method_serialization_test_() -> ?RUN([]).
+sync_async_method_serialization_test_() -> ?RUN([]).
+teardown_test_() -> ?RUN([repeat]).
+rpc_test_() -> ?RUN([]).
+rpc_client_test_() -> ?RUN([]).
+pub_and_close_test_() -> ?RUN([]).
+channel_tune_negotiation_test_() -> ?RUN([]).
+confirm_test_() -> ?RUN([]).
+confirm_barrier_test_() -> ?RUN([]).
+confirm_select_before_wait_test_() -> ?RUN([]).
+confirm_barrier_timeout_test_() -> ?RUN([]).
+confirm_barrier_die_timeout_test_() -> ?RUN([]).
+default_consumer_test_() -> ?RUN([]).
+subscribe_nowait_test_() -> ?RUN([]).
+connection_blocked_network_test_() -> ?RUN([]).
+
+%% Negative tests: expected failures, run from negative_test_util.
+non_existent_exchange_test_() -> ?RUN([negative]).
+bogus_rpc_test_() -> ?RUN([negative, repeat]).
+hard_error_test_() -> ?RUN([negative, repeat]).
+non_existent_user_test_() -> ?RUN([negative]).
+invalid_password_test_() -> ?RUN([negative]).
+non_existent_vhost_test_() -> ?RUN([negative]).
+no_permission_test_() -> ?RUN([negative]).
+channel_writer_death_test_() -> ?RUN([negative]).
+channel_death_test_() -> ?RUN([negative]).
+shortstr_overflow_property_test_() -> ?RUN([negative]).
+shortstr_overflow_field_test_() -> ?RUN([negative]).
+command_invalid_over_channel_test_() -> ?RUN([negative]).
+command_invalid_over_channel0_test_() -> ?RUN([negative]).
+
+%%---------------------------------------------------------------------------
+%% Internal
+%%---------------------------------------------------------------------------
+
+%% Build a {timeout, Secs, Fun} eunit test that applies
+%% Module:TestName/0 RepeatCount times. Props: repeat (true -> 100, or
+%% an explicit count), negative (dispatch to negative_test_util),
+%% timeout (seconds, default 60). exit:normal from a test iteration is
+%% swallowed deliberately so a cleanly-exiting process doesn't fail
+%% the run.
+run(TestName, Props) ->
+ RepeatCount = case proplists:get_value(repeat, Props, false) of
+ true -> 100;
+ Number when is_number(Number) -> Number;
+ false -> 1
+ end,
+ Module = case proplists:get_bool(negative, Props) of
+ true -> negative_test_util;
+ false -> test_util
+ end,
+ {timeout, proplists:get_value(timeout, Props, 60),
+ fun () ->
+ lists:foreach(
+ fun (_) ->
+ try erlang:apply(Module, TestName, []) of
+ Ret -> Ret
+ catch
+ exit:normal -> ok
+ end
+ end, lists:seq(1, RepeatCount))
+ end}.
+
+%%---------------------------------------------------------------------------
+%% Coverage
+%%---------------------------------------------------------------------------
+
+%% Run the whole suite under cover analysis and emit the report.
+%% test/0 is generated by eunit from the *_test_ generators above.
+test_coverage() ->
+ rabbit_misc:enable_cover(),
+ test(),
+ rabbit_misc:report_cover().
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(amqp_dbg).
+
+-include_lib("stdlib/include/ms_transform.hrl").
+
+-export([tracer/0, all/0, c_all/0]).
+-export([supervision/0, c_supervision/0,
+ connection_lifecycle/0, c_connection_lifecycle/0,
+ channels_manager_lifecycle/0, c_channels_manager_lifecycle/0,
+ channel_lifecycle/0, c_channel_lifecycle/0,
+ methods/0, c_methods/0]).
+
+
+%% Start the local dbg tracer and enable call tracing in all
+%% processes; returns dbg:tracer/0's result.
+tracer() ->
+ Ret = dbg:tracer(),
+ {ok, _} = dbg:p(all, c),
+ Ret.
+
+%% Each x/0 below installs trace patterns for a functional area and
+%% its c_x/0 counterpart clears them; the MFA lists live in the
+%% Internal plumbing section.
+all() ->
+ tpl_list(all_args()).
+
+c_all() ->
+ ctpl_list(all_args()).
+
+supervision() ->
+ tpl_list(sup_args()).
+
+c_supervision() ->
+ ctpl_list(sup_args()).
+
+%% Trace connection lifecycle events. Uses ncl_args/0 — the
+%% connection-level function list (amqp_gen_connection,
+%% amqp_network_connection:connect, amqp_direct_connection:connect,
+%% ...). Previously both functions called cl_args/0, which is the
+%% *channel* lifecycle list used by channel_lifecycle/0 below, leaving
+%% ncl_args/0 reachable only through all_args/0.
+connection_lifecycle() ->
+ tpl_list(ncl_args()).
+
+c_connection_lifecycle() ->
+ ctpl_list(ncl_args()).
+
+%% Channels-manager lifecycle tracing (cml_args/0).
+channels_manager_lifecycle() ->
+ tpl_list(cml_args()).
+
+c_channels_manager_lifecycle() ->
+ ctpl_list(cml_args()).
+
+%% Channel lifecycle tracing (cl_args/0).
+channel_lifecycle() ->
+ tpl_list(cl_args()).
+
+c_channel_lifecycle() ->
+ ctpl_list(cl_args()).
+
+%% AMQP method send/receive tracing (m_args/0).
+methods() ->
+ tpl_list(m_args()).
+
+c_methods() ->
+ ctpl_list(m_args()).
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+%% Union of every trace-pattern list below.
+all_args() ->
+ sup_args() ++ ncl_args() ++ cml_args() ++ cl_args() ++
+ m_args().
+
+%% Supervision-tree start functions; traced with return values.
+sup_args() ->
+ [{amqp_connection_sup, start_link, return_ms()},
+ {amqp_connection_type_sup, start_link, return_ms()},
+ {amqp_channel_sup_sup, start_link, return_ms()},
+ {amqp_channel_sup_sup, start_channel_sup, return_ms()},
+ {amqp_channel_sup, start_link, return_ms()},
+ {amqp_network_connection, start_infrastructure, return_ms()},
+ {amqp_network_connection, start_heartbeat, return_ms()},
+ {amqp_channel, start_writer, return_ms()}].
+
+%% Network/connection lifecycle functions.
+ncl_args() ->
+ [{amqp_main_reader, start_link, return_ms()},
+ {amqp_gen_connection, set_closing_state, []},
+ {amqp_gen_connection, handle_channels_terminated, []},
+ {amqp_network_connection, connect, []},
+ {amqp_direct_connection, connect, []},
+ {amqp_gen_connection, terminate, []}].
+
+%% Channels-manager lifecycle functions.
+cml_args() ->
+ [{amqp_channels_manager, handle_open_channel, return_ms()},
+ {amqp_channels_manager, handle_channel_down, []},
+ {amqp_channels_manager, signal_channels_connection_closing, []}].
+
+%% Channel lifecycle functions.
+cl_args() ->
+ [{amqp_channel, init, []},
+ {amqp_channel_util, open_channel, []},
+ {amqp_channel, terminate, []}].
+
+%% AMQP method handling functions.
+m_args() ->
+ [{amqp_channel, do, return_ms()},
+ {amqp_channel, handle_method, []},
+ {amqp_gen_connection, handle_method, []},
+ {amqp_network_connection, do, return_ms()},
+ {amqp_network_connection, handshake_recv, return_ms()}].
+
+%% Install / clear one trace pattern per {Module, Func, MatchSpec}.
+tpl_list(ArgsList) ->
+ [{ok, _} = dbg:tpl(Module, Func, Ms) || {Module, Func, Ms} <- ArgsList],
+ ok.
+
+ctpl_list(ArgsList) ->
+ [{ok, _} = dbg:ctpl(Module, Func) || {Module, Func, _} <- ArgsList],
+ ok.
+
+%% Match spec that records return values (return_trace()).
+return_ms() ->
+ dbg:fun2ms(fun(_) -> return_trace() end).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(negative_test_util).
+
+-include("amqp_client_internal.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+%% Publishing to a non-existent exchange must kill only the offending
+%% channel; the connection and sibling channels must keep working.
+non_existent_exchange_test() ->
+ {ok, Connection} = test_util:new_connection(),
+ X = <<"test">>,
+ RoutingKey = <<"a">>,
+ Payload = <<"foobar">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, OtherChannel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+
+ %% Deliberately mix up the routingkey and exchange arguments
+ Publish = #'basic.publish'{exchange = RoutingKey, routing_key = X},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ test_util:wait_for_death(Channel),
+
+ %% Make sure Connection and OtherChannel still serve us and are not dead
+ {ok, _} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(OtherChannel, #'exchange.delete'{exchange = X}),
+ amqp_connection:close(Connection).
+
+%% A server-initiated channel close (404 not_found from binding a
+%% missing queue) must surface as an exit from the call, kill the
+%% channel, and leave the connection alive.
+bogus_rpc_test() ->
+ {ok, Connection} = test_util:new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ %% Deliberately bind to a non-existent queue
+ Bind = #'queue.bind'{exchange = <<"amq.topic">>,
+ queue = <<"does-not-exist">>,
+ routing_key = <<>>},
+ try amqp_channel:call(Channel, Bind) of
+ _ -> exit(expected_to_exit)
+ catch
+ exit:{{shutdown, {server_initiated_close, Code, _}},_} ->
+ ?assertMatch(?NOT_FOUND, Code)
+ end,
+ test_util:wait_for_death(Channel),
+ ?assertMatch(true, is_process_alive(Connection)),
+ amqp_connection:close(Connection).
+
+%% A connection-level ("hard") error — prefetch_size is not
+%% implemented by the broker — must close the whole connection and
+%% therefore take down sibling channels with the same
+%% connection_closing reason.
+hard_error_test() ->
+ {ok, Connection} = test_util:new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, OtherChannel} = amqp_connection:open_channel(Connection),
+ OtherChannelMonitor = erlang:monitor(process, OtherChannel),
+ Qos = #'basic.qos'{prefetch_size = 10000000},
+ try amqp_channel:call(Channel, Qos) of
+ _ -> exit(expected_to_exit)
+ catch
+ exit:{{shutdown, {connection_closing,
+ {server_initiated_close, ?NOT_IMPLEMENTED, _}}}, _} ->
+ ok
+ end,
+ receive
+ {'DOWN', OtherChannelMonitor, process, OtherChannel, OtherExit} ->
+ ?assertMatch({shutdown,
+ {connection_closing,
+ {server_initiated_close, ?NOT_IMPLEMENTED, _}}},
+ OtherExit)
+ end,
+ test_util:wait_for_death(Channel),
+ test_util:wait_for_death(Connection).
+
+%% An error in a channel should result in the death of the entire connection.
+%% The death of the channel is caused by an error in generating the frames
+%% (writer dies) - only in the network case.
+%% NOTE(review): props = <<>> (not a #'P_basic'{} record) appears to be
+%% the deliberate poison that makes frame generation fail — confirm.
+channel_writer_death_test() ->
+ {ok, Connection} = test_util:new_connection(just_network),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Publish = #'basic.publish'{routing_key = <<>>, exchange = <<>>},
+ QoS = #'basic.qos'{prefetch_count = 0},
+ Message = #amqp_msg{props = <<>>, payload = <<>>},
+ amqp_channel:cast(Channel, Publish, Message),
+ %% The subsequent synchronous call surfaces the failure as an exit.
+ ?assertExit(_, amqp_channel:call(Channel, QoS)),
+ test_util:wait_for_death(Channel),
+ test_util:wait_for_death(Connection),
+ ok.
+
+%% An error in the channel process should result in the death of the entire
+%% connection. The death of the channel is caused by making a call with an
+%% invalid message to the channel process.
+channel_death_test() ->
+ {ok, Connection} = test_util:new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ ?assertExit(_, amqp_channel:call(Channel, bogus_message)),
+ test_util:wait_for_death(Channel),
+ test_util:wait_for_death(Connection),
+ ok.
+
+%% Attempting to send a shortstr longer than 255 bytes in a property field
+%% should fail - this only applies to the network case. The 340-byte
+%% content_type overflows AMQP's shortstr limit during frame encoding.
+shortstr_overflow_property_test() ->
+ {ok, Connection} = test_util:new_connection(just_network),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ PBasic = #'P_basic'{content_type = SentString},
+ AmqpMsg = #amqp_msg{payload = <<"foobar">>, props = PBasic},
+ QoS = #'basic.qos'{prefetch_count = 0},
+ amqp_channel:cast(Channel, Publish, AmqpMsg),
+ %% The follow-up synchronous call surfaces the failure as an exit.
+ ?assertExit(_, amqp_channel:call(Channel, QoS)),
+ test_util:wait_for_death(Channel),
+ test_util:wait_for_death(Connection),
+ ok.
+
+%% Attempting to send a shortstr longer than 255 bytes in a method's field
+%% should fail - this only applies to the network case. Here the
+%% oversized value is the consumer_tag of basic.consume.
+shortstr_overflow_field_test() ->
+ {ok, Connection} = test_util:new_connection(just_network),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+ ?assertExit(_, amqp_channel:call(
+ Channel, #'basic.consume'{queue = Q,
+ no_ack = true,
+ consumer_tag = SentString})),
+ test_util:wait_for_death(Channel),
+ test_util:wait_for_death(Connection),
+ ok.
+
+%% Simulates a #'connection.open'{} method received on non-zero channel. The
+%% connection is expected to send a '#connection.close{}' to the server with
+%% reply code command_invalid. The injection mechanism differs by
+%% connection type (raw send_command vs. gen_server cast).
+command_invalid_over_channel_test() ->
+ {ok, Connection} = test_util:new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ MonitorRef = erlang:monitor(process, Connection),
+ case amqp_connection:info(Connection, [type]) of
+ [{type, direct}] -> Channel ! {send_command, #'connection.open'{}};
+ [{type, network}] -> gen_server:cast(Channel,
+ {method, #'connection.open'{}, none, noflow})
+ end,
+ assert_down_with_error(MonitorRef, command_invalid),
+ ?assertNot(is_process_alive(Channel)),
+ ok.
+
+%% Simulates a #'basic.ack'{} method received on channel zero. The connection
+%% is expected to send a '#connection.close{}' to the server with reply code
+%% command_invalid - this only applies to the network case.
+%% NOTE(review): the monitor is created after the cast; if the
+%% connection died first the DOWN reason would be noproc and the
+%% pattern match in assert_down_with_error would fail — confirm the
+%% ordering is safe (the cast is asynchronous, so in practice the
+%% monitor is likely established before the connection processes it).
+command_invalid_over_channel0_test() ->
+ {ok, Connection} = test_util:new_connection(just_network),
+ gen_server:cast(Connection, {method, #'basic.ack'{}, none, noflow}),
+ MonitorRef = erlang:monitor(process, Connection),
+ assert_down_with_error(MonitorRef, command_invalid),
+ ok.
+
+%% Wait (up to 2s) for a DOWN on MonitorRef whose reason is
+%% {shutdown, {server_misbehaved, Code, _}} and assert that Code maps
+%% to CodeAtom in the protocol's exception table.
+assert_down_with_error(MonitorRef, CodeAtom) ->
+ receive
+ {'DOWN', MonitorRef, process, _, Reason} ->
+ {shutdown, {server_misbehaved, Code, _}} = Reason,
+ ?assertMatch(CodeAtom, ?PROTOCOL:amqp_exception(Code))
+ after 2000 ->
+ exit(did_not_die)
+ end.
+
+%% Authentication/authorisation failures: bad user or password yield
+%% {error, {auth_failure, _}}; a missing vhost or a user without
+%% permissions yields {error, access_refused}.
+non_existent_user_test() ->
+ Params = [{username, <<"no-user">>}, {password, <<"no-user">>}],
+ ?assertMatch({error, {auth_failure, _}}, test_util:new_connection(Params)).
+
+invalid_password_test() ->
+ Params = [{username, <<"guest">>}, {password, <<"bad">>}],
+ ?assertMatch({error, {auth_failure, _}}, test_util:new_connection(Params)).
+
+non_existent_vhost_test() ->
+ Params = [{virtual_host, <<"oops">>}],
+ ?assertMatch({error, access_refused}, test_util:new_connection(Params)).
+
+no_permission_test() ->
+ Params = [{username, <<"test_user_no_perm">>},
+ {password, <<"test_user_no_perm">>}],
+ ?assertMatch({error, access_refused}, test_util:new_connection(Params)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(test_util).
+
+-include_lib("eunit/include/eunit.hrl").
+-include("amqp_client_internal.hrl").
+
+-compile([export_all]).
+
+-define(TEST_REPEATS, 100).
+
+%% The latch constant defines how many processes are spawned in order
+%% to run certain functionality in parallel. It follows the standard
+%% countdown latch pattern.
+-define(Latch, 100).
+
+%% The wait constant defines how long a consumer waits before it
+%% unsubscribes
+-define(Wait, 200).
+
+%% How long to wait for a process to die after an expected failure
+-define(DeathWait, 5000).
+
+%% AMQP URI parsing test: exercises amqp_uri:parse/1 against the
+%% spec's examples (scheme case-insensitivity, percent-decoding,
+%% defaulting to direct vs. network params, IPv6 literals) and a set
+%% of malformed URIs that must return {error, _}.
+amqp_uri_parse_test() ->
+ %% From the spec (adapted)
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "host",
+ port = 10000,
+ virtual_host = <<"vhost">>,
+ heartbeat = 5}},
+ amqp_uri:parse(
+ "amqp://user:pass@host:10000/vhost?heartbeat=5")),
+ %% %61 decodes to "a", %2f to "/" — hence "usera"/"hoast"/"v/host".
+ ?assertMatch({ok, #amqp_params_network{username = <<"usera">>,
+ password = <<"apass">>,
+ host = "hoast",
+ port = 10000,
+ virtual_host = <<"v/host">>}},
+ amqp_uri:parse(
+ "aMQp://user%61:%61pass@ho%61st:10000/v%2fhost")),
+ ?assertMatch({ok, #amqp_params_direct{}}, amqp_uri:parse("amqp://")),
+ ?assertMatch({ok, #amqp_params_direct{username = <<"">>,
+ virtual_host = <<"">>}},
+ amqp_uri:parse("amqp://:@/")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"">>,
+ password = <<"">>,
+ virtual_host = <<"">>,
+ host = "host"}},
+ amqp_uri:parse("amqp://:@host/")),
+ ?assertMatch({ok, #amqp_params_direct{username = <<"user">>}},
+ amqp_uri:parse("amqp://user@")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "localhost"}},
+ amqp_uri:parse("amqp://user:pass@localhost")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"/">>}},
+ amqp_uri:parse("amqp://host")),
+ ?assertMatch({ok, #amqp_params_network{port = 10000,
+ host = "localhost"}},
+ amqp_uri:parse("amqp://localhost:10000")),
+ ?assertMatch({ok, #amqp_params_direct{virtual_host = <<"vhost">>}},
+ amqp_uri:parse("amqp:///vhost")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"">>}},
+ amqp_uri:parse("amqp://host/")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"/">>}},
+ amqp_uri:parse("amqp://host/%2f")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1"}},
+ amqp_uri:parse("amqp://[::1]")),
+
+ %% Various other cases
+ ?assertMatch({ok, #amqp_params_network{host = "host", port = 100}},
+ amqp_uri:parse("amqp://host:100")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1", port = 100}},
+ amqp_uri:parse("amqp://[::1]:100")),
+
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://host/blah")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ port = 100,
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://host:100/blah")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1",
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://[::1]/blah")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1",
+ port = 100,
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://[::1]:100/blah")),
+
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "host"}},
+ amqp_uri:parse("amqp://user:pass@host")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ port = 100}},
+ amqp_uri:parse("amqp://user:pass@host:100")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "::1"}},
+ amqp_uri:parse("amqp://user:pass@[::1]")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "::1",
+ port = 100}},
+ amqp_uri:parse("amqp://user:pass@[::1]:100")),
+
+ %% Various failure cases
+ ?assertMatch({error, _}, amqp_uri:parse("http://www.rabbitmq.com")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:bar:baz")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo[::1]")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:[::1]")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://[::1]foo")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000xyz")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000000")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo/bar/baz")),
+
+ %% Invalid percent-escapes
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1x")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%xy")),
+
+ ok.
+
+%%--------------------------------------------------------------------
+%% Destination Parsing Tests
+%%--------------------------------------------------------------------
+
+%% Destination parsing: exercises rabbit_routing_util:parse_endpoint
+%% over every destination kind (/queue, /topic, /exchange,
+%% /temp-queue, /reply-queue, /amq/queue), %2F-escaped names, and the
+%% invalid/unknown forms that must return errors.
+route_destination_test() ->
+ %% valid queue
+ ?assertMatch({ok, {queue, "test"}}, parse_dest("/queue/test")),
+
+ %% valid topic
+ ?assertMatch({ok, {topic, "test"}}, parse_dest("/topic/test")),
+
+ %% valid exchange
+ ?assertMatch({ok, {exchange, {"test", undefined}}}, parse_dest("/exchange/test")),
+
+ %% valid temp queue
+ ?assertMatch({ok, {temp_queue, "test"}}, parse_dest("/temp-queue/test")),
+
+ %% valid reply queue (extra path segments stay part of the name)
+ ?assertMatch({ok, {reply_queue, "test"}}, parse_dest("/reply-queue/test")),
+ ?assertMatch({ok, {reply_queue, "test/2"}}, parse_dest("/reply-queue/test/2")),
+
+ %% valid exchange with pattern
+ ?assertMatch({ok, {exchange, {"test", "pattern"}}},
+ parse_dest("/exchange/test/pattern")),
+
+ %% valid pre-declared queue
+ ?assertMatch({ok, {amqqueue, "test"}}, parse_dest("/amq/queue/test")),
+
+ %% queue without name: an error unless anonymous queues are allowed
+ ?assertMatch({error, {invalid_destination, queue, ""}}, parse_dest("/queue")),
+ ?assertMatch({ok, {queue, undefined}}, parse_dest("/queue", true)),
+
+ %% topic without name
+ ?assertMatch({error, {invalid_destination, topic, ""}}, parse_dest("/topic")),
+
+ %% exchange without name
+ ?assertMatch({error, {invalid_destination, exchange, ""}},
+ parse_dest("/exchange")),
+
+ %% exchange default name
+ ?assertMatch({error, {invalid_destination, exchange, "//foo"}},
+ parse_dest("/exchange//foo")),
+
+ %% amqqueue without name
+ ?assertMatch({error, {invalid_destination, amqqueue, ""}},
+ parse_dest("/amq/queue")),
+
+ %% queue without name with trailing slash
+ ?assertMatch({error, {invalid_destination, queue, "/"}}, parse_dest("/queue/")),
+
+ %% topic without name with trailing slash
+ ?assertMatch({error, {invalid_destination, topic, "/"}}, parse_dest("/topic/")),
+
+ %% exchange without name with trailing slash
+ ?assertMatch({error, {invalid_destination, exchange, "/"}},
+ parse_dest("/exchange/")),
+
+ %% queue with invalid name
+ ?assertMatch({error, {invalid_destination, queue, "/foo/bar"}},
+ parse_dest("/queue/foo/bar")),
+
+ %% topic with invalid name
+ ?assertMatch({error, {invalid_destination, topic, "/foo/bar"}},
+ parse_dest("/topic/foo/bar")),
+
+ %% exchange with invalid name
+ ?assertMatch({error, {invalid_destination, exchange, "/foo/bar/baz"}},
+ parse_dest("/exchange/foo/bar/baz")),
+
+ %% unknown destination
+ ?assertMatch({error, {unknown_destination, "/blah/boo"}},
+ parse_dest("/blah/boo")),
+
+ %% queue with escaped name
+ ?assertMatch({ok, {queue, "te/st"}}, parse_dest("/queue/te%2Fst")),
+
+ %% valid exchange with escaped name and pattern
+ ?assertMatch({ok, {exchange, {"te/st", "pa/tt/ern"}}},
+ parse_dest("/exchange/te%2Fst/pa%2Ftt%2Fern")),
+
+ ok.
+
+parse_dest(Destination, Params) ->
+ rabbit_routing_util:parse_endpoint(Destination, Params).
+parse_dest(Destination) ->
+ rabbit_routing_util:parse_endpoint(Destination).
+
+%%%%
+%%
+%% This is an example of how the client interaction should work
+%%
+%% {ok, Connection} = amqp_connection:start(network),
+%% {ok, Channel} = amqp_connection:open_channel(Connection),
+%% %%...do something useful
+%% amqp_channel:close(Channel),
+%% amqp_connection:close(Connection).
+%%
+
+%% Declares a topic exchange and hammers it with ?Latch concurrent
+%% queue-declare/bind/delete workers before deleting the exchange.
+lifecycle_test() ->
+ {ok, Connection} = new_connection(),
+ X = <<"x">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel,
+ #'exchange.declare'{exchange = X,
+ type = <<"topic">>}),
+ Parent = self(),
+ [spawn(fun () -> queue_exchange_binding(Channel, X, Parent, Tag) end)
+ || Tag <- lists:seq(1, ?Latch)],
+ latch_loop(),
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel),
+ ok.
+
+%% A direct connection with neither username nor password must still
+%% open and close cleanly (direct connections can authenticate as the
+%% node itself).
+direct_no_user_test() ->
+ {ok, Connection} = new_connection(just_direct, [{username, none},
+ {password, none}]),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% As above, but with a username and no password.
+direct_no_password_test() ->
+ {ok, Connection} = new_connection(just_direct, [{username, <<"guest">>},
+ {password, none}]),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% Worker for lifecycle_test/0: declares a queue named after Tag, binds
+%% it to X, then deletes it and reports 'finished' to Parent.
+queue_exchange_binding(Channel, X, Parent, Tag) ->
+ receive
+ nothing -> ok
+ %% Staggered start: each worker waits a Tag-dependent time so the
+ %% declares do not all hit the channel at the same instant.
+ after (?Latch - Tag rem 7) * 10 ->
+ ok
+ end,
+ Q = <<"a.b.c", Tag:32>>,
+ Binding = <<"a.b.c.*">>,
+ #'queue.declare_ok'{queue = Q1}
+ = amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ ?assertMatch(Q, Q1),
+ Route = #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = Binding},
+ amqp_channel:call(Channel, Route),
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ Parent ! finished.
+
+%% exchange.declare with nowait = true returns plain 'ok' (no
+%% declare_ok is sent by the broker).
+nowait_exchange_declare_test() ->
+ {ok, Connection} = new_connection(),
+ X = <<"x">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ ?assertEqual(
+ ok,
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
+ type = <<"topic">>,
+ nowait = true})),
+ teardown(Connection, Channel).
+
+%% Open a channel, close it, then confirm the same connection can
+%% still open a fresh channel afterwards.
+channel_lifecycle_test() ->
+    {ok, Conn} = new_connection(),
+    {ok, FirstCh} = amqp_connection:open_channel(Conn),
+    amqp_channel:close(FirstCh),
+    {ok, SecondCh} = amqp_connection:open_channel(Conn),
+    teardown(Conn, SecondCh),
+    ok.
+
+%% Common driver for the *_serialization tests below: declares a topic
+%% exchange, runs BeforeFun once, then spawns ?Latch processes each
+%% performing OpsPerProcess MultiOpFun operations concurrently on the
+%% SAME channel, and finally runs AfterFun with the collected results.
+%% BeforeFun(Channel, X) -> term passed to the other two funs.
+%% MultiOpFun(Channel, X, Payload, BeforeRet, I) -> per-op result.
+%% AfterFun(Channel, X, Payload, BeforeRet, MultiOpRet) -> ignored.
+abstract_method_serialization_test(BeforeFun, MultiOpFun, AfterFun) ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ X = <<"test">>,
+ Payload = list_to_binary(["x" || _ <- lists:seq(1, 1000)]),
+ OpsPerProcess = 20,
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
+ type = <<"topic">>}),
+ BeforeRet = BeforeFun(Channel, X),
+ Parent = self(),
+ [spawn(fun () -> Ret = [MultiOpFun(Channel, X, Payload, BeforeRet, I)
+ || _ <- lists:seq(1, OpsPerProcess)],
+ Parent ! {finished, Ret}
+ end) || I <- lists:seq(1, ?Latch)],
+ MultiOpRet = latch_loop(),
+ AfterFun(Channel, X, Payload, BeforeRet, MultiOpRet),
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sync methods are properly serialized
+sync_method_serialization_test() ->
+ abstract_method_serialization_test(
+ fun (_, _) -> ok end,
+ fun (Channel, _, _, _, Count) ->
+ Q = fmt("test-~p", [Count]),
+ #'queue.declare_ok'{queue = Q1} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{queue = Q,
+ exclusive = true}),
+ ?assertMatch(Q, Q1)
+ end,
+ fun (_, _, _, _, _) -> ok end).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sending async methods and then a sync method is serialized
+%% properly
+async_sync_method_serialization_test() ->
+ abstract_method_serialization_test(
+ fun (Channel, _X) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ Q
+ end,
+ fun (Channel, X, Payload, _, _) ->
+ %% The async methods
+ ok = amqp_channel:call(Channel,
+ #'basic.publish'{exchange = X,
+ routing_key = <<"a">>},
+ #amqp_msg{payload = Payload})
+ end,
+ fun (Channel, X, _, Q, _) ->
+ %% The sync method
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Channel,
+ #'queue.bind'{exchange = X,
+ queue = Q,
+ routing_key = <<"a">>}),
+ %% No message should have been routed
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{queue = Q,
+ passive = true})
+ end).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sending sync methods and then an async method is serialized
+%% properly
+sync_async_method_serialization_test() ->
+ abstract_method_serialization_test(
+ fun (_, _) -> ok end,
+ fun (Channel, X, _Payload, _, _) ->
+ %% The sync methods (called with cast to resume immediately;
+ %% the order should still be preserved)
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{exclusive = true}),
+ amqp_channel:cast(Channel, #'queue.bind'{exchange = X,
+ queue = Q,
+ routing_key= <<"a">>}),
+ Q
+ end,
+ fun (Channel, X, Payload, _, MultiOpRet) ->
+ #'confirm.select_ok'{} = amqp_channel:call(
+ Channel, #'confirm.select'{}),
+ ok = amqp_channel:call(Channel,
+ #'basic.publish'{exchange = X,
+ routing_key = <<"a">>},
+ #amqp_msg{payload = Payload}),
+ %% All queues must have gotten this message
+ true = amqp_channel:wait_for_confirms(Channel),
+ lists:foreach(
+ fun (Q) ->
+ #'queue.declare_ok'{message_count = 1} =
+ amqp_channel:call(
+ Channel, #'queue.declare'{queue = Q,
+ passive = true})
+ end, lists:flatten(MultiOpRet))
+ end).
+
+%% Verifies queue.unbind: a message published before unbinding is
+%% routed; republishing the same message after unbinding leaves the
+%% queue empty.
+queue_unbind_test() ->
+ {ok, Connection} = new_connection(),
+ X = <<"eggs">>, Q = <<"foobar">>, Key = <<"quay">>,
+ Payload = <<"foobar">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+ amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ Bind = #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = Key},
+ amqp_channel:call(Channel, Bind),
+ Publish = #'basic.publish'{exchange = X, routing_key = Key},
+ amqp_channel:call(Channel, Publish, Msg = #amqp_msg{payload = Payload}),
+ get_and_assert_equals(Channel, Q, Payload),
+ Unbind = #'queue.unbind'{queue = Q,
+ exchange = X,
+ routing_key = Key},
+ amqp_channel:call(Channel, Unbind),
+ amqp_channel:call(Channel, Publish, Msg),
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel).
+
+%% Assert that a basic.get on Q returns get_empty (queue has no
+%% messages); crashes on badmatch otherwise.
+get_and_assert_empty(Channel, Q) ->
+    Get = #'basic.get'{queue = Q, no_ack = true},
+    #'basic.get_empty'{} = amqp_channel:call(Channel, Get).
+
+%% Fetch one message from Q with no_ack and assert its payload.
+get_and_assert_equals(Channel, Q, Payload) ->
+    get_and_assert_equals(Channel, Q, Payload, true).
+
+%% Fetch one message from Q (honouring NoAck), assert its payload
+%% equals Payload, and return the #'basic.get_ok'{} record so callers
+%% can e.g. ack via its delivery_tag.
+get_and_assert_equals(Channel, Q, Payload, NoAck) ->
+    Get = #'basic.get'{queue = Q, no_ack = NoAck},
+    {GetOk = #'basic.get_ok'{}, #amqp_msg{payload = Received}}
+        = amqp_channel:call(Channel, Get),
+    ?assertMatch(Payload, Received),
+    GetOk.
+
+%% basic.get round-trip over the default connection type.
+basic_get_test() ->
+ basic_get_test1(new_connection()).
+
+%% Same round-trip over an IPv6 loopback network connection.
+basic_get_ipv6_test() ->
+ basic_get_test1(new_connection(just_network, [{host, "::1"}])).
+
+%% Shared body: publish one message, get it back, then confirm the
+%% queue is empty.
+basic_get_test1({ok, Connection}) ->
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Q} = setup_publish(Channel),
+ get_and_assert_equals(Channel, Q, <<"foobar">>),
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel).
+
+%% A mandatory publish with an unroutable key must produce a
+%% basic.return to the registered return handler; after unregistering
+%% the handler, no return message may arrive at this process.
+basic_return_test() ->
+ {ok, Connection} = new_connection(),
+ X = <<"test">>,
+ Q = <<"test">>,
+ Key = <<"test">>,
+ Payload = <<"qwerty">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:register_return_handler(Channel, self()),
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+ %% Note: the queue is declared but never bound to X, so the
+ %% mandatory publish below cannot be routed.
+ amqp_channel:call(Channel, #'queue.declare'{queue = Q,
+ exclusive = true}),
+ Publish = #'basic.publish'{exchange = X, routing_key = Key,
+ mandatory = true},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ receive
+ {BasicReturn = #'basic.return'{}, Content} ->
+ #'basic.return'{reply_code = ReplyCode,
+ exchange = X} = BasicReturn,
+ ?assertMatch(?NO_ROUTE, ReplyCode),
+ #amqp_msg{payload = Payload2} = Content,
+ ?assertMatch(Payload, Payload2);
+ WhatsThis1 ->
+ exit({bad_message, WhatsThis1})
+ after 2000 ->
+ exit(no_return_received)
+ end,
+ amqp_channel:unregister_return_handler(Channel),
+ Publish = #'basic.publish'{exchange = X, routing_key = Key,
+ mandatory = true},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ %% 'ok = unexpected_return' deliberately badmatches if a return
+ %% still reaches us after unregistering.
+ ok = receive
+ {_BasicReturn = #'basic.return'{}, _Content} ->
+ unexpected_return;
+ WhatsThis2 ->
+ exit({bad_message, WhatsThis2})
+ after 2000 ->
+ ok
+ end,
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel).
+
+%% Serially open and close 50 channels on one connection; every close
+%% must return ok.
+channel_repeat_open_close_test() ->
+    {ok, Connection} = new_connection(),
+    [begin
+         {ok, Ch} = amqp_connection:open_channel(Connection),
+         ok = amqp_channel:close(Ch)
+     end || _ <- lists:seq(1, 50)],
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% Opens/closes 50 channels concurrently while the connection itself is
+%% being closed. Every racy outcome (close returning 'closing', the
+%% channel or connection process already being gone) is tolerated; the
+%% point is that nothing crashes the linked spawner.
+channel_multi_open_close_test() ->
+ {ok, Connection} = new_connection(),
+ [spawn_link(
+ fun() ->
+ try amqp_connection:open_channel(Connection) of
+ {ok, Ch} -> try amqp_channel:close(Ch) of
+ ok -> ok;
+ closing -> ok
+ catch
+ exit:{noproc, _} -> ok;
+ exit:{normal, _} -> ok
+ end;
+ closing -> ok
+ catch
+ exit:{noproc, _} -> ok;
+ exit:{normal, _} -> ok
+ end
+ end) || _ <- lists:seq(1, 50)],
+ %% Give the workers a chance to run before racing the close.
+ erlang:yield(),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% basic.ack sent asynchronously (cast) after a basic.get.
+basic_ack_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Q} = setup_publish(Channel),
+ {#'basic.get_ok'{delivery_tag = Tag}, _}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}),
+ teardown(Connection, Channel).
+
+%% Same as basic_ack_test/0 but sends the ack with call/2; basic.ack
+%% has no reply, so call must still work for async methods.
+basic_ack_call_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Q} = setup_publish(Channel),
+ {#'basic.get_ok'{delivery_tag = Tag}, _}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = Tag}),
+ teardown(Connection, Channel).
+
+%% ?Latch concurrent consumers on one channel, each with a distinct
+%% consumer tag, all receiving a single published message.
+basic_consume_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ X = <<"test">>,
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+ RoutingKey = <<"key">>,
+ Parent = self(),
+ [spawn_link(fun () ->
+ consume_loop(Channel, X, RoutingKey, Parent, <<Tag:32>>)
+ end) || Tag <- lists:seq(1, ?Latch)],
+ %% Crude barrier: wait for all consumers to have subscribed before
+ %% publishing. NOTE(review): sleep-based synchronisation; assumes
+ %% ?Latch * 20 ms is enough for all subscriptions to complete.
+ timer:sleep(?Latch * 20),
+ Publish = #'basic.publish'{exchange = X, routing_key = RoutingKey},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
+ latch_loop(),
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel).
+
+%% Worker for basic_consume_test/0: declare/bind a server-named queue,
+%% consume with the given Tag, wait for exactly one delivery, cancel,
+%% then report 'finished' to Parent.
+consume_loop(Channel, X, RoutingKey, Parent, Tag) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Channel, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = RoutingKey}),
+ #'basic.consume_ok'{} =
+ amqp_channel:call(Channel,
+ #'basic.consume'{queue = Q, consumer_tag = Tag}),
+ receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
+ receive {#'basic.deliver'{}, _} -> ok end,
+ #'basic.cancel_ok'{} =
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = Tag}),
+ receive #'basic.cancel_ok'{consumer_tag = Tag} -> ok end,
+ Parent ! finished.
+
+%% Deleting a queue that is being consumed from must deliver a
+%% server-initiated basic.cancel to the consumer.
+consume_notification_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ #'basic.consume_ok'{consumer_tag = CTag} = ConsumeOk =
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+ receive ConsumeOk -> ok end,
+ #'queue.delete_ok'{} =
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ receive #'basic.cancel'{consumer_tag = CTag} -> ok end,
+ amqp_channel:close(Channel),
+ ok.
+
+%% basic.recover with requeue = true must redeliver an unacked message.
+basic_recover_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(
+ Connection, {amqp_direct_consumer, [self()]}),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ #'basic.consume_ok'{consumer_tag = Tag} =
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+ receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
+ receive
+ {#'basic.deliver'{consumer_tag = Tag}, _} ->
+ %% no_ack set to false, but don't send ack
+ ok
+ end,
+ BasicRecover = #'basic.recover'{requeue = true},
+ amqp_channel:cast(Channel, BasicRecover),
+ %% The redelivery; ack it this time so teardown is clean.
+ receive
+ {#'basic.deliver'{consumer_tag = Tag,
+ delivery_tag = DeliveryTag2}, _} ->
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = DeliveryTag2})
+ end,
+ teardown(Connection, Channel).
+
+%% Races a client channel.close against a server-initiated close (from
+%% publishing to a non-existent exchange), then reuses the SAME channel
+%% number: the new channel must not be confused by any stale close_ok.
+simultaneous_close_test() ->
+ {ok, Connection} = new_connection(),
+ ChannelNumber = 5,
+ {ok, Channel1} = amqp_connection:open_channel(Connection, ChannelNumber),
+
+ %% Publish to non-existent exchange and immediately close channel
+ amqp_channel:cast(Channel1, #'basic.publish'{exchange = <<"does-not-exist">>,
+ routing_key = <<"a">>},
+ #amqp_msg{payload = <<"foobar">>}),
+ try amqp_channel:close(Channel1) of
+ ok -> wait_for_death(Channel1);
+ closing -> wait_for_death(Channel1)
+ catch
+ exit:{noproc, _} -> ok;
+ exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _}}, _} -> ok
+ end,
+
+ %% Channel2 (opened with the exact same number as Channel1)
+ %% should not receive a close_ok (which is intended for Channel1)
+ {ok, Channel2} = amqp_connection:open_channel(Connection, ChannelNumber),
+
+ %% Make sure Channel2 functions normally
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Channel2, #'exchange.declare'{exchange = <<"test">>}),
+ #'exchange.delete_ok'{} =
+ amqp_channel:call(Channel2, #'exchange.delete'{exchange = <<"test">>}),
+
+ teardown(Connection, Channel2).
+
+%% Connection must be able to negotiate a non-default channel_max.
+channel_tune_negotiation_test() ->
+ {ok, Connection} = new_connection([{channel_max, 10}]),
+ amqp_connection:close(Connection).
+
+%% Compares total consumption time with prefetch 0 (no QoS: broker
+%% round-robins regardless of consumer speed) vs prefetch 1 (work goes
+%% to the free consumer). With workers sleeping 5ms and 50ms per
+%% message, QoS should approach the ideal speed-up captured in
+%% ExpectedRatio; FudgeFactor absorbs scheduling noise.
+basic_qos_test() ->
+ [NoQos, Qos] = [basic_qos_test(Prefetch) || Prefetch <- [0,1]],
+ ExpectedRatio = (1+1) / (1+50/5),
+ FudgeFactor = 2, %% account for timing variations
+ ?assertMatch(true, Qos / NoQos < ExpectedRatio * FudgeFactor).
+
+%% Runs one timed round: two consumers (5ms and 50ms per-message
+%% sleeps) with the given prefetch, one producer sending 100 messages.
+%% Returns the elapsed microseconds for all messages to be consumed.
+basic_qos_test(Prefetch) ->
+ {ok, Connection} = new_connection(),
+ Messages = 100,
+ Workers = [5, 50],
+ Parent = self(),
+ {ok, Chan} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Chan, #'queue.declare'{}),
+ Kids = [spawn(
+ fun() ->
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel,
+ #'basic.qos'{prefetch_count = Prefetch}),
+ amqp_channel:call(Channel,
+ #'basic.consume'{queue = Q}),
+ Parent ! finished,
+ sleeping_consumer(Channel, Sleep, Parent)
+ end) || Sleep <- Workers],
+ latch_loop(length(Kids)),
+ spawn(fun() -> {ok, Channel} = amqp_connection:open_channel(Connection),
+ producer_loop(Channel, Q, Messages)
+ end),
+ %% Time how long it takes to collect one 'finished' per message.
+ {Res, _} = timer:tc(erlang, apply, [fun latch_loop/1, [Messages]]),
+ [Kid ! stop || Kid <- Kids],
+ latch_loop(length(Kids)),
+ teardown(Connection, Chan),
+ Res.
+
+%% Consumer that sleeps Sleep ms before acking each delivery and sends
+%% 'finished' to Parent per message; exits on 'stop'.
+sleeping_consumer(Channel, Sleep, Parent) ->
+ receive
+ stop ->
+ do_stop(Channel, Parent);
+ #'basic.consume_ok'{} ->
+ sleeping_consumer(Channel, Sleep, Parent);
+ #'basic.cancel_ok'{} ->
+ exit(unexpected_cancel_ok);
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _Content} ->
+ Parent ! finished,
+ receive stop -> do_stop(Channel, Parent)
+ after Sleep -> ok
+ end,
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = DeliveryTag}),
+ sleeping_consumer(Channel, Sleep, Parent)
+ end.
+
+%% Acknowledge the stop to Parent, close the consumer channel and exit.
+do_stop(Channel, Parent) ->
+ Parent ! finished,
+ amqp_channel:close(Channel),
+ wait_for_death(Channel),
+ exit(normal).
+
+%% Publish N empty messages to the default exchange with RoutingKey,
+%% then close the producer channel.
+producer_loop(Channel, _RoutingKey, 0) ->
+ amqp_channel:close(Channel),
+ wait_for_death(Channel),
+ ok;
+
+producer_loop(Channel, RoutingKey, N) ->
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = RoutingKey},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<>>}),
+ producer_loop(Channel, RoutingKey, N - 1).
+
+%% Publisher confirms: after confirm.select, a publish must be followed
+%% by a basic.ack to the registered confirm handler.
+confirm_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Channel, self()),
+ {ok, Q} = setup_publish(Channel),
+ {#'basic.get_ok'{}, _}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 2000 ->
+ exit(did_not_receive_pub_ack)
+ end,
+ teardown(Connection, Channel).
+
+%% wait_for_confirms/1 must block until all 1000 outstanding publishes
+%% are confirmed (exercising multi-acks) and return true.
+confirm_barrier_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(Channel, #'basic.publish'{routing_key = <<"whoosh">>},
+ #amqp_msg{payload = <<"foo">>})
+ || _ <- lists:seq(1, 1000)], %% Hopefully enough to get a multi-ack
+ true = amqp_channel:wait_for_confirms(Channel),
+ teardown(Connection, Channel).
+
+%% Calling wait_for_confirms/1 without confirm.select must throw
+%% not_in_confirm_mode rather than return.
+confirm_select_before_wait_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ try amqp_channel:wait_for_confirms(Channel) of
+ _ -> exit(success_despite_lack_of_confirm_mode)
+ catch
+ not_in_confirm_mode -> ok
+ end,
+ teardown(Connection, Channel).
+
+%% With a zero timeout, wait_for_confirms/2 may legitimately either
+%% finish in time (true) or time out; both are acceptable here.
+confirm_barrier_timeout_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(Channel, #'basic.publish'{routing_key = <<"whoosh">>},
+ #amqp_msg{payload = <<"foo">>})
+ || _ <- lists:seq(1, 1000)],
+ case amqp_channel:wait_for_confirms(Channel, 0) of
+ true -> ok;
+ timeout -> ok
+ end,
+ teardown(Connection, Channel).
+
+%% As above but with wait_for_confirms_or_die/2, which exits with
+%% 'timeout' (killing the channel) instead of returning it.
+confirm_barrier_die_timeout_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(Channel, #'basic.publish'{routing_key = <<"whoosh">>},
+ #amqp_msg{payload = <<"foo">>})
+ || _ <- lists:seq(1, 1000)],
+ try amqp_channel:wait_for_confirms_or_die(Channel, 0) of
+ true -> ok
+ catch
+ exit:timeout -> ok
+ end,
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% When a subscribed consumer process dies, deliveries for its tag must
+%% be rerouted to the registered default consumer (this process).
+default_consumer_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_selective_consumer:register_default_consumer(Channel, self()),
+
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{}),
+ %% A throwaway consumer process that just idles until killed.
+ Pid = spawn(fun () -> receive
+ after 10000 -> ok
+ end
+ end),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Channel, #'basic.consume'{queue = Q}, Pid),
+ erlang:monitor(process, Pid),
+ exit(Pid, shutdown),
+ receive
+ {'DOWN', _, process, _, _} ->
+ io:format("little consumer died out~n")
+ end,
+ Payload = <<"for the default consumer">>,
+ amqp_channel:call(Channel,
+ #'basic.publish'{exchange = <<>>, routing_key = Q},
+ #amqp_msg{payload = Payload}),
+
+ receive
+ {#'basic.deliver'{}, #'amqp_msg'{payload = Payload}} ->
+ ok
+ after 1000 ->
+ exit('default_consumer_didnt_work')
+ end,
+ teardown(Connection, Channel).
+
+%% basic.consume/cancel with nowait = true return plain ok and emit no
+%% consume_ok; deliveries then go to the default consumer (us).
+subscribe_nowait_test() ->
+ {ok, Conn} = new_connection(),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ {ok, Q} = setup_publish(Ch),
+ CTag = <<"ctag">>,
+ amqp_selective_consumer:register_default_consumer(Ch, self()),
+ ok = amqp_channel:call(Ch, #'basic.consume'{queue = Q,
+ consumer_tag = CTag,
+ nowait = true}),
+ ok = amqp_channel:call(Ch, #'basic.cancel' {consumer_tag = CTag,
+ nowait = true}),
+ ok = amqp_channel:call(Ch, #'basic.consume'{queue = Q,
+ consumer_tag = CTag,
+ nowait = true}),
+ receive
+ #'basic.consume_ok'{} ->
+ exit(unexpected_consume_ok);
+ {#'basic.deliver'{delivery_tag = DTag}, _Content} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag})
+ end,
+ teardown(Conn, Ch).
+
+%% basic.nack with requeue = false must drop the message: a subsequent
+%% get finds the queue empty.
+basic_nack_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{}),
+
+ Payload = <<"m1">>,
+
+ amqp_channel:call(Channel,
+ #'basic.publish'{exchange = <<>>, routing_key = Q},
+ #amqp_msg{payload = Payload}),
+
+ #'basic.get_ok'{delivery_tag = Tag} =
+ get_and_assert_equals(Channel, Q, Payload, false),
+
+ amqp_channel:call(Channel, #'basic.nack'{delivery_tag = Tag,
+ multiple = false,
+ requeue = false}),
+
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel).
+
+%% Round-trips a ~1MB random payload (1000 repetitions of a 1000-byte
+%% random binary) through a queue and checks it comes back intact.
+large_content_test() ->
+    {ok, Connection} = new_connection(),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'queue.declare_ok'{queue = Q}
+        = amqp_channel:call(Channel, #'queue.declare'{}),
+    %% Use the 'rand' module for the random bytes: 'now/0' and the
+    %% 'random' module are deprecated, and 'rand' seeds itself on
+    %% first use so no explicit seeding is required.
+    F = list_to_binary([rand:uniform(256) - 1 || _ <- lists:seq(1, 1000)]),
+    Payload = list_to_binary([[F || _ <- lists:seq(1, 1000)]]),
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+    get_and_assert_equals(Channel, Q, Payload),
+    teardown(Connection, Channel).
+
+%% ----------------------------------------------------------------------------
+%% Test for the network client
+%% Sends a bunch of messages and immediately closes the connection without
+%% closing the channel. Then gets the messages back from the queue and expects
+%% all of them to have been sent.
+pub_and_close_test() ->
+ {ok, Connection1} = new_connection(just_network),
+ Payload = <<"eggs">>,
+ NMessages = 50000,
+ {ok, Channel1} = amqp_connection:open_channel(Connection1),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel1, #'queue.declare'{}),
+ %% Send messages
+ pc_producer_loop(Channel1, <<>>, Q, Payload, NMessages),
+ %% Close connection without closing channels
+ amqp_connection:close(Connection1),
+ %% Get sent messages back and count them
+ {ok, Connection2} = new_connection(just_network),
+ {ok, Channel2} = amqp_connection:open_channel(
+ Connection2, {amqp_direct_consumer, [self()]}),
+ amqp_channel:call(Channel2, #'basic.consume'{queue = Q, no_ack = true}),
+ receive #'basic.consume_ok'{} -> ok end,
+ ?assert(pc_consumer_loop(Channel2, Payload, 0) == NMessages),
+ %% Make sure queue is empty
+ #'queue.declare_ok'{queue = Q, message_count = NRemaining} =
+ amqp_channel:call(Channel2, #'queue.declare'{queue = Q,
+ passive = true}),
+ ?assert(NRemaining == 0),
+ amqp_channel:call(Channel2, #'queue.delete'{queue = Q}),
+ teardown(Connection2, Channel2),
+ ok.
+
+%% Publish Payload NRemaining times to exchange X with routing key Key.
+pc_producer_loop(_, _, _, _, 0) -> ok;
+pc_producer_loop(Channel, X, Key, Payload, NRemaining) ->
+ Publish = #'basic.publish'{exchange = X, routing_key = Key},
+ ok = amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ pc_producer_loop(Channel, X, Key, Payload, NRemaining - 1).
+
+%% Count deliveries whose payload matches Payload, returning the count
+%% once no delivery has arrived for 1 second.
+pc_consumer_loop(Channel, Payload, NReceived) ->
+ receive
+ {#'basic.deliver'{},
+ #amqp_msg{payload = DeliveredPayload}} ->
+ case DeliveredPayload of
+ Payload ->
+ pc_consumer_loop(Channel, Payload, NReceived + 1);
+ _ ->
+ exit(received_unexpected_content)
+ end
+ after 1000 ->
+ NReceived
+ end.
+
+%%---------------------------------------------------------------------------
+%% This tests whether RPC over AMQP produces the same result as invoking the
+%% same argument against the same underlying gen_server instance.
+rpc_test() ->
+ {ok, Connection} = new_connection(),
+ Fun = fun(X) -> X + 1 end,
+ %% Server-side handler: decode the request term, apply Fun, encode.
+ RPCHandler = fun(X) -> term_to_binary(Fun(binary_to_term(X))) end,
+ Q = <<"rpc-test">>,
+ Server = amqp_rpc_server:start(Connection, Q, RPCHandler),
+ Client = amqp_rpc_client:start(Connection, Q),
+ Input = 1,
+ Reply = amqp_rpc_client:call(Client, term_to_binary(Input)),
+ Expected = Fun(Input),
+ DecodedReply = binary_to_term(Reply),
+ ?assertMatch(Expected, DecodedReply),
+ amqp_rpc_client:stop(Client),
+ amqp_rpc_server:stop(Server),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection),
+ ok.
+
+%% This tests if the RPC continues to generate valid correlation ids
+%% over a series of requests.
+rpc_client_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Q = <<"rpc-test">>,
+ Latch = 255, % enough requests to tickle bad correlation ids
+ %% Start a server to return correlation ids to the client.
+ Server = spawn_link(fun() ->
+ rpc_correlation_server(Channel, Q)
+ end),
+ %% Generate a series of RPC requests on the same client.
+ Client = amqp_rpc_client:start(Connection, Q),
+ Parent = self(),
+ [spawn(fun() ->
+ Reply = amqp_rpc_client:call(Client, <<>>),
+ Parent ! {finished, Reply}
+ end) || _ <- lists:seq(1, Latch)],
+ %% Verify that the correlation ids are valid UTF-8 strings.
+ CorrelationIds = latch_loop(Latch),
+ [?assertMatch(<<_/binary>>, DecodedId)
+ || DecodedId <- [unicode:characters_to_binary(Id, utf8)
+ || Id <- CorrelationIds]],
+ %% Cleanup.
+ Server ! stop,
+ amqp_rpc_client:stop(Client),
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ teardown(Connection, Channel),
+ ok.
+
+%% Consumer of RPC requests that replies with the CorrelationId.
+rpc_correlation_server(Channel, Q) ->
+ amqp_channel:register_return_handler(Channel, self()),
+ amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q,
+ consumer_tag = <<"server">>}),
+ rpc_client_consume_loop(Channel),
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = <<"server">>}),
+ amqp_channel:unregister_return_handler(Channel).
+
+%% Echo each request's correlation_id back to its reply_to queue and
+%% ack it; exits after 3s of silence, ignoring any other message.
+rpc_client_consume_loop(Channel) ->
+ receive
+ stop ->
+ ok;
+ {#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = Props}} ->
+ #'P_basic'{correlation_id = CorrelationId,
+ reply_to = Q} = Props,
+ Properties = #'P_basic'{correlation_id = CorrelationId},
+ Publish = #'basic.publish'{exchange = <<>>,
+ routing_key = Q,
+ mandatory = true},
+ amqp_channel:call(
+ Channel, Publish, #amqp_msg{props = Properties,
+ payload = CorrelationId}),
+ amqp_channel:call(
+ Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ rpc_client_consume_loop(Channel);
+ _ ->
+ rpc_client_consume_loop(Channel)
+ after 3000 ->
+ exit(no_request_received)
+ end.
+
+%%---------------------------------------------------------------------------
+
+%% connection.blocked, connection.unblocked
+
+%% Raises a memory resource alarm on the broker while a blocked-handler
+%% process is registered, and checks the handler sees
+%% connection.blocked followed (after clearing the alarm) by
+%% connection.unblocked. Alarms are cleared on every exit path.
+connection_blocked_network_test() ->
+ {ok, Connection} = new_connection(just_network),
+ X = <<"amq.direct">>,
+ K = Payload = <<"x">>,
+ %% Start from a known state in case a previous test left an alarm.
+ clear_resource_alarm(memory),
+ timer:sleep(1000),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Parent = self(),
+ Child = spawn_link(
+ fun() ->
+ receive
+ #'connection.blocked'{} -> ok
+ end,
+ clear_resource_alarm(memory),
+ receive
+ #'connection.unblocked'{} -> ok
+ end,
+ Parent ! ok
+ end),
+ amqp_connection:register_blocked_handler(Connection, Child),
+ set_resource_alarm(memory),
+ %% Publishing while blocked makes the broker notice this connection.
+ Publish = #'basic.publish'{exchange = X,
+ routing_key = K},
+ amqp_channel:call(Channel, Publish,
+ #amqp_msg{payload = Payload}),
+ timer:sleep(1000),
+ receive
+ ok ->
+ clear_resource_alarm(memory),
+ clear_resource_alarm(disk),
+ ok
+ after 10000 ->
+ clear_resource_alarm(memory),
+ clear_resource_alarm(disk),
+ exit(did_not_receive_connection_blocked)
+ end.
+
+%%---------------------------------------------------------------------------
+
+%% Declare an exclusive server-named queue and publish <<"foobar">> to
+%% it via the default exchange; returns {ok, QueueName}.
+setup_publish(Channel) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+ ok = amqp_channel:call(Channel, #'basic.publish'{exchange = <<>>,
+ routing_key = Q},
+ #amqp_msg{payload = <<"foobar">>}),
+ {ok, Q}.
+
+%% Close Channel then Connection, waiting for each process to die.
+teardown(Connection, Channel) ->
+ amqp_channel:close(Channel),
+ wait_for_death(Channel),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% teardown/2 must actually kill both the channel and the connection.
+teardown_test() ->
+ {ok, Connection} = new_connection(),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ ?assertMatch(true, is_process_alive(Channel)),
+ ?assertMatch(true, is_process_alive(Connection)),
+ teardown(Connection, Channel),
+ ?assertMatch(false, is_process_alive(Channel)),
+ ?assertMatch(false, is_process_alive(Connection)).
+
+%% Block until Pid terminates (any reason) or ?DeathWait ms elapse.
+wait_for_death(Pid) ->
+ Ref = erlang:monitor(process, Pid),
+ receive {'DOWN', Ref, process, Pid, _Reason} -> ok
+ after ?DeathWait -> exit({timed_out_waiting_for_process_death, Pid})
+ end.
+
+%% Countdown latch: wait for ?Latch (or Latch) 'finished' /
+%% {finished, Ret} messages, returning the collected Ret values.
+latch_loop() ->
+ latch_loop(?Latch, []).
+
+latch_loop(Latch) ->
+ latch_loop(Latch, []).
+
+latch_loop(0, Acc) ->
+ Acc;
+latch_loop(Latch, Acc) ->
+ receive
+ finished -> latch_loop(Latch - 1, Acc);
+ {finished, Ret} -> latch_loop(Latch - 1, [Ret | Acc])
+ after ?Latch * ?Wait -> exit(waited_too_long)
+ end.
+
+new_connection() ->
+ new_connection(both, []).
+
+new_connection(AllowedConnectionTypes) when is_atom(AllowedConnectionTypes) ->
+ new_connection(AllowedConnectionTypes, []);
+new_connection(Params) when is_list(Params) ->
+ new_connection(both, Params).
+
+new_connection(AllowedConnectionTypes, Params) ->
+ Params1 =
+ case {AllowedConnectionTypes,
+ os:getenv("AMQP_CLIENT_TEST_CONNECTION_TYPE")} of
+ {just_direct, "network"} ->
+ exit(normal);
+ {just_direct, "network_ssl"} ->
+ exit(normal);
+ {just_network, "direct"} ->
+ exit(normal);
+ {_, "network"} ->
+ make_network_params(Params);
+ {_, "network_ssl"} ->
+ {ok, [[CertsDir]]} = init:get_argument(erlang_client_ssl_dir),
+ make_network_params(
+ [{ssl_options, [{cacertfile,
+ CertsDir ++ "/testca/cacert.pem"},
+ {certfile, CertsDir ++ "/client/cert.pem"},
+ {keyfile, CertsDir ++ "/client/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, true}]}] ++ Params);
+ {_, "direct"} ->
+ make_direct_params([{node, rabbit_nodes:make(rabbit)}] ++
+ Params)
+ end,
+ amqp_connection:start(Params1).
+
%% Build an #amqp_params_network{} from a proplist of overrides, filling
%% in test defaults. Note: not all amqp_params_network fields supported.
make_network_params(Props) ->
    Get = fun (Key, Default) ->
                  proplists:get_value(Key, Props, Default)
          end,
    #amqp_params_network{username     = Get(username, <<"guest">>),
                         password     = Get(password, <<"guest">>),
                         virtual_host = Get(virtual_host, <<"/">>),
                         channel_max  = Get(channel_max, 0),
                         ssl_options  = Get(ssl_options, none),
                         host         = Get(host, "localhost")}.
+
%% Build an #amqp_params_direct{} from a proplist of overrides, filling
%% in test defaults. Note: not all amqp_params_direct fields supported.
make_direct_params(Props) ->
    Get = fun (Key, Default) ->
                  proplists:get_value(Key, Props, Default)
          end,
    #amqp_params_direct{username     = Get(username, <<"guest">>),
                        password     = Get(password, <<"guest">>),
                        virtual_host = Get(virtual_host, <<"/">>),
                        node         = Get(node, node())}.
+
%% Raise a resource alarm of the given kind (memory | disk) on the broker
%% under test, by shelling out to the rabbitmq-test makefile. Returns the
%% command output.
set_resource_alarm(Source) when Source =:= memory; Source =:= disk ->
    resource_alarm_cmd("set-resource-alarm", Source).

%% Clear a previously raised resource alarm of the given kind.
clear_resource_alarm(Source) when Source =:= memory; Source =:= disk ->
    resource_alarm_cmd("clear-resource-alarm", Source).

%% Shared shell invocation; the callers' guards restrict Source to the
%% two known atoms, so atom_to_list/1 cannot inject arbitrary shell text.
resource_alarm_cmd(Target, Source) ->
    os:cmd("cd ../rabbitmq-test; make " ++ Target ++
               " SOURCE=" ++ atom_to_list(Source)).
+
+fmt(Fmt, Args) -> list_to_binary(rabbit_misc:format(Fmt, Args)).
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Adds information on federation link status to the management
+plugin. Build it like any other plugin.
+
If you have a heterogeneous cluster (where the nodes have different
+plugins installed), this should be installed on the same nodes as the
+management plugin.
+
+The HTTP API is very simple: GET /api/federation-links.
--- /dev/null
+RELEASABLE:=true
+DEPS:=rabbitmq-management
+
+CONSTRUCT_APP_PREREQS:=$(shell find $(PACKAGE_DIR)/priv -type f)
+define construct_app_commands
+ cp -r $(PACKAGE_DIR)/priv $(APP_DIR)
+endef
--- /dev/null
// Register the federation plugin's pages and actions with the management
// UI's Sammy-based router.
dispatcher_add(function(sammy) {
    // Link status page: the federation links (vhost-filterable), plus the
    // vhost list used for the filter control.
    sammy.get('#/federation', function() {
        render({'links': {path: '/federation-links',
                          options: {vhost: true}},
                'vhosts': '/vhosts'},
               'federation', '#/federation');
    });
    // Upstreams page: federation-upstream parameters, vhosts, and the
    // global federation parameters.
    sammy.get('#/federation-upstreams', function() {
        render({'upstreams': {path: '/parameters/federation-upstream',
                              options: {vhost: true}},
                'vhosts': '/vhosts',
                'globals': '/parameters/federation'},
               'federation-upstreams', '#/federation-upstreams');
    });
    // Detail page for a single upstream, addressed by vhost and name.
    sammy.get('#/federation-upstreams/:vhost/:id', function() {
        render({'upstream': '/parameters/federation-upstream/' + esc(this.params['vhost']) + '/' + esc(this.params['id'])},
               'federation-upstream', '#/federation');
    });
    // Create/update an upstream. The key lists tell put_parameter which
    // form fields to treat as numeric / boolean / array-valued.
    sammy.put('#/fed-parameters', function() {
        var num_keys = ['expires', 'message-ttl', 'max-hops',
                        'prefetch-count', 'reconnect-delay'];
        var bool_keys = ['trust-user-id'];
        var arrayable_keys = ['uri'];
        put_parameter(this, [], num_keys, bool_keys, arrayable_keys);
        return false;
    });
    // Delete an upstream, then return to the upstreams listing.
    sammy.del('#/fed-parameters', function() {
        if (sync_delete(this, '/parameters/:component/:vhost/:name'))
            go_to('#/federation-upstreams');
        return false;
    });
});
+
// Menu entries under Admin; the second element is the minimum user tag
// required for the item to be shown.
NAVIGATION['Admin'][0]['Federation Status'] = ['#/federation', "monitoring"];
NAVIGATION['Admin'][0]['Federation Upstreams'] = ['#/federation-upstreams', "policymaker"];

// Context-sensitive help texts; each key matches a "help" span id in the
// federation templates.
HELP['federation-uri'] =
    'URI to connect to. If upstream is a cluster and can have several URIs, you can enter them here separated by spaces.';

HELP['federation-expires'] =
    'Time in milliseconds that the upstream should remember about this node for. After this time all upstream state will be removed. Leave this blank to mean "forever".';

HELP['federation-ttl'] =
    'Time in milliseconds that undelivered messages should be held upstream when there is a network outage or backlog. Leave this blank to mean "forever".';

HELP['federation-max-hops'] =
    'Maximum number of federation links that messages can traverse before being dropped. Defaults to 1 if not set.';

HELP['federation-prefetch'] =
    'Maximum number of unacknowledged messages that may be in flight over a federation link at one time. Defaults to 1000 if not set.';

HELP['federation-reconnect'] =
    'Time in seconds to wait after a network link goes down before attempting reconnection. Defaults to 1 if not set.';

HELP['federation-ack-mode'] =
    '<dl>\
<dt><code>on-confirm</code></dt>\
<dd>Messages are acknowledged to the upstream broker after they have been confirmed downstream. Handles network errors and broker failures without losing messages. The slowest option, and the default.</dd>\
<dt><code>on-publish</code></dt>\
<dd>Messages are acknowledged to the upstream broker after they have been published downstream. Handles network errors without losing messages, but may lose messages in the event of broker failures.</dd>\
<dt><code>no-ack</code></dt>\
<dd>Message acknowledgements are not used. The fastest option, but may lose messages in the event of network or broker failures.</dd>\
</dl>';

HELP['federation-trust-user-id'] =
    'Set "Yes" to preserve the "user-id" field across a federation link, even if the user-id does not match that used to republish the message. Set to "No" to clear the "user-id" field when messages are federated. Only set this to "Yes" if you trust the upstream broker not to forge user-ids.';
+
// Render an HTML link to the detail page of the given upstream.
function link_fed_conn(vhost, name) {
    var url = '#/federation-upstreams/' + esc(vhost) + '/' + esc(name);
    return _link_to(fmt_escape_html(name), url);
}
--- /dev/null
+<h1>Federation Upstream: <b><%= fmt_string(upstream.name) %></b></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <table class="facts">
+ <tr>
+ <th>Virtual Host</th>
+ <td><%= fmt_string(upstream.vhost) %></td>
+ </tr>
+ <tr>
+ <th>URI</th>
+ <td><%= fmt_string(upstream.value.uri) %></td>
+ </tr>
+ <tr>
+ <th>Expires</th>
+ <td><%= fmt_time(upstream.value.expires, 'ms') %></td>
+ </tr>
+ <tr>
+ <th>Message TTL</th>
+ <td><%= fmt_time(upstream.value['message-ttl'], 'ms') %></td>
+ </tr>
+ <tr>
+ <th>Max Hops</th>
+ <td><%= fmt_string(upstream.value['max-hops']) %></td>
+ </tr>
+ <tr>
+ <th>Prefetch Count</th>
+ <td><%= fmt_string(upstream.value['prefetch-count']) %></td>
+ </tr>
+ <tr>
+ <th>Reconnect Delay</th>
+ <td><%= fmt_time(upstream.value['reconnect-delay'], 's') %></td>
+ </tr>
+ <tr>
+ <th>Ack Mode</th>
+ <td><%= fmt_string(upstream.value['ack-mode']) %></td>
+ </tr>
+ <tr>
+ <th>Trust User-ID</th>
+ <td><%= fmt_boolean(upstream.value['trust-user-id']) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this upstream</h2>
+ <div class="hider">
+ <form action="#/fed-parameters" method="delete" class="confirm">
+ <input type="hidden" name="component" value="federation-upstream"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(upstream.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(upstream.name) %>"/>
+ <input type="submit" value="Delete this upstream"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Federation Upstreams</h1>
+<div class="section">
+ <h2>Upstreams</h2>
+ <div class="hider updatable">
+<% if (upstreams.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th>URI</th>
+ <th>Expiry</th>
+ <th>Message TTL</th>
+ <th>Max Hops</th>
+ <th>Prefetch Count</th>
+ <th>Reconnect Delay</th>
+ <th>Ack mode</th>
+ <th>Trust User-ID</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < upstreams.length; i++) {
+ var upstream = upstreams[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(upstream.vhost) %></td>
+<% } %>
+ <td><%= link_fed_conn(upstream.vhost, upstream.name) %></td>
+ <td><%= fmt_shortened_uri(upstream.value.uri) %></td>
+ <td class="r"><%= fmt_time(upstream.value.expires, 'ms') %></td>
+ <td class="r"><%= fmt_time(upstream.value['message-ttl'], 'ms') %></td>
+ <td class="r"><%= upstream.value['max-hops'] %></td>
+ <td class="r"><%= upstream.value['prefetch-count'] %></td>
+ <td class="r"><%= fmt_time(upstream.value['reconnect-delay'], 's') %></td>
+ <td class="c"><%= fmt_string(upstream.value['ack-mode']) %></td>
+ <td class="c"><%= fmt_boolean(upstream.value['trust-user-id']) %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no upstreams ...</p>
+<% } %>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new upstream</h2>
+ <div class="hider">
+ <form action="#/fed-parameters" method="put">
+ <input type="hidden" name="component" value="federation-upstream"/>
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ URI:
+ <span class="help" id="federation-uri"></span>
+ </label>
+ </th>
+ <td><input type="text" name="uri"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Expires:
+ <span class="help" id="federation-expires"></span>
+ </label>
+ </th>
+ <td><input type="text" name="expires"/> ms</td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Message TTL:
+ <span class="help" id="federation-ttl"></span>
+ </label>
+ </th>
+ <td><input type="text" name="message-ttl"/> ms</td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Max hops:
+ <span class="help" id="federation-max-hops"></span>
+ </label>
+ </th>
+ <td><input type="text" name="max-hops"/></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Prefetch count:
+ <span class="help" id="federation-prefetch"></span>
+ </label>
+ </th>
+ <td><input type="text" name="prefetch-count"/></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Reconnect delay:
+ <span class="help" id="federation-reconnect"></span>
+ </label>
+ </th>
+ <td><input type="text" name="reconnect-delay"/> s</td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Acknowledgement Mode:
+ <span class="help" id="federation-ack-mode"></span>
+ </label>
+ </th>
+ <td>
+ <select name="ack-mode">
+ <option value="on-confirm">On confirm</option>
+ <option value="on-publish">On publish</option>
+ <option value="no-ack">No ack</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Trust User-ID:
+ <span class="help" id="federation-trust-user-id"></span>
+ </label>
+ </th>
+ <td>
+ <select name="trust-user-id">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add upstream"/>
+ </form>
+ </div>
+</div>
+<div class="section-hidden">
+ <h2>URI examples</h2>
+ <div class="hider">
+ <ul>
+ <li>
+ <code>amqp://server-name</code><br/>
+ connect to server-name, without SSL and default credentials
+ </li>
+ <li>
+ <code>amqp://user:password@server-name/my-vhost</code><br/>
+ connect to server-name, with credentials and overridden
+ virtual host
+ </li>
+ <li>
      <code>amqps://user:password@server-name?cacertfile=/path/to/cacert.pem&amp;certfile=/path/to/cert.pem&amp;keyfile=/path/to/key.pem&amp;verify=verify_peer</code><br/>
+ connect to server-name, with credentials and SSL
+ </li>
+ <li>
      <code>amqps://server-name?cacertfile=/path/to/cacert.pem&amp;certfile=/path/to/cert.pem&amp;keyfile=/path/to/key.pem&amp;verify=verify_peer&amp;fail_if_no_peer_cert=true&amp;auth_mechanism=external</code><br/>
+ connect to server-name, with SSL and EXTERNAL authentication
+ </li>
+ </ul>
+ </div>
+</div>
--- /dev/null
+<h1>Federation Status</h1>
+<div class="section">
+ <h2>Running Links</h2>
+ <div class="hider updatable">
+<% if (links.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th>Upstream</th>
+ <th>URI</th>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Exchange / Queue</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+ <th>State</th>
+ <th>Inbound message rate</th>
+ <th>Last changed</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < links.length; i++) {
+ var link = links[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= fmt_string(link.upstream) %>
+ <% if (link.type == 'exchange' &&
+ link.exchange != link.upstream_exchange) { %>
+ <sub><%= fmt_string(link.upstream_exchange) %></sub>
+ <% } else if (link.type == 'queue' &&
+ link.queue != link.upstream_queue) { %>
+ <sub><%= fmt_string(link.upstream_queue) %></sub>
+ <% } %>
+ </td>
+ <td><%= fmt_string(link.uri) %></td>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(link.vhost) %></td>
+<% } %>
+ <td>
+ <% if (link.type == 'exchange') { %>
+ <%= link_exchange(link.vhost, link.exchange) %>
+ <% } else { %>
+ <%= link_queue(link.vhost, link.queue) %>
+ <% } %>
+ <sub><%= fmt_string(link.type) %></sub>
+ </td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(link.node) %></td>
+<% } %>
+<% if (link.error) { %>
+ <td>
+ <%= fmt_state('red', link.status) %>
+ </td>
+ <td></td>
+ <td><%= link.timestamp %></td>
+ </tr>
+ <tr>
      <td colspan="<%= 6 + (vhosts_interesting ? 1 : 0) + (nodes_interesting ? 1 : 0) %>">
+ Error detail:
+ <pre><%= fmt_escape_html(link.error) %></pre>
+ </td>
+ </tr>
+<% } else { %>
+ <td>
+ <%= fmt_state(link.status == 'starting' ? 'yellow' : 'green', link.status) %>
+ </td>
+ <td class="r">
+ <% if (link.local_channel) { %>
+ <%= fmt_rate(link.local_channel.message_stats, 'confirm') %>
+ <% } %>
+ </td>
+ <td><%= link.timestamp %></td>
+ </tr>
+<% } %>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no links ...</p>
+<% } %>
+</div>
+</div>
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
%% Management-plugin extension exposing federation link status over the
%% HTTP API (GET /api/federation-links, optionally scoped to a vhost)
%% and adding the federation pages to the web UI.
-module(rabbit_federation_mgmt).

-behaviour(rabbit_mgmt_extension).

%% rabbit_mgmt_extension callbacks
-export([dispatcher/0, web_ui/0]).
%% webmachine resource callbacks
-export([init/1, to_json/2, resource_exists/2, content_types_provided/2,
         is_authorized/2]).

-import(rabbit_misc, [pget/2]).

-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").

%% Two routes: all links, or the links of a single vhost.
dispatcher() -> [{["federation-links"], ?MODULE, []},
                 {["federation-links", vhost], ?MODULE, []}].
web_ui() -> [{javascript, <<"federation.js">>}].
+
+%%--------------------------------------------------------------------
+
%% webmachine: fresh per-request context.
init(_Config) -> {ok, #context{}}.

%% webmachine: we only ever produce JSON.
content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

%% webmachine: the resource exists iff the vhost named in the URL (if
%% any) does.
resource_exists(ReqData, Context) ->
    Exists = rabbit_mgmt_util:vhost(ReqData) =/= not_found,
    {Exists, ReqData, Context}.
+
%% webmachine: render the link status list, restricted to the requested
%% vhost (if any) and to the vhosts the user is permitted to see.
to_json(ReqData, Context) ->
    Range = rabbit_mgmt_util:range(ReqData),
    Channels = rabbit_mgmt_db:get_all_channels(Range),
    Statuses = status(Channels, ReqData, Context),
    rabbit_mgmt_util:reply_list(
      filter_vhost(Statuses, ReqData), ReqData, Context).
+
%% webmachine: viewing link status requires the "monitoring" user tag.
is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
%% Restrict List to entries of the vhost named in the request, or pass
%% everything through when the request is not scoped to a single vhost.
filter_vhost(List, ReqData) ->
    Keep = fun (VHost) ->
                   [I || I <- List, pget(vhost, I) =:= VHost]
           end,
    rabbit_mgmt_util:all_or_one_vhost(ReqData, Keep).
+
%% Collect formatted status entries from every node in the cluster,
%% then drop entries for vhosts the requesting user may not see.
status(Chs, ReqData, Context) ->
    AllNodes = [node() | nodes()],
    Entries = lists:append([status(Node, Chs) || Node <- AllNodes]),
    rabbit_mgmt_util:filter_vhost(Entries, ReqData, Context).
+
%% Fetch raw status entries from one node. Nodes without the federation
%% plugin (undef) or whose status process is down (noproc) contribute
%% nothing rather than failing the whole request.
status(Node, Chs) ->
    Result = rpc:call(Node, rabbit_federation_status, status, [], infinity),
    case Result of
        {badrpc, {'EXIT', {Reason, _}}} when Reason =:= undef;
                                             Reason =:= noproc ->
            [];
        Status ->
            [format(Node, I, Chs) || I <- Status]
    end.
+
%% Decorate one status entry from rabbit_federation_status with the node
%% it came from and, when we can find it, management data for the link's
%% channel on its local connection.
format(Node, Info, Chs) ->
    LocalConn = pget(local_connection, Info),
    Matching = [Ch || Ch <- Chs,
                      pget(name, pget(connection_details, Ch)) =:= LocalConn],
    %% Was "[Ch] ->": a connection can carry more than one channel, in
    %% which case the old single-element match crashed with case_clause.
    %% Take the first match instead.
    LocalCh = case rabbit_mgmt_format:strip_pids(Matching) of
                  [Ch | _] -> [{local_channel, Ch}];
                  []       -> []
              end,
    [{node, Node} | format_info(Info)] ++ LocalCh.
+
%% Make status items JSON-friendly: timestamps become readable binaries,
%% error terms become printable text, everything else passes through.
format_info(Items) ->
    lists:map(fun format_item/1, Items).

format_item({timestamp, {{Y, M, D}, {H, Min, S}}}) ->
    Printed = print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w",
                    [Y, M, D, H, Min, S]),
    {timestamp, Printed};
format_item({error, E}) ->
    {error, rabbit_mgmt_format:print("~p", [E])};
format_item(Other) ->
    Other.

%% Format and flatten to a binary.
print(Fmt, Val) ->
    list_to_binary(io_lib:format(Fmt, Val)).
--- /dev/null
%% OTP application resource file; %%VSN%% is substituted with the real
%% version at build time.
{application, rabbitmq_federation_management,
 [{description, "RabbitMQ Federation Management"},
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  {env, []},
  {applications, [kernel, stdlib, rabbitmq_management]}
 ]}.
--- /dev/null
+CHAIN_TESTS=true
+include ../umbrella.mk
--- /dev/null
+Generic build instructions are at:
+ http://www.rabbitmq.com/plugin-development.html
+
+See http://www.rabbitmq.com/federation.html
--- /dev/null
+This file is intended to tell you How It All Works, concentrating on
+the things you might not expect.
+
+The theory
+==========
+
+The 'x-federation' exchange is defined in
+rabbit_federation_exchange. This starts up a bunch of link processes
+(one for each upstream) which:
+
+ * Connect to the upstream broker
+ * Create a queue and bind it to the upstream exchange
+ * Keep bindings in sync with the downstream exchange
+ * Consume messages from the upstream queue and republish them to the
+ downstream exchange (matching confirms with acks)
+
Each link process monitors the connections / channels it opens, and
dies if any of them dies. We use a supervisor2 to ensure that we get
some backoff when restarting.
+
+We use process groups to identify all link processes for a certain
+exchange, as well as all link processes together.
+
+However, there are a bunch of wrinkles:
+
+
+Wrinkle: The exchange will be recovered when the Erlang client is not available
+===============================================================================
+
+Exchange recovery happens within the rabbit application - therefore at
+the time that the exchange is recovered, we can't make any connections
+since the amqp_client application has not yet started. Each link
+therefore initially has a state 'not_started'. When it is created it
+checks to see if the rabbitmq_federation application is running. If
+so, it starts fully. If not, it goes into the 'not_started'
+state. When rabbitmq_federation starts, it sends a 'go' message to all
+links, prodding them to bring up the link.
+
+
+Wrinkle: On reconnect we want to assert bindings atomically
+===========================================================
+
+If the link goes down for whatever reason, then by the time it comes
+up again the bindings downstream may no longer be in sync with those
+upstream. Therefore on link establishment we want to ensure that a
+certain set of bindings exists. (Of course bringing up a link for the
+first time is a simple case of this.) And we want to do this with AMQP
+methods. But if we were to tear down all bindings and recreate them,
+we would have a time period when messages would not be forwarded for
+bindings that *do* still exist before and after.
+
We use exchange-to-exchange bindings to work around this:
+
+We bind the upstream exchange (X) to the upstream queue (Q) via an
+internal fanout exchange (IXA) like so: (routing keys R1 and R2):
+
+ X----R1,R2--->IXA---->Q
+
+This has the same effect as binding the queue to the exchange directly.
+
+Now imagine the link has gone down, and is about to be
+reestablished. In the meanwhile, routing has changed downstream so
+that we now want routing keys R1 and R3. On link reconnection we can
+create and bind another internal fanout exchange IXB:
+
+ X----R1,R2--->IXA---->Q
+ | ^
+ | |
+ \----R1,R3--->IXB-----/
+
+and then delete the original exchange IXA:
+
+ X Q
+ | ^
+ | |
+ \----R1,R3--->IXB-----/
+
+This means that messages matching R1 are always routed during the
+switchover. Messages for R3 will start being routed as soon as we bind
+the second exchange, and messages for R2 will be stopped in a timely
+way. Of course this could lag the downstream situation somewhat, in
+which case some R2 messages will get thrown away downstream since they
+are unroutable. However this lag is inevitable when the link goes
+down.
+
+This means that the downstream only needs to keep track of whether the
+upstream is currently going via internal exchange A or B. This is
+held in the exchange scratch space in Mnesia.
+
+
+Wrinkle: We need to amalgamate bindings
+=======================================
+
+Since we only bind to one exchange upstream, but the downstream
+exchange can be bound to many queues, we can have duplicated bindings
+downstream (same source, routing key and args but different
+destination) that cannot be duplicated upstream (since the destination
+is the same). The link therefore maintains a mapping of (Key, Args) to
+set(Dest). Duplicated bindings do not get repeated upstream, and are
+only unbound upstream when the last one goes away downstream.
+
+Furthermore, this works as an optimisation since this will tend to
+reduce upstream binding count and churn.
+
+
+Wrinkle: We may receive binding events out of order
+===================================================
+
+The rabbit_federation_exchange callbacks are invoked by channel
+processes within rabbit. Therefore they can be executed concurrently,
+and can arrive at the link processes in an order that does not
+correspond to the wall clock.
+
+We need to keep the state of the link in sync with Mnesia. Therefore
+not only do we need to impose an ordering on these events, we need to
+impose Mnesia's ordering on them. We therefore added a function to the
+callback interface, serialise_events. When this returns true, the
+callback mechanism inside rabbit increments a per-exchange counter
+within an Mnesia transaction, and returns the value as part of the
+add_binding and remove_binding callbacks. The link process then queues
+up these events, and replays them in order. The link process's state
+thus always follows Mnesia (it may be delayed, but the effects happen
+in the same order).
+
+
+Other issues
+============
+
+Since links are implemented in terms of AMQP, link failure may cause
+messages to be redelivered. If you're unlucky this could lead to
+duplication.
+
+Message duplication can also happen with some topologies. In some
+cases it may not be possible to set max_hops such that messages arrive
+once at every node.
+
+While we correctly order bind / unbind events, we don't do the same
+thing for exchange creation / deletion. (This is harder - if you
+delete and recreate an exchange with the same name, is it the same
+exchange? What about if its type changes?) This would only be an issue
+if exchanges churn rapidly; however we could get into a state where
+Mnesia sees CDCD but we see CDDC and leave a process running when we
+shouldn't.
--- /dev/null
+#!/bin/sh
+CTL=$1
+
+# Test direct connections
+$CTL set_parameter federation-upstream localhost '{"uri": "amqp://"}'
+# We will test the guest:guest gets stripped out in user_id_test
+$CTL set_parameter federation-upstream local5673 '{"uri": "amqp://guest:guest@localhost:5673"}'
+
+$CTL set_parameter federation-upstream-set upstream '[{"upstream": "localhost", "exchange": "upstream", "queue": "upstream"}]'
+$CTL set_parameter federation-upstream-set upstream2 '[{"upstream": "localhost", "exchange": "upstream2", "queue": "upstream2"}]'
+$CTL set_parameter federation-upstream-set localhost '[{"upstream": "localhost"}]'
+$CTL set_parameter federation-upstream-set upstream12 '[{"upstream": "localhost", "exchange": "upstream", "queue": "upstream"},
+ {"upstream": "localhost", "exchange": "upstream2", "queue": "upstream2"}]'
+$CTL set_parameter federation-upstream-set one '[{"upstream": "localhost", "exchange": "one", "queue": "one"}]'
+$CTL set_parameter federation-upstream-set two '[{"upstream": "localhost", "exchange": "two", "queue": "two"}]'
+$CTL set_parameter federation-upstream-set upstream5673 '[{"upstream": "local5673", "exchange": "upstream"}]'
+
+$CTL set_policy fed "^fed\." '{"federation-upstream-set": "upstream"}'
+$CTL set_policy fed12 "^fed12\." '{"federation-upstream-set": "upstream12"}'
+$CTL set_policy one "^two$" '{"federation-upstream-set": "one"}'
+$CTL set_policy two "^one$" '{"federation-upstream-set": "two"}'
+$CTL set_policy hare "^hare\." '{"federation-upstream-set": "upstream5673"}'
+$CTL set_policy all "^all\." '{"federation-upstream-set": "all"}'
+$CTL set_policy new "^new\." '{"federation-upstream-set": "new-set"}'
--- /dev/null
+#!/bin/sh -e
+sh -e `dirname $0`/rabbit-test.sh "`dirname $0`/../../rabbitmq-server/scripts/rabbitmqctl -n rabbit-test"
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
%% Configuration of a single federation upstream, as defined by a
%% federation-upstream parameter. Field meanings match the management UI
%% help texts for the corresponding parameter keys.
-record(upstream, {uris,            %% AMQP URI(s) to connect to
                   exchange_name,   %% upstream exchange to federate
                   queue_name,      %% upstream queue to federate
                   prefetch_count,  %% max unacked msgs in flight on the link
                   max_hops,        %% max links a message may traverse
                   reconnect_delay, %% seconds to wait before reconnecting
                   expires,         %% ms the upstream keeps state for us
                   message_ttl,     %% ms undelivered msgs are held upstream
                   trust_user_id,   %% preserve user-id across the link?
                   ack_mode,        %% on-confirm | on-publish | no-ack
                   ha_policy,       %% NOTE(review): presumably an HA policy
                                    %% for upstream resources -- confirm
                   name}).          %% name of the upstream parameter

%% What a link needs to talk to an upstream over one concrete URI.
-record(upstream_params,
        {uri,
         params,    %% connection parameters derived from uri
         x_or_q,    %% the upstream exchange or queue resource
         %% The next two can be derived from the above three, but we don't
         %% want to do that every time we forward a message.
         safe_uri,  %% NOTE(review): presumably uri with credentials
                    %% removed, for reporting -- confirm
         table}).   %% NOTE(review): table form of the above -- confirm

%% Header / argument names used on federated messages and bindings.
-define(ROUTING_HEADER, <<"x-received-from">>).
-define(BINDING_HEADER, <<"x-bound-from">>).
-define(MAX_HOPS_ARG, <<"x-max-hops">>).
-define(NODE_NAME_ARG, <<"x-downstream-name">>).
%% Default prefetch when none is configured (management UI documents the
%% default as 1000).
-define(DEF_PREFETCH, 1000).
--- /dev/null
RELEASABLE:=true
DEPS:=rabbitmq-erlang-client rabbitmq-test
# Test-selection filter and coverage switch; both overridable on the
# command line.
FILTER:=all
COVER:=false
# In-broker and multi-node standalone test entry points (rabbit_test_runner).
WITH_BROKER_TEST_COMMANDS:=rabbit_test_runner:run_in_broker(\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\")
WITH_BROKER_SETUP_SCRIPTS:=$(PACKAGE_DIR)/etc/setup-rabbit-test.sh
STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),\"/tmp/rabbitmq-multi-node/plugins\")

# NB: we cannot use PACKAGE_DIR in the body of this rule as it gets
# expanded at the wrong time and set to the value of a completely
# arbitrary package!
$(PACKAGE_DIR)+pre-test:: $(PACKAGE_DIR)+dist
	rm -rf /tmp/rabbitmq-multi-node/plugins
	mkdir -p /tmp/rabbitmq-multi-node/plugins/plugins
	cp -p $(UMBRELLA_BASE_DIR)/rabbitmq-federation/dist/*.ez /tmp/rabbitmq-multi-node/plugins/plugins
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+
+%% All of our actual server processes are supervised by
+%% rabbit_federation_sup, which is started by a rabbit_boot_step
+%% (since it needs to start up before queue / exchange recovery, so it
+%% can't be part of our application).
+%%
+%% However, we still need an application behaviour since we need to
+%% know when our application has started since then the Erlang client
+%% will have started and we can therefore start our links going. Since
+%% the application behaviour needs a tree of processes to supervise,
+%% this is it...
+-behaviour(supervisor).
+-export([init/1]).
+
%% application callback: by the time this runs the Erlang client is
%% available (see comments above), so prod the exchange and queue links
%% into action, then start the childless supervisor that satisfies the
%% application behaviour.
start(_Type, _StartArgs) ->
    rabbit_federation_exchange_link:go(),
    rabbit_federation_queue_link:go(),
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% application callback: nothing to clean up.
stop(_State) ->
    ok.
+%%----------------------------------------------------------------------------
+
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_db).
+
+-include("rabbit_federation.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(DICT, orddict).
+
+-export([get_active_suffix/3, set_active_suffix/3, prune_scratch/2]).
+
+%%----------------------------------------------------------------------------
+
%% Look up which internal-exchange suffix is currently active for this
%% Upstream on exchange XName, returning Default when either the exchange
%% or the scratch entry is missing.
get_active_suffix(XName, Upstream, Default) ->
    case rabbit_exchange:lookup_scratch(XName, federation) of
        {error, not_found} ->
            Default;
        {ok, Dict} ->
            case ?DICT:find(key(Upstream), Dict) of
                {ok, Suffix} -> Suffix;
                error        -> Default
            end
    end.
+
%% Record Suffix as the active internal exchange for Upstream on XName.
set_active_suffix(XName, Upstream, Suffix) ->
    Store = fun (D) -> ?DICT:store(key(Upstream), Suffix, ensure(D)) end,
    ok = rabbit_exchange:update_scratch(XName, federation, Store).
+
%% Drop scratch entries belonging to upstreams no longer in Upstreams.
prune_scratch(XName, Upstreams) ->
    Live = [key(U) || U <- Upstreams],
    Prune = fun (D) ->
                    ?DICT:filter(fun (K, _V) -> lists:member(K, Live) end,
                                 ensure(D))
            end,
    ok = rabbit_exchange:update_scratch(XName, federation, Prune).
+
%% Scratch-space keys identify an upstream by its parameter name plus the
%% upstream exchange name.
key(#upstream{name = UpstreamName, exchange_name = XNameBin}) ->
    {UpstreamName, XNameBin}.

%% A never-written scratch comes back as 'undefined'; treat that as an
%% empty dict.
ensure(undefined) -> ?DICT:new();
ensure(D)         -> D.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_event).
+-behaviour(gen_event).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([add_handler/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+
+%%----------------------------------------------------------------------------
+
%% Install this module as a handler on RabbitMQ's internal event bus.
add_handler() ->
    gen_event:add_handler(rabbit_event, ?MODULE, []).

%% gen_event: no handler state is needed.
init([]) ->
    {ok, []}.

%% gen_event: we expect no synchronous calls.
handle_call(_Request, State) ->
    {ok, not_understood, State}.
+
%% When the global 'cluster_name' parameter is set, tell the federation
%% parameter code to re-adjust everything; every other event is ignored.
handle_event(#event{type = parameter_set, props = Props}, State) ->
    Component = pget(component, Props),
    Name = pget(name, Props),
    case Component =:= global andalso Name =:= cluster_name of
        true  -> rabbit_federation_parameters:adjust(everything);
        false -> ok
    end,
    {ok, State};
handle_event(_Event, State) ->
    {ok, State}.
+
+%% gen_event callback: ignore stray messages.
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%% gen_event callback: nothing to clean up.
+terminate(_Arg, _State) ->
+ ok.
+
+%% gen_event callback: state format is unchanged across upgrades.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% TODO rename this
+-module(rabbit_federation_exchange).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation exchange decorator"},
+ {mfa, {rabbit_registry, register,
+ [exchange_decorator, <<"federation">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-behaviour(rabbit_exchange_decorator).
+
+-export([description/0, serialise_events/1]).
+-export([create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, route/2, active_for/1]).
+
+%%----------------------------------------------------------------------------
+
+%% Decorator metadata, surfaced via rabbit_registry.
+description() ->
+ [{description, <<"Federation exchange decorator">>}].
+
+serialise_events(X) -> federate(X).
+
+%% Decorator callback: start federation links once exchange creation has
+%% left the transaction phase; do nothing inside the transaction.
+create(none, X)         -> maybe_start(X);
+create(transaction, _X) -> ok.
+
+%% Decorator callback: stop the links when the exchange is deleted; do
+%% nothing inside the transaction.
+delete(none, X, _Bs)         -> maybe_stop(X);
+delete(transaction, _X, _Bs) -> ok.
+
+%% A policy change may add or remove federation for this exchange, so
+%% stop links for the old definition and start any the new one needs.
+policy_changed(OldX, NewX) ->
+ maybe_stop(OldX),
+ maybe_start(NewX).
+
+%% Forward a new binding (with its serial) to the link processes, but
+%% only for federated exchanges and never during the transaction phase.
+add_binding(transaction, _X, _B) ->
+    ok;
+add_binding(Serial, X = #exchange{name = XName}, B) ->
+    case federate(X) of
+        false -> ok;
+        true  -> rabbit_federation_exchange_link:add_binding(Serial, XName, B),
+                 ok
+    end.
+
+%% As add_binding/3, but forwarding binding removals to the links.
+remove_bindings(transaction, _X, _Bs) ->
+    ok;
+remove_bindings(Serial, X = #exchange{name = XName}, Bs) ->
+    case federate(X) of
+        false -> ok;
+        true  -> rabbit_federation_exchange_link:remove_bindings(
+                   Serial, XName, Bs),
+                 ok
+    end.
+
+route(_, _) -> [].
+
+%% The decorator is active (but contributes no routes, hence 'noroute')
+%% exactly when we federate the exchange.
+active_for(X) ->
+    case federate(X) of
+        false -> none;
+        true  -> noroute
+    end.
+
+%%----------------------------------------------------------------------------
+
+%% Don't federate default exchange, we can't bind to it
+federate(#exchange{name = #resource{name = <<"">>}}) ->
+ false;
+
+%% Don't federate any of our intermediate exchanges. Note that we use
+%% internal=true since older brokers may not declare
+%% x-federation-upstream on us. Also other internal exchanges should
+%% probably not be federated.
+federate(#exchange{internal = true}) ->
+ false;
+
+%% Otherwise defer to the upstream (policy/parameter) lookup.
+federate(X) ->
+ rabbit_federation_upstream:federate(X).
+
+%% If X is federated: prune scratch state for upstreams that are no
+%% longer configured, then start its link supervisor.
+maybe_start(X = #exchange{name = XName})->
+ case federate(X) of
+ true -> ok = rabbit_federation_db:prune_scratch(
+ XName, rabbit_federation_upstream:for(X)),
+ ok = rabbit_federation_exchange_link_sup_sup:start_child(X),
+ ok;
+ false -> ok
+ end.
+
+%% If X is federated: stop its link supervisor and clear its entry from
+%% the federation status table.
+maybe_stop(X = #exchange{name = XName}) ->
+ case federate(X) of
+ true -> ok = rabbit_federation_exchange_link_sup_sup:stop_child(X),
+ rabbit_federation_status:remove_exchange_or_queue(XName);
+ false -> ok
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_exchange_link).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(gen_server2).
+
+-export([go/0, add_binding/3, remove_bindings/3]).
+-export([list_routing_keys/1]). %% For testing
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_federation_util, [name/1, vhost/1, pgname/1]).
+
+-record(state, {upstream,
+ upstream_params,
+ upstream_name,
+ connection,
+ channel,
+ consumer_tag,
+ queue,
+ internal_exchange,
+ waiting_cmds = gb_trees:empty(),
+ next_serial,
+ bindings = dict:new(),
+ downstream_connection,
+ downstream_channel,
+ downstream_exchange,
+ unacked}).
+
+%%----------------------------------------------------------------------------
+
+%% We start off in a state where we do not connect, since we can first
+%% start during exchange recovery, when rabbit is not fully started
+%% and the Erlang client is not running. This then gets invoked when
+%% the federation app is started.
+go() -> cast(go).
+
+add_binding(S, XN, B) -> cast(XN, {enqueue, S, {add_binding, B}}).
+remove_bindings(S, XN, Bs) -> cast(XN, {enqueue, S, {remove_bindings, Bs}}).
+
+list_routing_keys(XN) -> call(XN, list_routing_keys).
+
+%%----------------------------------------------------------------------------
+
+%% Started per upstream by the link supervisor; Args is {Upstream, XName}.
+start_link(Args) ->
+ gen_server2:start_link(?MODULE, Args, [{timeout, infinity}]).
+
+init({Upstream, XName}) ->
+ %% If we are starting up due to a policy change then it's possible
+ %% for the exchange to have been deleted before we got here, in which
+ %% case it's possible that delete callback would also have been called
+ %% before we got here. So check if we still exist.
+ case rabbit_exchange:lookup(XName) of
+ {ok, X} ->
+ UParams = rabbit_federation_upstream:to_params(Upstream, X),
+ rabbit_federation_status:report(Upstream, UParams, XName, starting),
+ join(rabbit_federation_exchanges),
+ join({rabbit_federation_exchange, XName}),
+ %% Defer connecting until the federation app is up; see go/0.
+ gen_server2:cast(self(), maybe_go),
+ {ok, {not_started, {Upstream, UParams, XName}}};
+ {error, not_found} ->
+ {stop, gone}
+ end.
+
+%% Testing hook: the distinct routing keys among recorded bindings.
+handle_call(list_routing_keys, _From, State = #state{bindings = Bindings}) ->
+ {reply, lists:sort([K || {K, _} <- dict:fetch_keys(Bindings)]), State};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+%% First start-up attempt: only connect if the federation app is up,
+%% otherwise wait for the explicit 'go' broadcast.
+handle_cast(maybe_go, S0 = {not_started, _Args}) ->
+ case federation_up() of
+ true -> go(S0);
+ false -> {noreply, S0}
+ end;
+
+handle_cast(go, S0 = {not_started, _Args}) ->
+ go(S0);
+
+%% There's a small race - I think we can realise federation is up
+%% before 'go' gets invoked. Ignore.
+handle_cast(go, State) ->
+ {noreply, State};
+
+%% Binding events arriving before we are connected are dropped; go/1
+%% reads the full binding list anyway.
+handle_cast({enqueue, _, _}, State = {not_started, _}) ->
+ {noreply, State};
+
+%% Buffer the command under its serial, then replay whatever is now in
+%% order (commands can arrive out of order from different channels).
+handle_cast({enqueue, Serial, Cmd}, State = #state{waiting_cmds = Waiting}) ->
+ Waiting1 = gb_trees:insert(Serial, Cmd, Waiting),
+ {noreply, play_back_commands(State#state{waiting_cmds = Waiting1})};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+%% Downstream confirm: relay the ack to the upstream channel.
+handle_info(#'basic.ack'{} = Ack, State = #state{channel = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:ack(Ack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+%% Downstream nack: relay the nack to the upstream channel.
+handle_info(#'basic.nack'{} = Nack, State = #state{channel = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:nack(Nack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+%% A message delivered from the upstream queue: republish it to the
+%% downstream exchange, subject to the hop-count forwarding check.
+handle_info({#'basic.deliver'{routing_key = Key,
+ redelivered = Redelivered} = DeliverMethod, Msg},
+ State = #state{
+ upstream = Upstream = #upstream{max_hops = MaxH},
+ upstream_params = UParams,
+ upstream_name = UName,
+ downstream_exchange = #resource{name = XNameBin},
+ downstream_channel = DCh,
+ channel = Ch,
+ unacked = Unacked}) ->
+ PublishMethod = #'basic.publish'{exchange = XNameBin,
+ routing_key = Key},
+ %% TODO add user information here?
+ HeadersFun = fun (H) -> update_headers(UParams, UName, Redelivered, H) end,
+ %% We need to check should_forward/2 here in case the upstream
+ %% does not have federation and thus is using a fanout exchange.
+ ForwardFun = fun (H) ->
+ DName = rabbit_nodes:cluster_name(),
+ rabbit_federation_util:should_forward(H, MaxH, DName)
+ end,
+ Unacked1 = rabbit_federation_link_util:forward(
+ Upstream, DeliverMethod, Ch, DCh, PublishMethod,
+ HeadersFun, ForwardFun, Msg, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+%% The upstream broker cancelled our consumer; treat as an error and
+%% restart the link.
+handle_info(#'basic.cancel'{}, State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName}) ->
+ rabbit_federation_link_util:connection_error(
+ local, basic_cancel, Upstream, UParams, XName, State);
+
+%% One of our monitored channels died; shared handling in link_util.
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{downstream_channel = DCh,
+ channel = Ch,
+ upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName}) ->
+ rabbit_federation_link_util:handle_down(
+ Pid, Reason, Ch, DCh, {Upstream, UParams, XName}, State);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+%% Nothing was opened yet; nothing to close.
+terminate(_Reason, {not_started, _}) ->
+ ok;
+
+%% Close both connections (best effort) and log why we went down.
+terminate(Reason, #state{downstream_connection = DConn,
+ connection = Conn,
+ upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName}) ->
+ rabbit_federation_link_util:ensure_connection_closed(DConn),
+ rabbit_federation_link_util:ensure_connection_closed(Conn),
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, XName),
+ ok.
+
+%% State format is unchanged across upgrades.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+call(XName, Msg) -> [gen_server2:call(Pid, Msg, infinity) || Pid <- x(XName)].
+cast(Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- all()].
+cast(XName, Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- x(XName)].
+
+%% Join a pg2_fixed group, creating it first (create is idempotent).
+join(Name) ->
+ pg2_fixed:create(pgname(Name)),
+ ok = pg2_fixed:join(pgname(Name), self()).
+
+%% All exchange link processes, across every exchange.
+all() ->
+ pg2_fixed:create(pgname(rabbit_federation_exchanges)),
+ pg2_fixed:get_members(pgname(rabbit_federation_exchanges)).
+
+%% The link processes for one particular exchange.
+x(XName) ->
+ pg2_fixed:create(pgname({rabbit_federation_exchange, XName})),
+ pg2_fixed:get_members(pgname({rabbit_federation_exchange, XName})).
+
+%%----------------------------------------------------------------------------
+
+federation_up() -> is_pid(whereis(rabbit_federation_app)).
+
+%% Apply one replayed binding command to the state.
+handle_command({add_binding, Binding}, State) ->
+ add_binding(Binding, State);
+
+handle_command({remove_bindings, Bindings}, State) ->
+ lists:foldl(fun remove_binding/2, State, Bindings).
+
+%% Execute buffered commands in serial order: run any whose serial is
+%% exactly next_serial, discard stale ones, keep future ones buffered.
+play_back_commands(State = #state{waiting_cmds = Waiting,
+ next_serial = Next}) ->
+ case gb_trees:is_empty(Waiting) of
+ false -> case gb_trees:take_smallest(Waiting) of
+ {Next, Cmd, Waiting1} ->
+ %% The next one. Just execute it.
+ play_back_commands(
+ handle_command(Cmd, State#state{
+ waiting_cmds = Waiting1,
+ next_serial = Next + 1}));
+ {Serial, _Cmd, Waiting1} when Serial < Next ->
+ %% This command came from before we executed
+ %% binding:list_for_source. Ignore it.
+ play_back_commands(State#state{
+ waiting_cmds = Waiting1});
+ _ ->
+ %% Some future command. Don't do anything.
+ State
+ end;
+ true -> State
+ end.
+
+%% Record the binding locally and (when needed) bind on the upstream.
+add_binding(B, State) ->
+ binding_op(fun record_binding/2, bind_cmd(bind, B, State), B, State).
+
+%% Forget the binding locally and (when needed) unbind on the upstream.
+remove_binding(B, State) ->
+ binding_op(fun forget_binding/2, bind_cmd(unbind, B, State), B, State).
+
+%% Track Dest under the binding's {Key, Args} key. Only the first
+%% destination for a given key requires an actual upstream bind, so
+%% DoIt is true only when the key was previously absent.
+record_binding(B = #binding{destination = Dest},
+ State = #state{bindings = Bs}) ->
+ {DoIt, Set} = case dict:find(key(B), Bs) of
+ error -> {true, sets:from_list([Dest])};
+ {ok, Dests} -> {false, sets:add_element(
+ Dest, Dests)}
+ end,
+ {DoIt, State#state{bindings = dict:store(key(B), Set, Bs)}}.
+
+%% Remove Dest from the binding's destination set; only when the last
+%% destination goes is an upstream unbind required (DoIt = true).
+%% Crashes (dict:fetch/2) if the binding was never recorded.
+forget_binding(B = #binding{destination = Dest},
+ State = #state{bindings = Bs}) ->
+ Dests = sets:del_element(Dest, dict:fetch(key(B), Bs)),
+ {DoIt, Bs1} = case sets:size(Dests) of
+ 0 -> {true, dict:erase(key(B), Bs)};
+ _ -> {false, dict:store(key(B), Dests, Bs)}
+ end,
+ {DoIt, State#state{bindings = Bs1}}.
+
+%% Update local binding bookkeeping via UpdateFun, then issue the
+%% bind/unbind Cmd on the upstream channel when required. Bindings
+%% already carrying our ?BINDING_HEADER skip the bookkeeping: they are
+%% forwarded iff hop-limiting (bind_cmd/3) did not return 'ignore'.
+binding_op(UpdateFun, Cmd, B = #binding{args = Args},
+ State = #state{channel = Ch}) ->
+ {DoIt, State1} =
+ case rabbit_misc:table_lookup(Args, ?BINDING_HEADER) of
+ undefined -> UpdateFun(B, State);
+ {array, _} -> {Cmd =/= ignore, State}
+ end,
+ case DoIt of
+ true -> amqp_channel:call(Ch, Cmd);
+ false -> ok
+ end,
+ State1.
+
+%% Build the exchange.bind/unbind method for the upstream, or 'ignore'
+%% when hop-limiting says the binding should propagate no further.
+bind_cmd(Type, #binding{key = Key, args = Args},
+ State = #state{internal_exchange = IntXNameBin,
+ upstream_params = UpstreamParams}) ->
+ #upstream_params{x_or_q = X} = UpstreamParams,
+ case update_binding(Args, State) of
+ ignore -> ignore;
+ NewArgs -> bind_cmd0(Type, name(X), IntXNameBin, Key, NewArgs)
+ end.
+
+%% Construct the AMQP method record for an exchange-to-exchange
+%% (un)binding on the upstream.
+bind_cmd0(bind, Source, Destination, RoutingKey, Arguments) ->
+ #'exchange.bind'{source = Source,
+ destination = Destination,
+ routing_key = RoutingKey,
+ arguments = Arguments};
+
+bind_cmd0(unbind, Source, Destination, RoutingKey, Arguments) ->
+ #'exchange.unbind'{source = Source,
+ destination = Destination,
+ routing_key = RoutingKey,
+ arguments = Arguments}.
+
+%% This function adds information about the current node to the
+%% binding arguments, or returns 'ignore' if it determines the binding
+%% should propagate no further. The interesting part is the latter.
+%%
+%% We want bindings to propagate in the same way as messages
+%% w.r.t. max_hops - if we determine that a message can get from node
+%% A to B (assuming bindings are in place) then it follows that a
+%% binding at B should propagate back to A, and no further. There is
+%% no point in propagating bindings past the point where messages
+%% would propagate, and we will lose messages if bindings don't
+%% propagate as far.
+%%
+%% Note that we still want to have limits on how far messages can
+%% propagate: limiting our bindings is not enough, since other
+%% bindings from other nodes can overlap.
+%%
+%% So in short we want bindings to obey max_hops. However, they can't
+%% just obey the max_hops of the current link, since they are
+%% travelling in the opposite direction to messages! Consider the
+%% following federation:
+%%
+%% A -----------> B -----------> C
+%% max_hops=1 max_hops=2
+%%
+%% where the arrows indicate message flow. A binding created at C
+%% should propagate to B, then to A, and no further. Therefore every
+%% time we traverse a link, we keep a count of the number of hops that
+%% a message could have made so far to reach this point, and still be
+%% able to propagate. When this number ("hops" below) reaches 0 we
+%% propagate no further.
+%%
+%% hops(link(N)) is given by:
+%%
+%% min(hops(link(N-1))-1, max_hops(link(N)))
+%%
+%% where link(N) is the link that bindings propagate over after N
+%% steps (e.g. link(1) is CB above, link(2) is BA).
+%%
+%% In other words, we count down to 0 from the link with the most
+%% restrictive max_hops we have yet passed through.
+
+%% See the long comment above for the hop-counting rationale.
+update_binding(Args, #state{downstream_exchange = X,
+ upstream = Upstream,
+ upstream_name = UName}) ->
+ #upstream{max_hops = MaxHops} = Upstream,
+ %% Remaining hop budget: MaxHops for a locally-created binding, else
+ %% min(previous budget - 1, MaxHops); 0 if our own upstream already
+ %% appears in the header chain (i.e. a loop).
+ Hops = case rabbit_misc:table_lookup(Args, ?BINDING_HEADER) of
+ undefined -> MaxHops;
+ {array, All} -> [{table, Prev} | _] = All,
+ {short, PrevHops} =
+ rabbit_misc:table_lookup(Prev, <<"hops">>),
+ case rabbit_federation_util:already_seen(
+ UName, All) of
+ true -> 0;
+ false -> lists:min([PrevHops - 1, MaxHops])
+ end
+ end,
+ case Hops of
+ 0 -> ignore;
+ _ -> Cluster = rabbit_nodes:cluster_name(),
+ ABSuffix = rabbit_federation_db:get_active_suffix(
+ X, Upstream, <<"A">>),
+ DVHost = vhost(X),
+ DName = name(X),
+ Down = <<DVHost/binary,":", DName/binary, " ", ABSuffix/binary>>,
+ Info = [{<<"cluster-name">>, longstr, Cluster},
+ {<<"exchange">>, longstr, Down},
+ {<<"hops">>, short, Hops}],
+ rabbit_basic:prepend_table_header(?BINDING_HEADER, Info, Args)
+ end.
+
+key(#binding{key = Key, args = Args}) -> {Key, Args}.
+
+%% Establish the upstream and downstream connections/channels, build the
+%% initial #state{} and replay the bindings already present on the
+%% downstream exchange.
+go(S0 = {not_started, {Upstream, UParams, DownXName}}) ->
+    Unacked = rabbit_federation_link_util:unacked_new(),
+    rabbit_federation_link_util:start_conn_ch(
+      fun (Conn, Ch, DConn, DCh) ->
+              %% Ask the upstream broker for its cluster name; it may
+              %% not advertise one, in which case we record 'unknown'.
+              Props = pget(server_properties,
+                           amqp_connection:info(Conn, [server_properties])),
+              UName = case rabbit_misc:table_lookup(
+                             Props, <<"cluster_name">>) of
+                          {longstr, N} -> N;
+                          _            -> unknown
+                      end,
+              %% Snapshot the binding serial and the binding list in one
+              %% transaction so they are consistent with each other.
+              {Serial, Bindings} =
+                  rabbit_misc:execute_mnesia_transaction(
+                    fun () ->
+                            {rabbit_exchange:peek_serial(DownXName),
+                             rabbit_binding:list_for_source(DownXName)}
+                    end),
+              %% If we are very short lived, Serial can be undefined at
+              %% this point (since the deletion of the X could have
+              %% overtaken the creation of this process). However, this
+              %% is not a big deal - 'undefined' just becomes the next
+              %% serial we will process. Since it compares larger than
+              %% any number we never process any commands. And we will
+              %% soon get told to stop anyway. The previous strict
+              %% is_integer/1 assertion crashed the link in exactly that
+              %% legitimate case, so 'undefined' must be allowed here.
+              true = is_integer(Serial) orelse Serial =:= undefined,
+              State = ensure_upstream_bindings(
+                        consume_from_upstream_queue(
+                          #state{upstream              = Upstream,
+                                 upstream_params       = UParams,
+                                 upstream_name         = UName,
+                                 connection            = Conn,
+                                 channel               = Ch,
+                                 next_serial           = Serial,
+                                 downstream_connection = DConn,
+                                 downstream_channel    = DCh,
+                                 downstream_exchange   = DownXName,
+                                 unacked               = Unacked}),
+                        Bindings),
+              {noreply, State}
+      end, Upstream, UParams, DownXName, S0).
+
+%% Declare the durable upstream buffer queue (with the configured
+%% expiry/TTL/HA arguments, omitting any set to 'none') and start
+%% consuming from it, applying prefetch unless in no-ack mode.
+consume_from_upstream_queue(
+ State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ channel = Ch,
+ downstream_exchange = DownXName}) ->
+ #upstream{prefetch_count = Prefetch,
+ expires = Expiry,
+ message_ttl = TTL,
+ ha_policy = HA} = Upstream,
+ #upstream_params{x_or_q = X,
+ params = Params} = UParams,
+ Q = upstream_queue_name(name(X), vhost(Params), DownXName),
+ Args = [A || {_K, _T, V} = A
+ <- [{<<"x-expires">>, long, Expiry},
+ {<<"x-message-ttl">>, long, TTL},
+ {<<"x-ha-policy">>, longstr, HA},
+ {<<"x-internal-purpose">>, longstr, <<"federation">>}],
+ V =/= none],
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args}),
+ NoAck = Upstream#upstream.ack_mode =:= 'no-ack',
+ case NoAck of
+ false -> amqp_channel:call(Ch, #'basic.qos'{prefetch_count = Prefetch});
+ true -> ok
+ end,
+ #'basic.consume_ok'{consumer_tag = CTag} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = NoAck}, self()),
+ State#state{consumer_tag = CTag,
+ queue = Q}.
+
+%% Flip the active "A"/"B" suffix, (re)create the internal exchange
+%% under the new suffix, bind the buffer queue to it, replay all
+%% downstream bindings, then delete the old-suffix internal exchange.
+%% The A/B flip lets the new binding set replace the old atomically
+%% from the point of view of message flow.
+ensure_upstream_bindings(State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ connection = Conn,
+ channel = Ch,
+ downstream_exchange = DownXName,
+ queue = Q}, Bindings) ->
+ #upstream_params{x_or_q = X, params = Params} = UParams,
+ OldSuffix = rabbit_federation_db:get_active_suffix(
+ DownXName, Upstream, <<"A">>),
+ Suffix = case OldSuffix of
+ <<"A">> -> <<"B">>;
+ <<"B">> -> <<"A">>
+ end,
+ IntXNameBin = upstream_exchange_name(name(X), vhost(Params),
+ DownXName, Suffix),
+ ensure_upstream_exchange(State),
+ ensure_internal_exchange(IntXNameBin, State),
+ amqp_channel:call(Ch, #'queue.bind'{exchange = IntXNameBin, queue = Q}),
+ State1 = State#state{internal_exchange = IntXNameBin},
+ rabbit_federation_db:set_active_suffix(DownXName, Upstream, Suffix),
+ State2 = lists:foldl(fun add_binding/2, State1, Bindings),
+ OldIntXNameBin = upstream_exchange_name(
+ name(X), vhost(Params), DownXName, OldSuffix),
+ delete_upstream_exchange(Conn, OldIntXNameBin),
+ State2.
+
+%% Check the federated exchange exists upstream (passive declare on a
+%% disposable channel); declare it with matching properties if not.
+ensure_upstream_exchange(#state{upstream_params = UParams,
+ connection = Conn,
+ channel = Ch}) ->
+ #upstream_params{x_or_q = X} = UParams,
+ #exchange{type = Type,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Arguments} = X,
+ Decl = #'exchange.declare'{exchange = name(X),
+ type = list_to_binary(atom_to_list(Type)),
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Arguments},
+ rabbit_federation_link_util:disposable_channel_call(
+ Conn, Decl#'exchange.declare'{passive = true},
+ fun(?NOT_FOUND, _Text) ->
+ amqp_channel:call(Ch, Decl)
+ end).
+
+%% (Re)create the intermediate exchange on the upstream. Prefer the
+%% x-federation-upstream type (which enforces max-hops); if the upstream
+%% broker rejects that type (?COMMAND_INVALID, e.g. no federation
+%% plugin), fall back to a plain fanout exchange.
+ensure_internal_exchange(IntXNameBin,
+ #state{upstream = #upstream{max_hops = MaxHops},
+ upstream_params = UParams,
+ connection = Conn,
+ channel = Ch}) ->
+ #upstream_params{params = Params} = UParams,
+ delete_upstream_exchange(Conn, IntXNameBin),
+ Base = #'exchange.declare'{exchange = IntXNameBin,
+ durable = true,
+ internal = true,
+ auto_delete = true},
+ Purpose = [{<<"x-internal-purpose">>, longstr, <<"federation">>}],
+ XFUArgs = [{?MAX_HOPS_ARG, long, MaxHops},
+ {?NODE_NAME_ARG, longstr, rabbit_nodes:cluster_name()}
+ | Purpose],
+ XFU = Base#'exchange.declare'{type = <<"x-federation-upstream">>,
+ arguments = XFUArgs},
+ Fan = Base#'exchange.declare'{type = <<"fanout">>,
+ arguments = Purpose},
+ rabbit_federation_link_util:disposable_connection_call(
+ Params, XFU, fun(?COMMAND_INVALID, _Text) ->
+ amqp_channel:call(Ch, Fan)
+ end).
+
+%% Name for the upstream buffer queue. The downstream vhost/exchange
+%% parts are elided when they match the upstream ones, keeping the name
+%% as short as possible while staying unique per downstream.
+upstream_queue_name(XNameBin, VHost, #resource{name = DownXNameBin,
+                                               virtual_host = DownVHost}) ->
+    Node = rabbit_nodes:cluster_name(),
+    DownPart = case {DownVHost, DownXNameBin} of
+                   %% VHost/XNameBin are bound: these are equality matches.
+                   {VHost, XNameBin} -> <<"">>;
+                   {VHost, _}        -> <<":", DownXNameBin/binary>>;
+                   _                 -> <<":", DownVHost/binary,
+                                          ":", DownXNameBin/binary>>
+               end,
+    <<"federation: ", XNameBin/binary, " -> ", Node/binary, DownPart/binary>>.
+
+%% Name for the intermediate upstream exchange: the buffer queue name
+%% plus the active "A"/"B" suffix.
+upstream_exchange_name(XNameBin, VHost, DownXName, Suffix) ->
+    Base = upstream_queue_name(XNameBin, VHost, DownXName),
+    <<Base/binary, " ", Suffix/binary>>.
+
+%% Best-effort delete on a disposable channel, so any channel error the
+%% delete provokes cannot take down our main channel.
+delete_upstream_exchange(Conn, XNameBin) ->
+ rabbit_federation_link_util:disposable_channel_call(
+ Conn, #'exchange.delete'{exchange = XNameBin}).
+
+%% Prepend our ?ROUTING_HEADER entry - the upstream's identity table,
+%% the redelivered flag, and the upstream cluster name when known - to
+%% the message headers.
+update_headers(#upstream_params{table = Table}, UName, Redelivered, Headers) ->
+ rabbit_basic:prepend_table_header(
+ ?ROUTING_HEADER, Table ++ [{<<"redelivered">>, bool, Redelivered}] ++
+ header_for_name(UName),
+ Headers).
+
+%% Optional header advertising the upstream's cluster name; empty when
+%% the upstream did not report one (see go/1's 'unknown' fallback).
+header_for_name(Name) ->
+    case Name of
+        unknown -> [];
+        _       -> [{<<"cluster-name">>, longstr, Name}]
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_exchange_link_sup_sup).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, ?MODULE).
+
+%% Supervises the upstream links for all exchanges (but not queues). We need
+%% different handling here since exchanges want a mirrored sup.
+
+-export([start_link/0, start_child/1, adjust/1, stop_child/1]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+%% Start the mirrored supervisor; state updates go through mnesia
+%% transactions so the sup is consistent across the cluster.
+start_link() ->
+ mirrored_supervisor:start_link({local, ?SUPERVISOR}, ?SUPERVISOR,
+ fun rabbit_misc:execute_mnesia_transaction/1,
+ ?MODULE, []).
+
+%% Note that the next supervisor down, rabbit_federation_link_sup, is common
+%% between exchanges and queues.
+start_child(X) ->
+ case mirrored_supervisor:start_child(
+ ?SUPERVISOR,
+ {id(X), {rabbit_federation_link_sup, start_link, [X]},
+ transient, ?MAX_WAIT, supervisor,
+ [rabbit_federation_link_sup]}) of
+ {ok, _Pid} -> ok;
+ %% A link returned {stop, gone}, the link_sup shut down, that's OK.
+ {error, {shutdown, _}} -> ok
+ end.
+
+%% Propagate a configuration-change Reason to every exchange's link sup.
+adjust(Reason) ->
+ [rabbit_federation_link_sup:adjust(Pid, X, Reason) ||
+ {X, Pid, _, _} <- mirrored_supervisor:which_children(?SUPERVISOR)],
+ ok.
+
+%% Terminate and remove the link sup for exchange X.
+stop_child(X) ->
+ ok = mirrored_supervisor:terminate_child(?SUPERVISOR, id(X)),
+ ok = mirrored_supervisor:delete_child(?SUPERVISOR, id(X)).
+
+%%----------------------------------------------------------------------------
+
+%% Children are added dynamically via start_child/1.
+init([]) ->
+ {ok, {{one_for_one, 3, 10}, []}}.
+
+%% See comment in rabbit_federation_queue_link_sup_sup:id/1
+%% (the mutable scratches field is blanked so the child id is stable).
+id(X = #exchange{}) -> X#exchange{scratches = none}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_link_sup).
+
+-behaviour(supervisor2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_federation.hrl").
+
+%% Supervises the upstream links for an exchange or queue.
+
+-export([start_link/1, adjust/3]).
+-export([init/1]).
+
+%% One link sup per federated exchange or queue (XorQ).
+start_link(XorQ) ->
+ supervisor2:start_link(?MODULE, XorQ).
+
+%% Everything changed: restart all links from scratch.
+adjust(Sup, XorQ, everything) ->
+ [stop(Sup, Upstream, XorQ) ||
+ {Upstream, _, _, _} <- supervisor2:which_children(Sup)],
+ [{ok, _Pid} = supervisor2:start_child(Sup, Spec) || Spec <- specs(XorQ)];
+
+%% One named upstream changed: restart only the links it affects.
+adjust(Sup, XorQ, {upstream, UpstreamName}) ->
+ OldUpstreams0 = children(Sup, UpstreamName),
+ NewUpstreams0 = rabbit_federation_upstream:for(XorQ, UpstreamName),
+ %% If any haven't changed, don't restart them. The broker will
+ %% avoid telling us about connections that have not changed
+ %% syntactically, but even if one has, this XorQ may not have that
+ %% connection in an upstream, so we still need to check here.
+ {OldUpstreams, NewUpstreams} =
+ lists:foldl(
+ fun (OldU, {OldUs, NewUs}) ->
+ case lists:member(OldU, NewUs) of
+ true -> {OldUs -- [OldU], NewUs -- [OldU]};
+ false -> {OldUs, NewUs}
+ end
+ end, {OldUpstreams0, NewUpstreams0}, OldUpstreams0),
+ [stop(Sup, OldUpstream, XorQ) || OldUpstream <- OldUpstreams],
+ [start(Sup, NewUpstream, XorQ) || NewUpstream <- NewUpstreams];
+
+%% An upstream was deleted: stop its links and prune its scratch state.
+adjust(Sup, XorQ, {clear_upstream, UpstreamName}) ->
+ ok = rabbit_federation_db:prune_scratch(
+ name(XorQ), rabbit_federation_upstream:for(XorQ)),
+ [stop(Sup, Upstream, XorQ) || Upstream <- children(Sup, UpstreamName)];
+
+%% TODO handle changes of upstream sets minimally (bug 24853)
+adjust(Sup, X = #exchange{name = XName}, {upstream_set, _Set}) ->
+ adjust(Sup, X, everything),
+ case rabbit_federation_upstream:federate(X) of
+ false -> ok;
+ true -> ok = rabbit_federation_db:prune_scratch(
+ XName, rabbit_federation_upstream:for(X))
+ end;
+adjust(Sup, Q = #amqqueue{}, {upstream_set, _}) ->
+ adjust(Sup, Q, everything);
+adjust(Sup, XorQ, {clear_upstream_set, _}) ->
+ adjust(Sup, XorQ, everything).
+
+%% Start one upstream link under Sup; crashes if the start fails.
+start(Sup, Upstream, XorQ) ->
+ {ok, _Pid} = supervisor2:start_child(Sup, spec(Upstream, XorQ)),
+ ok.
+
+%% Stop and remove one upstream link under Sup.
+stop(Sup, Upstream, XorQ) ->
+ ok = supervisor2:terminate_child(Sup, Upstream),
+ ok = supervisor2:delete_child(Sup, Upstream),
+ %% While the link will report its own removal, that only works if
+ %% the link was actually up. If the link was broken and failing to
+ %% come up, the possibility exists that there *is* no link
+ %% process, but we still have a report in the status table. So
+ %% remove it here too.
+ rabbit_federation_status:remove(Upstream, name(XorQ)).
+
+%% The upstreams (child ids) under Sup that match UpstreamName.
+children(Sup, UpstreamName) ->
+ rabbit_federation_util:find_upstreams(
+ UpstreamName, [U || {U, _, _, _} <- supervisor2:which_children(Sup)]).
+
+%%----------------------------------------------------------------------------
+
+init(XorQ) ->
+ %% 1, ?MAX_WAIT so that we always give up after one fast retry and get
+ %% into the reconnect delay.
+ {ok, {{one_for_one, 1, ?MAX_WAIT}, specs(XorQ)}}.
+
+%% One child spec per configured upstream of XorQ.
+specs(XorQ) ->
+ [spec(Upstream, XorQ) || Upstream <- rabbit_federation_upstream:for(XorQ)].
+
+%% Child spec for a single upstream link. {permanent, Delay} is
+%% supervisor2's delayed-restart variant, giving the reconnect back-off.
+spec(U = #upstream{reconnect_delay = Delay}, #exchange{name = XName}) ->
+    {U, {rabbit_federation_exchange_link, start_link, [{U, XName}]},
+     {permanent, Delay}, ?MAX_WAIT, worker,
+     %% Modules list must name the callback module actually started;
+     %% previously listed the stale 'rabbit_federation_link' (pre-rename),
+     %% which would mislead release-upgrade module tracking.
+     [rabbit_federation_exchange_link]};
+
+spec(Upstream = #upstream{reconnect_delay = Delay}, Q = #amqqueue{}) ->
+    {Upstream, {rabbit_federation_queue_link, start_link, [{Upstream, Q}]},
+     {permanent, Delay}, ?MAX_WAIT, worker,
+     [rabbit_federation_queue_link]}.
+
+%% The #resource{} name of either kind of federated entity.
+name(#exchange{name = XName}) -> XName;
+name(#amqqueue{name = QName}) -> QName.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_link_util).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+%% real
+-export([start_conn_ch/5, disposable_channel_call/2, disposable_channel_call/3,
+ disposable_connection_call/3, ensure_connection_closed/1,
+ log_terminate/4, unacked_new/0, ack/3, nack/3, forward/9,
+ handle_down/6]).
+
+%% temp
+-export([connection_error/6]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(MAX_CONNECTION_CLOSE_TIMEOUT, 10000).
+
+%%----------------------------------------------------------------------------
+
+%% Open the downstream (direct, same-broker) and upstream connections
+%% plus one channel each, set up publisher confirms downstream when in
+%% on-confirm mode, then hand all four to Fun. Any failure along the way
+%% closes whatever was opened and reports via connection_error/6.
+start_conn_ch(Fun, Upstream, UParams,
+ XorQName = #resource{virtual_host = DownVHost}, State) ->
+ case open_monitor(#amqp_params_direct{virtual_host = DownVHost}) of
+ {ok, DConn, DCh} ->
+ case Upstream#upstream.ack_mode of
+ 'on-confirm' ->
+ #'confirm.select_ok'{} =
+ amqp_channel:call(DCh, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(DCh, self());
+ _ ->
+ ok
+ end,
+ case open_monitor(UParams#upstream_params.params) of
+ {ok, Conn, Ch} ->
+ %% Don't trap exits until we have established
+ %% connections so that if we try to delete
+ %% federation upstreams while waiting for a
+ %% connection to be established then we don't
+ %% block
+ process_flag(trap_exit, true),
+ try
+ R = Fun(Conn, Ch, DConn, DCh),
+ log_info(
+ XorQName, "connected to ~s~n",
+ [rabbit_federation_upstream:params_to_string(
+ UParams)]),
+ Name = pget(name, amqp_connection:info(DConn, [name])),
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, {running, Name}),
+ R
+ catch exit:E ->
+ %% terminate/2 will not get this, as we
+ %% have not put them in our state yet
+ ensure_connection_closed(DConn),
+ ensure_connection_closed(Conn),
+ connection_error(remote_start, E,
+ Upstream, UParams, XorQName, State)
+ end;
+ E ->
+ ensure_connection_closed(DConn),
+ connection_error(remote_start, E,
+ Upstream, UParams, XorQName, State)
+ end;
+ E ->
+ connection_error(local_start, E,
+ Upstream, UParams, XorQName, State)
+ end.
+
+%% Open a connection+channel and monitor the channel, so its death is
+%% delivered to us as a 'DOWN' message.
+open_monitor(Params) ->
+ case open(Params) of
+ {ok, Conn, Ch} -> erlang:monitor(process, Ch),
+ {ok, Conn, Ch};
+ E -> E
+ end.
+
+%% Open a connection and one channel on it; if the channel cannot be
+%% opened, close the connection again (errors from the close ignored).
+open(Params) ->
+ case amqp_connection:start(Params) of
+ {ok, Conn} -> case amqp_connection:open_channel(Conn) of
+ {ok, Ch} -> {ok, Conn, Ch};
+ E -> catch amqp_connection:close(Conn),
+ E
+ end;
+ E -> E
+ end.
+
+%% Best-effort close of a channel; any error (e.g. already closed) is
+%% deliberately swallowed.
+ensure_channel_closed(Ch) -> catch amqp_channel:close(Ch).
+
+%% Best-effort close of a connection, bounded by
+%% ?MAX_CONNECTION_CLOSE_TIMEOUT; any error is deliberately swallowed.
+ensure_connection_closed(Conn) ->
+ catch amqp_connection:close(Conn, ?MAX_CONNECTION_CLOSE_TIMEOUT).
+
+%% Record a link failure in rabbit_federation_status, log it, and stop
+%% the calling gen_server with {shutdown, restart} so its supervisor
+%% restarts the link. The first argument classifies where the failure
+%% arose: failing to start the remote side, losing an established remote
+%% connection, a local 'basic.cancel', or failing to start locally.
+connection_error(remote_start, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_warning(XorQName, "did not connect to ~s~n~p~n",
+ [rabbit_federation_upstream:params_to_string(UParams),
+ E]),
+ {stop, {shutdown, restart}, State};
+
+connection_error(remote, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_info(XorQName, "disconnected from ~s~n~p~n",
+ [rabbit_federation_upstream:params_to_string(UParams), E]),
+ {stop, {shutdown, restart}, State};
+
+connection_error(local, basic_cancel, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, {error, basic_cancel}),
+ log_info(XorQName, "received 'basic.cancel'~n", []),
+ {stop, {shutdown, restart}, State};
+
+connection_error(local_start, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_warning(XorQName, "did not connect locally~n~p~n", [E]),
+ {stop, {shutdown, restart}, State}.
+
+%% If we terminate due to a gen_server call exploding (almost
+%% certainly due to an amqp_channel:call() exploding) then we do not
+%% want to report the gen_server call in our status.
+clean_reason(Reason) ->
+    case Reason of
+        {{shutdown, _} = Shutdown, _} -> Shutdown;
+        Other                         -> Other
+    end.
+
+%% local / disconnected never gets invoked, see handle_info({'DOWN', ...
+
+%%----------------------------------------------------------------------------
+
+%% A fresh, empty store mapping downstream publish seqnos to upstream
+%% delivery-tags; implemented as a gb_tree (keys arrive in order).
+unacked_new() -> gb_trees:empty().
+
+%% Relay a 'basic.ack' (confirm) received downstream to the upstream
+%% channel Ch, translating the downstream seqno Seq into the upstream
+%% delivery-tag stored in Unack. Crashes (gb_trees:get/2) if Seq is
+%% unknown. Returns the pruned unacked store.
+ack(#'basic.ack'{delivery_tag = Seq,
+ multiple = Multiple}, Ch, Unack) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = gb_trees:get(Seq, Unack),
+ multiple = Multiple}),
+ remove_delivery_tags(Seq, Multiple, Unack).
+
+
+%% Note: at time of writing the broker will never send requeue=false. And it's
+%% hard to imagine why it would. But we may as well handle it.
+%%
+%% Relay a 'basic.nack' received downstream to the upstream channel Ch,
+%% translating the downstream seqno into the stored upstream
+%% delivery-tag, preserving the multiple/requeue flags. Returns the
+%% pruned unacked store.
+nack(#'basic.nack'{delivery_tag = Seq,
+ multiple = Multiple,
+ requeue = Requeue}, Ch, Unack) ->
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = gb_trees:get(Seq, Unack),
+ multiple = Multiple,
+ requeue = Requeue}),
+ remove_delivery_tags(Seq, Multiple, Unack).
+
+%% Prune the unacked store after an ack/nack for seqno Seq: drop just
+%% Seq (multiple = false, crashing if absent) or every key =< Seq
+%% (multiple = true), mirroring AMQP "multiple" acknowledgement
+%% semantics.
+remove_delivery_tags(Seq, false, Unacked) ->
+    gb_trees:delete(Seq, Unacked);
+remove_delivery_tags(Seq, true, Unacked) ->
+    case gb_trees:is_empty(Unacked) of
+        true ->
+            Unacked;
+        false ->
+            case gb_trees:take_smallest(Unacked) of
+                {Smallest, _Val, Rest} when Smallest =< Seq ->
+                    remove_delivery_tags(Seq, true, Rest);
+                {_Larger, _Val, _Rest} ->
+                    Unacked
+            end
+    end.
+
+%% Forward one upstream delivery downstream. ForwardFun(Headers) decides
+%% whether to publish at all (if not, the message is dropped but still
+%% acked upstream); HeadersFun rewrites the headers (e.g. routing
+%% history). Depending on the upstream's ack mode we either remember the
+%% downstream publish seqno against the upstream delivery tag
+%% ('on-confirm', acked later when the confirm arrives), ack immediately
+%% after publishing ('on-publish'), or never ack ('no-ack'). Returns the
+%% updated unacked store.
+forward(#upstream{ack_mode = AckMode,
+ trust_user_id = Trust},
+ #'basic.deliver'{delivery_tag = DT},
+ Ch, DCh, PublishMethod, HeadersFun, ForwardFun, Msg, Unacked) ->
+ Headers = extract_headers(Msg),
+ case ForwardFun(Headers) of
+ true -> Msg1 = maybe_clear_user_id(
+ Trust, update_headers(HeadersFun(Headers), Msg)),
+ Seq = case AckMode of
+ 'on-confirm' -> amqp_channel:next_publish_seqno(DCh);
+ _ -> ignore
+ end,
+ amqp_channel:cast(DCh, PublishMethod, Msg1),
+ case AckMode of
+ 'on-confirm' ->
+ gb_trees:insert(Seq, DT, Unacked);
+ 'on-publish' ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DT}),
+ Unacked;
+ 'no-ack' ->
+ Unacked
+ end;
+ false -> amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DT}),
+ %% Drop it, but acknowledge it!
+ Unacked
+ end.
+
+%% Unless the upstream is trusted, strip the user_id property from the
+%% forwarded message; the downstream broker would otherwise reject a
+%% user_id that does not match the federation link's own user.
+maybe_clear_user_id(true, Msg) ->
+    Msg;
+maybe_clear_user_id(false, #amqp_msg{props = Props} = Msg) ->
+    Msg#amqp_msg{props = Props#'P_basic'{user_id = undefined}}.
+
+%% Pull the headers table (or 'undefined') out of a message's P_basic
+%% properties.
+extract_headers(#amqp_msg{props = #'P_basic'{headers = Headers}}) ->
+ Headers.
+
+%% Replace the headers table in a message's P_basic properties.
+update_headers(Headers, Msg = #amqp_msg{props = Props}) ->
+ Msg#amqp_msg{props = Props#'P_basic'{headers = Headers}}.
+
+%%----------------------------------------------------------------------------
+
+%% Handle a 'DOWN' for either channel of a link. Note the repeated
+%% variable in each head: the first argument (the pid that died) must
+%% equal Ch or DCh for that clause to match, so clauses dispatch on
+%% WHICH channel died as well as on the reason.
+%%
+%% If the downstream channel shuts down cleanly, we can just ignore it
+%% - we're the same node, we're presumably about to go down too.
+handle_down(DCh, shutdown, _Ch, DCh, _Args, State) ->
+ {noreply, State};
+
+%% If the upstream channel goes down for an intelligible reason, just
+%% log it and die quietly.
+handle_down(Ch, {shutdown, Reason}, Ch, _DCh,
+ {Upstream, UParams, XName}, State) ->
+ rabbit_federation_link_util:connection_error(
+ remote, {upstream_channel_down, Reason}, Upstream, UParams, XName, State);
+
+%% Any other upstream channel death is unexpected: crash the link.
+handle_down(Ch, Reason, Ch, _DCh, _Args, State) ->
+ {stop, {upstream_channel_down, Reason}, State};
+
+%% Any other downstream channel death is unexpected: crash the link.
+handle_down(DCh, Reason, _Ch, DCh, _Args, State) ->
+ {stop, {downstream_channel_down, Reason}, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Called from a link's terminate/2: update logging/status according to
+%% why we are going down.
+log_terminate({shutdown, restart}, _Upstream, _UParams, _XorQName) ->
+ %% We've already logged this before munging the reason
+ ok;
+log_terminate(shutdown, Upstream, UParams, XorQName) ->
+ %% The supervisor is shutting us down; we are probably restarting
+ %% the link because configuration has changed. So try to shut down
+ %% nicely so that we do not cause unacked messages to be
+ %% redelivered.
+ log_info(XorQName, "disconnecting from ~s~n",
+ [rabbit_federation_upstream:params_to_string(UParams)]),
+ rabbit_federation_status:remove(Upstream, XorQName);
+
+log_terminate(Reason, Upstream, UParams, XorQName) ->
+ %% Unexpected death. sasl will log it, but we should update
+ %% rabbit_federation_status.
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(Reason)).
+
+%% Severity-specific wrappers around log/4.
+log_info(Name, Fmt, Args) -> log(info, Name, Fmt, Args).
+log_warning(Name, Fmt, Args) -> log(warning, Name, Fmt, Args).
+
+%% Emit a federation log line at Level, prefixing the formatted message
+%% with the printable name of the exchange or queue concerned.
+log(Level, XorQName, Fmt, Args) ->
+ rabbit_log:log(federation, Level, "Federation ~s " ++ Fmt,
+ [rabbit_misc:rs(XorQName) | Args]).
+
+%%----------------------------------------------------------------------------
+
+%% As disposable_channel_call/3 with server-initiated close errors
+%% silently ignored.
+disposable_channel_call(Conn, Method) ->
+ disposable_channel_call(Conn, Method, fun(_, _) -> ok end).
+
+%% Open a throwaway channel on Conn, issue one synchronous Method, and
+%% always close the channel afterwards ('after' runs on every path). A
+%% server-initiated close becomes ErrFun(Code, Text); any other exit
+%% propagates. Crashes (badmatch) if the channel cannot be opened.
+disposable_channel_call(Conn, Method, ErrFun) ->
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ try
+ amqp_channel:call(Ch, Method)
+ catch exit:{{shutdown, {server_initiated_close, Code, Text}}, _} ->
+ ErrFun(Code, Text)
+ after
+ ensure_channel_closed(Ch)
+ end.
+
+%% Open a throwaway connection+channel, issue one synchronous Method,
+%% and always close the connection afterwards. A server-initiated
+%% connection close becomes ErrFun(Code, Txt); failure to connect at all
+%% is returned as-is.
+disposable_connection_call(Params, Method, ErrFun) ->
+ case open(Params) of
+ {ok, Conn, Ch} ->
+ try
+ amqp_channel:call(Ch, Method)
+ catch exit:{{shutdown, {connection_closing,
+ {server_initiated_close, Code, Txt}}}, _} ->
+ ErrFun(Code, Txt)
+ after
+ ensure_connection_closed(Conn)
+ end;
+ E ->
+ E
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Runtime-parameter and policy-validator callbacks for federation:
+%% validates the 'federation-upstream(-set)' parameters and policy keys,
+%% and prods all links to adjust whenever parameters change.
+-module(rabbit_federation_parameters).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([validate/5, notify/4, notify_clear/3]).
+-export([register/0, validate_policy/1, adjust/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation parameters"},
+ {mfa, {rabbit_federation_parameters, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+%% Register this module as the handler for every federation runtime
+%% parameter component and policy key. Run as a boot step (see above).
+register() ->
+ [rabbit_registry:register(Class, Name, ?MODULE) ||
+ {Class, Name} <- [{runtime_parameter, <<"federation">>},
+ {runtime_parameter, <<"federation-upstream">>},
+ {runtime_parameter, <<"federation-upstream-set">>},
+ {policy_validator, <<"federation-upstream">>},
+ {policy_validator, <<"federation-upstream-set">>}]],
+ ok.
+
+%% rabbit_runtime_parameter callback: an upstream-set is a list of
+%% proplists each naming an upstream; an upstream is a proplist that
+%% must contain a valid AMQP URI (or list of URIs).
+validate(_VHost, <<"federation-upstream-set">>, Name, Term, _User) ->
+ [rabbit_parameter_validation:proplist(
+ Name,
+ [{<<"upstream">>, fun rabbit_parameter_validation:binary/2, mandatory} |
+ shared_validation()], Upstream)
+ || Upstream <- Term];
+
+validate(_VHost, <<"federation-upstream">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, [{<<"uri">>, fun validate_uri/2, mandatory} |
+ shared_validation()], Term);
+
+validate(_VHost, _Component, Name, _Term, _User) ->
+ {error, "name not recognised: ~p", [Name]}.
+
+%% rabbit_runtime_parameter callback: a parameter was set/changed -
+%% restart the affected links.
+notify(_VHost, <<"federation-upstream-set">>, Name, _Term) ->
+ adjust({upstream_set, Name});
+
+notify(_VHost, <<"federation-upstream">>, Name, _Term) ->
+ adjust({upstream, Name}).
+
+%% rabbit_runtime_parameter callback: a parameter was cleared - stop the
+%% affected links.
+notify_clear(_VHost, <<"federation-upstream-set">>, Name) ->
+ adjust({clear_upstream_set, Name});
+
+notify_clear(_VHost, <<"federation-upstream">>, Name) ->
+ adjust({clear_upstream, Name}).
+
+%% Tell both the exchange and queue link supervisors about a change.
+adjust(Thing) ->
+ rabbit_federation_exchange_link_sup_sup:adjust(Thing),
+ rabbit_federation_queue_link_sup_sup:adjust(Thing).
+
+%%----------------------------------------------------------------------------
+
+%% Validation entries common to upstreams and upstream-set members.
+shared_validation() ->
+ [{<<"exchange">>, fun rabbit_parameter_validation:binary/2, optional},
+ {<<"queue">>, fun rabbit_parameter_validation:binary/2, optional},
+ {<<"prefetch-count">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"reconnect-delay">>,fun rabbit_parameter_validation:number/2, optional},
+ {<<"max-hops">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"expires">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"message-ttl">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"trust-user-id">>, fun rabbit_parameter_validation:boolean/2, optional},
+ {<<"ack-mode">>, rabbit_parameter_validation:enum(
+ ['no-ack', 'on-publish', 'on-confirm']), optional},
+ {<<"ha-policy">>, fun rabbit_parameter_validation:binary/2, optional}].
+
+%% A URI value may be a single binary or a list of binaries; each must
+%% parse as an AMQP URI. For a list, the first error (if any) is
+%% returned.
+validate_uri(Name, Term) when is_binary(Term) ->
+ case rabbit_parameter_validation:binary(Name, Term) of
+ ok -> case amqp_uri:parse(binary_to_list(Term)) of
+ {ok, _} -> ok;
+ {error, E} -> {error, "\"~s\" not a valid URI: ~p", [Term, E]}
+ end;
+ E -> E
+ end;
+validate_uri(Name, Term) ->
+ case rabbit_parameter_validation:list(Name, Term) of
+ ok -> case [V || U <- Term,
+ V <- [validate_uri(Name, U)],
+ element(1, V) =:= error] of
+ [] -> ok;
+ [E | _] -> E
+ end;
+ E -> E
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% rabbit_policy_validator callback: exactly one of federation-upstream
+%% / federation-upstream-set may be set, and its value must be a binary
+%% name.
+validate_policy([{<<"federation-upstream-set">>, Value}])
+ when is_binary(Value) ->
+ ok;
+validate_policy([{<<"federation-upstream-set">>, Value}]) ->
+ {error, "~p is not a valid federation upstream set name", [Value]};
+
+validate_policy([{<<"federation-upstream">>, Value}])
+ when is_binary(Value) ->
+ ok;
+validate_policy([{<<"federation-upstream">>, Value}]) ->
+ {error, "~p is not a valid federation upstream name", [Value]};
+
+%% Both keys at once: reject. (Pattern [_, _] is equivalent to the
+%% previous 'length(L) =:= 2' guard for every reachable input, and is
+%% the idiomatic form - length/1 in a guard walks the whole list.)
+validate_policy([_, _]) ->
+ {error, "cannot specify federation-upstream and federation-upstream-set "
+ "together", []}.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Queue decorator that starts/stops federation upstream links for
+%% queues whose policy says they should be federated, and runs/pauses
+%% those links as the queue's consumer situation changes.
+-module(rabbit_federation_queue).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation queue decorator"},
+ {mfa, {rabbit_registry, register,
+ [queue_decorator, <<"federation">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(rabbit_queue_decorator).
+
+-export([startup/1, shutdown/1, policy_changed/2, active_for/1,
+ consumer_state_changed/3]).
+-export([policy_changed_local/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+%%----------------------------------------------------------------------------
+
+%% Decorator callback: start a link supervisor for the queue if it
+%% should be federated.
+startup(Q) ->
+ case active_for(Q) of
+ true -> rabbit_federation_queue_link_sup_sup:start_child(Q);
+ false -> ok
+ end,
+ ok.
+
+%% Decorator callback: stop the queue's link supervisor and clear its
+%% status entries, if it was federated.
+shutdown(Q = #amqqueue{name = QName}) ->
+ case active_for(Q) of
+ true -> rabbit_federation_queue_link_sup_sup:stop_child(Q),
+ rabbit_federation_status:remove_exchange_or_queue(QName);
+ false -> ok
+ end,
+ ok.
+
+%% Decorator callback: the queue's policy changed. The links run on the
+%% node hosting the queue process, so apply the restart there via RPC.
+policy_changed(Q1 = #amqqueue{name = QName}, Q2) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, #amqqueue{pid = QPid}} ->
+ rpc:call(node(QPid), rabbit_federation_queue,
+ policy_changed_local, [Q1, Q2]),
+ ok;
+ {error, not_found} ->
+ ok
+ end.
+
+%% Restart the link on this node: stop with the old definition, start
+%% with the new one.
+policy_changed_local(Q1, Q2) ->
+ shutdown(Q1),
+ startup(Q2).
+
+%% Should this queue be federated at all? Internal-purpose queues
+%% (e.g. federation's own internal queues) are never federated.
+active_for(Q = #amqqueue{arguments = Args}) ->
+ case rabbit_misc:table_lookup(Args, <<"x-internal-purpose">>) of
+ {longstr, _} -> false; %% [0]
+ _ -> rabbit_federation_upstream:federate(Q)
+ end.
+%% [0] Currently the only "internal purpose" is federation, but I
+%% suspect if we introduce another one it will also be for something
+%% that doesn't want to be federated.
+
+%% We need to reconsider whether we need to run or pause every time
+%% the consumer state changes in the queue. But why can the state
+%% change?
+%%
+%% consumer blocked | We may have no more active consumers, and thus need to
+%% | pause
+%% |
+%% consumer unblocked | We don't care
+%% |
+%% queue empty | The queue has become empty therefore we need to run to
+%% | get more messages
+%% |
+%% basic consume | We don't care
+%% |
+%% basic cancel | We may have no more active consumers, and thus need to
+%% | pause
+%% |
+%% refresh | We asked for it (we have started a new link after
+%% | failover and need something to prod us into action
+%% | (or not)).
+%%
+%% In the cases where we don't care it's not prohibitively expensive
+%% for us to be here anyway, so never mind.
+%%
+%% Note that there is no "queue became non-empty" state change - that's
+%% because of the queue invariant. If the queue transitions from empty to
+%% non-empty then it must have no active consumers - in which case it stays
+%% the same from our POV.
+
+%% Decorator callback: run the link only when the queue is empty AND a
+%% real (non-federation) consumer is active; otherwise pause it.
+consumer_state_changed(#amqqueue{name = QName}, MaxActivePriority, IsEmpty) ->
+ case IsEmpty andalso active_unfederated(MaxActivePriority) of
+ true -> rabbit_federation_queue_link:run(QName);
+ false -> rabbit_federation_queue_link:pause(QName)
+ end,
+ ok.
+
+%% Federation's own consumers use priority -1 (see consume/3 in
+%% rabbit_federation_queue_link), so a max active priority >= 0 means a
+%% genuine local consumer is active.
+active_unfederated(empty) -> false;
+active_unfederated(P) when P >= 0 -> true;
+active_unfederated(_P) -> false.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% One gen_server2 per (queue, upstream) pair: consumes from the
+%% upstream queue at consumer priority -1 and republishes into the
+%% local queue via the default exchange. Starts in #not_started{} and
+%% moves to #state{} once connections are up (see go/1); run/pause
+%% start and cancel the upstream consumer as local consumer activity
+%% demands.
+-module(rabbit_federation_queue_link).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/1, go/0, run/1, pause/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_federation_util, [name/1, pgname/1]).
+
+%% State before connections are established; 'run' remembers whether we
+%% should start consuming as soon as we go.
+-record(not_started, {queue, run, upstream, upstream_params}).
+%% Fully started state: upstream conn/ch, downstream dconn/dch, and the
+%% unacked seqno->delivery-tag store.
+-record(state, {queue, run, conn, ch, dconn, dch, upstream, upstream_params,
+ unacked}).
+
+start_link(Args) ->
+ gen_server2:start_link(?MODULE, Args, [{timeout, infinity}]).
+
+%% Broadcast API: run/pause a queue's links; go/0 starts all waiting
+%% links once the federation app is up.
+run(QName) -> cast(QName, run).
+pause(QName) -> cast(QName, pause).
+go() -> cast(go).
+
+%%----------------------------------------------------------------------------
+%%call(QName, Msg) -> [gen_server2:call(Pid, Msg, infinity) || Pid <- q(QName)].
+cast(Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- all()].
+cast(QName, Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- q(QName)].
+
+%% pg2-based registry: every link joins a global group and a per-queue
+%% group so casts can be broadcast without a name server.
+join(Name) ->
+ pg2_fixed:create(pgname(Name)),
+ ok = pg2_fixed:join(pgname(Name), self()).
+
+all() ->
+ pg2_fixed:create(pgname(rabbit_federation_queues)),
+ pg2_fixed:get_members(pgname(rabbit_federation_queues)).
+
+q(QName) ->
+ pg2_fixed:create(pgname({rabbit_federation_queue, QName})),
+ pg2_fixed:get_members(pgname({rabbit_federation_queue, QName})).
+
+federation_up() ->
+ proplists:is_defined(rabbitmq_federation,
+ application:which_applications(infinity)).
+
+%%----------------------------------------------------------------------------
+
+%% The queue must still exist; otherwise stop quietly with 'gone'.
+%% Connection setup is deferred to the 'maybe_go' cast so init returns
+%% promptly.
+init({Upstream, Queue = #amqqueue{name = QName}}) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ UParams = rabbit_federation_upstream:to_params(Upstream, Queue),
+ rabbit_federation_status:report(Upstream, UParams, QName, starting),
+ join(rabbit_federation_queues),
+ join({rabbit_federation_queue, QName}),
+ gen_server2:cast(self(), maybe_go),
+ rabbit_amqqueue:notify_decorators(Q),
+ {ok, #not_started{queue = Queue,
+ run = false,
+ upstream = Upstream,
+ upstream_params = UParams}};
+ {error, not_found} ->
+ {stop, gone}
+ end.
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+%% Only connect if the federation app is running; otherwise wait for an
+%% explicit 'go' broadcast.
+handle_cast(maybe_go, State) ->
+ case federation_up() of
+ true -> go(State);
+ false -> {noreply, State}
+ end;
+
+handle_cast(go, State = #not_started{}) ->
+ go(State);
+
+%% Already started: 'go' is a no-op.
+handle_cast(go, State) ->
+ {noreply, State};
+
+%% run/pause toggle the upstream consumer; in #not_started{} we only
+%% record the desired state for go/1 to apply later.
+handle_cast(run, State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ ch = Ch,
+ run = false}) ->
+ consume(Ch, Upstream, UParams#upstream_params.x_or_q),
+ {noreply, State#state{run = true}};
+
+handle_cast(run, State = #not_started{}) ->
+ {noreply, State#not_started{run = true}};
+
+handle_cast(run, State) ->
+ %% Already started
+ {noreply, State};
+
+handle_cast(pause, State = #state{run = false}) ->
+ %% Already paused
+ {noreply, State};
+
+handle_cast(pause, State = #not_started{}) ->
+ {noreply, State#not_started{run = false}};
+
+handle_cast(pause, State = #state{ch = Ch}) ->
+ cancel(Ch),
+ {noreply, State#state{run = false}};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+%% Confirms from the downstream channel: translate and relay upstream.
+handle_info(#'basic.ack'{} = Ack, State = #state{ch = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:ack(Ack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info(#'basic.nack'{} = Nack, State = #state{ch = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:nack(Nack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+%% A delivery from upstream: publish it into the local queue via the
+%% default exchange, recording routing history in the headers.
+handle_info({#'basic.deliver'{redelivered = Redelivered,
+ exchange = X,
+ routing_key = K} = DeliverMethod, Msg},
+ State = #state{queue = #amqqueue{name = QName},
+ upstream = Upstream,
+ upstream_params = UParams,
+ ch = Ch,
+ dch = DCh,
+ unacked = Unacked}) ->
+ PublishMethod = #'basic.publish'{exchange = <<"">>,
+ routing_key = QName#resource.name},
+ HeadersFun = fun (H) -> update_headers(UParams, Redelivered, X, K, H) end,
+ ForwardFun = fun (_H) -> true end,
+ Unacked1 = rabbit_federation_link_util:forward(
+ Upstream, DeliverMethod, Ch, DCh, PublishMethod,
+ HeadersFun, ForwardFun, Msg, Unacked),
+ %% TODO actually we could reject when 'stopped'
+ {noreply, State#state{unacked = Unacked1}};
+
+%% The upstream broker cancelled our consumer: restart the link.
+handle_info(#'basic.cancel'{},
+ State = #state{queue = #amqqueue{name = QName},
+ upstream = Upstream,
+ upstream_params = UParams}) ->
+ rabbit_federation_link_util:connection_error(
+ local, basic_cancel, Upstream, UParams, QName, State);
+
+%% One of our (monitored) channels died; shared handling decides
+%% whether to ignore, restart or crash.
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{dch = DCh,
+ ch = Ch,
+ upstream = Upstream,
+ upstream_params = UParams,
+ queue = #amqqueue{name = QName}}) ->
+ rabbit_federation_link_util:handle_down(
+ Pid, Reason, Ch, DCh, {Upstream, UParams, QName}, State);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+%% No connections to close in #not_started{}; just log/report.
+terminate(Reason, #not_started{upstream = Upstream,
+ upstream_params = UParams,
+ queue = #amqqueue{name = QName}}) ->
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, QName),
+ ok;
+
+terminate(Reason, #state{dconn = DConn,
+ conn = Conn,
+ upstream = Upstream,
+ upstream_params = UParams,
+ queue = #amqqueue{name = QName}}) ->
+ rabbit_federation_link_util:ensure_connection_closed(DConn),
+ rabbit_federation_link_util:ensure_connection_closed(Conn),
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, QName),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Establish both connections (via the shared helper), redeclare the
+%% upstream queue, set QoS when we will be acking, and start consuming
+%% if 'run' was requested while we were not started.
+go(S0 = #not_started{run = Run,
+ upstream = Upstream = #upstream{
+ prefetch_count = Prefetch},
+ upstream_params = UParams,
+ queue = Queue = #amqqueue{name = QName}}) ->
+ #upstream_params{x_or_q = UQueue = #amqqueue{
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args}} = UParams,
+ Unacked = rabbit_federation_link_util:unacked_new(),
+ rabbit_federation_link_util:start_conn_ch(
+ fun (Conn, Ch, DConn, DCh) ->
+ check_upstream_suitable(Conn),
+ amqp_channel:call(Ch, #'queue.declare'{queue = name(UQueue),
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args}),
+ case Upstream#upstream.ack_mode of
+ 'no-ack' -> ok;
+ _ -> amqp_channel:call(
+ Ch, #'basic.qos'{prefetch_count = Prefetch})
+ end,
+ amqp_selective_consumer:register_default_consumer(Ch, self()),
+ case Run of
+ true -> consume(Ch, Upstream, UQueue);
+ false -> ok
+ end,
+ {noreply, #state{queue = Queue,
+ run = Run,
+ conn = Conn,
+ ch = Ch,
+ dconn = DConn,
+ dch = DCh,
+ upstream = Upstream,
+ upstream_params = UParams,
+ unacked = Unacked}}
+ end, Upstream, UParams, QName, S0).
+
+%% Queue federation relies on consumer priorities (we consume at -1);
+%% refuse to run against an upstream broker that lacks them.
+check_upstream_suitable(Conn) ->
+ Props = pget(server_properties,
+ amqp_connection:info(Conn, [server_properties])),
+ {table, Caps} = rabbit_misc:table_lookup(Props, <<"capabilities">>),
+ case rabbit_misc:table_lookup(Caps, <<"consumer_priorities">>) of
+ {bool, true} -> ok;
+ _ -> exit({error, upstream_lacks_consumer_priorities})
+ end.
+
+%% Maintain the ?ROUTING_HEADER visit history on forwarded messages:
+%% record the original exchange/routing key on first hop, count repeat
+%% visits through the same upstream, and prepend this hop's entry.
+update_headers(UParams, Redelivered, X, K, undefined) ->
+ update_headers(UParams, Redelivered, X, K, []);
+
+update_headers(#upstream_params{table = Table}, Redelivered, X, K, Headers) ->
+ {Headers1, Count} =
+ case rabbit_misc:table_lookup(Headers, ?ROUTING_HEADER) of
+ undefined ->
+ %% We only want to record the original exchange and
+ %% routing key the first time a message gets
+ %% forwarded; after that it's known that they were
+ %% <<>> and QueueName respectively.
+ {rabbit_misc:set_table_value(
+ rabbit_misc:set_table_value(
+ Headers, <<"x-original-exchange">>, longstr, X),
+ <<"x-original-routing-key">>, longstr, K), 0};
+ {array, Been} ->
+ %% Pull out any previous visit through this same
+ %% upstream so its count can be carried forward.
+ {Found, Been1} = lists:partition(
+ fun (I) -> visit_match(I, Table) end,
+ Been),
+ C = case Found of
+ [] -> 0;
+ [{table, T}] -> case rabbit_misc:table_lookup(
+ T, <<"visit-count">>) of
+ {_, I} when is_number(I) -> I;
+ _ -> 0
+ end
+ end,
+ {rabbit_misc:set_table_value(
+ Headers, ?ROUTING_HEADER, array, Been1), C}
+ end,
+ rabbit_basic:prepend_table_header(
+ ?ROUTING_HEADER, Table ++ [{<<"redelivered">>, bool, Redelivered},
+ {<<"visit-count">>, long, Count + 1}],
+ swap_cc_header(Headers1)).
+
+%% Rename any CC header so the downstream publish does not re-fan-out.
+swap_cc_header(Table) ->
+ [{case K of
+ <<"CC">> -> <<"x-original-cc">>;
+ _ -> K
+ end, T, V} || {K, T, V} <- Table].
+
+%% Does this routing-history entry describe the same upstream (same
+%% URI, vhost and queue) as Info?
+visit_match({table, T}, Info) ->
+ lists:all(fun (K) ->
+ rabbit_misc:table_lookup(T, K) =:=
+ rabbit_misc:table_lookup(Info, K)
+ end, [<<"uri">>, <<"virtual_host">>, <<"queue">>]);
+visit_match(_ ,_) ->
+ false.
+
+%% Consume from the upstream queue at priority -1 so we only receive
+%% messages when no genuine local consumer upstream wants them.
+consume(Ch, Upstream, UQueue) ->
+ NoAck = Upstream#upstream.ack_mode =:= 'no-ack',
+ amqp_channel:cast(
+ Ch, #'basic.consume'{queue = name(UQueue),
+ no_ack = NoAck,
+ nowait = true,
+ consumer_tag = <<"consumer">>,
+ arguments = [{<<"x-priority">>, long, -1}]}).
+
+cancel(Ch) ->
+ amqp_channel:cast(Ch, #'basic.cancel'{nowait = true,
+ consumer_tag = <<"consumer">>}).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_queue_link_sup_sup).
+
+-behaviour(supervisor2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, ?MODULE).
+
+%% Supervises the upstream links for all queues (but not exchanges). We need
+%% different handling here since queues do not want a mirrored sup.
+
+-export([start_link/0, start_child/1, adjust/1, stop_child/1]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ supervisor2:start_link({local, ?SUPERVISOR}, ?MODULE, []).
+
+%% Note that the next supervisor down, rabbit_federation_link_sup, is common
+%% between exchanges and queues.
+%%
+%% Start a per-queue link supervisor, keyed by the (cleaned) queue
+%% record itself - see id/1 below.
+start_child(Q) ->
+ case supervisor2:start_child(
+ ?SUPERVISOR,
+ {id(Q), {rabbit_federation_link_sup, start_link, [Q]},
+ transient, ?MAX_WAIT, supervisor,
+ [rabbit_federation_link_sup]}) of
+ {ok, _Pid} -> ok;
+ %% A link returned {stop, gone}, the link_sup shut down, that's OK.
+ {error, {shutdown, _}} -> ok
+ end.
+
+%% Propagate a parameter/policy change to every queue's link supervisor.
+adjust(Reason) ->
+ [rabbit_federation_link_sup:adjust(Pid, Q, Reason) ||
+ {Q, Pid, _, _} <- supervisor2:which_children(?SUPERVISOR)],
+ ok.
+
+%% Terminate and remove the per-queue link supervisor.
+stop_child(Q) ->
+ ok = supervisor2:terminate_child(?SUPERVISOR, id(Q)),
+ ok = supervisor2:delete_child(?SUPERVISOR, id(Q)).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_one, 3, 10}, []}}.
+
+%% Clean out all transient aspects of the queue. We need to keep the
+%% entire queue around rather than just take its name since we will
+%% want to know its policy to determine how to federate it, and its
+%% immutable properties in case we want to redeclare it upstream. We
+%% don't just take its name and look it up again since that would
+%% introduce race conditions when policies change frequently. Note
+%% that since we take down all the links and start again when policies
+%% change, the policy will always be correct, so we don't clear it out
+%% here and can trust it.
+id(Q = #amqqueue{}) -> Q#amqqueue{pid = none,
+ slave_pids = none,
+ sync_slave_pids = none,
+ gm_pids = none}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% gen_server tracking the status of every federation link in a private
+%% ETS table keyed by {XorQName, UpstreamName, UpstreamXorQName}.
+%% Reports arrive as casts from the links; reads go through calls so
+%% they are serialised with updates.
+-module(rabbit_federation_status).
+-behaviour(gen_server).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-export([start_link/0]).
+
+-export([report/4, remove_exchange_or_queue/1, remove/2, status/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_federation_util, [name/1]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-record(state, {}).
+%% One ETS row per link: key (see key/2), the sanitised upstream URI,
+%% the current status term, and when it was last reported.
+-record(entry, {key, uri, status, timestamp}).
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% Record a link's status (async; timestamp taken at the caller).
+report(Upstream, UParams, XorQName, Status) ->
+ gen_server:cast(?SERVER, {report, Upstream, UParams, XorQName, Status,
+ calendar:local_time()}).
+
+%% Remove every entry for an exchange/queue (it was deleted).
+remove_exchange_or_queue(XorQName) ->
+ gen_server:call(?SERVER, {remove_exchange_or_queue, XorQName}, infinity).
+
+%% Remove the single entry for one upstream of an exchange/queue.
+remove(Upstream, XorQName) ->
+ gen_server:call(?SERVER, {remove, Upstream, XorQName}, infinity).
+
+%% All entries, formatted as proplists.
+status() ->
+ gen_server:call(?SERVER, status, infinity).
+
+init([]) ->
+ ?ETS_NAME = ets:new(?ETS_NAME,
+ [named_table, {keypos, #entry.key}, private]),
+ {ok, #state{}}.
+
+handle_call({remove_exchange_or_queue, XorQName}, _From, State) ->
+ [link_gone(Entry)
+ || Entry <- ets:match_object(?ETS_NAME, match_entry(xorqkey(XorQName)))],
+ {reply, ok, State};
+
+handle_call({remove, Upstream, XorQName}, _From, State) ->
+ case ets:match_object(?ETS_NAME, match_entry(key(XorQName, Upstream))) of
+ [Entry] -> link_gone(Entry);
+ [] -> ok
+ end,
+ {reply, ok, State};
+
+handle_call(status, _From, State) ->
+ Entries = ets:tab2list(?ETS_NAME),
+ {reply, [format(Entry) || Entry <- Entries], State}.
+
+%% Insert/overwrite the entry and emit a rabbit_event notification.
+handle_cast({report, Upstream, #upstream_params{safe_uri = URI},
+ XorQName, Status, Timestamp}, State) ->
+ Entry = #entry{key = key(XorQName, Upstream),
+ status = Status,
+ uri = URI,
+ timestamp = Timestamp},
+ true = ets:insert(?ETS_NAME, Entry),
+ rabbit_event:notify(federation_link_status, format(Entry)),
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Render an entry as the proplist exposed via status/0 and events.
+format(#entry{status = Status,
+ uri = URI,
+ timestamp = Timestamp} = Entry) ->
+ identity(Entry) ++ split_status(Status) ++ [{uri, URI},
+ {timestamp, Timestamp}].
+
+%% The identifying part of an entry: local name, upstream name and
+%% upstream object name, with exchange/queue-specific key names.
+identity(#entry{key = {#resource{virtual_host = VHost,
+ kind = Type,
+ name = XorQNameBin},
+ UpstreamName, UXorQNameBin}}) ->
+ case Type of
+ exchange -> [{exchange, XorQNameBin},
+ {upstream_exchange, UXorQNameBin}];
+ queue -> [{queue, XorQNameBin},
+ {upstream_queue, UXorQNameBin}]
+ end ++ [{type, Type},
+ {vhost, VHost},
+ {upstream, UpstreamName}].
+
+%% Flatten the status term into proplist entries.
+split_status({running, ConnName}) -> [{status, running},
+ {local_connection, ConnName}];
+split_status({Status, Error}) -> [{status, Status},
+ {error, Error}];
+split_status(Status) when is_atom(Status) -> [{status, Status}].
+
+%% Delete an entry, notifying listeners that the link has gone.
+link_gone(Entry) ->
+ rabbit_event:notify(federation_link_removed, identity(Entry)),
+ true = ets:delete_object(?ETS_NAME, Entry).
+
+%% We don't want to key off the entire upstream, bits of it may change
+key(XName = #resource{kind = exchange}, #upstream{name = UpstreamName,
+ exchange_name = UXNameBin}) ->
+ {XName, UpstreamName, UXNameBin};
+
+key(QName = #resource{kind = queue}, #upstream{name = UpstreamName,
+ queue_name = UQNameBin}) ->
+ {QName, UpstreamName, UQNameBin}.
+
+%% Match-pattern key covering every upstream of one exchange/queue.
+xorqkey(XorQName) ->
+ {XorQName, '_', '_'}.
+
+%% Full #entry{} match pattern for a given key (pattern).
+match_entry(Key) ->
+ #entry{key = Key,
+ uri = '_',
+ status = '_',
+ timestamp = '_'}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_sup).
+
+-behaviour(supervisor).
+
+%% Supervises everything. There is just one of these.
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, rabbit_federation_sup).
+
+-export([start_link/0]).
+
+-export([init/1]).
+
+%% This supervisor needs to be part of the rabbit application since
+%% a) it needs to be in place when exchange recovery takes place
+%% b) it needs to go up and down with rabbit
+
+-rabbit_boot_step({rabbit_federation_supervisor,
+ [{description, "federation"},
+ {mfa, {rabbit_sup, start_child, [?MODULE]}},
+ {requires, kernel_ready},
+ {enables, rabbit_federation_exchange}]}).
+
+%%----------------------------------------------------------------------------
+
+%% Start the top-level federation supervisor and then register the
+%% federation event handler; returns the supervisor's start result.
+start_link() ->
+ R = supervisor:start_link({local, ?SUPERVISOR}, ?MODULE, []),
+ rabbit_federation_event:add_handler(),
+ R.
+
+%%----------------------------------------------------------------------------
+
+%% Supervise the status tracker plus the exchange- and queue-link
+%% supervisor hierarchies, one_for_one with at most 3 restarts in 10s.
+init([]) ->
+ Status = {status, {rabbit_federation_status, start_link, []},
+ transient, ?MAX_WAIT, worker,
+ [rabbit_federation_status]},
+ XLinkSupSup = {x_links,
+ {rabbit_federation_exchange_link_sup_sup, start_link, []},
+ transient, ?MAX_WAIT, supervisor,
+ [rabbit_federation_exchange_link_sup_sup]},
+ QLinkSupSup = {q_links,
+ {rabbit_federation_queue_link_sup_sup, start_link, []},
+ transient, ?MAX_WAIT, supervisor,
+ [rabbit_federation_queue_link_sup_sup]},
+ {ok, {{one_for_one, 3, 10}, [Status, XLinkSupSup, QLinkSupSup]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_upstream).
+
+-include("rabbit_federation.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([federate/1, for/1, for/2, params_to_string/1, to_params/2]).
+%% For testing
+-export([from_set/2, remove_credentials/1]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+-import(rabbit_federation_util, [name/1, vhost/1, r/1]).
+
+%%----------------------------------------------------------------------------
+
+%% An exchange or queue is federated iff some policy sets either the
+%% federation-upstream or the federation-upstream-set key on it.
+federate(XorQ) ->
+    case rabbit_policy:get(<<"federation-upstream">>, XorQ) of
+        undefined ->
+            rabbit_policy:get(<<"federation-upstream-set">>, XorQ)
+                =/= undefined;
+        _Upstream ->
+            true
+    end.
+
+%% All upstreams configured for XorQ, or [] if it is not federated.
+for(XorQ) ->
+ case federate(XorQ) of
+ false -> [];
+ true -> from_set_contents(upstreams(XorQ), XorQ)
+ end.
+
+%% As for/1, but narrowed to upstreams with the given name.
+for(XorQ, UpstreamName) ->
+ case federate(XorQ) of
+ false -> [];
+ true -> rabbit_federation_util:find_upstreams(
+ UpstreamName, from_set_contents(upstreams(XorQ), XorQ))
+ end.
+
+%% Resolve the policy on XorQ into upstream-set contents: a single
+%% named upstream becomes a one-element synthetic set; a named set is
+%% looked up. There is deliberately no {_, _} clause:
+upstreams(XorQ) ->
+ UName = rabbit_policy:get(<<"federation-upstream">>, XorQ),
+ USetName = rabbit_policy:get(<<"federation-upstream-set">>, XorQ),
+ %% Cannot define both, see rabbit_federation_parameters:validate_policy/1
+ case {UName, USetName} of
+ {undefined, undefined} -> [];
+ {undefined, _} -> set_contents(USetName, vhost(XorQ));
+ {_, undefined} -> [[{<<"upstream">>, UName}]]
+ end.
+
+%% AMQP table identifying this link: the credential-free URI plus the
+%% federated resource name, keyed "exchange" or "queue" by kind.
+params_table(SafeURI, XorQ) ->
+ Key = case XorQ of
+ #exchange{} -> <<"exchange">>;
+ #amqqueue{} -> <<"queue">>
+ end,
+ [{<<"uri">>, longstr, SafeURI},
+ {Key, longstr, name(XorQ)}].
+
+%% Human-readable description of upstream params, e.g. for logging.
+params_to_string(#upstream_params{safe_uri = SafeURI,
+ x_or_q = XorQ}) ->
+ print("~s on ~s", [rabbit_misc:rs(r(XorQ)), SafeURI]).
+
+%% Strip username/password from an AMQP URI binary so it is safe to
+%% expose in status reports and events.
+remove_credentials(URI) ->
+ list_to_binary(amqp_uri:remove_credentials(binary_to_list(URI))).
+
+%% Build #upstream_params{} for a connection attempt: pick one of the
+%% configured URIs at random, parse it, and rename the x-or-q to its
+%% upstream name in the upstream's vhost.
+%% NOTE(review): random:seed(now()) uses APIs deprecated in modern OTP
+%% (use the rand module instead); left as-is for the OTP versions this
+%% codebase targets — confirm before changing.
+to_params(Upstream = #upstream{uris = URIs}, XorQ) ->
+ random:seed(now()),
+ URI = lists:nth(random:uniform(length(URIs)), URIs),
+ {ok, Params} = amqp_uri:parse(binary_to_list(URI), vhost(XorQ)),
+ XorQ1 = with_name(Upstream, vhost(Params), XorQ),
+ SafeURI = remove_credentials(URI),
+ #upstream_params{params = Params,
+ uri = URI,
+ x_or_q = XorQ1,
+ safe_uri = SafeURI,
+ table = params_table(SafeURI, XorQ)}.
+
+print(Fmt, Args) -> iolist_to_binary(io_lib:format(Fmt, Args)).
+
+%% Look up a named upstream set in XorQ's vhost and resolve it.
+from_set(SetName, XorQ) ->
+ from_set_contents(set_contents(SetName, vhost(XorQ)), XorQ).
+
+set_contents(<<"all">>, VHost) ->
+ Upstreams = rabbit_runtime_parameters:list(
+ VHost, <<"federation-upstream">>),
+ [[{<<"upstream">>, pget(name, U)}] || U <- Upstreams];
+
+set_contents(SetName, VHost) ->
+ case rabbit_runtime_parameters:value(
+ VHost, <<"federation-upstream-set">>, SetName) of
+ not_found -> [];
+ Set -> Set
+ end.
+
+%% Resolve every element of an upstream set, silently dropping
+%% references to upstreams that no longer exist.
+from_set_contents(Set, XorQ) ->
+    lists:filter(fun (Res) -> Res =/= not_found end,
+                 [from_set_element(Elem, XorQ) || Elem <- Set]).
+
+%% Resolve one set element to an #upstream{} by looking up the named
+%% federation-upstream parameter; not_found if it has been deleted.
+from_set_element(UpstreamSetElem, XorQ) ->
+ Name = bget(upstream, UpstreamSetElem, []),
+ case rabbit_runtime_parameters:value(
+ vhost(XorQ), <<"federation-upstream">>, Name) of
+ not_found -> not_found;
+ Upstream -> from_upstream_or_set(
+ UpstreamSetElem, Name, Upstream, XorQ)
+ end.
+
+%% Merge a set element (US) with its upstream definition (U) into an
+%% #upstream{}; set-element values override upstream values (see bget/4).
+%% A single URI binary is normalised to a one-element list.
+%% NOTE(review): list_to_atom on the ack-mode value creates atoms from
+%% operator-supplied config; assumed validated upstream to a small fixed
+%% set — confirm in rabbit_federation_parameters.
+from_upstream_or_set(US, Name, U, XorQ) ->
+ URIParam = bget(uri, US, U),
+ URIs = case URIParam of
+ B when is_binary(B) -> [B];
+ L when is_list(L) -> L
+ end,
+ #upstream{uris = URIs,
+ exchange_name = bget(exchange, US, U, name(XorQ)),
+ queue_name = bget(queue, US, U, name(XorQ)),
+ prefetch_count = bget('prefetch-count', US, U, ?DEF_PREFETCH),
+ reconnect_delay = bget('reconnect-delay', US, U, 1),
+ max_hops = bget('max-hops', US, U, 1),
+ expires = bget(expires, US, U, none),
+ message_ttl = bget('message-ttl', US, U, none),
+ trust_user_id = bget('trust-user-id', US, U, false),
+ ack_mode = list_to_atom(
+ binary_to_list(
+ bget('ack-mode', US, U, <<"on-confirm">>))),
+ ha_policy = bget('ha-policy', US, U, none),
+ name = Name}.
+
+%%----------------------------------------------------------------------------
+
+bget(K, L1, L2) -> bget(K, L1, L2, undefined).
+
+%% Two-level proplist lookup by atom key (converted to the binary keys
+%% the config uses): L1 takes precedence over L2; D if in neither.
+bget(K0, L1, L2, D) ->
+ K = a2b(K0),
+ case pget(K, L1, undefined) of
+ undefined -> pget(K, L2, D);
+ Result -> Result
+ end.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+%% Rewrite the downstream x-or-q's resource name to the upstream's
+%% configured name, in the upstream's vhost.
+with_name(#upstream{exchange_name = XNameBin}, VHostBin, X = #exchange{}) ->
+ X#exchange{name = rabbit_misc:r(VHostBin, exchange, XNameBin)};
+
+with_name(#upstream{queue_name = QNameBin}, VHostBin, Q = #amqqueue{}) ->
+ Q#amqqueue{name = rabbit_misc:r(VHostBin, queue, QNameBin)}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_upstream_exchange).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation upstream exchange type"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"x-federation-upstream">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, assert_args_equivalence/2]).
+
+%%----------------------------------------------------------------------------
+
+%% rabbit_exchange_type callback: human-readable description, tagged
+%% as internal so management UIs can hide it.
+description() ->
+ [{description, <<"Federation upstream helper exchange">>},
+ {internal_purpose, federation}].
+
+serialise_events() -> false.
+
+%% Route as a fanout, but only when the message has not already
+%% exceeded max-hops or visited this node (loop prevention based on
+%% the x-received-from routing headers).
+route(X = #exchange{arguments = Args},
+ D = #delivery{message = #basic_message{content = Content}}) ->
+ %% This arg was introduced in the same release as this exchange type;
+ %% it must be set
+ {long, MaxHops} = rabbit_misc:table_lookup(Args, ?MAX_HOPS_ARG),
+ %% This was introduced later; it might be missing
+ DName = case rabbit_misc:table_lookup(Args, ?NODE_NAME_ARG) of
+ {longstr, N} -> N;
+ _ -> unknown
+ end,
+ Headers = rabbit_basic:extract_headers(Content),
+ case rabbit_federation_util:should_forward(Headers, MaxHops, DName) of
+ true -> rabbit_exchange_type_fanout:route(X, D);
+ false -> []
+ end.
+
+%% Declaration-time check: the max-hops argument must exist and be a long.
+validate(#exchange{arguments = Args}) ->
+ rabbit_federation_util:validate_arg(?MAX_HOPS_ARG, long, Args).
+
+%% No-op implementations of the remaining rabbit_exchange_type
+%% callbacks: this exchange type needs no special handling for
+%% lifecycle, policy or binding events.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+
+%% Redeclaration check: max-hops must match the existing exchange;
+%% everything else is delegated to the generic exchange check.
+assert_args_equivalence(X = #exchange{name = Name,
+ arguments = Args}, ReqArgs) ->
+ rabbit_misc:assert_args_equivalence(Args, ReqArgs, Name, [?MAX_HOPS_ARG]),
+ rabbit_exchange:assert_args_equivalence(X, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_util).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-export([should_forward/3, find_upstreams/2, already_seen/2]).
+-export([validate_arg/3, fail/2, name/1, vhost/1, r/1, pgname/1]).
+
+-import(rabbit_misc, [pget_or_die/2, pget/3]).
+
+%%----------------------------------------------------------------------------
+
+%% Decide whether to forward a message downstream. Messages with no
+%% headers or no routing header are always forwarded; otherwise the
+%% hop count must be under MaxHops and this node must not appear in
+%% the x-received-from trail.
+should_forward(undefined, _MaxHops, _DName) ->
+ true;
+should_forward(Headers, MaxHops, DName) ->
+ case rabbit_misc:table_lookup(Headers, ?ROUTING_HEADER) of
+ {array, A} -> length(A) < MaxHops andalso not already_seen(DName, A);
+ _ -> true
+ end.
+
+%% True if any table in the routing-header array records this
+%% cluster-name, i.e. the message has already passed through us.
+already_seen(Name, Array) ->
+ lists:any(fun ({table, T}) -> {longstr, Name} =:= rabbit_misc:table_lookup(
+ T, <<"cluster-name">>);
+ (_) -> false
+ end, Array).
+
+%% Filter a list of #upstream{}s to those with exactly this name.
+find_upstreams(Name, Upstreams) ->
+ [U || U = #upstream{name = Name2} <- Upstreams,
+ Name =:= Name2].
+
+%% Require an argument of the given AMQP field type; raises a
+%% precondition_failed protocol error when missing or mistyped.
+validate_arg(Name, Type, Args) ->
+ case rabbit_misc:table_lookup(Args, Name) of
+ {Type, _} -> ok;
+ undefined -> fail("Argument ~s missing", [Name]);
+ _ -> fail("Argument ~s must be of type ~s", [Name, Type])
+ end.
+
+fail(Fmt, Args) -> rabbit_misc:protocol_error(precondition_failed, Fmt, Args).
+
+%% Extract the bare name binary from a resource, exchange or queue.
+name( #resource{name = XName}) -> XName;
+name(#exchange{name = #resource{name = XName}}) -> XName;
+name(#amqqueue{name = #resource{name = QName}}) -> QName.
+
+%% Extract the virtual host from any of the record types that carry one.
+vhost( #resource{virtual_host = VHost}) -> VHost;
+vhost(#exchange{name = #resource{virtual_host = VHost}}) -> VHost;
+vhost(#amqqueue{name = #resource{virtual_host = VHost}}) -> VHost;
+vhost( #amqp_params_direct{virtual_host = VHost}) -> VHost;
+vhost(#amqp_params_network{virtual_host = VHost}) -> VHost.
+
+%% Extract the #resource{} from an exchange or queue record.
+r(#exchange{name = XName}) -> XName;
+r(#amqqueue{name = QName}) -> QName.
+
+%% Process-group name, optionally qualified with the cluster name when
+%% the pgroup_name_cluster_id app env flag is set (defaults to false in
+%% the .app file, so the bare name is the common case).
+pgname(Name) ->
+ case application:get_env(rabbitmq_federation, pgroup_name_cluster_id) of
+ {ok, false} -> Name;
+ {ok, true} -> {rabbit_nodes:cluster_name(), Name}
+ end.
--- /dev/null
+%% OTP application resource file for the RabbitMQ federation plugin.
+%% %%VSN%% is substituted with the real version at build time; the
+%% modules list is filled in by the build as well.
+{application, rabbitmq_federation,
+ [{description, "RabbitMQ Federation"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_federation_app, []}},
+ {env, [{pgroup_name_cluster_id, false}]},
+ {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_exchange_test).
+
+-compile(export_all).
+-include("rabbit_federation.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_federation_util, [name/1]).
+
+-import(rabbit_federation_test_util,
+ [expect/3, expect_empty/2,
+ set_upstream/3, clear_upstream/2, set_upstream_set/3,
+ set_policy/4, clear_policy/2,
+ set_policy_upstream/4, set_policy_upstreams/3,
+ disambiguate/1, no_plugins/1, single_cfg/0]).
+
+-define(UPSTREAM_DOWNSTREAM, [x(<<"upstream">>),
+ x(<<"fed.downstream">>)]).
+
+%% Basic happy path: a binding downstream propagates upstream and a
+%% message published upstream arrives on the downstream queue.
+simple_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ await_binding(<<"upstream">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO">>)
+ end, ?UPSTREAM_DOWNSTREAM).
+
+%% One downstream federated from two upstreams: messages published to
+%% either upstream reach the downstream queue.
+multiple_upstreams_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q = bind_queue(Ch, <<"fed12.downstream">>, <<"key">>),
+ await_binding(<<"upstream">>, <<"key">>),
+ await_binding(<<"upstream2">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>),
+ publish_expect(Ch, <<"upstream2">>, <<"key">>, Q, <<"HELLO2">>)
+ end, [x(<<"upstream">>),
+ x(<<"upstream2">>),
+ x(<<"fed12.downstream">>)]).
+
+%% With two URIs configured, killing the live connection repeatedly
+%% should eventually see the link use each URI (expect_uris/1 loops
+%% until both have been observed).
+multiple_uris_test() ->
+ %% We can't use a direct connection for Kill() to work.
+ set_upstream(single_cfg(), <<"localhost">>,
+ [<<"amqp://localhost">>, <<"amqp://localhost:5672">>]),
+ WithCh = fun(F) ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ F(Ch),
+ amqp_connection:close(Conn)
+ end,
+ WithCh(fun (Ch) -> declare_all(Ch, ?UPSTREAM_DOWNSTREAM) end),
+ expect_uris([<<"amqp://localhost">>, <<"amqp://localhost:5672">>]),
+ WithCh(fun (Ch) -> delete_all(Ch, ?UPSTREAM_DOWNSTREAM) end),
+ %% Put back how it was
+ set_upstream(single_cfg(), <<"localhost">>, <<"amqp://">>).
+
+%% Loop until the link has been seen using every URI in the list:
+%% read the current URI from status, kill the connection so the link
+%% reconnects (possibly to another URI), and recurse on the rest.
+expect_uris([]) -> ok;
+expect_uris(URIs) -> [Link] = rabbit_federation_status:status(),
+ URI = pget(uri, Link),
+ kill_only_connection(n("rabbit-test")),
+ expect_uris(URIs -- [URI]).
+
+%% Poll until exactly one connection exists on Node, then close it and
+%% wait for it to die. See note [1] below for why the close is caught.
+kill_only_connection(Node) ->
+ case connection_pids(Node) of
+ [Pid] -> catch rabbit_networking:close_connection(Pid, "boom"), %% [1]
+ wait_for_pid_to_die(Node, Pid);
+ _ -> timer:sleep(100),
+ kill_only_connection(Node)
+ end.
+
+%% [1] the catch is because we could still see a connection from a
+%% previous time round. If so that's fine (we'll just loop around
+%% again) but we don't want the test to fail because a connection
+%% closed as we were trying to close it.
+
+%% Poll every 100ms until Pid is no longer the (sole) connection on Node.
+wait_for_pid_to_die(Node, Pid) ->
+ case connection_pids(Node) of
+ [Pid] -> timer:sleep(100),
+ wait_for_pid_to_die(Node, Pid);
+ _ -> ok
+ end.
+
+
+%% Two downstreams share an upstream; each receives the messages for
+%% its own upstream set (fed12.downstream2 sees both upstreams).
+multiple_downstreams_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ Q12 = bind_queue(Ch, <<"fed12.downstream2">>, <<"key">>),
+ await_binding(<<"upstream">>, <<"key">>, 2),
+ await_binding(<<"upstream2">>, <<"key">>),
+ publish(Ch, <<"upstream">>, <<"key">>, <<"HELLO1">>),
+ publish(Ch, <<"upstream2">>, <<"key">>, <<"HELLO2">>),
+ expect(Ch, Q1, [<<"HELLO1">>]),
+ expect(Ch, Q12, [<<"HELLO1">>, <<"HELLO2">>])
+ end, ?UPSTREAM_DOWNSTREAM ++
+ [x(<<"upstream2">>),
+ x(<<"fed12.downstream2">>)]).
+
+%% Exchange-to-exchange: bind a plain exchange to the federated one and
+%% check messages still flow through to a queue on the plain exchange.
+e2e_test() ->
+ with_ch(
+ fun (Ch) ->
+ bind_exchange(Ch, <<"downstream2">>, <<"fed.downstream">>,
+ <<"key">>),
+ await_binding(<<"upstream">>, <<"key">>),
+ Q = bind_queue(Ch, <<"downstream2">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>)
+ end, ?UPSTREAM_DOWNSTREAM ++ [x(<<"downstream2">>)]).
+
+%% Deleting one of two queues with the same binding must not remove the
+%% upstream binding still needed by the other queue.
+unbind_on_delete_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ await_binding(<<"upstream">>, <<"key">>),
+ delete_queue(Ch, Q2),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>)
+ end, ?UPSTREAM_DOWNSTREAM).
+
+%% As unbind_on_delete_test, but via explicit unbind rather than
+%% queue deletion.
+unbind_on_unbind_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ await_binding(<<"upstream">>, <<"key">>),
+ unbind_queue(Ch, Q2, <<"fed.downstream">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>),
+ delete_queue(Ch, Q2)
+ end, ?UPSTREAM_DOWNSTREAM).
+
+%% Two-node test: the user-id header is cleared when federated unless
+%% the upstream is configured with trust-user-id, in which case it is
+%% preserved. Also checks the x-received-from header records the
+%% (credential-free) upstream URI.
+user_id_with() -> disambiguate(start_ab).
+user_id([Rabbit, Hare]) ->
+ set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>, []),
+ Perm = fun (F, A) ->
+ ok = rpc:call(pget(node, Hare),
+ rabbit_auth_backend_internal, F, A)
+ end,
+ Perm(add_user, [<<"hare-user">>, <<"hare-user">>]),
+ Perm(set_permissions, [<<"hare-user">>,
+ <<"/">>, <<".*">>, <<".*">>, <<".*">>]),
+
+ {_, Ch} = rabbit_test_util:connect(Rabbit),
+ {ok, Conn2} = amqp_connection:start(
+ #amqp_params_network{username = <<"hare-user">>,
+ password = <<"hare-user">>,
+ port = pget(port, Hare)}),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ declare_exchange(Ch2, x(<<"test">>)),
+ declare_exchange(Ch, x(<<"test">>)),
+ Q = bind_queue(Ch, <<"test">>, <<"key">>),
+ await_binding(Hare, <<"test">>, <<"key">>),
+
+ Msg = #amqp_msg{props = #'P_basic'{user_id = <<"hare-user">>},
+ payload = <<"HELLO">>},
+
+ %% Assert the x-received-from header names the upstream URI
+ SafeUri = fun (H) ->
+ {array, [{table, Recv}]} =
+ rabbit_misc:table_lookup(
+ H, <<"x-received-from">>),
+ ?assertEqual(
+ {longstr, <<"amqp://localhost:5673">>},
+ rabbit_misc:table_lookup(Recv, <<"uri">>))
+ end,
+ %% Returns a checker fun asserting the delivered user_id
+ ExpectUser =
+ fun (ExpUser) ->
+ fun () ->
+ receive
+ {#'basic.deliver'{},
+ #amqp_msg{props = Props,
+ payload = Payload}} ->
+ #'P_basic'{user_id = ActUser,
+ headers = Headers} = Props,
+ SafeUri(Headers),
+ ?assertEqual(<<"HELLO">>, Payload),
+ ?assertEqual(ExpUser, ActUser)
+ end
+ end
+ end,
+
+ %% Without trust-user-id the user id is stripped
+ publish(Ch2, <<"test">>, <<"key">>, Msg),
+ expect(Ch, Q, ExpectUser(undefined)),
+
+ set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>,
+ [{<<"trust-user-id">>, true}]),
+
+ %% With trust-user-id it is preserved
+ publish(Ch2, <<"test">>, <<"key">>, Msg),
+ expect(Ch, Q, ExpectUser(<<"hare-user">>)),
+
+ ok.
+
+%% In order to test that unbinds get sent we deliberately set up a
+%% broken config - with topic upstream and fanout downstream. You
+%% shouldn't really do this, but it lets us see "extra" messages that
+%% get sent.
+%% Verify unbinds propagate upstream: after removing all "key2"
+%% consumers, messages for "key2" must not arrive at all.
+unbind_gets_transmitted_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q11 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
+ Q12 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
+ Q21 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
+ Q22 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
+ await_binding(<<"upstream">>, <<"key1">>),
+ await_binding(<<"upstream">>, <<"key2">>),
+ [delete_queue(Ch, Q) || Q <- [Q12, Q21, Q22]],
+ publish(Ch, <<"upstream">>, <<"key1">>, <<"YES">>),
+ publish(Ch, <<"upstream">>, <<"key2">>, <<"NO">>),
+ expect(Ch, Q11, [<<"YES">>]),
+ expect_empty(Ch, Q11)
+ end, [x(<<"upstream">>),
+ x(<<"fed.downstream">>)]).
+
+%% Two mutually-federated exchanges: each queue gets each message
+%% exactly once, i.e. no message loops back and duplicates.
+no_loop_test() ->
+ with_ch(
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"one">>, <<"key">>),
+ Q2 = bind_queue(Ch, <<"two">>, <<"key">>),
+ await_binding(<<"one">>, <<"key">>, 2),
+ await_binding(<<"two">>, <<"key">>, 2),
+ publish(Ch, <<"one">>, <<"key">>, <<"Hello from one">>),
+ publish(Ch, <<"two">>, <<"key">>, <<"Hello from two">>),
+ expect(Ch, Q1, [<<"Hello from one">>, <<"Hello from two">>]),
+ expect(Ch, Q2, [<<"Hello from one">>, <<"Hello from two">>]),
+ expect_empty(Ch, Q1),
+ expect_empty(Ch, Q2)
+ end, [x(<<"one">>),
+ x(<<"two">>)]).
+
+%% Restart the broker (twice) and check federation queue-name suffixes
+%% survive restarts; after narrowing the upstream set to only
+%% "upstream", the "upstream2" suffix must be gone and messages must
+%% still flow to the durable queue.
+binding_recovery_with() -> disambiguate(
+ fun (Init) ->
+ rabbit_test_configs:start_nodes(Init, [a])
+ end).
+binding_recovery([Rabbit]) ->
+ Q = <<"durable-Q">>,
+ {_, Ch} = rabbit_test_util:connect(Rabbit),
+
+ rabbit_federation_test_util:set_upstream(
+ Rabbit, <<"rabbit">>, <<"amqp://localhost:5672">>),
+ rabbit_federation_test_util:set_upstream_set(
+ Rabbit, <<"upstream">>,
+ [{<<"rabbit">>, [{<<"exchange">>, <<"upstream">>}]},
+ {<<"rabbit">>, [{<<"exchange">>, <<"upstream2">>}]}]),
+ rabbit_federation_test_util:set_policy(
+ Rabbit, <<"fed">>, <<"^fed\\.">>, <<"upstream">>),
+
+ declare_all(Ch, [x(<<"upstream2">>) | ?UPSTREAM_DOWNSTREAM]),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true}),
+ bind_queue(Ch, Q, <<"fed.downstream">>, <<"key">>),
+ timer:sleep(100), %% To get the suffix written
+
+ %% i.e. don't clean up
+ Rabbit2 = rabbit_test_configs:restart_node(Rabbit),
+
+ ?assert(none =/= suffix(Rabbit2, <<"rabbit">>, "upstream")),
+ ?assert(none =/= suffix(Rabbit2, <<"rabbit">>, "upstream2")),
+
+ %% again don't clean up
+ Rabbit3 = rabbit_test_configs:restart_node(Rabbit2),
+ {_, Ch3} = rabbit_test_util:connect(Rabbit3),
+
+ rabbit_test_util:set_param(
+ Rabbit, <<"federation-upstream-set">>, <<"upstream">>,
+ [[{<<"upstream">>, <<"rabbit">>}, {<<"exchange">>, <<"upstream">>}]]),
+
+ publish_expect(Ch3, <<"upstream">>, <<"key">>, Q, <<"HELLO">>),
+ ?assert(none =/= suffix(Rabbit3, <<"rabbit">>, "upstream")),
+ ?assertEqual(none, suffix(Rabbit3, <<"rabbit">>, "upstream2")),
+ delete_all(Ch3, [x(<<"upstream2">>) | ?UPSTREAM_DOWNSTREAM]),
+ delete_queue(Ch3, Q),
+ ok.
+
+%% Fetch the active federation queue-name suffix for the given
+%% upstream/exchange on the remote node ('none' if absent).
+suffix(Cfg, Name, XName) ->
+ rpc:call(pget(node, Cfg), rabbit_federation_db, get_active_suffix,
+ [r(<<"fed.downstream">>),
+ #upstream{name = Name,
+ exchange_name = list_to_binary(XName)}, none]).
+
+%% TODO remove
+%% Build a full node name from a short name plus this node's host.
+n(Nodename) ->
+ {_, NodeHost} = rabbit_nodes:parts(node()),
+ rabbit_nodes:make({Nodename, NodeHost}).
+
+%% Stop and restart the upstream broker; bindings added/removed while
+%% it was down must be transferred when the link comes back up.
+restart_upstream_with() -> disambiguate(start_ab).
+restart_upstream([Rabbit, Hare]) ->
+ {_, Downstream} = rabbit_test_util:connect(Rabbit),
+ {_, Upstream} = rabbit_test_util:connect(Hare),
+
+ rabbit_federation_test_util:set_upstream(
+ Rabbit, <<"hare">>, <<"amqp://localhost:5673">>),
+ rabbit_federation_test_util:set_upstream_set(
+ Rabbit, <<"upstream">>,
+ [{<<"hare">>, [{<<"exchange">>, <<"upstream">>}]}]),
+ rabbit_federation_test_util:set_policy(
+ Rabbit, <<"hare">>, <<"^hare\\.">>, <<"upstream">>),
+
+ declare_exchange(Upstream, x(<<"upstream">>)),
+ declare_exchange(Downstream, x(<<"hare.downstream">>)),
+
+ Qstays = bind_queue(Downstream, <<"hare.downstream">>, <<"stays">>),
+ Qgoes = bind_queue(Downstream, <<"hare.downstream">>, <<"goes">>),
+
+ Hare2 = rabbit_test_configs:stop_node(Hare),
+
+ %% Changes made while the upstream is down
+ Qcomes = bind_queue(Downstream, <<"hare.downstream">>, <<"comes">>),
+ unbind_queue(Downstream, Qgoes, <<"hare.downstream">>, <<"goes">>),
+
+ Hare3 = rabbit_test_configs:start_node(Hare2),
+ {_, Upstream1} = rabbit_test_util:connect(Hare3),
+
+ %% Wait for the link to come up and for these bindings
+ %% to be transferred
+ await_binding(Hare, <<"upstream">>, <<"comes">>, 1),
+ await_binding_absent(Hare, <<"upstream">>, <<"goes">>),
+ await_binding(Hare, <<"upstream">>, <<"stays">>, 1),
+
+ publish(Upstream1, <<"upstream">>, <<"goes">>, <<"GOES">>),
+ publish(Upstream1, <<"upstream">>, <<"stays">>, <<"STAYS">>),
+ publish(Upstream1, <<"upstream">>, <<"comes">>, <<"COMES">>),
+
+ expect(Downstream, Qstays, [<<"STAYS">>]),
+ expect(Downstream, Qcomes, [<<"COMES">>]),
+ expect_empty(Downstream, Qgoes),
+
+ delete_exchange(Downstream, <<"hare.downstream">>),
+ delete_exchange(Upstream1, <<"upstream">>),
+ ok.
+
+%% flopsy, mopsy and cottontail, connected in a ring with max_hops = 2
+%% for each connection. We should not see any duplicates.
+
+%% Three brokers federated in a ring with max-hops 2: every queue must
+%% receive each message exactly once (no duplicates after two hops).
+max_hops_with() -> disambiguate(start_abc).
+max_hops([Flopsy, Mopsy, Cottontail]) ->
+ [set_policy_upstream(
+ Cfg, <<"^ring$">>,
+ list_to_binary("amqp://localhost:" ++ integer_to_list(Port)),
+ [{<<"max-hops">>, 2}])
+ || {Cfg, Port} <- [{Flopsy, pget(port, Cottontail)},
+ {Mopsy, pget(port, Flopsy)},
+ {Cottontail, pget(port, Mopsy)}]],
+
+ {_, FlopsyCh} = rabbit_test_util:connect(Flopsy),
+ {_, MopsyCh} = rabbit_test_util:connect(Mopsy),
+ {_, CottontailCh} = rabbit_test_util:connect(Cottontail),
+
+ declare_exchange(FlopsyCh, x(<<"ring">>)),
+ declare_exchange(MopsyCh, x(<<"ring">>)),
+ declare_exchange(CottontailCh, x(<<"ring">>)),
+
+ Q1 = bind_queue(FlopsyCh, <<"ring">>, <<"key">>),
+ Q2 = bind_queue(MopsyCh, <<"ring">>, <<"key">>),
+ Q3 = bind_queue(CottontailCh, <<"ring">>, <<"key">>),
+
+ await_binding(Flopsy, <<"ring">>, <<"key">>, 3),
+ await_binding(Mopsy, <<"ring">>, <<"key">>, 3),
+ await_binding(Cottontail, <<"ring">>, <<"key">>, 3),
+
+ publish(FlopsyCh, <<"ring">>, <<"key">>, <<"HELLO flopsy">>),
+ publish(MopsyCh, <<"ring">>, <<"key">>, <<"HELLO mopsy">>),
+ publish(CottontailCh, <<"ring">>, <<"key">>, <<"HELLO cottontail">>),
+
+ Msgs = [<<"HELLO flopsy">>, <<"HELLO mopsy">>, <<"HELLO cottontail">>],
+ expect(FlopsyCh, Q1, Msgs),
+ expect(MopsyCh, Q2, Msgs),
+ expect(CottontailCh, Q3, Msgs),
+ expect_empty(FlopsyCh, Q1),
+ expect_empty(MopsyCh, Q2),
+ expect_empty(CottontailCh, Q3),
+ ok.
+
+%% Two nodes, both federated with each other, and max_hops set to a
+%% high value. Things should not get out of hand.
+%% Two brokers federated both ways with a large max-hops: cycle
+%% detection (not hop counting) must prevent duplicate delivery.
+cycle_detection_with() -> disambiguate(start_ab).
+cycle_detection([Cycle1, Cycle2]) ->
+ [set_policy_upstream(
+ Cfg, <<"^cycle$">>,
+ list_to_binary("amqp://localhost:" ++ integer_to_list(Port)),
+ [{<<"max-hops">>, 10}])
+ || {Cfg, Port} <- [{Cycle1, pget(port, Cycle2)},
+ {Cycle2, pget(port, Cycle1)}]],
+
+ {_, Cycle1Ch} = rabbit_test_util:connect(Cycle1),
+ {_, Cycle2Ch} = rabbit_test_util:connect(Cycle2),
+
+ declare_exchange(Cycle1Ch, x(<<"cycle">>)),
+ declare_exchange(Cycle2Ch, x(<<"cycle">>)),
+
+ Q1 = bind_queue(Cycle1Ch, <<"cycle">>, <<"key">>),
+ Q2 = bind_queue(Cycle2Ch, <<"cycle">>, <<"key">>),
+
+ %% "key" present twice because once for the local queue and once
+ %% for federation in each case
+ await_binding(Cycle1, <<"cycle">>, <<"key">>, 2),
+ await_binding(Cycle2, <<"cycle">>, <<"key">>, 2),
+
+ publish(Cycle1Ch, <<"cycle">>, <<"key">>, <<"HELLO1">>),
+ publish(Cycle2Ch, <<"cycle">>, <<"key">>, <<"HELLO2">>),
+
+ Msgs = [<<"HELLO1">>, <<"HELLO2">>],
+ expect(Cycle1Ch, Q1, Msgs),
+ expect(Cycle2Ch, Q2, Msgs),
+ expect_empty(Cycle1Ch, Q1),
+ expect_empty(Cycle2Ch, Q2),
+
+ ok.
+
+%% Arrows indicate message flow. Numbers indicate max_hops.
+%%
+%% Dylan ---1--> Bugs ---2--> Jessica
+%% |^ |^
+%% |\--------------1---------------/|
+%% \---------------1----------------/
+%%
+%%
+%% We want to demonstrate that if we bind a queue locally at each
+%% broker, (exactly) the following bindings propagate:
+%%
+%% Bugs binds to Dylan
+%% Jessica binds to Bugs, which then propagates on to Dylan
+%% Jessica binds to Dylan directly
+%% Dylan binds to Jessica.
+%%
+%% i.e. Dylan has two bindings from Jessica and one from Bugs
+%% Bugs has one binding from Jessica
+%% Jessica has one binding from Dylan
+%%
+%% So we tag each binding with its original broker and see how far it gets
+%%
+%% Also we check that when we tear down the original bindings
+%% that we get rid of everything again.
+
+%% See the diagram above: check that bindings propagate exactly as far
+%% as each link's max-hops allows, and are fully torn down afterwards.
+binding_propagation_with() -> disambiguate(start_abc).
+binding_propagation([Dylan, Bugs, Jessica]) ->
+ set_policy_upstream( Dylan, <<"^x$">>, <<"amqp://localhost:5674">>, []),
+ set_policy_upstream( Bugs, <<"^x$">>, <<"amqp://localhost:5672">>, []),
+ set_policy_upstreams(Jessica, <<"^x$">>, [{<<"amqp://localhost:5672">>, []},
+ {<<"amqp://localhost:5673">>,
+ [{<<"max-hops">>, 2}]}]),
+ {_, DylanCh} = rabbit_test_util:connect(Dylan),
+ {_, BugsCh} = rabbit_test_util:connect(Bugs),
+ {_, JessicaCh} = rabbit_test_util:connect(Jessica),
+
+ declare_exchange(DylanCh, x(<<"x">>)),
+ declare_exchange(BugsCh, x(<<"x">>)),
+ declare_exchange(JessicaCh, x(<<"x">>)),
+
+ Q1 = bind_queue(DylanCh, <<"x">>, <<"dylan">>),
+ Q2 = bind_queue(BugsCh, <<"x">>, <<"bugs">>),
+ Q3 = bind_queue(JessicaCh, <<"x">>, <<"jessica">>),
+
+ await_binding( Dylan, <<"x">>, <<"jessica">>, 2),
+ await_bindings(Dylan, <<"x">>, [<<"bugs">>, <<"dylan">>]),
+ await_bindings(Bugs, <<"x">>, [<<"jessica">>, <<"bugs">>]),
+ await_bindings(Jessica, <<"x">>, [<<"dylan">>, <<"jessica">>]),
+
+ delete_queue(DylanCh, Q1),
+ delete_queue(BugsCh, Q2),
+ delete_queue(JessicaCh, Q3),
+
+ await_bindings(Dylan, <<"x">>, []),
+ await_bindings(Bugs, <<"x">>, []),
+ await_bindings(Jessica, <<"x">>, []),
+
+ ok.
+
+%% Federation must work even when the upstream broker has no federation
+%% plugin enabled (started via no_plugins/1).
+upstream_has_no_federation_with() ->
+ disambiguate(fun (Init) ->
+ Inits = [Init, no_plugins(Init)],
+ rabbit_test_configs:start_nodes(Inits, [a, b])
+ end).
+upstream_has_no_federation([Rabbit, Hare]) ->
+ set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>, []),
+ {_, Downstream} = rabbit_test_util:connect(Rabbit),
+ {_, Upstream} = rabbit_test_util:connect(Hare),
+ declare_exchange(Upstream, x(<<"test">>)),
+ declare_exchange(Downstream, x(<<"test">>)),
+ Q = bind_queue(Downstream, <<"test">>, <<"routing">>),
+ await_binding(Hare, <<"test">>, <<"routing">>),
+ publish(Upstream, <<"test">>, <<"routing">>, <<"HELLO">>),
+ expect(Downstream, Q, [<<"HELLO">>]),
+ ok.
+
+%% Clearing, re-adding and changing upstream parameters at runtime must
+%% start/stop links accordingly.
+dynamic_reconfiguration_test() ->
+ Cfg = single_cfg(),
+ with_ch(
+ fun (_Ch) ->
+ Xs = [<<"all.fed1">>, <<"all.fed2">>],
+ %% Left from the conf we set up for previous tests
+ assert_connections(Xs, [<<"localhost">>, <<"local5673">>]),
+
+ %% Test that clearing connections works
+ clear_upstream(Cfg, <<"localhost">>),
+ clear_upstream(Cfg, <<"local5673">>),
+ assert_connections(Xs, []),
+
+ %% Test that re-adding them and changing them works
+ set_upstream(Cfg, <<"localhost">>, <<"amqp://localhost">>),
+ %% Do it twice so we at least hit the no-restart optimisation
+ set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
+ set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
+ assert_connections(Xs, [<<"localhost">>]),
+
+ %% And re-add the last - for next test
+ set_upstream(Cfg, <<"local5673">>, <<"amqp://localhost:5673">>)
+ end, [x(<<"all.fed1">>), x(<<"all.fed2">>)]).
+
+%% Editing upstream *sets* at runtime (including adding references to
+%% nonexistent upstreams) must adjust links without breaking anything.
+dynamic_reconfiguration_integrity_test() ->
+ Cfg = single_cfg(),
+ with_ch(
+ fun (_Ch) ->
+ Xs = [<<"new.fed1">>, <<"new.fed2">>],
+
+ %% Declared exchanges with nonexistent set - no links
+ assert_connections(Xs, []),
+
+ %% Create the set - links appear
+ set_upstream_set(Cfg, <<"new-set">>, [{<<"localhost">>, []}]),
+ assert_connections(Xs, [<<"localhost">>]),
+
+ %% Add nonexistent connections to set - nothing breaks
+ set_upstream_set(
+ Cfg, <<"new-set">>, [{<<"localhost">>, []},
+ {<<"does-not-exist">>, []}]),
+ assert_connections(Xs, [<<"localhost">>]),
+
+ %% Change connection in set - links change
+ set_upstream_set(Cfg, <<"new-set">>, [{<<"local5673">>, []}]),
+ assert_connections(Xs, [<<"local5673">>])
+ end, [x(<<"new.fed1">>), x(<<"new.fed2">>)]).
+
+%% Applying, changing and clearing a federation policy at runtime must
+%% create, change and remove links for matching exchanges.
+federate_unfederate_test() ->
+ Cfg = single_cfg(),
+ with_ch(
+ fun (_Ch) ->
+ Xs = [<<"dyn.exch1">>, <<"dyn.exch2">>],
+
+ %% Declared non-federated exchanges - no links
+ assert_connections(Xs, []),
+
+ %% Federate them - links appear
+ set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, <<"all">>),
+ assert_connections(Xs, [<<"localhost">>, <<"local5673">>]),
+
+ %% Change policy - links change
+ set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+ assert_connections(Xs, [<<"localhost">>]),
+
+ %% Unfederate them - links disappear
+ clear_policy(Cfg, <<"dyn">>),
+ assert_connections(Xs, [])
+ end, [x(<<"dyn.exch1">>), x(<<"dyn.exch2">>)]).
+
+%%----------------------------------------------------------------------------
+
+%% Test harness: open a local connection + channel, declare the given
+%% exchanges, assert their federation links are up, run Fun(Ch), then
+%% delete the exchanges, close the connection and purge leftover queues.
+%% Note: no try/after - a failing Fun leaves the resources behind.
+with_ch(Fun, Xs) ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ declare_all(Ch, Xs),
+ rabbit_federation_test_util:assert_status(
+ Xs, {exchange, upstream_exchange}),
+ Fun(Ch),
+ delete_all(Ch, Xs),
+ amqp_connection:close(Conn),
+ cleanup(single_cfg()),
+ ok.
+
+%% Delete every queue in the default vhost on the node named in Cfg
+%% (best effort - rpc failures surface as [] from queues/1).
+cleanup(Cfg) ->
+ [rpc:call(pget(node, Cfg), rabbit_amqqueue, delete, [Q, false, false]) ||
+ Q <- queues(pget(node, Cfg))].
+
+%% List the queues in vhost <<"/">> on Node; an unreachable node yields [].
+queues(Node) ->
+ case rpc:call(Node, rabbit_amqqueue, list, [<<"/">>]) of
+ {badrpc, _} -> [];
+ Qs -> Qs
+ end.
+
+%% Purge the other node's queues, then stop it via the shared test util.
+%% NOTE(review): the argument is named Node but cleanup/1 reads
+%% pget(node, Cfg) from it - presumably callers pass a Cfg proplist;
+%% confirm against call sites.
+stop_other_node(Node) ->
+ cleanup(Node),
+ rabbit_federation_test_util:stop_other_node(Node).
+
+%% Declare every exchange record in Xs on channel Ch.
+declare_all(Ch, Xs) -> [declare_exchange(Ch, X) || X <- Xs].
+%% Delete the exchanges named by the 'exchange.declare' records in Xs.
+delete_all(Ch, Xs) ->
+ [delete_exchange(Ch, X) || #'exchange.declare'{exchange = X} <- Xs].
+
+%% X is a pre-built #'exchange.declare'{} record; issue it synchronously.
+declare_exchange(Ch, X) ->
+ amqp_channel:call(Ch, X).
+
+%% Build a durable exchange.declare record; defaults to type topic.
+x(Name) -> x(Name, <<"topic">>).
+
+x(Name, Type) ->
+ #'exchange.declare'{exchange = Name,
+ type = Type,
+ durable = true}.
+
+%% Exchange resource name in the default vhost.
+r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+
+%% Declare a server-named exclusive queue and return its name.
+declare_queue(Ch) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ Q.
+
+%% Bind queue Q to exchange X with the given routing key.
+bind_queue(Ch, Q, X, Key) ->
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = Key}).
+
+%% Remove the binding created by bind_queue/4.
+unbind_queue(Ch, Q, X, Key) ->
+ amqp_channel:call(Ch, #'queue.unbind'{queue = Q,
+ exchange = X,
+ routing_key = Key}).
+
+%% Exchange-to-exchange binding: route from source S to destination D.
+bind_exchange(Ch, D, S, Key) ->
+ amqp_channel:call(Ch, #'exchange.bind'{destination = D,
+ source = S,
+ routing_key = Key}).
+
+%% Declare a fresh exclusive queue, bind it to X with Key, return its name.
+bind_queue(Ch, X, Key) ->
+ Q = declare_queue(Ch),
+ bind_queue(Ch, Q, X, Key),
+ Q.
+
+delete_exchange(Ch, X) ->
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = X}).
+
+delete_queue(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+%% Wait until exactly Count bindings with routing key Key exist on
+%% exchange X. Accepts a node atom, a Cfg proplist, or (for the
+%% 2/3-arity forms) defaults to single_cfg() and Count = 1.
+await_binding(X, Key) -> await_binding(single_cfg(), X, Key, 1).
+await_binding(X, Key, Count)
+ when is_binary(X) -> await_binding(single_cfg(), X, Key, Count);
+await_binding(Broker, X, Key) -> await_binding(Broker, X, Key, 1).
+
+%% Polls every 100ms with no overall timeout - relies on the test
+%% framework to time out if the binding never propagates. Exits if
+%% MORE than Count matching bindings appear (propagation bug).
+await_binding(Node, X, Key, Count) when is_atom(Node) ->
+ case bound_keys_from(Node, X, Key) of
+ L when length(L) < Count -> timer:sleep(100),
+ await_binding(Node, X, Key, Count);
+ L when length(L) =:= Count -> ok;
+ L -> exit({too_many_bindings,
+ X, Key, Count, L})
+ end;
+await_binding(Cfg, X, Key, Count) ->
+ await_binding(pget(node, Cfg), X, Key, Count).
+
+%% Wait for one binding per key in Keys on exchange X.
+await_bindings(Broker, X, Keys) ->
+ [await_binding(Broker, X, Key) || Key <- Keys].
+
+%% Wait (100ms polls, no overall timeout) until no binding with Key
+%% remains on exchange X.
+await_binding_absent(Node, X, Key) when is_atom(Node) ->
+ case bound_keys_from(Node, X, Key) of
+ [] -> ok;
+ _ -> timer:sleep(100),
+ await_binding_absent(Node, X, Key)
+ end;
+await_binding_absent(Cfg, X, Key) ->
+ await_binding_absent(pget(node, Cfg), X, Key).
+
+%% Routing keys equal to Key among the bindings whose source is X on Node
+%% (one list element per matching binding, so length = binding count).
+bound_keys_from(Node, X, Key) ->
+ [K || #binding{key = K} <-
+ rpc:call(Node, rabbit_binding, list_for_source, [r(X)]),
+ K =:= Key].
+
+%% Publish Payload (binary or #amqp_msg{}) to exchange X with Key.
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+ publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+ amqp_channel:call(Ch, #'basic.publish'{exchange = X,
+ routing_key = Key}, Msg).
+
+%% Publish then consume the same payload from Q - round-trip assertion.
+publish_expect(Ch, X, Key, Q, Payload) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, [Payload]).
+
+%%----------------------------------------------------------------------------
+
+%% Assert that the federation status contains exactly one link per
+%% {exchange, upstream-connection} pair (the upstream exchange shares
+%% the downstream exchange's name here) and nothing else.
+assert_connections(Xs, Conns) ->
+ Links = [{X, C, X} ||
+ X <- Xs,
+ C <- Conns],
+ Remaining = lists:foldl(
+ fun (Link, Status) ->
+ rabbit_federation_test_util:assert_link_status(
+ Link, Status, {exchange, upstream_exchange})
+ end, rabbit_federation_status:status(), Links),
+ ?assertEqual([], Remaining),
+ ok.
+
+%% Pids of every AMQP connection currently open on Node.
+connection_pids(Node) ->
+ [P || [{pid, P}] <-
+ rpc:call(Node, rabbit_networking, connection_info_all, [[pid]])].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_queue_test).
+
+-compile(export_all).
+-include("rabbit_federation.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_federation_util, [name/1]).
+
+-import(rabbit_federation_test_util,
+ [expect/3,
+ set_upstream/3, clear_upstream/2, set_policy/4, clear_policy/2,
+ set_policy_upstream/4, set_policy_upstreams/3,
+ disambiguate/1, single_cfg/0]).
+
+-define(UPSTREAM_DOWNSTREAM, [q(<<"upstream">>),
+ q(<<"fed.downstream">>)]).
+
+%% Used in restart_upstream_test
+-define(HARE, {"hare", 5673}).
+
+%% A message published to the upstream queue must arrive on the
+%% federated downstream queue.
+simple_test() ->
+ with_ch(
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>)
+ end, [q(<<"upstream">>),
+ q(<<"fed.downstream">>)]).
+
+%% One downstream queue fed from two upstreams receives from both.
+multiple_upstreams_test() ->
+ with_ch(
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"fed12.downstream">>),
+ expect_federation(Ch, <<"upstream2">>, <<"fed12.downstream">>)
+ end, [q(<<"upstream">>),
+ q(<<"upstream2">>),
+ q(<<"fed12.downstream">>)]).
+
+%% One upstream queue feeding two downstream queues serves both.
+multiple_downstreams_test() ->
+ with_ch(
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream2">>)
+ end, [q(<<"upstream">>),
+ q(<<"fed.downstream">>),
+ q(<<"fed.downstream2">>)]).
+
+%% Two queues federating each other: after seeding consumers on both,
+%% 100 messages published to each via the default exchange end up split
+%% 150/50 across the pair, with nothing left behind afterwards.
+bidirectional_test() ->
+ with_ch(
+ fun (Ch) ->
+ publish_expect(Ch, <<>>, <<"one">>, <<"one">>, <<"first one">>),
+ publish_expect(Ch, <<>>, <<"two">>, <<"two">>, <<"first two">>),
+ Seq = lists:seq(1, 100),
+ [publish(Ch, <<>>, <<"one">>, <<"bulk">>) || _ <- Seq],
+ [publish(Ch, <<>>, <<"two">>, <<"bulk">>) || _ <- Seq],
+ expect(Ch, <<"one">>, repeat(150, <<"bulk">>)),
+ expect(Ch, <<"two">>, repeat(50, <<"bulk">>)),
+ expect_empty(Ch, <<"one">>),
+ expect_empty(Ch, <<"two">>)
+ end, [q(<<"one">>),
+ q(<<"two">>)]).
+
+%% Queue-federation analogue of the exchange test: clearing the
+%% upstream stops messages flowing; re-adding it (twice, to hit the
+%% no-restart optimisation) restores the flow.
+dynamic_reconfiguration_test() ->
+ Cfg = single_cfg(),
+ with_ch(
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
+
+ %% Test that clearing connections works
+ clear_upstream(Cfg, <<"localhost">>),
+ expect_no_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
+
+ %% Test that re-adding them and changing them works
+ set_upstream(Cfg, <<"localhost">>, <<"amqp://localhost">>),
+ %% Do it twice so we at least hit the no-restart optimisation
+ set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
+ set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>)
+ end, [q(<<"upstream">>),
+ q(<<"fed.downstream">>)]).
+
+%% Policy lifecycle for queue federation: applying a policy starts the
+%% message flow from the named upstream only, changing the policy moves
+%% the flow, clearing it stops federation entirely.
+federate_unfederate_test() ->
+ Cfg = single_cfg(),
+ with_ch(
+ fun (Ch) ->
+ expect_no_federation(Ch, <<"upstream">>, <<"downstream">>),
+ expect_no_federation(Ch, <<"upstream2">>, <<"downstream">>),
+
+ %% Federate it
+ set_policy(Cfg, <<"dyn">>, <<"^downstream\$">>, <<"upstream">>),
+ expect_federation(Ch, <<"upstream">>, <<"downstream">>),
+ expect_no_federation(Ch, <<"upstream2">>, <<"downstream">>),
+
+ %% Change policy - upstream changes
+ set_policy(Cfg, <<"dyn">>, <<"^downstream\$">>, <<"upstream2">>),
+ expect_no_federation(Ch, <<"upstream">>, <<"downstream">>),
+ expect_federation(Ch, <<"upstream2">>, <<"downstream">>),
+
+ %% Unfederate it - no federation
+ clear_policy(Cfg, <<"dyn">>),
+ expect_no_federation(Ch, <<"upstream2">>, <<"downstream">>)
+ end, [q(<<"upstream">>),
+ q(<<"upstream2">>),
+ q(<<"downstream">>)]).
+
+
+%% Downstream: rabbit-test, port 5672
+%% Upstream: hare, port 5673
+
+%% Test-framework hook: run restart_upstream/1 on a two-node (A/B)
+%% setup with disambiguated process groups.
+restart_upstream_with() -> disambiguate(start_ab).
+%% Restarting the upstream broker must not lose messages: half the
+%% bulk ends up on each side before the restart, the other half after,
+%% and both queues drain completely.
+restart_upstream([Rabbit, Hare]) ->
+ set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>, []),
+
+ {_, Downstream} = rabbit_test_util:connect(Rabbit),
+ {_, Upstream} = rabbit_test_util:connect(Hare),
+
+ declare_queue(Upstream, q(<<"test">>)),
+ declare_queue(Downstream, q(<<"test">>)),
+ Seq = lists:seq(1, 100),
+ [publish(Upstream, <<>>, <<"test">>, <<"bulk">>) || _ <- Seq],
+ expect(Upstream, <<"test">>, repeat(25, <<"bulk">>)),
+ expect(Downstream, <<"test">>, repeat(25, <<"bulk">>)),
+
+ Hare2 = rabbit_test_configs:restart_node(Hare),
+ {_, Upstream2} = rabbit_test_util:connect(Hare2),
+
+ expect(Upstream2, <<"test">>, repeat(25, <<"bulk">>)),
+ expect(Downstream, <<"test">>, repeat(25, <<"bulk">>)),
+ expect_empty(Upstream2, <<"test">>),
+ expect_empty(Downstream, <<"test">>),
+
+ ok.
+
+%% Placeholder: should verify federation against an upstream broker
+%% without the federation plugin.
+upstream_has_no_federation_test() ->
+ %% TODO
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% Queue-test harness: declare the queues, give the status table a
+%% second to catch up, assert the links are up, run Fun(Ch), then
+%% delete the queues. No try/after - a failing Fun leaks the resources.
+with_ch(Fun, Qs) ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ declare_all(Ch, Qs),
+ timer:sleep(1000), %% Time for statuses to get updated
+ rabbit_federation_test_util:assert_status(
+ Qs, {queue, upstream_queue}),
+ Fun(Ch),
+ delete_all(Ch, Qs),
+ amqp_connection:close(Conn),
+ ok.
+
+%% Declare every queue record in Qs on channel Ch.
+declare_all(Ch, Qs) -> [declare_queue(Ch, Q) || Q <- Qs].
+%% Delete the queues named by the 'queue.declare' records in Qs.
+delete_all(Ch, Qs) ->
+ [delete_queue(Ch, Q) || #'queue.declare'{queue = Q} <- Qs].
+
+%% Q is a pre-built #'queue.declare'{} record; issue it synchronously.
+declare_queue(Ch, Q) ->
+ amqp_channel:call(Ch, Q).
+
+delete_queue(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+%% Build a durable queue.declare record for Name.
+q(Name) ->
+ #'queue.declare'{queue = Name,
+ durable = true}.
+
+repeat(Count, Item) -> [Item || _ <- lists:seq(1, Count)].
+
+%%----------------------------------------------------------------------------
+
+%% Publish Payload (binary or #amqp_msg{}) to exchange X with Key.
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+ publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+ amqp_channel:call(Ch, #'basic.publish'{exchange = X,
+ routing_key = Key}, Msg).
+
+%% Publish then consume the same payload from Q - round-trip assertion.
+publish_expect(Ch, X, Key, Q, Payload) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, [Payload]).
+
+%% Doubled due to our strange basic.get behaviour.
+%% Doubled due to our strange basic.get behaviour.
+expect_empty(Ch, Q) ->
+ rabbit_federation_test_util:expect_empty(Ch, Q),
+ rabbit_federation_test_util:expect_empty(Ch, Q).
+
+%% Publish to UpstreamQ (via the default exchange) and require the
+%% message to show up on DownstreamQ.
+expect_federation(Ch, UpstreamQ, DownstreamQ) ->
+ publish_expect(Ch, <<>>, UpstreamQ, DownstreamQ, <<"HELLO">>).
+
+%% Publish to UpstreamQ and require it NOT to reach DownstreamQ - the
+%% message must still be sitting on the upstream queue.
+expect_no_federation(Ch, UpstreamQ, DownstreamQ) ->
+ publish(Ch, <<>>, UpstreamQ, <<"HELLO">>),
+ expect_empty(Ch, DownstreamQ),
+ expect(Ch, UpstreamQ, [<<"HELLO">>]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_test_util).
+
+-include("rabbit_federation.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-import(rabbit_misc, [pget/2]).
+
+%% Consume from Q (no-ack) while running Fun (or, for the list form,
+%% while waiting for exactly that multiset of payloads), then cancel
+%% the consumer. The wait for basic.consume_ok has no timeout; the
+%% surrounding test framework is expected to bound it.
+expect(Ch, Q, Fun) when is_function(Fun) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = true}, self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} -> ok
+ end,
+ Fun(),
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag});
+
+expect(Ch, Q, Payloads) ->
+ expect(Ch, Q, fun() -> expect(Payloads) end).
+
+%% Drain deliveries from the mailbox until every payload in Payloads
+%% has been seen (multiset semantics - duplicates must arrive the
+%% right number of times). Throws on an unexpected payload, and - fix -
+%% now also throws instead of hanging forever when an expected message
+%% never arrives; previously this receive had no 'after' clause.
+expect([]) ->
+    ok;
+expect(Payloads) ->
+    receive
+        {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+            case lists:member(Payload, Payloads) of
+                true  -> expect(Payloads -- [Payload]);
+                false -> throw({expected, Payloads, actual, Payload})
+            end
+    after 60000 ->
+            throw({timeout_waiting_for, Payloads})
+    end.
+
+%% A basic.get on Q must report the queue as empty.
+expect_empty(Ch, Q) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{ queue = Q })).
+
+%% Set a federation-upstream runtime parameter; Extra is appended to
+%% the mandatory uri entry.
+set_upstream(Cfg, Name, URI) ->
+ set_upstream(Cfg, Name, URI, []).
+
+set_upstream(Cfg, Name, URI, Extra) ->
+ rabbit_test_util:set_param(Cfg, <<"federation-upstream">>, Name,
+ [{<<"uri">>, URI} | Extra]).
+
+clear_upstream(Cfg, Name) ->
+ rabbit_test_util:clear_param(Cfg, <<"federation-upstream">>, Name).
+
+%% Set a federation-upstream-set parameter from {UpstreamName, Extra}
+%% pairs.
+set_upstream_set(Cfg, Name, Set) ->
+ rabbit_test_util:set_param(
+ Cfg, <<"federation-upstream-set">>, Name,
+ [[{<<"upstream">>, UStream} | Extra] || {UStream, Extra} <- Set]).
+
+%% Apply a policy naming a federation-upstream-set (set_policy) or a
+%% single federation-upstream (set_policy1) to everything matching
+%% Pattern.
+set_policy(Cfg, Name, Pattern, UpstreamSet) ->
+ rabbit_test_util:set_policy(Cfg, Name, Pattern, <<"all">>,
+ [{<<"federation-upstream-set">>, UpstreamSet}]).
+
+set_policy1(Cfg, Name, Pattern, Upstream) ->
+ rabbit_test_util:set_policy(Cfg, Name, Pattern, <<"all">>,
+ [{<<"federation-upstream">>, Upstream}]).
+
+clear_policy(Cfg, Name) ->
+ rabbit_test_util:clear_policy(Cfg, Name).
+
+%% Create generated-name upstreams for each URI and federate everything
+%% matching Pattern to all of them via a policy named after Pattern.
+set_policy_upstream(Cfg, Pattern, URI, Extra) ->
+ set_policy_upstreams(Cfg, Pattern, [{URI, Extra}]).
+
+set_policy_upstreams(Cfg, Pattern, URIExtras) ->
+ put(upstream_num, 1),
+ [set_upstream(Cfg, gen_upstream_name(), URI, Extra)
+ || {URI, Extra} <- URIExtras],
+ set_policy(Cfg, Pattern, Pattern, <<"all">>).
+
+%% Generate <<"upstream-N">> from the process-dictionary counter.
+%% Note: next_upstream_num/0 pre-increments, so after put(upstream_num, 1)
+%% the first generated name is "upstream-2".
+gen_upstream_name() ->
+ list_to_binary("upstream-" ++ integer_to_list(next_upstream_num())).
+
+next_upstream_num() ->
+ R = get(upstream_num) + 1,
+ put (upstream_num, R),
+ R.
+
+%% Make sure that even though multiple nodes are in a single
+%% distributed system, we still keep all our process groups separate.
+%% Make sure that even though multiple nodes are in a single
+%% distributed system, we still keep all our process groups separate.
+%% Returns [Rest, PostStepFun] for the test-config pipeline; the fun
+%% sets pgroup_name_cluster_id on every node and passes Cfgs through.
+disambiguate(Rest) ->
+ [Rest,
+ fun (Cfgs) ->
+ [rpc:call(pget(node, Cfg), application, set_env,
+ [rabbitmq_federation, pgroup_name_cluster_id, true])
+ || Cfg <- Cfgs],
+ Cfgs
+ end].
+
+%% Copy of Cfg with the 'plugins' entry forced to 'none'; every other
+%% key/value pair passes through untouched.
+no_plugins(Cfg) ->
+    Strip = fun (plugins, _V) -> none;
+                (_K, V)       -> V
+            end,
+    [{K, Strip(K, V)} || {K, V} <- Cfg].
+
+%% "fake" cfg to let us use various utility functions when running
+%% in-broker tests
+%% "fake" cfg to let us use various utility functions when running
+%% in-broker tests: a minimal proplist shaped like a test-node config.
+single_cfg() ->
+ [{nodename, 'rabbit-test'},
+ {node, rabbit_nodes:make('rabbit-test')},
+ {port, 5672}].
+
+%%----------------------------------------------------------------------------
+
+%% Assert the federation status table holds exactly one entry per
+%% expected link of the given exchanges/queues, and no extras.
+assert_status(XorQs, Names) ->
+ Links = lists:append([links(XorQ) || XorQ <- XorQs]),
+ Remaining = lists:foldl(fun (Link, Status) ->
+ assert_link_status(Link, Status, Names)
+ end, rabbit_federation_status:status(), Links),
+ ?assertEqual([], Remaining),
+ ok.
+
+%% Assert exactly one status row matches the {downstream name,
+%% upstream name, upstream exchange/queue name} triple; return the
+%% rows that did not match, so callers can fold and finish with [].
+assert_link_status({DXorQNameBin, UpstreamName, UXorQNameBin}, Status,
+ {TypeName, UpstreamTypeName}) ->
+ {This, Rest} = lists:partition(
+ fun(St) ->
+ pget(upstream, St) =:= UpstreamName andalso
+ pget(TypeName, St) =:= DXorQNameBin andalso
+ pget(UpstreamTypeName, St) =:= UXorQNameBin
+ end, Status),
+ ?assertMatch([_], This),
+ Rest.
+
+%% Expected federation links for a declared exchange/queue, derived
+%% from its effective federation-upstream-set policy; [] when no
+%% policy applies.
+links(#'exchange.declare'{exchange = Name}) ->
+ case rabbit_policy:get(<<"federation-upstream-set">>, xr(Name)) of
+ undefined -> [];
+ Set -> X = #exchange{name = xr(Name)},
+ [{Name, U#upstream.name, U#upstream.exchange_name} ||
+ U <- rabbit_federation_upstream:from_set(Set, X)]
+ end;
+links(#'queue.declare'{queue = Name}) ->
+ case rabbit_policy:get(<<"federation-upstream-set">>, qr(Name)) of
+ undefined -> [];
+ Set -> Q = #amqqueue{name = qr(Name)},
+ [{Name, U#upstream.name, U#upstream.queue_name} ||
+ U <- rabbit_federation_upstream:from_set(Set, Q)]
+ end.
+
+%% Resource names in the default vhost for exchanges/queues.
+xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+qr(Name) -> rabbit_misc:r(<<"/">>, queue, Name).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_federation_unit_test).
+
+-define(US_NAME, <<"upstream">>).
+-define(DS_NAME, <<"fed.downstream">>).
+
+-include("rabbit_federation.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%% Test that we apply binding changes in the correct order even when
+%% they arrive out of order.
+%% Test that we apply binding changes in the correct order even when
+%% they arrive out of order. Serials: remove(4) then add(5) for B1,
+%% adds 1-3 for B1-B3 - net effect must be B1 and B2 bound, B3 removed.
+serialisation_test() ->
+ with_exchanges(
+ fun(X) ->
+ [B1, B2, B3] = [b(K) || K <- [<<"1">>, <<"2">>, <<"3">>]],
+ remove_bindings(4, X, [B1, B3]),
+ add_binding(5, X, B1),
+ add_binding(1, X, B1),
+ add_binding(2, X, B2),
+ add_binding(3, X, B3),
+ %% List of lists because one for each link
+ Keys = rabbit_federation_exchange_link:list_routing_keys(
+ X#exchange.name),
+ ?assertEqual([[<<"1">>, <<"2">>]], Keys)
+ end).
+
+%% Declare the upstream and (federated) downstream fanout exchanges,
+%% run Fun with the downstream exchange, then delete both.
+with_exchanges(Fun) ->
+ rabbit_exchange:declare(r(?US_NAME), fanout, false, false, false, []),
+ X = rabbit_exchange:declare(r(?DS_NAME), fanout, false, false, false, []),
+ Fun(X),
+ %% Delete downstream first or it will recreate the upstream
+ rabbit_exchange:delete(r(?DS_NAME), false),
+ rabbit_exchange:delete(r(?US_NAME), false),
+ ok.
+
+%% Mimic the callback sequence the broker uses: first the transaction
+%% phase, then the serial-numbered phase the link serialises on.
+add_binding(Ser, X, B) ->
+ rabbit_federation_exchange:add_binding(transaction, X, B),
+ rabbit_federation_exchange:add_binding(Ser, X, B).
+
+remove_bindings(Ser, X, Bs) ->
+ rabbit_federation_exchange:remove_bindings(transaction, X, Bs),
+ rabbit_federation_exchange:remove_bindings(Ser, X, Bs).
+
+%% Exchange resource name in the default vhost.
+r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+
+%% A binding from the downstream exchange with the given routing key.
+b(Key) ->
+ #binding{source = ?DS_NAME, destination = <<"whatever">>,
+ key = Key, args = []}.
+
+%% The per-exchange scratch space must keep active suffixes per
+%% upstream, and prune_scratch/2 must drop entries for upstreams no
+%% longer in the set (x is pruned; y keeps its slot; z gets a new one).
+scratch_space_test() ->
+ A = <<"A">>,
+ B = <<"B">>,
+ DB = rabbit_federation_db,
+ with_exchanges(
+ fun(#exchange{name = N}) ->
+ DB:set_active_suffix(N, upstream(x), A),
+ DB:set_active_suffix(N, upstream(y), A),
+ DB:prune_scratch(N, [upstream(y), upstream(z)]),
+ DB:set_active_suffix(N, upstream(y), B),
+ DB:set_active_suffix(N, upstream(z), A),
+ ?assertEqual(none, DB:get_active_suffix(N, upstream(x), none)),
+ ?assertEqual(B, DB:get_active_suffix(N, upstream(y), none)),
+ ?assertEqual(A, DB:get_active_suffix(N, upstream(z), none))
+ end).
+
+%% Minimal #upstream{} for scratch-space tests; only name and
+%% exchange_name are populated.
+upstream(UpstreamName) ->
+ #upstream{name = atom_to_list(UpstreamName),
+ exchange_name = <<"upstream">>}.
+
+%% remove_credentials/1 must strip user and user:pass from AMQP(S)
+%% URIs while leaving credential-free URIs untouched, across hosts,
+%% ports, vhosts and the empty remainder.
+remove_credentials_test() ->
+ Test0 = fun (In, Exp) ->
+ Act = rabbit_federation_upstream:remove_credentials(In),
+ ?assertEqual(Exp, Act)
+ end,
+ Cat = fun (Bs) ->
+ list_to_binary(lists:append([binary_to_list(B) || B <- Bs]))
+ end,
+ Test = fun (Scheme, Rest) ->
+ Exp = Cat([Scheme, Rest]),
+ Test0(Exp, Exp),
+ Test0(Cat([Scheme, <<"user@">>, Rest]), Exp),
+ Test0(Cat([Scheme, <<"user:pass@">>, Rest]), Exp)
+ end,
+ Test(<<"amqp://">>, <<"">>),
+ Test(<<"amqp://">>, <<"localhost">>),
+ Test(<<"amqp://">>, <<"localhost/">>),
+ Test(<<"amqp://">>, <<"localhost/foo">>),
+ Test(<<"amqp://">>, <<"localhost:5672">>),
+ Test(<<"amqp://">>, <<"localhost:5672/foo">>),
+ Test(<<"amqps://">>, <<"localhost:5672/%2f">>),
+ ok.
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+DEPS:=rabbitmq-erlang-client
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_agent_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Make sure our database is hooked in *before* listening on the network or
+%% recovering queues (i.e. so there can't be any events fired before it starts).
+-rabbit_boot_step({rabbit_mgmt_db_handler,
+ [{description, "management agent"},
+ {mfa, {rabbit_mgmt_db_handler, add_handler,
+ []}},
+ {requires, rabbit_event},
+ {enables, recovery}]}).
+
+
+%% application callback: start the agent's top-level supervisor.
+start(_Type, _StartArgs) ->
+ rabbit_mgmt_agent_sup:start_link().
+
+%% application callback: nothing to clean up.
+stop(_State) ->
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_agent_sup).
+
+-behaviour(supervisor).
+
+-export([init/1]).
+-export([start_link/0]).
+
+%% supervisor callback: one permanent worker (the external stats
+%% emitter), restarted one_for_one, max 10 restarts in 10 seconds.
+init([]) ->
+ ExternalStats = {rabbit_mgmt_external_stats,
+ {rabbit_mgmt_external_stats, start_link, []},
+ permanent, 5000, worker, [rabbit_mgmt_external_stats]},
+ {ok, {{one_for_one, 10, 10}, [ExternalStats]}}.
+
+%% Register locally under the module name.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_db_handler).
+
+-behaviour(gen_event).
+
+-export([add_handler/0, gc/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+%% Attach this module as a supervised handler on the rabbit_event
+%% manager, first making sure statistics collection is switched on.
+add_handler() ->
+ ensure_statistics_enabled(),
+ gen_event:add_sup_handler(rabbit_event, ?MODULE, []).
+
+%% Force a GC of the rabbit_event process (used to release binaries).
+gc() ->
+ erlang:garbage_collect(whereis(rabbit_event)).
+
+%%----------------------------------------------------------------------------
+
+%% Make rabbit's collect_statistics level compatible with the
+%% management agent: forced fine stats upgrade any non-fine level to
+%% fine; otherwise a 'none' level is raised to coarse. An already
+%% suitable level is left alone.
+ensure_statistics_enabled() ->
+    {ok, ForceStats} = application:get_env(rabbitmq_management_agent,
+                                           force_fine_statistics),
+    {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+    ensure_stats_level(ForceStats, StatsLevel).
+
+%% Upgrade the collect_statistics level where required.
+ensure_stats_level(true, fine)  ->
+    ok;
+ensure_stats_level(true, _)     ->
+    application:set_env(rabbit, collect_statistics, fine);
+ensure_stats_level(false, none) ->
+    application:set_env(rabbit, collect_statistics, coarse);
+ensure_stats_level(_, _)        ->
+    ok.
+
+%%----------------------------------------------------------------------------
+
+%% gen_event callbacks: stateless handler that forwards every rabbit
+%% event to the (globally registered) management database.
+init([]) ->
+ {ok, []}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+%% Fire-and-forget cast; if the mgmt db is down the event is dropped.
+handle_event(Event, State) ->
+ gen_server:cast({global, rabbit_mgmt_db}, {event, Event}),
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_external_stats).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([list_registry_plugins/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(REFRESH_RATIO, 5000).
+-define(KEYS, [name, partitions, os_pid, fd_used, fd_total,
+ sockets_used, sockets_total, mem_used, mem_limit, mem_alarm,
+ disk_free_limit, disk_free, disk_free_alarm,
+ proc_used, proc_total, statistics_level,
+ uptime, run_queue, processors, exchange_types,
+ auth_mechanisms, applications, contexts]).
+
+%%--------------------------------------------------------------------
+
+-record(state, {fd_total}).
+
+%%--------------------------------------------------------------------
+
+%% Start the node-stats emitter, registered locally under the module name.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%%--------------------------------------------------------------------
+
+%% Count of file descriptors in use by this OS process, dispatched on
+%% the host OS type.
+get_used_fd() ->
+ get_used_fd(os:type()).
+
+%% Linux: count entries in /proc/<pid>/fd; fall back to the generic
+%% unix strategy if /proc is unavailable.
+get_used_fd({unix, linux}) ->
+ case file:list_dir("/proc/" ++ os:getpid() ++ "/fd") of
+ {ok, Files} -> length(Files);
+ {error, _} -> get_used_fd({unix, generic})
+ end;
+
+%% BSDs: parse fstat output, counting lines whose 4th column is a
+%% descriptor number (digits or '*'). Parse failures are logged once
+%% per process and reported as 'unknown'.
+get_used_fd({unix, BSD})
+ when BSD == openbsd; BSD == freebsd; BSD == netbsd ->
+ Digit = fun (D) -> lists:member(D, "0123456789*") end,
+ Output = os:cmd("fstat -p " ++ os:getpid()),
+ try
+ length(
+ lists:filter(
+ fun (Line) ->
+ lists:all(Digit, (lists:nth(4, string:tokens(Line, " "))))
+ end, string:tokens(Output, "\n")))
+ catch _:Error ->
+ case get(logged_used_fd_error) of
+ undefined -> rabbit_log:warning(
+ "Could not parse fstat output:~n~s~n~p~n",
+ [Output, {Error, erlang:get_stacktrace()}]),
+ put(logged_used_fd_error, true);
+ _ -> ok
+ end,
+ unknown
+ end;
+
+%% Other unixes: count lsof output lines (minus the header); 'unknown'
+%% when lsof is not available.
+get_used_fd({unix, _}) ->
+ Cmd = rabbit_misc:format(
+ "lsof -d \"0-9999999\" -lna -p ~s || echo failed", [os:getpid()]),
+ Res = os:cmd(Cmd),
+ case string:right(Res, 7) of
+ "failed\n" -> unknown;
+ _ -> string:words(Res, $\n) - 1
+ end;
+
+%% handle.exe can be obtained from
+%% http://technet.microsoft.com/en-us/sysinternals/bb896655.aspx
+
+%% Output looks like:
+
+%% Handle v3.42
+%% Copyright (C) 1997-2008 Mark Russinovich
+%% Sysinternals - www.sysinternals.com
+%%
+%% Handle type summary:
+%% ALPC Port : 2
+%% Desktop : 1
+%% Directory : 1
+%% Event : 108
+%% File : 25
+%% IoCompletion : 3
+%% Key : 7
+%% KeyedEvent : 1
+%% Mutant : 1
+%% Process : 3
+%% Process : 38
+%% Thread : 41
+%% Timer : 3
+%% TpWorkerFactory : 2
+%% WindowStation : 2
+%% Total handles: 238
+
+%% Note that the "File" number appears to include network sockets too; I assume
+%% that's the number we care about. Note also that if you omit "-s" you will
+%% see a list of file handles *without* network sockets. If you then add "-a"
+%% you will see a list of handles of various types, including network sockets
+%% shown as file handles to \Device\Afd.
+
+get_used_fd({win32, _}) ->
+ Handle = rabbit_misc:os_cmd(
+ "handle.exe /accepteula -s -p " ++ os:getpid() ++ " 2> nul"),
+ case Handle of
+ [] -> install_handle_from_sysinternals;
+ _ -> find_files_line(string:tokens(Handle, "\r\n"))
+ end;
+
+%% Anything else: we have no strategy.
+get_used_fd(_) ->
+ unknown.
+
+%% Scan handle.exe summary lines for the File row and return its count
+%% as an integer; 'unknown' when no such row exists.
+find_files_line(Lines) ->
+    NotFileRow = fun (Line) -> not lists:prefix(" File ", Line) end,
+    case lists:dropwhile(NotFileRow, Lines) of
+        []                       -> unknown;
+        [" File " ++ Counts | _] -> [N] = string:tokens(Counts, ": "),
+                                    list_to_integer(N)
+    end.
+
+%% Evaluate Fun, mapping a noproc exit (target process not running)
+%% to NoProcFailResult instead of crashing.
+-define(SAFE_CALL(Fun, NoProcFailResult),
+ try
+ Fun
+ catch exit:{noproc, _} -> NoProcFailResult
+ end).
+
+%% Disk-monitor queries degrade to a marker atom when the monitor is off.
+get_disk_free_limit() -> ?SAFE_CALL(rabbit_disk_monitor:get_disk_free_limit(),
+ disk_free_monitoring_disabled).
+
+get_disk_free() -> ?SAFE_CALL(rabbit_disk_monitor:get_disk_free(),
+ disk_free_monitoring_disabled).
+
+%%--------------------------------------------------------------------
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+%% One clause per stat key in ?KEYS; only fd_total comes from cached
+%% state, everything else is sampled on each call.
+i(name, _State) -> node();
+i(partitions, _State) -> rabbit_node_monitor:partitions();
+i(fd_used, _State) -> get_used_fd();
+i(fd_total, #state{fd_total = FdTotal}) -> FdTotal;
+i(sockets_used, _State) ->
+ proplists:get_value(sockets_used, file_handle_cache:info([sockets_used]));
+i(sockets_total, _State) ->
+ proplists:get_value(sockets_limit, file_handle_cache:info([sockets_limit]));
+i(os_pid, _State) -> list_to_binary(os:getpid());
+i(mem_used, _State) -> erlang:memory(total);
+i(mem_limit, _State) -> vm_memory_monitor:get_memory_limit();
+i(mem_alarm, _State) -> resource_alarm_set(memory);
+i(proc_used, _State) -> erlang:system_info(process_count);
+i(proc_total, _State) -> erlang:system_info(process_limit);
+i(run_queue, _State) -> erlang:statistics(run_queue);
+i(processors, _State) -> erlang:system_info(logical_processors);
+i(disk_free_limit, _State) -> get_disk_free_limit();
+i(disk_free, _State) -> get_disk_free();
+i(disk_free_alarm, _State) -> resource_alarm_set(disk);
+i(contexts, _State) -> rabbit_web_dispatch_contexts();
+%% wall_clock total since node start, in milliseconds.
+i(uptime, _State) ->
+ {Total, _} = erlang:statistics(wall_clock),
+ Total;
+i(statistics_level, _State) ->
+ {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+ StatsLevel;
+i(exchange_types, _State) ->
+ list_registry_plugins(exchange);
+%% Auth mechanisms are annotated with whether rabbit has them enabled.
+i(auth_mechanisms, _State) ->
+ {ok, Mechanisms} = application:get_env(rabbit, auth_mechanisms),
+ list_registry_plugins(
+ auth_mechanism,
+ fun (N) -> lists:member(list_to_atom(binary_to_list(N)), Mechanisms) end);
+i(applications, _State) ->
+ [format_application(A) ||
+ A <- lists:keysort(1, rabbit_misc:which_applications())].
+
+%% True when a resource_limit alarm for Source is raised on this node.
+resource_alarm_set(Source) ->
+ lists:member({{resource_limit, Source, node()},[]},
+ rabbit_alarm:get_alarms()).
+
+%% Describe all registry plugins of Type; the 2-arity form lets the
+%% caller decide the per-plugin 'enabled' flag (default: always true).
+list_registry_plugins(Type) ->
+ list_registry_plugins(Type, fun(_) -> true end).
+
+list_registry_plugins(Type, Fun) ->
+ [registry_plugin_enabled(set_plugin_name(Name, Module), Fun) ||
+ {Name, Module} <- rabbit_registry:lookup_all(Type)].
+
+%% Append {enabled, Bool} computed from the plugin's name entry.
+registry_plugin_enabled(Desc, Fun) ->
+ Desc ++ [{enabled, Fun(proplists:get_value(name, Desc))}].
+
+%% Render a which_applications() triple as a proplist with binary
+%% description and version.
+format_application({Application, Description, Version}) ->
+    [{name, Application} |
+     [{Key, list_to_binary(Value)} ||
+         {Key, Value} <- [{description, Description},
+                          {version, Version}]]].
+
+%% Replace any name entry in the plugin's description with the
+%% registry name (as a binary), keeping the rest of the description.
+set_plugin_name(Name, Module) ->
+ [{name, list_to_binary(atom_to_list(Name))} |
+ proplists:delete(name, Module:description())].
+
+%%--------------------------------------------------------------------
+
+%% This is slightly icky in that we introduce knowledge of
+%% rabbit_web_dispatch, which is not a dependency. But the last thing I
+%% want to do is create a rabbitmq_mochiweb_management_agent plugin.
+%% This is slightly icky in that we introduce knowledge of
+%% rabbit_web_dispatch, which is not a dependency. But the last thing I
+%% want to do is create a rabbitmq_mochiweb_management_agent plugin.
+rabbit_web_dispatch_contexts() ->
+ [format_context(C) || C <- rabbit_web_dispatch_registry_list_all()].
+
+%% For similar reasons we don't declare a dependency on
+%% rabbitmq_mochiweb - so at startup there's no guarantee it will be
+%% running. So we have to catch this noproc.
+rabbit_web_dispatch_registry_list_all() ->
+ case code:is_loaded(rabbit_web_dispatch_registry) of
+ false -> [];
+ _ -> try
+ M = rabbit_web_dispatch_registry, %% Fool xref
+ M:list_all()
+ catch exit:{noproc, _} ->
+ []
+ end
+ end.
+
%% Render one web-dispatch context entry ({Path, Description, Options})
%% as a proplist; the path gains a leading slash.
format_context({Path, Description, Rest}) ->
    Head = [{description, list_to_binary(Description)},
            {path,        list_to_binary("/" ++ Path)}],
    Head ++ format_mochiweb_option_list(Rest).
+
%% Render a mochiweb option list, converting each value with
%% format_mochiweb_option/2.
format_mochiweb_option_list(Opts) ->
    lists:map(fun ({K, V}) -> {K, format_mochiweb_option(K, V)} end, Opts).

%% ssl_opts is itself an option list, so recurse; ciphers is an
%% arbitrary deep term we just render as text; other list (string)
%% values become binaries; everything else passes through untouched.
format_mochiweb_option(ssl_opts, SslOpts) ->
    format_mochiweb_option_list(SslOpts);
format_mochiweb_option(ciphers, Ciphers) ->
    list_to_binary(rabbit_misc:format("~w", [Ciphers]));
format_mochiweb_option(_K, V) when is_list(V) ->
    list_to_binary(V);
format_mochiweb_option(_K, V) ->
    V.
+
+%%--------------------------------------------------------------------
+
init([]) ->
    %% If we emit an update straight away we will do so just before the
    %% mgmt db starts up - and then have to wait ?REFRESH_RATIO until
    %% we send another. A shorter initial delay gives the db a chance
    %% to be up by the time the first update fires, shrinking the
    %% window at startup during which mgmt knows nothing about any
    %% nodes.
    erlang:send_after(1000, self(), emit_update),
    {ok, #state{fd_total = file_handle_cache:ulimit()}}.
+
%% Synchronous calls are not part of this server's protocol; answer
%% anything with 'unknown_request' and leave the state alone.
handle_call(_Request, _From, State) ->
    {reply, unknown_request, State}.
+
%% Casts are not part of this server's protocol; ignore them.
handle_cast(_Msg, State) ->
    {noreply, State}.
+
%% The emit_update tick publishes node stats and reschedules itself;
%% any other stray message is dropped.
handle_info(emit_update, State) ->
    {noreply, emit_update(State)};
handle_info(_Info, State) ->
    {noreply, State}.
+
%% No cleanup required on shutdown.
terminate(_Reason, _State) -> ok.
+
%% State layout is unchanged across code upgrades.
code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%%--------------------------------------------------------------------
+
%% Publish a node_stats event containing the current value of every key
%% in ?KEYS, then schedule the next tick ?REFRESH_RATIO ms from now.
%% Returns the (unchanged) server state.
emit_update(State) ->
    rabbit_event:notify(node_stats, infos(?KEYS, State)),
    erlang:send_after(?REFRESH_RATIO, self(), emit_update),
    State.
--- /dev/null
%% Application resource file for the RabbitMQ management agent.
%% "%%VSN%%" is substituted with the real version at packaging time.
{application, rabbitmq_management_agent,
 [{description, "RabbitMQ Management Agent"},
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  {mod, {rabbit_mgmt_agent_app, []}},
  %% force_fine_statistics: presumably requests per-object (fine-
  %% grained) statistics from the broker -- confirm against the code
  %% that reads this env key.
  {env, [{force_fine_statistics, true}]},
  {applications, [kernel, stdlib, rabbit]}]}.
--- /dev/null
+This package, the RabbitMQ Visualiser is licensed under the MPL. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+This package makes use of the following third party libraries:
+glMatrix - http://code.google.com/p/glmatrix/ - BSD 2-clause license, see LICENSE-BSD-glMatrix
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
--- /dev/null
+Copyright (c) 2011, Brandon Jones
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the
+ distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ Visualiser.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+RabbitMQ Visualiser
+===================
+
+
+Usage
+-----
+
+This is a plugin for the RabbitMQ Management Plugin that provides an
+HTML Canvas for rendering configured broker topology. The current main
+purpose of this is for diagnostics and comprehension of the current
+routing topology of the broker.
+
+The left of the canvas displays exchanges, the right displays queues,
+and the top displays channels. All of these items can be dragged
+around the canvas. They repel one another, and snap back into their
+predefined areas should they be released within the boundaries of those
+areas.
+
+Shift-clicking on an item hides it - it will be added to the relevant
+select box on the left.
+
+Hovering over an item shows at the top of the screen various details
+about the item. Double-clicking on the item will take you to the
+specific page in the Management Plugin concerning that item.
+
+When hovering over an item, incoming links and/or traffic are shown in
+green, whilst outgoing links and/or traffic are shown in
+blue. Bindings are always displayed, but the consumers of a queue, and
+likewise the publishers to an exchange, are only drawn in when
+hovering over the exchange, queue or channel in question.
+
+By default, up to 10 exchanges, 10 queues and 10 channels are
+displayed. Additional resources are available from the left hand-side
+select boxes, and can be brought into the display by selecting them
+and clicking on the relevant 'Show' button.
+
+The 'Display' check-boxes turn entire resource classes off and on, and
+reset positioning.
+
+
+Compatibility and Performance Notes
+-----------------------------------
+
+Does work in recent versions of Safari both on OS X and Windows.
+
+Does work in Firefox (at least version 4.0).
+
+Does work in Chrome.
+
+Does not work in Internet Explorer: no error is reported, but nothing
+is rendered.
+
+Best performance is with Chrome. Note though that in some cases it has
+been seen that hardware rendering (use of GPU) can actually slow down
+performance. Some experimentation with browser flags and settings may
+be necessary to ensure smooth operation.
--- /dev/null
+glMatrix is "Copyright (c) 2011, Brandon Jones" and is covered by the
+BSD 2-Clause license. It was downloaded from
+http://code.google.com/p/glmatrix/
+
--- /dev/null
# Umbrella build configuration for the visualiser package.
RELEASABLE:=true
DEPS:=rabbitmq-management

# Rebuild the app whenever any static asset under priv/ changes; the
# construct step just copies priv/ into the built application.
CONSTRUCT_APP_PREREQS:=$(shell find $(PACKAGE_DIR)/priv -type f)
define construct_app_commands
 cp -r $(PACKAGE_DIR)/priv $(APP_DIR)
endef
--- /dev/null
// The visualiser registers no Sammy client-side routes; its UI lives
// on its own standalone page rather than inside the management SPA.
dispatcher_add(function(sammy) {});

// Add a top-level "Visualiser" tab linking to visualiser/. The second
// element is presumably the user tag required to see the tab --
// confirm against the main management dispatcher.
NAVIGATION['Visualiser'] = ['visualiser/', "management"];
--- /dev/null
+<!DOCTYPE html>
+<html>
+
+<head>
+<title>RabbitMQ Visualiser</title>
+<meta http-equiv="content-type" content="text/html; charset=ISO-8859-1">
+
+<link href="../css/main.css" rel="stylesheet" type="text/css"/>
+
+<script type="text/javascript" src="js/glMatrix-min.js"></script>
+<script type="text/javascript" src="js/octtree.js"></script>
+<script type="text/javascript" src="js/physics.js"></script>
+<script type="text/javascript" src="js/model.js"></script>
+<script type="text/javascript" src="js/main.js"></script>
+<style type="text/css">
+#side{
+position: absolute;
+top: 0;
+bottom: 0;
+left: 0;
+width: 16%;
+height: 100%;
+overflow: auto;
+}
+
+#main{
+position: fixed;
+top: 10%;
+left: 16%;
+right: 0;
+bottom: 0;
+overflow: auto;
+margin: 0;
+padding: 0;
+border: 0;
+}
+
+.inner{
+margin: 10px;
+}
+
+.hidden{
+width: 100%;
+clear: both;
+}
+
+.show_button{
+float: right;
+}
+
+body{
+margin: 0;
+padding: 0;
+border: 0;
+overflow: hidden;
+height: 100%;
+max-height: 100%;
+}
+
+#details{
+height: 10%;
+position: fixed;
+top: 0px;
+right: 0px;
+left: 16%;
+overflow: auto;
+}
+
+table td { padding : 1px 10px 1px 4px; margin : 0; border: none; line-height: 14px; text-align: left; }
+table th { padding : 1px 4px 1px 10px; margin : 0; border: none; line-height: 14px; text-align: right; }
+
+#helpButton{
+position: fixed;
+top: 4px;
+right: 4px;
+}
+
+#help{
+position: fixed;
+top: 10%;
+right: 10%;
+bottom: 10%;
+left: 10%;
+display: none;
+background: white;
+width: 80%;
+border: 1px solid black;
+-webkit-border-radius: 8px;
+-moz-border-radius: 8px;
+border-radius: 8px;
+padding: 10px;
+overflow: auto;
+}
+#side .inner #logo { width: 204px; height: 37px; margin-bottom: 20px; background: url(../img/rabbitmqlogo.png); }
+</style>
+</head>
+<body onload="visualisationStart();" onresize="resizeCanvas();" onfocus="enableRendering();" onblur="disableRendering();">
+ <div id="side">
+ <div class="inner">
+ <div><a href="../"><div id="logo"></div></a></div>
+
+ <div>Vhost: <select id="vhosts" class="show_button" size="1" onchange="vhostChanged();"></select></div>
+
+ <div class="hidden">Exchanges
+ <div style="float: right;">
+ <input type="checkbox" value="Display" checked onclick="toggleRendering('hidden_exchanges', 'show_exchanges', 'exchange');"/>Display</div>
+ <select id="hidden_exchanges" multiple="true" size="8" class="hidden"></select>
+ <button id="show_exchanges" class="show_button" onclick="showExchanges();">Show</button>
+ </div>
+
+ <div class="hidden">Queues
+ <div style="float: right;">
+ <input type="checkbox" value="Display" checked onclick="toggleRendering('hidden_queues', 'show_queues', 'queue');"/>Display</div>
+ <select id="hidden_queues" multiple="true" size="8" class="hidden"></select>
+ <button id="show_queues" class="show_button" onclick="showQueues();">Show</button>
+ </div>
+
+ <div class="hidden">Channels
+ <div style="float: right;">
+ <input type="checkbox" value="Display" checked onclick="toggleRendering('hidden_channels', 'show_channels', 'channel');"/>Display</div>
+ <select id="hidden_channels" multiple="true" size="8" class="hidden"></select>
+ <button id="show_channels" class="show_button" onclick="showChannels();">Show</button>
+ </div>
+
+ <!-- <div class="hidden">Connections
+ <div style="float: right;">
+ <input type="checkbox" value="Display" onclick="toggleConnections();"/>Display</div>
+ <select id="hidden_connections" multiple="true" size="8" class="hidden"></select>
+ <button class="show_button" onclick="showConnections();">Show</button>
+ </div> -->
+ </div>
+ </div>
+ <div id="main" onscroll="canvasScroll();">
+ <canvas id="topology_canvas"></canvas>
+ </div>
+ <div id="details"></div>
+ <div id="helpButton" onclick="displayHelp();">Help</div>
+ <div id="help" onclick="hideHelp();">
+ <h1 style="text-align: center;">RabbitMQ Visualiser</h1>
+
+ <p style="text-align: center;">Click to hide.</p>
+
+ <p>
+ The left of the canvas displays exchanges, the right displays
+ queues, and the top displays channels. All of these items can
+ be dragged around the canvas. They repel one another, and snap
+ back into their predefined areas should they be released within
+ the boundaries of those areas.
+ </p>
+
+ <p>
+ Shift-clicking on an item hides it - it will be added to the
+ relevant select box on the left.
+ </p>
+
+ <p>
+ Hovering over an item shows at the top of the screen various
+ details about the item. Double-clicking on the item will take
+ you to the specific page in the Management Plugin concerning
+ that item.
+ </p>
+
+ <p>
+ When hovering over an item, incoming links and/or traffic are
+ shown in green, whilst outgoing links and/or traffic are shown
+ in blue. Bindings are always displayed, but the consumers of a
+ queue, and likewise the publishers to an exchange, are only
+ drawn in when hovering over the exchange, queue or channel in
+ question.
+ </p>
+
+ <p>
+ By default, up to 10 exchanges, 10 queues and 10 channels are
+ displayed. Additional resources are available from the left
+ hand-side select boxes, and can be brought into the display by
+ selecting them and clicking on the relevant 'Show' button.
+ </p>
+
+ <p>
+ The 'Display' check-boxes turn off and on entire resource
+ classes, and reset positioning.
+ </p>
+
+ <p style="text-align: center;">Click to hide.</p>
+ </div>
+</body>
+</html>
--- /dev/null
+// glMatrix v0.9.5
+glMatrixArrayType=typeof Float32Array!="undefined"?Float32Array:typeof WebGLFloatArray!="undefined"?WebGLFloatArray:Array;var vec3={};vec3.create=function(a){var b=new glMatrixArrayType(3);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2]}return b};vec3.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];return b};vec3.add=function(a,b,c){if(!c||a==c){a[0]+=b[0];a[1]+=b[1];a[2]+=b[2];return a}c[0]=a[0]+b[0];c[1]=a[1]+b[1];c[2]=a[2]+b[2];return c};
+vec3.subtract=function(a,b,c){if(!c||a==c){a[0]-=b[0];a[1]-=b[1];a[2]-=b[2];return a}c[0]=a[0]-b[0];c[1]=a[1]-b[1];c[2]=a[2]-b[2];return c};vec3.negate=function(a,b){b||(b=a);b[0]=-a[0];b[1]=-a[1];b[2]=-a[2];return b};vec3.scale=function(a,b,c){if(!c||a==c){a[0]*=b;a[1]*=b;a[2]*=b;return a}c[0]=a[0]*b;c[1]=a[1]*b;c[2]=a[2]*b;return c};
+vec3.normalize=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=Math.sqrt(c*c+d*d+e*e);if(g){if(g==1){b[0]=c;b[1]=d;b[2]=e;return b}}else{b[0]=0;b[1]=0;b[2]=0;return b}g=1/g;b[0]=c*g;b[1]=d*g;b[2]=e*g;return b};vec3.cross=function(a,b,c){c||(c=a);var d=a[0],e=a[1];a=a[2];var g=b[0],f=b[1];b=b[2];c[0]=e*b-a*f;c[1]=a*g-d*b;c[2]=d*f-e*g;return c};vec3.length=function(a){var b=a[0],c=a[1];a=a[2];return Math.sqrt(b*b+c*c+a*a)};vec3.dot=function(a,b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]};
+vec3.direction=function(a,b,c){c||(c=a);var d=a[0]-b[0],e=a[1]-b[1];a=a[2]-b[2];b=Math.sqrt(d*d+e*e+a*a);if(!b){c[0]=0;c[1]=0;c[2]=0;return c}b=1/b;c[0]=d*b;c[1]=e*b;c[2]=a*b;return c};vec3.lerp=function(a,b,c,d){d||(d=a);d[0]=a[0]+c*(b[0]-a[0]);d[1]=a[1]+c*(b[1]-a[1]);d[2]=a[2]+c*(b[2]-a[2]);return d};vec3.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+"]"};var mat3={};
+mat3.create=function(a){var b=new glMatrixArrayType(9);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9]}return b};mat3.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];return b};mat3.identity=function(a){a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=1;a[5]=0;a[6]=0;a[7]=0;a[8]=1;return a};
+mat3.transpose=function(a,b){if(!b||a==b){var c=a[1],d=a[2],e=a[5];a[1]=a[3];a[2]=a[6];a[3]=c;a[5]=a[7];a[6]=d;a[7]=e;return a}b[0]=a[0];b[1]=a[3];b[2]=a[6];b[3]=a[1];b[4]=a[4];b[5]=a[7];b[6]=a[2];b[7]=a[5];b[8]=a[8];return b};mat3.toMat4=function(a,b){b||(b=mat4.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=0;b[4]=a[3];b[5]=a[4];b[6]=a[5];b[7]=0;b[8]=a[6];b[9]=a[7];b[10]=a[8];b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};
+mat3.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]+", "+a[6]+", "+a[7]+", "+a[8]+"]"};var mat4={};mat4.create=function(a){var b=new glMatrixArrayType(16);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15]}return b};
+mat4.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15];return b};mat4.identity=function(a){a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=0;a[5]=1;a[6]=0;a[7]=0;a[8]=0;a[9]=0;a[10]=1;a[11]=0;a[12]=0;a[13]=0;a[14]=0;a[15]=1;return a};
+mat4.transpose=function(a,b){if(!b||a==b){var c=a[1],d=a[2],e=a[3],g=a[6],f=a[7],h=a[11];a[1]=a[4];a[2]=a[8];a[3]=a[12];a[4]=c;a[6]=a[9];a[7]=a[13];a[8]=d;a[9]=g;a[11]=a[14];a[12]=e;a[13]=f;a[14]=h;return a}b[0]=a[0];b[1]=a[4];b[2]=a[8];b[3]=a[12];b[4]=a[1];b[5]=a[5];b[6]=a[9];b[7]=a[13];b[8]=a[2];b[9]=a[6];b[10]=a[10];b[11]=a[14];b[12]=a[3];b[13]=a[7];b[14]=a[11];b[15]=a[15];return b};
+mat4.determinant=function(a){var b=a[0],c=a[1],d=a[2],e=a[3],g=a[4],f=a[5],h=a[6],i=a[7],j=a[8],k=a[9],l=a[10],o=a[11],m=a[12],n=a[13],p=a[14];a=a[15];return m*k*h*e-j*n*h*e-m*f*l*e+g*n*l*e+j*f*p*e-g*k*p*e-m*k*d*i+j*n*d*i+m*c*l*i-b*n*l*i-j*c*p*i+b*k*p*i+m*f*d*o-g*n*d*o-m*c*h*o+b*n*h*o+g*c*p*o-b*f*p*o-j*f*d*a+g*k*d*a+j*c*h*a-b*k*h*a-g*c*l*a+b*f*l*a};
+mat4.inverse=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=a[3],f=a[4],h=a[5],i=a[6],j=a[7],k=a[8],l=a[9],o=a[10],m=a[11],n=a[12],p=a[13],r=a[14],s=a[15],A=c*h-d*f,B=c*i-e*f,t=c*j-g*f,u=d*i-e*h,v=d*j-g*h,w=e*j-g*i,x=k*p-l*n,y=k*r-o*n,z=k*s-m*n,C=l*r-o*p,D=l*s-m*p,E=o*s-m*r,q=1/(A*E-B*D+t*C+u*z-v*y+w*x);b[0]=(h*E-i*D+j*C)*q;b[1]=(-d*E+e*D-g*C)*q;b[2]=(p*w-r*v+s*u)*q;b[3]=(-l*w+o*v-m*u)*q;b[4]=(-f*E+i*z-j*y)*q;b[5]=(c*E-e*z+g*y)*q;b[6]=(-n*w+r*t-s*B)*q;b[7]=(k*w-o*t+m*B)*q;b[8]=(f*D-h*z+j*x)*q;
+b[9]=(-c*D+d*z-g*x)*q;b[10]=(n*v-p*t+s*A)*q;b[11]=(-k*v+l*t-m*A)*q;b[12]=(-f*C+h*y-i*x)*q;b[13]=(c*C-d*y+e*x)*q;b[14]=(-n*u+p*B-r*A)*q;b[15]=(k*u-l*B+o*A)*q;return b};mat4.toRotationMat=function(a,b){b||(b=mat4.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};
+mat4.toMat3=function(a,b){b||(b=mat3.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[4];b[4]=a[5];b[5]=a[6];b[6]=a[8];b[7]=a[9];b[8]=a[10];return b};mat4.toInverseMat3=function(a,b){var c=a[0],d=a[1],e=a[2],g=a[4],f=a[5],h=a[6],i=a[8],j=a[9],k=a[10],l=k*f-h*j,o=-k*g+h*i,m=j*g-f*i,n=c*l+d*o+e*m;if(!n)return null;n=1/n;b||(b=mat3.create());b[0]=l*n;b[1]=(-k*d+e*j)*n;b[2]=(h*d-e*f)*n;b[3]=o*n;b[4]=(k*c-e*i)*n;b[5]=(-h*c+e*g)*n;b[6]=m*n;b[7]=(-j*c+d*i)*n;b[8]=(f*c-d*g)*n;return b};
+mat4.multiply=function(a,b,c){c||(c=a);var d=a[0],e=a[1],g=a[2],f=a[3],h=a[4],i=a[5],j=a[6],k=a[7],l=a[8],o=a[9],m=a[10],n=a[11],p=a[12],r=a[13],s=a[14];a=a[15];var A=b[0],B=b[1],t=b[2],u=b[3],v=b[4],w=b[5],x=b[6],y=b[7],z=b[8],C=b[9],D=b[10],E=b[11],q=b[12],F=b[13],G=b[14];b=b[15];c[0]=A*d+B*h+t*l+u*p;c[1]=A*e+B*i+t*o+u*r;c[2]=A*g+B*j+t*m+u*s;c[3]=A*f+B*k+t*n+u*a;c[4]=v*d+w*h+x*l+y*p;c[5]=v*e+w*i+x*o+y*r;c[6]=v*g+w*j+x*m+y*s;c[7]=v*f+w*k+x*n+y*a;c[8]=z*d+C*h+D*l+E*p;c[9]=z*e+C*i+D*o+E*r;c[10]=z*
+g+C*j+D*m+E*s;c[11]=z*f+C*k+D*n+E*a;c[12]=q*d+F*h+G*l+b*p;c[13]=q*e+F*i+G*o+b*r;c[14]=q*g+F*j+G*m+b*s;c[15]=q*f+F*k+G*n+b*a;return c};mat4.multiplyVec3=function(a,b,c){c||(c=b);var d=b[0],e=b[1];b=b[2];c[0]=a[0]*d+a[4]*e+a[8]*b+a[12];c[1]=a[1]*d+a[5]*e+a[9]*b+a[13];c[2]=a[2]*d+a[6]*e+a[10]*b+a[14];return c};
+mat4.multiplyVec4=function(a,b,c){c||(c=b);var d=b[0],e=b[1],g=b[2];b=b[3];c[0]=a[0]*d+a[4]*e+a[8]*g+a[12]*b;c[1]=a[1]*d+a[5]*e+a[9]*g+a[13]*b;c[2]=a[2]*d+a[6]*e+a[10]*g+a[14]*b;c[3]=a[3]*d+a[7]*e+a[11]*g+a[15]*b;return c};
+mat4.translate=function(a,b,c){var d=b[0],e=b[1];b=b[2];if(!c||a==c){a[12]=a[0]*d+a[4]*e+a[8]*b+a[12];a[13]=a[1]*d+a[5]*e+a[9]*b+a[13];a[14]=a[2]*d+a[6]*e+a[10]*b+a[14];a[15]=a[3]*d+a[7]*e+a[11]*b+a[15];return a}var g=a[0],f=a[1],h=a[2],i=a[3],j=a[4],k=a[5],l=a[6],o=a[7],m=a[8],n=a[9],p=a[10],r=a[11];c[0]=g;c[1]=f;c[2]=h;c[3]=i;c[4]=j;c[5]=k;c[6]=l;c[7]=o;c[8]=m;c[9]=n;c[10]=p;c[11]=r;c[12]=g*d+j*e+m*b+a[12];c[13]=f*d+k*e+n*b+a[13];c[14]=h*d+l*e+p*b+a[14];c[15]=i*d+o*e+r*b+a[15];return c};
+mat4.scale=function(a,b,c){var d=b[0],e=b[1];b=b[2];if(!c||a==c){a[0]*=d;a[1]*=d;a[2]*=d;a[3]*=d;a[4]*=e;a[5]*=e;a[6]*=e;a[7]*=e;a[8]*=b;a[9]*=b;a[10]*=b;a[11]*=b;return a}c[0]=a[0]*d;c[1]=a[1]*d;c[2]=a[2]*d;c[3]=a[3]*d;c[4]=a[4]*e;c[5]=a[5]*e;c[6]=a[6]*e;c[7]=a[7]*e;c[8]=a[8]*b;c[9]=a[9]*b;c[10]=a[10]*b;c[11]=a[11]*b;c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15];return c};
+mat4.rotate=function(a,b,c,d){var e=c[0],g=c[1];c=c[2];var f=Math.sqrt(e*e+g*g+c*c);if(!f)return null;if(f!=1){f=1/f;e*=f;g*=f;c*=f}var h=Math.sin(b),i=Math.cos(b),j=1-i;b=a[0];f=a[1];var k=a[2],l=a[3],o=a[4],m=a[5],n=a[6],p=a[7],r=a[8],s=a[9],A=a[10],B=a[11],t=e*e*j+i,u=g*e*j+c*h,v=c*e*j-g*h,w=e*g*j-c*h,x=g*g*j+i,y=c*g*j+e*h,z=e*c*j+g*h;e=g*c*j-e*h;g=c*c*j+i;if(d){if(a!=d){d[12]=a[12];d[13]=a[13];d[14]=a[14];d[15]=a[15]}}else d=a;d[0]=b*t+o*u+r*v;d[1]=f*t+m*u+s*v;d[2]=k*t+n*u+A*v;d[3]=l*t+p*u+B*
+v;d[4]=b*w+o*x+r*y;d[5]=f*w+m*x+s*y;d[6]=k*w+n*x+A*y;d[7]=l*w+p*x+B*y;d[8]=b*z+o*e+r*g;d[9]=f*z+m*e+s*g;d[10]=k*z+n*e+A*g;d[11]=l*z+p*e+B*g;return d};mat4.rotateX=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[4],g=a[5],f=a[6],h=a[7],i=a[8],j=a[9],k=a[10],l=a[11];if(c){if(a!=c){c[0]=a[0];c[1]=a[1];c[2]=a[2];c[3]=a[3];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[4]=e*b+i*d;c[5]=g*b+j*d;c[6]=f*b+k*d;c[7]=h*b+l*d;c[8]=e*-d+i*b;c[9]=g*-d+j*b;c[10]=f*-d+k*b;c[11]=h*-d+l*b;return c};
+mat4.rotateY=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[0],g=a[1],f=a[2],h=a[3],i=a[8],j=a[9],k=a[10],l=a[11];if(c){if(a!=c){c[4]=a[4];c[5]=a[5];c[6]=a[6];c[7]=a[7];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[0]=e*b+i*-d;c[1]=g*b+j*-d;c[2]=f*b+k*-d;c[3]=h*b+l*-d;c[8]=e*d+i*b;c[9]=g*d+j*b;c[10]=f*d+k*b;c[11]=h*d+l*b;return c};
+mat4.rotateZ=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[0],g=a[1],f=a[2],h=a[3],i=a[4],j=a[5],k=a[6],l=a[7];if(c){if(a!=c){c[8]=a[8];c[9]=a[9];c[10]=a[10];c[11]=a[11];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[0]=e*b+i*d;c[1]=g*b+j*d;c[2]=f*b+k*d;c[3]=h*b+l*d;c[4]=e*-d+i*b;c[5]=g*-d+j*b;c[6]=f*-d+k*b;c[7]=h*-d+l*b;return c};
+mat4.frustum=function(a,b,c,d,e,g,f){f||(f=mat4.create());var h=b-a,i=d-c,j=g-e;f[0]=e*2/h;f[1]=0;f[2]=0;f[3]=0;f[4]=0;f[5]=e*2/i;f[6]=0;f[7]=0;f[8]=(b+a)/h;f[9]=(d+c)/i;f[10]=-(g+e)/j;f[11]=-1;f[12]=0;f[13]=0;f[14]=-(g*e*2)/j;f[15]=0;return f};mat4.perspective=function(a,b,c,d,e){a=c*Math.tan(a*Math.PI/360);b=a*b;return mat4.frustum(-b,b,-a,a,c,d,e)};
+mat4.ortho=function(a,b,c,d,e,g,f){f||(f=mat4.create());var h=b-a,i=d-c,j=g-e;f[0]=2/h;f[1]=0;f[2]=0;f[3]=0;f[4]=0;f[5]=2/i;f[6]=0;f[7]=0;f[8]=0;f[9]=0;f[10]=-2/j;f[11]=0;f[12]=-(a+b)/h;f[13]=-(d+c)/i;f[14]=-(g+e)/j;f[15]=1;return f};
+mat4.lookAt=function(a,b,c,d){d||(d=mat4.create());var e=a[0],g=a[1];a=a[2];var f=c[0],h=c[1],i=c[2];c=b[1];var j=b[2];if(e==b[0]&&g==c&&a==j)return mat4.identity(d);var k,l,o,m;c=e-b[0];j=g-b[1];b=a-b[2];m=1/Math.sqrt(c*c+j*j+b*b);c*=m;j*=m;b*=m;k=h*b-i*j;i=i*c-f*b;f=f*j-h*c;if(m=Math.sqrt(k*k+i*i+f*f)){m=1/m;k*=m;i*=m;f*=m}else f=i=k=0;h=j*f-b*i;l=b*k-c*f;o=c*i-j*k;if(m=Math.sqrt(h*h+l*l+o*o)){m=1/m;h*=m;l*=m;o*=m}else o=l=h=0;d[0]=k;d[1]=h;d[2]=c;d[3]=0;d[4]=i;d[5]=l;d[6]=j;d[7]=0;d[8]=f;d[9]=
+o;d[10]=b;d[11]=0;d[12]=-(k*e+i*g+f*a);d[13]=-(h*e+l*g+o*a);d[14]=-(c*e+j*g+b*a);d[15]=1;return d};mat4.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]+", "+a[6]+", "+a[7]+", "+a[8]+", "+a[9]+", "+a[10]+", "+a[11]+", "+a[12]+", "+a[13]+", "+a[14]+", "+a[15]+"]"};quat4={};quat4.create=function(a){var b=new glMatrixArrayType(4);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3]}return b};quat4.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];return b};
+quat4.calculateW=function(a,b){var c=a[0],d=a[1],e=a[2];if(!b||a==b){a[3]=-Math.sqrt(Math.abs(1-c*c-d*d-e*e));return a}b[0]=c;b[1]=d;b[2]=e;b[3]=-Math.sqrt(Math.abs(1-c*c-d*d-e*e));return b};quat4.inverse=function(a,b){if(!b||a==b){a[0]*=1;a[1]*=1;a[2]*=1;return a}b[0]=-a[0];b[1]=-a[1];b[2]=-a[2];b[3]=a[3];return b};quat4.length=function(a){var b=a[0],c=a[1],d=a[2];a=a[3];return Math.sqrt(b*b+c*c+d*d+a*a)};
+quat4.normalize=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=a[3],f=Math.sqrt(c*c+d*d+e*e+g*g);if(f==0){b[0]=0;b[1]=0;b[2]=0;b[3]=0;return b}f=1/f;b[0]=c*f;b[1]=d*f;b[2]=e*f;b[3]=g*f;return b};quat4.multiply=function(a,b,c){c||(c=a);var d=a[0],e=a[1],g=a[2];a=a[3];var f=b[0],h=b[1],i=b[2];b=b[3];c[0]=d*b+a*f+e*i-g*h;c[1]=e*b+a*h+g*f-d*i;c[2]=g*b+a*i+d*h-e*f;c[3]=a*b-d*f-e*h-g*i;return c};
+quat4.multiplyVec3=function(a,b,c){c||(c=b);var d=b[0],e=b[1],g=b[2];b=a[0];var f=a[1],h=a[2];a=a[3];var i=a*d+f*g-h*e,j=a*e+h*d-b*g,k=a*g+b*e-f*d;d=-b*d-f*e-h*g;c[0]=i*a+d*-b+j*-h-k*-f;c[1]=j*a+d*-f+k*-b-i*-h;c[2]=k*a+d*-h+i*-f-j*-b;return c};quat4.toMat3=function(a,b){b||(b=mat3.create());var c=a[0],d=a[1],e=a[2],g=a[3],f=c+c,h=d+d,i=e+e,j=c*f,k=c*h;c=c*i;var l=d*h;d=d*i;e=e*i;f=g*f;h=g*h;g=g*i;b[0]=1-(l+e);b[1]=k-g;b[2]=c+h;b[3]=k+g;b[4]=1-(j+e);b[5]=d-f;b[6]=c-h;b[7]=d+f;b[8]=1-(j+l);return b};
+quat4.toMat4=function(a,b){b||(b=mat4.create());var c=a[0],d=a[1],e=a[2],g=a[3],f=c+c,h=d+d,i=e+e,j=c*f,k=c*h;c=c*i;var l=d*h;d=d*i;e=e*i;f=g*f;h=g*h;g=g*i;b[0]=1-(l+e);b[1]=k-g;b[2]=c+h;b[3]=0;b[4]=k+g;b[5]=1-(j+e);b[6]=d-f;b[7]=0;b[8]=c-h;b[9]=d+f;b[10]=1-(j+l);b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};quat4.slerp=function(a,b,c,d){d||(d=a);var e=c;if(a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]<0)e=-1*c;d[0]=1-c*a[0]+e*b[0];d[1]=1-c*a[1]+e*b[1];d[2]=1-c*a[2]+e*b[2];d[3]=1-c*a[3]+e*b[3];return d};
+quat4.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+"]"};
--- /dev/null
+/*
+ * glMatrix.js - High performance matrix and vector operations for WebGL
+ * version 0.9.5
+ */
+
+/*
+ * Copyright (c) 2010 Brandon Jones
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ *
+ * 2. Altered source versions must be plainly marked as such, and must not
+ * be misrepresented as being the original software.
+ *
+ * 3. This notice may not be removed or altered from any source
+ * distribution.
+ */
+
+// Fallback for systems that don't support WebGL
+// Picks the storage type used by every vec3/mat3/mat4/quat4 constructor:
+// Float32Array where typed arrays exist, else the legacy WebGLFloatArray,
+// else a plain JS Array.
+if(typeof Float32Array != 'undefined') {
+ glMatrixArrayType = Float32Array;
+} else if(typeof WebGLFloatArray != 'undefined') {
+ glMatrixArrayType = WebGLFloatArray; // This is officially deprecated and should disappear in future revisions.
+} else {
+ glMatrixArrayType = Array;
+}
+
+/*
+ * vec3 - 3 Dimensional Vector
+ */
+var vec3 = {};
+
+/*
+ * vec3.create
+ * Creates a new instance of a vec3 using the default array type
+ * Any javascript array containing at least 3 numeric elements can serve as a vec3
+ *
+ * Params:
+ * vec - Optional, vec3 containing values to initialize with
+ *
+ * Returns:
+ * New vec3
+ */
+vec3.create = function(vec) {
+ var dest = new glMatrixArrayType(3);
+
+ if(vec) {
+ dest[0] = vec[0];
+ dest[1] = vec[1];
+ dest[2] = vec[2];
+ }
+
+ return dest;
+};
+
+/*
+ * vec3.set
+ * Copies the values of one vec3 to another
+ *
+ * Params:
+ * vec - vec3 containing values to copy
+ * dest - vec3 receiving copied values
+ *
+ * Returns:
+ * dest
+ */
+vec3.set = function(vec, dest) {
+ dest[0] = vec[0];
+ dest[1] = vec[1];
+ dest[2] = vec[2];
+
+ return dest;
+};
+
+/*
+ * vec3.add
+ * Performs a vector addition
+ *
+ * Params:
+ * vec - vec3, first operand
+ * vec2 - vec3, second operand
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.add = function(vec, vec2, dest) {
+ if(!dest || vec == dest) {
+ vec[0] += vec2[0];
+ vec[1] += vec2[1];
+ vec[2] += vec2[2];
+ return vec;
+ }
+
+ dest[0] = vec[0] + vec2[0];
+ dest[1] = vec[1] + vec2[1];
+ dest[2] = vec[2] + vec2[2];
+ return dest;
+};
+
+/*
+ * vec3.subtract
+ * Performs a vector subtraction
+ *
+ * Params:
+ * vec - vec3, first operand
+ * vec2 - vec3, second operand
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.subtract = function(vec, vec2, dest) {
+ if(!dest || vec == dest) {
+ vec[0] -= vec2[0];
+ vec[1] -= vec2[1];
+ vec[2] -= vec2[2];
+ return vec;
+ }
+
+ dest[0] = vec[0] - vec2[0];
+ dest[1] = vec[1] - vec2[1];
+ dest[2] = vec[2] - vec2[2];
+ return dest;
+};
+
+/*
+ * vec3.negate
+ * Negates the components of a vec3
+ *
+ * Params:
+ * vec - vec3 to negate
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.negate = function(vec, dest) {
+ if(!dest) { dest = vec; }
+
+ dest[0] = -vec[0];
+ dest[1] = -vec[1];
+ dest[2] = -vec[2];
+ return dest;
+};
+
+/*
+ * vec3.scale
+ * Multiplies the components of a vec3 by a scalar value
+ *
+ * Params:
+ * vec - vec3 to scale
+ * val - Numeric value to scale by
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.scale = function(vec, val, dest) {
+ if(!dest || vec == dest) {
+ vec[0] *= val;
+ vec[1] *= val;
+ vec[2] *= val;
+ return vec;
+ }
+
+ dest[0] = vec[0]*val;
+ dest[1] = vec[1]*val;
+ dest[2] = vec[2]*val;
+ return dest;
+};
+
+/*
+ * vec3.normalize
+ * Generates a unit vector of the same direction as the provided vec3
+ * If vector length is 0, returns [0, 0, 0]
+ *
+ * Params:
+ * vec - vec3 to normalize
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.normalize = function(vec, dest) {
+ if(!dest) { dest = vec; }
+
+ var x = vec[0], y = vec[1], z = vec[2];
+ var len = Math.sqrt(x*x + y*y + z*z);
+
+ if (!len) {
+ dest[0] = 0;
+ dest[1] = 0;
+ dest[2] = 0;
+ return dest;
+ } else if (len == 1) {
+ dest[0] = x;
+ dest[1] = y;
+ dest[2] = z;
+ return dest;
+ }
+
+ len = 1 / len;
+ dest[0] = x*len;
+ dest[1] = y*len;
+ dest[2] = z*len;
+ return dest;
+};
+
+/*
+ * vec3.cross
+ * Generates the cross product of two vec3s
+ *
+ * Params:
+ * vec - vec3, first operand
+ * vec2 - vec3, second operand
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.cross = function(vec, vec2, dest){
+ if(!dest) { dest = vec; }
+
+ var x = vec[0], y = vec[1], z = vec[2];
+ var x2 = vec2[0], y2 = vec2[1], z2 = vec2[2];
+
+ dest[0] = y*z2 - z*y2;
+ dest[1] = z*x2 - x*z2;
+ dest[2] = x*y2 - y*x2;
+ return dest;
+};
+
+/*
+ * vec3.length
+ * Caclulates the length of a vec3
+ *
+ * Params:
+ * vec - vec3 to calculate length of
+ *
+ * Returns:
+ * Length of vec
+ */
+vec3.length = function(vec){
+ var x = vec[0], y = vec[1], z = vec[2];
+ return Math.sqrt(x*x + y*y + z*z);
+};
+
+/*
+ * vec3.dot
+ * Caclulates the dot product of two vec3s
+ *
+ * Params:
+ * vec - vec3, first operand
+ * vec2 - vec3, second operand
+ *
+ * Returns:
+ * Dot product of vec and vec2
+ */
+vec3.dot = function(vec, vec2){
+ return vec[0]*vec2[0] + vec[1]*vec2[1] + vec[2]*vec2[2];
+};
+
+/*
+ * vec3.direction
+ * Generates a unit vector pointing from one vector to another
+ *
+ * Params:
+ * vec - origin vec3
+ * vec2 - vec3 to point to
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.direction = function(vec, vec2, dest) {
+ if(!dest) { dest = vec; }
+
+ var x = vec[0] - vec2[0];
+ var y = vec[1] - vec2[1];
+ var z = vec[2] - vec2[2];
+
+ var len = Math.sqrt(x*x + y*y + z*z);
+ if (!len) {
+ dest[0] = 0;
+ dest[1] = 0;
+ dest[2] = 0;
+ return dest;
+ }
+
+ len = 1 / len;
+ dest[0] = x * len;
+ dest[1] = y * len;
+ dest[2] = z * len;
+ return dest;
+};
+
+/*
+ * vec3.lerp
+ * Performs a linear interpolation between two vec3
+ *
+ * Params:
+ * vec - vec3, first vector
+ * vec2 - vec3, second vector
+ * lerp - interpolation amount between the two inputs
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+vec3.lerp = function(vec, vec2, lerp, dest){
+ if(!dest) { dest = vec; }
+
+ dest[0] = vec[0] + lerp * (vec2[0] - vec[0]);
+ dest[1] = vec[1] + lerp * (vec2[1] - vec[1]);
+ dest[2] = vec[2] + lerp * (vec2[2] - vec[2]);
+
+ return dest;
+}
+
+/*
+ * vec3.str
+ * Returns a string representation of a vector
+ *
+ * Params:
+ * vec - vec3 to represent as a string
+ *
+ * Returns:
+ * string representation of vec
+ */
+vec3.str = function(vec) {
+ return '[' + vec[0] + ', ' + vec[1] + ', ' + vec[2] + ']';
+};
+
+/*
+ * mat3 - 3x3 Matrix
+ */
+var mat3 = {};
+
+/*
+ * mat3.create
+ * Creates a new instance of a mat3 using the default array type
+ * Any javascript array containing at least 9 numeric elements can serve as a mat3
+ *
+ * Params:
+ * mat - Optional, mat3 containing values to initialize with
+ *
+ * Returns:
+ * New mat3
+ */
+mat3.create = function(mat) {
+ var dest = new glMatrixArrayType(9);
+
+ if(mat) {
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[3];
+ dest[4] = mat[4];
+ dest[5] = mat[5];
+ dest[6] = mat[6];
+ dest[7] = mat[7];
+ dest[8] = mat[8];
+ dest[9] = mat[9];
+ }
+
+ return dest;
+};
+
+/*
+ * mat3.set
+ * Copies the values of one mat3 to another
+ *
+ * Params:
+ * mat - mat3 containing values to copy
+ * dest - mat3 receiving copied values
+ *
+ * Returns:
+ * dest
+ */
+mat3.set = function(mat, dest) {
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[3];
+ dest[4] = mat[4];
+ dest[5] = mat[5];
+ dest[6] = mat[6];
+ dest[7] = mat[7];
+ dest[8] = mat[8];
+ return dest;
+};
+
+/*
+ * mat3.identity
+ * Sets a mat3 to an identity matrix
+ *
+ * Params:
+ * dest - mat3 to set
+ *
+ * Returns:
+ * dest
+ */
+mat3.identity = function(dest) {
+ dest[0] = 1;
+ dest[1] = 0;
+ dest[2] = 0;
+ dest[3] = 0;
+ dest[4] = 1;
+ dest[5] = 0;
+ dest[6] = 0;
+ dest[7] = 0;
+ dest[8] = 1;
+ return dest;
+};
+
+/*
+ * mat3.transpose
+ * Transposes a mat3 (flips the values over the diagonal)
+ *
+ * Params:
+ * mat - mat3 to transpose
+ * dest - Optional, mat3 receiving transposed values. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat3.transpose = function(mat, dest) {
+ // If we are transposing ourselves we can skip a few steps but have to cache some values
+ if(!dest || mat == dest) {
+ // Cache the upper-triangle entries before they are overwritten; the
+ // assignment order below matters for the in-place swap.
+ var a01 = mat[1], a02 = mat[2];
+ var a12 = mat[5];
+
+ mat[1] = mat[3];
+ mat[2] = mat[6];
+ mat[3] = a01;
+ mat[5] = mat[7];
+ mat[6] = a02;
+ mat[7] = a12;
+ return mat;
+ }
+
+ dest[0] = mat[0];
+ dest[1] = mat[3];
+ dest[2] = mat[6];
+ dest[3] = mat[1];
+ dest[4] = mat[4];
+ dest[5] = mat[7];
+ dest[6] = mat[2];
+ dest[7] = mat[5];
+ dest[8] = mat[8];
+ return dest;
+};
+
+/*
+ * mat3.toMat4
+ * Copies the elements of a mat3 into the upper 3x3 elements of a mat4
+ *
+ * Params:
+ * mat - mat3 containing values to copy
+ * dest - Optional, mat4 receiving copied values
+ *
+ * Returns:
+ * dest if specified, a new mat4 otherwise
+ */
+mat3.toMat4 = function(mat, dest) {
+ if(!dest) { dest = mat4.create(); }
+
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = 0;
+
+ dest[4] = mat[3];
+ dest[5] = mat[4];
+ dest[6] = mat[5];
+ dest[7] = 0;
+
+ dest[8] = mat[6];
+ dest[9] = mat[7];
+ dest[10] = mat[8];
+ dest[11] = 0;
+
+ dest[12] = 0;
+ dest[13] = 0;
+ dest[14] = 0;
+ dest[15] = 1;
+
+ return dest;
+}
+
+/*
+ * mat3.str
+ * Returns a string representation of a mat3
+ *
+ * Params:
+ * mat - mat3 to represent as a string
+ *
+ * Returns:
+ * string representation of mat
+ */
+mat3.str = function(mat) {
+ return '[' + mat[0] + ', ' + mat[1] + ', ' + mat[2] +
+ ', ' + mat[3] + ', '+ mat[4] + ', ' + mat[5] +
+ ', ' + mat[6] + ', ' + mat[7] + ', '+ mat[8] + ']';
+};
+
+/*
+ * mat4 - 4x4 Matrix
+ */
+var mat4 = {};
+
+/*
+ * mat4.create
+ * Creates a new instance of a mat4 using the default array type
+ * Any javascript array containing at least 16 numeric elements can serve as a mat4
+ *
+ * Params:
+ * mat - Optional, mat4 containing values to initialize with
+ *
+ * Returns:
+ * New mat4
+ */
+mat4.create = function(mat) {
+ var dest = new glMatrixArrayType(16);
+
+ if(mat) {
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[3];
+ dest[4] = mat[4];
+ dest[5] = mat[5];
+ dest[6] = mat[6];
+ dest[7] = mat[7];
+ dest[8] = mat[8];
+ dest[9] = mat[9];
+ dest[10] = mat[10];
+ dest[11] = mat[11];
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ }
+
+ return dest;
+};
+
+/*
+ * mat4.set
+ * Copies the values of one mat4 to another
+ *
+ * Params:
+ * mat - mat4 containing values to copy
+ * dest - mat4 receiving copied values
+ *
+ * Returns:
+ * dest
+ */
+mat4.set = function(mat, dest) {
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[3];
+ dest[4] = mat[4];
+ dest[5] = mat[5];
+ dest[6] = mat[6];
+ dest[7] = mat[7];
+ dest[8] = mat[8];
+ dest[9] = mat[9];
+ dest[10] = mat[10];
+ dest[11] = mat[11];
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ return dest;
+};
+
+/*
+ * mat4.identity
+ * Sets a mat4 to an identity matrix
+ *
+ * Params:
+ * dest - mat4 to set
+ *
+ * Returns:
+ * dest
+ */
+mat4.identity = function(dest) {
+ dest[0] = 1;
+ dest[1] = 0;
+ dest[2] = 0;
+ dest[3] = 0;
+ dest[4] = 0;
+ dest[5] = 1;
+ dest[6] = 0;
+ dest[7] = 0;
+ dest[8] = 0;
+ dest[9] = 0;
+ dest[10] = 1;
+ dest[11] = 0;
+ dest[12] = 0;
+ dest[13] = 0;
+ dest[14] = 0;
+ dest[15] = 1;
+ return dest;
+};
+
+/*
+ * mat4.transpose
+ * Transposes a mat4 (flips the values over the diagonal)
+ *
+ * Params:
+ * mat - mat4 to transpose
+ * dest - Optional, mat4 receiving transposed values. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.transpose = function(mat, dest) {
+ // If we are transposing ourselves we can skip a few steps but have to cache some values
+ if(!dest || mat == dest) {
+ // Cache the six upper-triangle entries before they are overwritten;
+ // the assignment order below matters for the in-place swap.
+ var a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a12 = mat[6], a13 = mat[7];
+ var a23 = mat[11];
+
+ mat[1] = mat[4];
+ mat[2] = mat[8];
+ mat[3] = mat[12];
+ mat[4] = a01;
+ mat[6] = mat[9];
+ mat[7] = mat[13];
+ mat[8] = a02;
+ mat[9] = a12;
+ mat[11] = mat[14];
+ mat[12] = a03;
+ mat[13] = a13;
+ mat[14] = a23;
+ return mat;
+ }
+
+ dest[0] = mat[0];
+ dest[1] = mat[4];
+ dest[2] = mat[8];
+ dest[3] = mat[12];
+ dest[4] = mat[1];
+ dest[5] = mat[5];
+ dest[6] = mat[9];
+ dest[7] = mat[13];
+ dest[8] = mat[2];
+ dest[9] = mat[6];
+ dest[10] = mat[10];
+ dest[11] = mat[14];
+ dest[12] = mat[3];
+ dest[13] = mat[7];
+ dest[14] = mat[11];
+ dest[15] = mat[15];
+ return dest;
+};
+
+/*
+ * mat4.determinant
+ * Calculates the determinant of a mat4
+ *
+ * Params:
+ * mat - mat4 to calculate determinant of
+ *
+ * Returns:
+ * determinant of mat
+ */
+mat4.determinant = function(mat) {
+ // Cache the matrix values (makes for huge speed increases!)
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+ var a30 = mat[12], a31 = mat[13], a32 = mat[14], a33 = mat[15];
+
+ // Fully expanded 4x4 determinant: 24 signed four-factor products,
+ // grouped by the trailing a*3 factor of each term.
+ return a30*a21*a12*a03 - a20*a31*a12*a03 - a30*a11*a22*a03 + a10*a31*a22*a03 +
+ a20*a11*a32*a03 - a10*a21*a32*a03 - a30*a21*a02*a13 + a20*a31*a02*a13 +
+ a30*a01*a22*a13 - a00*a31*a22*a13 - a20*a01*a32*a13 + a00*a21*a32*a13 +
+ a30*a11*a02*a23 - a10*a31*a02*a23 - a30*a01*a12*a23 + a00*a31*a12*a23 +
+ a10*a01*a32*a23 - a00*a11*a32*a23 - a20*a11*a02*a33 + a10*a21*a02*a33 +
+ a20*a01*a12*a33 - a00*a21*a12*a33 - a10*a01*a22*a33 + a00*a11*a22*a33;
+};
+
+/*
+ * mat4.inverse
+ * Calculates the inverse matrix of a mat4
+ *
+ * Params:
+ * mat - mat4 to calculate inverse of
+ * dest - Optional, mat4 receiving inverse matrix. If not specified result is written to mat
+ *
+ * NOTE(review): there is no singularity check — a non-invertible input
+ * (determinant 0) yields Infinity/NaN entries rather than an error.
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.inverse = function(mat, dest) {
+ if(!dest) { dest = mat; }
+
+ // Cache the matrix values (makes for huge speed increases!)
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+ var a30 = mat[12], a31 = mat[13], a32 = mat[14], a33 = mat[15];
+
+ // 2x2 sub-determinants shared by several cofactors below.
+ var b00 = a00*a11 - a01*a10;
+ var b01 = a00*a12 - a02*a10;
+ var b02 = a00*a13 - a03*a10;
+ var b03 = a01*a12 - a02*a11;
+ var b04 = a01*a13 - a03*a11;
+ var b05 = a02*a13 - a03*a12;
+ var b06 = a20*a31 - a21*a30;
+ var b07 = a20*a32 - a22*a30;
+ var b08 = a20*a33 - a23*a30;
+ var b09 = a21*a32 - a22*a31;
+ var b10 = a21*a33 - a23*a31;
+ var b11 = a22*a33 - a23*a32;
+
+ // Calculate the determinant (inlined to avoid double-caching)
+ var invDet = 1/(b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06);
+
+ dest[0] = (a11*b11 - a12*b10 + a13*b09)*invDet;
+ dest[1] = (-a01*b11 + a02*b10 - a03*b09)*invDet;
+ dest[2] = (a31*b05 - a32*b04 + a33*b03)*invDet;
+ dest[3] = (-a21*b05 + a22*b04 - a23*b03)*invDet;
+ dest[4] = (-a10*b11 + a12*b08 - a13*b07)*invDet;
+ dest[5] = (a00*b11 - a02*b08 + a03*b07)*invDet;
+ dest[6] = (-a30*b05 + a32*b02 - a33*b01)*invDet;
+ dest[7] = (a20*b05 - a22*b02 + a23*b01)*invDet;
+ dest[8] = (a10*b10 - a11*b08 + a13*b06)*invDet;
+ dest[9] = (-a00*b10 + a01*b08 - a03*b06)*invDet;
+ dest[10] = (a30*b04 - a31*b02 + a33*b00)*invDet;
+ dest[11] = (-a20*b04 + a21*b02 - a23*b00)*invDet;
+ dest[12] = (-a10*b09 + a11*b07 - a12*b06)*invDet;
+ dest[13] = (a00*b09 - a01*b07 + a02*b06)*invDet;
+ dest[14] = (-a30*b03 + a31*b01 - a32*b00)*invDet;
+ dest[15] = (a20*b03 - a21*b01 + a22*b00)*invDet;
+
+ return dest;
+};
+
+/*
+ * mat4.toRotationMat
+ * Copies the upper 3x3 elements of a mat4 into another mat4
+ *
+ * Params:
+ * mat - mat4 containing values to copy
+ * dest - Optional, mat4 receiving copied values
+ *
+ * Returns:
+ * dest is specified, a new mat4 otherwise
+ */
+mat4.toRotationMat = function(mat, dest) {
+ if(!dest) { dest = mat4.create(); }
+
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[3];
+ dest[4] = mat[4];
+ dest[5] = mat[5];
+ dest[6] = mat[6];
+ dest[7] = mat[7];
+ dest[8] = mat[8];
+ dest[9] = mat[9];
+ dest[10] = mat[10];
+ dest[11] = mat[11];
+ dest[12] = 0;
+ dest[13] = 0;
+ dest[14] = 0;
+ dest[15] = 1;
+
+ return dest;
+};
+
+/*
+ * mat4.toMat3
+ * Copies the upper 3x3 elements of a mat4 into a mat3
+ *
+ * Params:
+ * mat - mat4 containing values to copy
+ * dest - Optional, mat3 receiving copied values
+ *
+ * Returns:
+ * dest is specified, a new mat3 otherwise
+ */
+mat4.toMat3 = function(mat, dest) {
+ if(!dest) { dest = mat3.create(); }
+
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[4];
+ dest[4] = mat[5];
+ dest[5] = mat[6];
+ dest[6] = mat[8];
+ dest[7] = mat[9];
+ dest[8] = mat[10];
+
+ return dest;
+};
+
+/*
+ * mat4.toInverseMat3
+ * Calculates the inverse of the upper 3x3 elements of a mat4 and copies the result into a mat3
+ * The resulting matrix is useful for calculating transformed normals
+ *
+ * Params:
+ * mat - mat4 containing values to invert and copy
+ * dest - Optional, mat3 receiving values
+ *
+ * Returns:
+ * dest if specified, a new mat3 otherwise; null if the upper 3x3
+ * block is singular (determinant is 0)
+ */
+mat4.toInverseMat3 = function(mat, dest) {
+ // Cache the matrix values (makes for huge speed increases!)
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10];
+
+ // 2x2 sub-determinants, reused by both the determinant and dest below
+ var b01 = a22*a11-a12*a21;
+ var b11 = -a22*a10+a12*a20;
+ var b21 = a21*a10-a11*a20;
+
+ // Determinant of the 3x3 block; bail out before allocating dest
+ var d = a00*b01 + a01*b11 + a02*b21;
+ if (!d) { return null; }
+ var id = 1/d;
+
+ if(!dest) { dest = mat3.create(); }
+
+ // Inverse = adjugate scaled by 1/determinant
+ dest[0] = b01*id;
+ dest[1] = (-a22*a01 + a02*a21)*id;
+ dest[2] = (a12*a01 - a02*a11)*id;
+ dest[3] = b11*id;
+ dest[4] = (a22*a00 - a02*a20)*id;
+ dest[5] = (-a12*a00 + a02*a10)*id;
+ dest[6] = b21*id;
+ dest[7] = (-a21*a00 + a01*a20)*id;
+ dest[8] = (a11*a00 - a01*a10)*id;
+
+ return dest;
+};
+
+/*
+ * mat4.multiply
+ * Performs a matrix multiplication (dest = mat * mat2)
+ *
+ * Params:
+ * mat - mat4, first operand
+ * mat2 - mat4, second operand
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.multiply = function(mat, mat2, dest) {
+ if(!dest) { dest = mat }
+
+ // Cache the matrix values (makes for huge speed increases!)
+ // Caching both operands up front also makes it safe for dest to
+ // alias mat or mat2.
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+ var a30 = mat[12], a31 = mat[13], a32 = mat[14], a33 = mat[15];
+
+ var b00 = mat2[0], b01 = mat2[1], b02 = mat2[2], b03 = mat2[3];
+ var b10 = mat2[4], b11 = mat2[5], b12 = mat2[6], b13 = mat2[7];
+ var b20 = mat2[8], b21 = mat2[9], b22 = mat2[10], b23 = mat2[11];
+ var b30 = mat2[12], b31 = mat2[13], b32 = mat2[14], b33 = mat2[15];
+
+ // Each group of four lines below computes one 4-element column of dest
+ dest[0] = b00*a00 + b01*a10 + b02*a20 + b03*a30;
+ dest[1] = b00*a01 + b01*a11 + b02*a21 + b03*a31;
+ dest[2] = b00*a02 + b01*a12 + b02*a22 + b03*a32;
+ dest[3] = b00*a03 + b01*a13 + b02*a23 + b03*a33;
+ dest[4] = b10*a00 + b11*a10 + b12*a20 + b13*a30;
+ dest[5] = b10*a01 + b11*a11 + b12*a21 + b13*a31;
+ dest[6] = b10*a02 + b11*a12 + b12*a22 + b13*a32;
+ dest[7] = b10*a03 + b11*a13 + b12*a23 + b13*a33;
+ dest[8] = b20*a00 + b21*a10 + b22*a20 + b23*a30;
+ dest[9] = b20*a01 + b21*a11 + b22*a21 + b23*a31;
+ dest[10] = b20*a02 + b21*a12 + b22*a22 + b23*a32;
+ dest[11] = b20*a03 + b21*a13 + b22*a23 + b23*a33;
+ dest[12] = b30*a00 + b31*a10 + b32*a20 + b33*a30;
+ dest[13] = b30*a01 + b31*a11 + b32*a21 + b33*a31;
+ dest[14] = b30*a02 + b31*a12 + b32*a22 + b33*a32;
+ dest[15] = b30*a03 + b31*a13 + b32*a23 + b33*a33;
+
+ return dest;
+};
+
+/*
+ * mat4.multiplyVec3
+ * Transforms a vec3 with the given matrix
+ * 4th vector component is implicitly '1'
+ *
+ * Params:
+ * mat - mat4 to transform the vector with
+ * vec - vec3 to transform
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+mat4.multiplyVec3 = function(mat, vec, dest) {
+ if(!dest) { dest = vec }
+
+ var x = vec[0], y = vec[1], z = vec[2];
+
+ dest[0] = mat[0]*x + mat[4]*y + mat[8]*z + mat[12];
+ dest[1] = mat[1]*x + mat[5]*y + mat[9]*z + mat[13];
+ dest[2] = mat[2]*x + mat[6]*y + mat[10]*z + mat[14];
+
+ return dest;
+};
+
+/*
+ * mat4.multiplyVec4
+ * Transforms a vec4 with the given matrix
+ *
+ * Params:
+ * mat - mat4 to transform the vector with
+ * vec - vec4 to transform
+ * dest - Optional, vec4 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+mat4.multiplyVec4 = function(mat, vec, dest) {
+ if(!dest) { dest = vec }
+
+ var x = vec[0], y = vec[1], z = vec[2], w = vec[3];
+
+ dest[0] = mat[0]*x + mat[4]*y + mat[8]*z + mat[12]*w;
+ dest[1] = mat[1]*x + mat[5]*y + mat[9]*z + mat[13]*w;
+ dest[2] = mat[2]*x + mat[6]*y + mat[10]*z + mat[14]*w;
+ dest[3] = mat[3]*x + mat[7]*y + mat[11]*z + mat[15]*w;
+
+ return dest;
+};
+
+/*
+ * mat4.translate
+ * Translates a matrix by the given vector
+ *
+ * Params:
+ * mat - mat4 to translate
+ * vec - vec3 specifying the translation
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.translate = function(mat, vec, dest) {
+ var x = vec[0], y = vec[1], z = vec[2];
+
+ // In-place path: only the translation elements (12-15) change
+ if(!dest || mat == dest) {
+ mat[12] = mat[0]*x + mat[4]*y + mat[8]*z + mat[12];
+ mat[13] = mat[1]*x + mat[5]*y + mat[9]*z + mat[13];
+ mat[14] = mat[2]*x + mat[6]*y + mat[10]*z + mat[14];
+ mat[15] = mat[3]*x + mat[7]*y + mat[11]*z + mat[15];
+ return mat;
+ }
+
+ // Copying path: cache the 3x4 upper block, copy it over, then write
+ // the new translation computed from the cached values.
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+
+ dest[0] = a00;
+ dest[1] = a01;
+ dest[2] = a02;
+ dest[3] = a03;
+ dest[4] = a10;
+ dest[5] = a11;
+ dest[6] = a12;
+ dest[7] = a13;
+ dest[8] = a20;
+ dest[9] = a21;
+ dest[10] = a22;
+ dest[11] = a23;
+
+ dest[12] = a00*x + a10*y + a20*z + mat[12];
+ dest[13] = a01*x + a11*y + a21*z + mat[13];
+ dest[14] = a02*x + a12*y + a22*z + mat[14];
+ dest[15] = a03*x + a13*y + a23*z + mat[15];
+ return dest;
+};
+
+/*
+ * mat4.scale
+ * Scales a matrix by the given vector
+ *
+ * Params:
+ * mat - mat4 to scale
+ * vec - vec3 specifying the scale for each axis
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.scale = function(mat, vec, dest) {
+ var x = vec[0], y = vec[1], z = vec[2];
+
+ if(!dest || mat == dest) {
+ mat[0] *= x;
+ mat[1] *= x;
+ mat[2] *= x;
+ mat[3] *= x;
+ mat[4] *= y;
+ mat[5] *= y;
+ mat[6] *= y;
+ mat[7] *= y;
+ mat[8] *= z;
+ mat[9] *= z;
+ mat[10] *= z;
+ mat[11] *= z;
+ return mat;
+ }
+
+ dest[0] = mat[0]*x;
+ dest[1] = mat[1]*x;
+ dest[2] = mat[2]*x;
+ dest[3] = mat[3]*x;
+ dest[4] = mat[4]*y;
+ dest[5] = mat[5]*y;
+ dest[6] = mat[6]*y;
+ dest[7] = mat[7]*y;
+ dest[8] = mat[8]*z;
+ dest[9] = mat[9]*z;
+ dest[10] = mat[10]*z;
+ dest[11] = mat[11]*z;
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ return dest;
+};
+
+/*
+ * mat4.rotate
+ * Rotates a matrix by the given angle around the specified axis
+ * If rotating around a primary axis (X,Y,Z) one of the specialized rotation functions should be used instead for performance
+ *
+ * Params:
+ * mat - mat4 to rotate
+ * angle - angle (in radians) to rotate
+ * axis - vec3 representing the axis to rotate around (normalized here if needed)
+ *
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise; null if axis has zero length
+ */
+mat4.rotate = function(mat, angle, axis, dest) {
+ var x = axis[0], y = axis[1], z = axis[2];
+ var len = Math.sqrt(x*x + y*y + z*z);
+ // A zero-length axis defines no rotation
+ if (!len) { return null; }
+ if (len != 1) {
+ len = 1 / len;
+ x *= len;
+ y *= len;
+ z *= len;
+ }
+
+ var s = Math.sin(angle);
+ var c = Math.cos(angle);
+ var t = 1-c;
+
+ // Cache the matrix values (makes for huge speed increases!)
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+
+ // Construct the elements of the rotation matrix (axis-angle form)
+ var b00 = x*x*t + c, b01 = y*x*t + z*s, b02 = z*x*t - y*s;
+ var b10 = x*y*t - z*s, b11 = y*y*t + c, b12 = z*y*t + x*s;
+ var b20 = x*z*t + y*s, b21 = y*z*t - x*s, b22 = z*z*t + c;
+
+ if(!dest) {
+ dest = mat
+ } else if(mat != dest) { // If the source and destination differ, copy the unchanged last row
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ }
+
+ // Perform rotation-specific matrix multiplication
+ dest[0] = a00*b00 + a10*b01 + a20*b02;
+ dest[1] = a01*b00 + a11*b01 + a21*b02;
+ dest[2] = a02*b00 + a12*b01 + a22*b02;
+ dest[3] = a03*b00 + a13*b01 + a23*b02;
+
+ dest[4] = a00*b10 + a10*b11 + a20*b12;
+ dest[5] = a01*b10 + a11*b11 + a21*b12;
+ dest[6] = a02*b10 + a12*b11 + a22*b12;
+ dest[7] = a03*b10 + a13*b11 + a23*b12;
+
+ dest[8] = a00*b20 + a10*b21 + a20*b22;
+ dest[9] = a01*b20 + a11*b21 + a21*b22;
+ dest[10] = a02*b20 + a12*b21 + a22*b22;
+ dest[11] = a03*b20 + a13*b21 + a23*b22;
+ return dest;
+};
+
+/*
+ * mat4.rotateX
+ * Rotates a matrix by the given angle around the X axis
+ *
+ * Params:
+ * mat - mat4 to rotate
+ * angle - angle (in radians) to rotate
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.rotateX = function(mat, angle, dest) {
+ var s = Math.sin(angle);
+ var c = Math.cos(angle);
+
+ // Cache the matrix values (makes for huge speed increases!)
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+
+ if(!dest) {
+ dest = mat
+ } else if(mat != dest) { // If the source and destination differ, copy the unchanged rows
+ dest[0] = mat[0];
+ dest[1] = mat[1];
+ dest[2] = mat[2];
+ dest[3] = mat[3];
+
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ }
+
+ // Perform axis-specific matrix multiplication
+ dest[4] = a10*c + a20*s;
+ dest[5] = a11*c + a21*s;
+ dest[6] = a12*c + a22*s;
+ dest[7] = a13*c + a23*s;
+
+ dest[8] = a10*-s + a20*c;
+ dest[9] = a11*-s + a21*c;
+ dest[10] = a12*-s + a22*c;
+ dest[11] = a13*-s + a23*c;
+ return dest;
+};
+
+/*
+ * mat4.rotateY
+ * Rotates a matrix by the given angle around the Y axis
+ *
+ * Params:
+ * mat - mat4 to rotate
+ * angle - angle (in radians) to rotate
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.rotateY = function(mat, angle, dest) {
+ var s = Math.sin(angle);
+ var c = Math.cos(angle);
+
+ // Cache the matrix values (makes for huge speed increases!)
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a20 = mat[8], a21 = mat[9], a22 = mat[10], a23 = mat[11];
+
+ if(!dest) {
+ dest = mat
+ } else if(mat != dest) { // If the source and destination differ, copy the unchanged rows
+ dest[4] = mat[4];
+ dest[5] = mat[5];
+ dest[6] = mat[6];
+ dest[7] = mat[7];
+
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ }
+
+ // Perform axis-specific matrix multiplication
+ dest[0] = a00*c + a20*-s;
+ dest[1] = a01*c + a21*-s;
+ dest[2] = a02*c + a22*-s;
+ dest[3] = a03*c + a23*-s;
+
+ dest[8] = a00*s + a20*c;
+ dest[9] = a01*s + a21*c;
+ dest[10] = a02*s + a22*c;
+ dest[11] = a03*s + a23*c;
+ return dest;
+};
+
+/*
+ * mat4.rotateZ
+ * Rotates a matrix by the given angle around the Z axis
+ *
+ * Params:
+ * mat - mat4 to rotate
+ * angle - angle (in radians) to rotate
+ * dest - Optional, mat4 receiving operation result. If not specified result is written to mat
+ *
+ * Returns:
+ * dest if specified, mat otherwise
+ */
+mat4.rotateZ = function(mat, angle, dest) {
+ var s = Math.sin(angle);
+ var c = Math.cos(angle);
+
+ // Cache the matrix values (makes for huge speed increases!)
+ var a00 = mat[0], a01 = mat[1], a02 = mat[2], a03 = mat[3];
+ var a10 = mat[4], a11 = mat[5], a12 = mat[6], a13 = mat[7];
+
+ if(!dest) {
+ dest = mat
+ } else if(mat != dest) { // If the source and destination differ, copy the unchanged last row
+ dest[8] = mat[8];
+ dest[9] = mat[9];
+ dest[10] = mat[10];
+ dest[11] = mat[11];
+
+ dest[12] = mat[12];
+ dest[13] = mat[13];
+ dest[14] = mat[14];
+ dest[15] = mat[15];
+ }
+
+ // Perform axis-specific matrix multiplication
+ dest[0] = a00*c + a10*s;
+ dest[1] = a01*c + a11*s;
+ dest[2] = a02*c + a12*s;
+ dest[3] = a03*c + a13*s;
+
+ dest[4] = a00*-s + a10*c;
+ dest[5] = a01*-s + a11*c;
+ dest[6] = a02*-s + a12*c;
+ dest[7] = a03*-s + a13*c;
+
+ return dest;
+};
+
+/*
+ * mat4.frustum
+ * Generates a frustum matrix with the given bounds
+ *
+ * Params:
+ * left, right - scalar, left and right bounds of the frustum
+ * bottom, top - scalar, bottom and top bounds of the frustum
+ * near, far - scalar, near and far bounds of the frustum
+ * dest - Optional, mat4 frustum matrix will be written into
+ *
+ * Returns:
+ * dest if specified, a new mat4 otherwise
+ */
+mat4.frustum = function(left, right, bottom, top, near, far, dest) {
+ if(!dest) { dest = mat4.create(); }
+ var rl = (right - left);
+ var tb = (top - bottom);
+ var fn = (far - near);
+ dest[0] = (near*2) / rl;
+ dest[1] = 0;
+ dest[2] = 0;
+ dest[3] = 0;
+ dest[4] = 0;
+ dest[5] = (near*2) / tb;
+ dest[6] = 0;
+ dest[7] = 0;
+ dest[8] = (right + left) / rl;
+ dest[9] = (top + bottom) / tb;
+ dest[10] = -(far + near) / fn;
+ dest[11] = -1;
+ dest[12] = 0;
+ dest[13] = 0;
+ dest[14] = -(far*near*2) / fn;
+ dest[15] = 0;
+ return dest;
+};
+
+/*
+ * mat4.perspective
+ * Generates a perspective projection matrix with the given bounds
+ *
+ * Params:
+ * fovy - scalar, vertical field of view
+ * aspect - scalar, aspect ratio. typically viewport width/height
+ * near, far - scalar, near and far bounds of the frustum
+ * dest - Optional, mat4 frustum matrix will be written into
+ *
+ * Returns:
+ * dest if specified, a new mat4 otherwise
+ */
+mat4.perspective = function(fovy, aspect, near, far, dest) {
+ var top = near*Math.tan(fovy*Math.PI / 360.0);
+ var right = top*aspect;
+ return mat4.frustum(-right, right, -top, top, near, far, dest);
+};
+
+/*
+ * mat4.ortho
+ * Generates a orthogonal projection matrix with the given bounds
+ *
+ * Params:
+ * left, right - scalar, left and right bounds of the frustum
+ * bottom, top - scalar, bottom and top bounds of the frustum
+ * near, far - scalar, near and far bounds of the frustum
+ * dest - Optional, mat4 frustum matrix will be written into
+ *
+ * Returns:
+ * dest if specified, a new mat4 otherwise
+ */
+mat4.ortho = function(left, right, bottom, top, near, far, dest) {
+ if(!dest) { dest = mat4.create(); }
+ var rl = (right - left);
+ var tb = (top - bottom);
+ var fn = (far - near);
+ dest[0] = 2 / rl;
+ dest[1] = 0;
+ dest[2] = 0;
+ dest[3] = 0;
+ dest[4] = 0;
+ dest[5] = 2 / tb;
+ dest[6] = 0;
+ dest[7] = 0;
+ dest[8] = 0;
+ dest[9] = 0;
+ dest[10] = -2 / fn;
+ dest[11] = 0;
+ dest[12] = -(left + right) / rl;
+ dest[13] = -(top + bottom) / tb;
+ dest[14] = -(far + near) / fn;
+ dest[15] = 1;
+ return dest;
+};
+
+/*
+ * mat4.lookAt
+ * Generates a look-at matrix with the given eye position, focal point, and up axis
+ *
+ * Params:
+ * eye - vec3, position of the viewer
+ * center - vec3, point the viewer is looking at
+ * up - vec3 pointing "up"
+ * dest - Optional, mat4 frustum matrix will be written into
+ *
+ * Returns:
+ * dest if specified, a new mat4 otherwise
+ */
+mat4.lookAt = function(eye, center, up, dest) {
+ if(!dest) { dest = mat4.create(); }
+
+ var eyex = eye[0],
+ eyey = eye[1],
+ eyez = eye[2],
+ upx = up[0],
+ upy = up[1],
+ upz = up[2],
+ centerx = center[0],
+ centery = center[1],
+ centerz = center[2];
+
+ // Degenerate case: eye and center coincide, no view direction exists
+ if (eyex == centerx && eyey == centery && eyez == centerz) {
+ return mat4.identity(dest);
+ }
+
+ var z0,z1,z2,x0,x1,x2,y0,y1,y2,len;
+
+ //vec3.direction(eye, center, z)  -- z axis points from center to eye
+ z0 = eyex - center[0];
+ z1 = eyey - center[1];
+ z2 = eyez - center[2];
+
+ // normalize (no check needed for 0 because of early return)
+ len = 1/Math.sqrt(z0*z0 + z1*z1 + z2*z2);
+ z0 *= len;
+ z1 *= len;
+ z2 *= len;
+
+ //vec3.normalize(vec3.cross(up, z, x))  -- x axis; zeroed if up || z
+ x0 = upy*z2 - upz*z1;
+ x1 = upz*z0 - upx*z2;
+ x2 = upx*z1 - upy*z0;
+ len = Math.sqrt(x0*x0 + x1*x1 + x2*x2);
+ if (!len) {
+ x0 = 0;
+ x1 = 0;
+ x2 = 0;
+ } else {
+ len = 1/len;
+ x0 *= len;
+ x1 *= len;
+ x2 *= len;
+ };
+
+ //vec3.normalize(vec3.cross(z, x, y))  -- y axis completes the basis
+ y0 = z1*x2 - z2*x1;
+ y1 = z2*x0 - z0*x2;
+ y2 = z0*x1 - z1*x0;
+
+ len = Math.sqrt(y0*y0 + y1*y1 + y2*y2);
+ if (!len) {
+ y0 = 0;
+ y1 = 0;
+ y2 = 0;
+ } else {
+ len = 1/len;
+ y0 *= len;
+ y1 *= len;
+ y2 *= len;
+ }
+
+ // Rotation part: rows are the camera basis vectors; translation part
+ // is the eye position projected onto each basis vector, negated.
+ dest[0] = x0;
+ dest[1] = y0;
+ dest[2] = z0;
+ dest[3] = 0;
+ dest[4] = x1;
+ dest[5] = y1;
+ dest[6] = z1;
+ dest[7] = 0;
+ dest[8] = x2;
+ dest[9] = y2;
+ dest[10] = z2;
+ dest[11] = 0;
+ dest[12] = -(x0*eyex + x1*eyey + x2*eyez);
+ dest[13] = -(y0*eyex + y1*eyey + y2*eyez);
+ dest[14] = -(z0*eyex + z1*eyey + z2*eyez);
+ dest[15] = 1;
+
+ return dest;
+};
+
+/*
+ * mat4.str
+ * Returns a string representation of a mat4
+ *
+ * Params:
+ * mat - mat4 to represent as a string
+ *
+ * Returns:
+ * string representation of mat
+ */
+mat4.str = function(mat) {
+ return '[' + mat[0] + ', ' + mat[1] + ', ' + mat[2] + ', ' + mat[3] +
+ ', '+ mat[4] + ', ' + mat[5] + ', ' + mat[6] + ', ' + mat[7] +
+ ', '+ mat[8] + ', ' + mat[9] + ', ' + mat[10] + ', ' + mat[11] +
+ ', '+ mat[12] + ', ' + mat[13] + ', ' + mat[14] + ', ' + mat[15] + ']';
+};
+
+/*
+ * quat4 - Quaternions
+ * Namespace object; all quaternion helpers below are attached to it.
+ */
+quat4 = {};
+
+/*
+ * quat4.create
+ * Creates a new instance of a quat4 using the default array type
+ * Any javascript array containing at least 4 numeric elements can serve as a quat4
+ *
+ * Params:
+ * quat - Optional, quat4 containing values to initialize with
+ *
+ * Returns:
+ * New quat4
+ */
+quat4.create = function(quat) {
+ var dest = new glMatrixArrayType(4);
+
+ if(quat) {
+ dest[0] = quat[0];
+ dest[1] = quat[1];
+ dest[2] = quat[2];
+ dest[3] = quat[3];
+ }
+
+ return dest;
+};
+
+/*
+ * quat4.set
+ * Copies the values of one quat4 to another
+ *
+ * Params:
+ * quat - quat4 containing values to copy
+ * dest - quat4 receiving copied values
+ *
+ * Returns:
+ * dest
+ */
+quat4.set = function(quat, dest) {
+ dest[0] = quat[0];
+ dest[1] = quat[1];
+ dest[2] = quat[2];
+ dest[3] = quat[3];
+
+ return dest;
+};
+
+/*
+ * quat4.calculateW
+ * Calculates the W component of a quat4 from the X, Y, and Z components.
+ * Assumes that quaternion is 1 unit in length.
+ * Any existing W component will be ignored.
+ *
+ * Params:
+ * quat - quat4 to calculate W component of
+ * dest - Optional, quat4 receiving calculated values. If not specified result is written to quat
+ *
+ * Returns:
+ * dest if specified, quat otherwise
+ */
+quat4.calculateW = function(quat, dest) {
+ var x = quat[0], y = quat[1], z = quat[2];
+
+ if(!dest || quat == dest) {
+ quat[3] = -Math.sqrt(Math.abs(1.0 - x*x - y*y - z*z));
+ return quat;
+ }
+ dest[0] = x;
+ dest[1] = y;
+ dest[2] = z;
+ dest[3] = -Math.sqrt(Math.abs(1.0 - x*x - y*y - z*z));
+ return dest;
+}
+
+/*
+ * quat4.inverse
+ * Calculates the inverse of a quat4
+ *
+ * Params:
+ * quat - quat4 to calculate inverse of
+ * dest - Optional, quat4 receiving inverse values. If not specified result is written to quat
+ *
+ * Returns:
+ * dest if specified, quat otherwise
+ */
+quat4.inverse = function(quat, dest) {
+ if(!dest || quat == dest) {
+ quat[0] *= 1;
+ quat[1] *= 1;
+ quat[2] *= 1;
+ return quat;
+ }
+ dest[0] = -quat[0];
+ dest[1] = -quat[1];
+ dest[2] = -quat[2];
+ dest[3] = quat[3];
+ return dest;
+}
+
+/*
+ * quat4.length
+ * Calculates the length of a quat4
+ *
+ * Params:
+ * quat - quat4 to calculate length of
+ *
+ * Returns:
+ * Length of quat
+ */
+quat4.length = function(quat) {
+ var x = quat[0], y = quat[1], z = quat[2], w = quat[3];
+ return Math.sqrt(x*x + y*y + z*z + w*w);
+}
+
+/*
+ * quat4.normalize
+ * Generates a unit quaternion of the same direction as the provided quat4
+ * If quaternion length is 0, returns [0, 0, 0, 0]
+ *
+ * Params:
+ * quat - quat4 to normalize
+ * dest - Optional, quat4 receiving operation result. If not specified result is written to quat
+ *
+ * Returns:
+ * dest if specified, quat otherwise
+ */
+quat4.normalize = function(quat, dest) {
+ if(!dest) { dest = quat; }
+
+ var x = quat[0], y = quat[1], z = quat[2], w = quat[3];
+ var len = Math.sqrt(x*x + y*y + z*z + w*w);
+ if(len == 0) {
+ dest[0] = 0;
+ dest[1] = 0;
+ dest[2] = 0;
+ dest[3] = 0;
+ return dest;
+ }
+ len = 1/len;
+ dest[0] = x * len;
+ dest[1] = y * len;
+ dest[2] = z * len;
+ dest[3] = w * len;
+
+ return dest;
+}
+
+/*
+ * quat4.multiply
+ * Performs a quaternion multiplication
+ *
+ * Params:
+ * quat - quat4, first operand
+ * quat2 - quat4, second operand
+ * dest - Optional, quat4 receiving operation result. If not specified result is written to quat
+ *
+ * Returns:
+ * dest if specified, quat otherwise
+ */
+quat4.multiply = function(quat, quat2, dest) {
+ if(!dest) { dest = quat; }
+
+ var qax = quat[0], qay = quat[1], qaz = quat[2], qaw = quat[3];
+ var qbx = quat2[0], qby = quat2[1], qbz = quat2[2], qbw = quat2[3];
+
+ dest[0] = qax*qbw + qaw*qbx + qay*qbz - qaz*qby;
+ dest[1] = qay*qbw + qaw*qby + qaz*qbx - qax*qbz;
+ dest[2] = qaz*qbw + qaw*qbz + qax*qby - qay*qbx;
+ dest[3] = qaw*qbw - qax*qbx - qay*qby - qaz*qbz;
+
+ return dest;
+}
+
+/*
+ * quat4.multiplyVec3
+ * Transforms (rotates) a vec3 with the given quaternion
+ *
+ * Params:
+ * quat - quat4 to transform the vector with
+ * vec - vec3 to transform
+ * dest - Optional, vec3 receiving operation result. If not specified result is written to vec
+ *
+ * Returns:
+ * dest if specified, vec otherwise
+ */
+quat4.multiplyVec3 = function(quat, vec, dest) {
+ if(!dest) { dest = vec; }
+
+ var x = vec[0], y = vec[1], z = vec[2];
+ var qx = quat[0], qy = quat[1], qz = quat[2], qw = quat[3];
+
+ // calculate quat * vec (vec treated as a quaternion with w = 0)
+ var ix = qw*x + qy*z - qz*y;
+ var iy = qw*y + qz*x - qx*z;
+ var iz = qw*z + qx*y - qy*x;
+ var iw = -qx*x - qy*y - qz*z;
+
+ // calculate result * inverse quat (the conjugate [-qx,-qy,-qz,qw];
+ // equals the inverse for a unit quaternion)
+ dest[0] = ix*qw + iw*-qx + iy*-qz - iz*-qy;
+ dest[1] = iy*qw + iw*-qy + iz*-qx - ix*-qz;
+ dest[2] = iz*qw + iw*-qz + ix*-qy - iy*-qx;
+
+ return dest;
+}
+
+/*
+ * quat4.toMat3
+ * Calculates a 3x3 matrix from the given quat4
+ *
+ * Params:
+ * quat - quat4 to create matrix from
+ * dest - Optional, mat3 receiving operation result
+ *
+ * Returns:
+ * dest if specified, a new mat3 otherwise
+ */
+quat4.toMat3 = function(quat, dest) {
+ if(!dest) { dest = mat3.create(); }
+
+ var x = quat[0], y = quat[1], z = quat[2], w = quat[3];
+
+ var x2 = x + x;
+ var y2 = y + y;
+ var z2 = z + z;
+
+ var xx = x*x2;
+ var xy = x*y2;
+ var xz = x*z2;
+
+ var yy = y*y2;
+ var yz = y*z2;
+ var zz = z*z2;
+
+ var wx = w*x2;
+ var wy = w*y2;
+ var wz = w*z2;
+
+ dest[0] = 1 - (yy + zz);
+ dest[1] = xy - wz;
+ dest[2] = xz + wy;
+
+ dest[3] = xy + wz;
+ dest[4] = 1 - (xx + zz);
+ dest[5] = yz - wx;
+
+ dest[6] = xz - wy;
+ dest[7] = yz + wx;
+ dest[8] = 1 - (xx + yy);
+
+ return dest;
+}
+
+/*
+ * quat4.toMat4
+ * Calculates a 4x4 matrix from the given quat4
+ *
+ * Params:
+ * quat - quat4 to create matrix from
+ * dest - Optional, mat4 receiving operation result
+ *
+ * Returns:
+ * dest if specified, a new mat4 otherwise
+ */
+quat4.toMat4 = function(quat, dest) {
+ if(!dest) { dest = mat4.create(); }
+
+ var x = quat[0], y = quat[1], z = quat[2], w = quat[3];
+
+ var x2 = x + x;
+ var y2 = y + y;
+ var z2 = z + z;
+
+ var xx = x*x2;
+ var xy = x*y2;
+ var xz = x*z2;
+
+ var yy = y*y2;
+ var yz = y*z2;
+ var zz = z*z2;
+
+ var wx = w*x2;
+ var wy = w*y2;
+ var wz = w*z2;
+
+ dest[0] = 1 - (yy + zz);
+ dest[1] = xy - wz;
+ dest[2] = xz + wy;
+ dest[3] = 0;
+
+ dest[4] = xy + wz;
+ dest[5] = 1 - (xx + zz);
+ dest[6] = yz - wx;
+ dest[7] = 0;
+
+ dest[8] = xz - wy;
+ dest[9] = yz + wx;
+ dest[10] = 1 - (xx + yy);
+ dest[11] = 0;
+
+ dest[12] = 0;
+ dest[13] = 0;
+ dest[14] = 0;
+ dest[15] = 1;
+
+ return dest;
+}
+
+/*
+ * quat4.slerp
+ * Performs a spherical linear interpolation between two quat4
+ *
+ * Params:
+ * quat - quat4, first quaternion
+ * quat2 - quat4, second quaternion
+ * lerp - interpolation amount between the two inputs
+ * dest - Optional, quat4 receiving operation result. If not specified result is written to quat
+ *
+ * Returns:
+ * dest if specified, quat otherwise
+ */
+quat4.slerp = function(quat, quat2, lerp, dest) {
+ if(!dest) { dest = quat; }
+
+ var eps_lerp = lerp;
+
+ var dot = quat[0]*quat2[0] + quat[1]*quat2[1] + quat[2]*quat2[2] + quat[3]*quat2[3];
+ if (dot < 0.0) {
+ eps_lerp = -1.0 * lerp;
+ }
+
+ dest[0] = 1.0 - lerp * quat[0] + eps_lerp * quat2[0];
+ dest[1] = 1.0 - lerp * quat[1] + eps_lerp * quat2[1];
+ dest[2] = 1.0 - lerp * quat[2] + eps_lerp * quat2[2];
+ dest[3] = 1.0 - lerp * quat[3] + eps_lerp * quat2[3];
+
+ return dest;
+}
+
+/*
+ * quat4.str
+ * Returns a string representation of a quaternion
+ *
+ * Params:
+ * quat - quat4 to represent as a string
+ *
+ * Returns:
+ * string representation of quat
+ */
+quat4.str = function(quat) {
+ return '[' + quat[0] + ', ' + quat[1] + ', ' + quat[2] + ', ' + quat[3] + ']';
+}
+
--- /dev/null
+/*global octtree, vec3, Model, Channel, Exchange, Queue, Binding, Newton, Spring, window */
+/*jslint browser: true, devel: true */
+
+// Spatial index used to position the rendered objects.
+var tree = octtree.create(0, 10000, 0, 1000000, -0.5, 2);
+var updatePeriod = 1000; // 1 second
+
+// Shared mutable state for polling, mouse interaction and rendering.
+var configuration, detailsInFlight, mouseDragOffsetVec, hoveringOver, dragging, selectedVhost, ctx, canvas, tick, mouseMove, setCanvasMousemove, requestAnimFrame;
+
+// XHR for the periodic topology poll (see update/updateReady).
+var client = new XMLHttpRequest();
+
+// XHR for fetching details of the object under the mouse.
+var detailsClient = new XMLHttpRequest();
+
+var model = new Model();
+var mousePos = vec3.create();
+var mouseDown = false;
+// Fill colours: object under the mouse, and de-emphasised objects.
+var highlight = "#ffffc0";
+var faded = "#c0c0c0";
+
+// Velocity clamping bounds (see clamp()): components below eta are
+// zeroed, components above max_v are capped.
+var eta = 0.1;
+var max_v = 100000;
+
+// Physics engines driving the layout animation.
+var newton = new Newton();
+var spring = new Spring();
+spring.octtreeRadius = 500;
+spring.equilibriumLength = 50;
+spring.dampingFactor = 0.01;
+spring.pull = false;
+spring.push = true;
+
+var rendering = true;
+var lastTime = 0;
+
+var fontSize = 12;
+Exchange.prototype.fontSize = fontSize;
+Queue.prototype.fontSize = fontSize;
+Binding.prototype.fontSize = fontSize;
+// Canvas geometry caches, maintained by resizeCanvas and canvasScroll.
+var canvasLeft = 0;
+var canvasTop = 0;
+var scrollLeft = 0;
+var scrollTop = 0;
+var clientWidth = 0;
+var clientHeight = 0;
+
+
+/******************************************************************************
+ * Fetching details from the broker *
+ ******************************************************************************/
+
+function update() {
+ if (undefined === selectedVhost) {
+ client.open("GET", "../api/all");
+ } else {
+ client.open("GET", "../api/all/" + encodeURIComponent(selectedVhost));
+ }
+ client.send();
+}
+
+function updateReady() {
+ if (client.readyState === 4 && client.status === 200) {
+ setTimeout(update, updatePeriod);
+ configuration = JSON.parse(client.responseText);
+ model.rebuild(tree, configuration);
+ if (!rendering) {
+ lastTime = 0;
+ requestAnimFrame(tick);
+ }
+ }
+}
+client.onreadystatechange = updateReady;
+
+// Issues an async request for the details of the model object bound as
+// `this` (a Channel, Exchange or Queue — see the prototype wiring
+// below), aborting any details request already in flight. The response
+// is handled by detailsUpdateReady.
+function getDetails() {
+ detailsInFlight = this;
+ detailsClient.abort();
+ detailsClient.open("GET", "../api" + this.url());
+ detailsClient.send();
+}
+
+Channel.prototype.getDetails = getDetails;
+Exchange.prototype.getDetails = getDetails;
+Queue.prototype.getDetails = getDetails;
+
+function repeatGetDetails() {
+ if (undefined !== hoveringOver) {
+ hoveringOver.getDetails();
+ }
+}
+
+function flattenAtts(a) {
+ if ("string" === typeof a) {
+ return a;
+ } else {
+ var str, e;
+ str = "{";
+ for (e in a) {
+ if (a.hasOwnProperty(e)) {
+ str += "" + e + ": " + flattenAtts(a[e]) + ", ";
+ }
+ }
+ return str.replace(/(, )?$/, "}");
+ }
+}
+
+// Renders `elem`'s attributes into the #details panel as a table, or
+// clears the panel (and any in-flight request marker) when elem is
+// undefined. Attributes are flowed into as many columns as are needed
+// to fit within the panel's visible height.
+function setDetails(elem) {
+ var details, strAtts, visibleRows, columns, column, str, attName, i;
+ details = document.getElementById("details");
+ if (undefined === elem) {
+ details.innerHTML = "";
+ detailsInFlight = undefined;
+ } else {
+ strAtts = elem.stringAttributes();
+ visibleRows = Math.floor(details.clientHeight / 16); // line-height + padding;
+ columns = Math.ceil(strAtts.attributeOrder.length / visibleRows);
+ column = 0;
+ str = "<table><tr>";
+ for (i in strAtts.attributeOrder) {
+ column += 1;
+ attName = strAtts.attributeOrder[i];
+ // Missing attributes still get a header cell, with an empty value
+ if (undefined !== strAtts[attName]) {
+ str += "<th>" + attName + "</th><td>" + flattenAtts(strAtts[attName]) + "</td>";
+ } else {
+ str += "<th>" + attName + "</th><td></td>";
+ }
+ if (column === columns) {
+ column = 0;
+ str += "</tr><tr>";
+ }
+ }
+ str += "</tr></table>";
+ document.getElementById("details").innerHTML = str;
+ }
+}
+
+// Handles completion of a details request. The response is only applied
+// when the mouse is still hovering over the same object the request was
+// issued for; otherwise it is silently dropped.
+function detailsUpdateReady() {
+ if (detailsClient.readyState === 4 &&
+ detailsClient.status === 200 &&
+ undefined !== hoveringOver &&
+ undefined !== detailsInFlight &&
+ hoveringOver.object_type === detailsInFlight.object_type &&
+ hoveringOver.name === detailsInFlight.name) {
+ try {
+ var details = JSON.parse(detailsClient.responseText);
+ if (undefined !== details.name &&
+ details.name === detailsInFlight.name) {
+ model[detailsInFlight.object_type][detailsInFlight.name].details = details;
+ setDetails(model[detailsInFlight.object_type][detailsInFlight.name]);
+ // Keep the details fresh while the hover persists
+ setTimeout(repeatGetDetails, updatePeriod);
+ }
+ } catch (err) {
+ // We probably cancelled it as we were receiving data.
+ model[detailsInFlight.object_type][detailsInFlight.name].details = undefined;
+ window.console.info("" + err);
+ }
+ }
+}
+detailsClient.onreadystatechange = detailsUpdateReady;
+
+
+/******************************************************************************
+ * Rendering / animation                                                      *
+ ******************************************************************************/
+
+// Cross-browser requestAnimationFrame shim: picks the first available
+// native implementation (vendor-prefixed or not), falling back to a
+// ~60fps setTimeout-based stand-in (which ignores the element argument).
+// NOTE(review): at the top level `this` is presumed to be the global
+// (window) object — confirm if this file is ever loaded as a module.
+requestAnimFrame = (function () {
+ return (this.requestAnimationFrame ||
+ this.webkitRequestAnimationFrame ||
+ this.mozRequestAnimationFrame ||
+ this.oRequestAnimationFrame ||
+ this.msRequestAnimationFrame ||
+ function (/* function FrameRequestCallback */ callback, /* DOMElement Element */ element) {
+ setTimeout(callback, 1000 / 60);
+ });
+})();
+
+function recordMousePos(e) {
+ var x, y;
+ x = e.pageX;
+ y = e.pageY;
+ x = (x - canvasLeft) + scrollLeft;
+ y = (y - canvasTop) + scrollTop;
+ mousePos[octtree.x] = x;
+ mousePos[octtree.y] = y;
+}
+
+// Throttled mousemove handler: records the position, then detaches
+// itself for 10ms so a fast-moving mouse cannot flood us with events.
+mouseMove = function (e) {
+ recordMousePos(e);
+ canvas.onmousemove = undefined;
+ setTimeout(setCanvasMousemove, 10);
+};
+
+// Re-attaches the throttled mousemove handler (only while rendering).
+setCanvasMousemove = function () {
+ if (rendering) {
+ canvas.onmousemove = mouseMove;
+ }
+};
+
// Resize the canvas to fill its parent and refresh all cached geometry:
// per-class layout constants, the visible viewport size
// (clientWidth/clientHeight) and the canvas's absolute page offset
// (canvasLeft/canvasTop). Re-renders the details pane if one is showing.
function resizeCanvas() {
    var e;
    if (undefined !== canvas) {
        canvas.width = canvas.parentNode.offsetWidth;
        canvas.height = canvas.parentNode.offsetHeight;
        // Let each shape class recompute its canvas-relative constants.
        Channel.prototype.canvasResized(canvas);
        Exchange.prototype.canvasResized(canvas);
        Queue.prototype.canvasResized(canvas);
        clientWidth = canvas.width;
        clientHeight = canvas.height;
        // The visible area is bounded by the smallest ancestor client box.
        e = canvas.parentNode;
        while (undefined !== e && null !== e) {
            if (undefined !== e.clientHeight && undefined !== e.clientWidth &&
                e.clientHeight > 0 && e.clientWidth > 0) {
                clientHeight = Math.min(clientHeight, e.clientHeight);
                clientWidth = Math.min(clientWidth, e.clientWidth);
            }
            e = e.parentNode;
        }
        // Absolute page position: accumulate offsetLeft/offsetTop over
        // every ancestor that has them.
        canvasLeft = 0;
        canvasTop = 0;
        e = canvas.parentNode;
        while (undefined !== e && null !== e) {
            if (undefined !== e.offsetLeft && undefined !== e.offsetTop) {
                canvasLeft += e.offsetLeft;
                canvasTop += e.offsetTop;
            }
            e = e.parentNode;
        }
        // Keep the details table in step with the new layout.
        if (undefined !== hoveringOver && undefined !== hoveringOver.details) {
            setDetails(hoveringOver);
        }
    }
}
+
// Recompute the total scroll offset of the canvas by summing the
// scrollLeft/scrollTop of every ancestor that has them.
function canvasScroll() {
    scrollLeft = 0;
    scrollTop = 0;
    for (var e = canvas.parentNode; undefined !== e && null !== e; e = e.parentNode) {
        if (undefined !== e.scrollLeft && undefined !== e.scrollTop) {
            scrollLeft += e.scrollLeft;
            scrollTop += e.scrollTop;
        }
    }
}
+
// Clamp an element's velocity and next position to sane bounds:
//  - velocity components below the global threshold `eta` are zeroed
//    (kills residual jitter),
//  - components above `max_v` are capped; abs/value is +/-1, so the
//    multiplication preserves the sign while limiting magnitude,
//  - positions are kept on-canvas on the left/top/right; near the bottom
//    the canvas is grown by 100px instead of clamping.
function clamp(elem) {
    var x_vel_abs, y_vel_abs;
    x_vel_abs = Math.abs(elem.velocity[octtree.x]);
    y_vel_abs = Math.abs(elem.velocity[octtree.y]);
    if (0 !== x_vel_abs && eta > x_vel_abs) {
        elem.velocity[octtree.x] = 0;
    } else if (max_v < x_vel_abs) {
        elem.velocity[octtree.x] = max_v * (x_vel_abs / elem.velocity[octtree.x]);
    }
    if (0 !== y_vel_abs && eta > y_vel_abs) {
        elem.velocity[octtree.y] = 0;
    } else if (max_v < y_vel_abs) {
        elem.velocity[octtree.y] = max_v * (y_vel_abs / elem.velocity[octtree.y]);
    }
    if (elem.next_pos[octtree.x] < 1) {
        elem.next_pos[octtree.x] = 1;
        elem.velocity[octtree.x] = 0;
    }
    if (elem.next_pos[octtree.y] < 1) {
        elem.next_pos[octtree.y] = 1;
        elem.velocity[octtree.y] = 0;
    }
    if (elem.next_pos[octtree.x] >= canvas.width) {
        elem.next_pos[octtree.x] = canvas.width - 1;
    }
    // NOTE(review): unlike the other edges, the bottom edge grows the
    // canvas rather than clamping the element — presumably deliberate so
    // the layout can expand downwards; confirm.
    if (elem.next_pos[octtree.y] >= (canvas.height - 100)) {
        canvas.height += 100;
    }
}
+
// One-time canvas setup: size it, wire up the mouse handlers and grab
// the 2D rendering context.
function initCanvas() {
    resizeCanvas();
    setCanvasMousemove();
    canvas.onmousedown = function (e) {
        recordMousePos(e);
        // Shift-click hides the element under the cursor.
        if (e.shiftKey && undefined !== hoveringOver) {
            model.disable(hoveringOver, tree);
            mouseDown = false;
            hoveringOver = undefined;
            dragging = undefined;
        } else {
            // Plain click starts a potential drag; the offset is captured
            // lazily by the draggable preStroke on the next render.
            mouseDown = true;
            mouseDragOffsetVec = undefined;
        }
    };
    // Double-click navigates to the hovered element's management page.
    canvas.ondblclick = function (e) {
        recordMousePos(e);
        if (undefined !== hoveringOver) {
            hoveringOver.navigateTo();
        }
    };
    canvas.onmouseup = function (e) {
        recordMousePos(e);
        mouseDown = false;
        mouseDragOffsetVec = undefined;
        dragging = undefined;
    };
    try {
        ctx = canvas.getContext("2d");
    } catch (e) {
        // Fall through: ctx stays unset and is reported below.
    }
    if (!ctx) {
        alert("Could not initialise 2D canvas. Change browser?");
    }
}
+
// Wipe the visible viewport, set the default drawing state, then let the
// model paint everything.
function drawScene() {
    ctx.clearRect(scrollLeft, scrollTop, clientWidth, clientHeight);
    ctx.font = fontSize + "px sans-serif";
    ctx.lineWidth = 1.0;
    ctx.lineCap = "round";
    ctx.lineJoin = "round";
    ctx.strokeStyle = "black";
    model.render(ctx);
}
+
// Advance the physics simulation one frame. The original repeated the
// identical per-element update loop verbatim for exchanges, channels and
// queues; that body is factored into animateCollection below.
function animate() {
    var timeNow = new Date().getTime();
    if (lastTime !== 0) {
        // Elapsed time in units of 10s — presumably tuned to the physics
        // constants (mass/spring/damping) used elsewhere.
        var elapsed = (timeNow - lastTime) / 10000;
        animateCollection(model.exchange, elapsed);
        animateCollection(model.channel, elapsed);
        animateCollection(model.queue, elapsed);
        tree.update();
    }
    lastTime = timeNow;
}

// Apply animation, Newtonian integration, spring forces and clamping to
// every enabled, non-dragged element of one model collection.
function animateCollection(collection, elapsed) {
    var i, e;
    for (i in collection) {
        e = collection[i];
        if ((undefined === dragging || dragging !== e) && ! e.disabled) {
            e.animate(elapsed);
            newton.update(elapsed, e);
            spring.update(elapsed, tree, e);
            clamp(e);
        }
    }
}
+
// Per-frame callback: paint, step the physics, and re-arm the next frame
// while rendering remains enabled.
tick = function () {
    drawScene();
    animate();
    if (!rendering) {
        return;
    }
    requestAnimFrame(tick);
};
+
// Entry point: grab the canvas element, wire it up, fetch the initial
// topology (update()) and start the render loop.
function visualisationStart() {
    canvas = document.getElementById("topology_canvas");
    initCanvas();
    update();
    requestAnimFrame(tick);
}
+
// Used to start/stop doing work when we gain/lose focus
function enableRendering() {
    // Reset the animation clock so the first frame's elapsed time is 0
    // rather than the whole span spent unfocused.
    lastTime = 0;
    rendering = true;
    setCanvasMousemove();
    requestAnimFrame(tick);
}

function disableRendering() {
    // Detach the mousemove handler too; tick stops re-arming itself once
    // rendering is false.
    canvas.onmousemove = undefined;
    rendering = false;
}
+
+
+/******************************************************************************
+ * Model callbacks for rendering *
+ ******************************************************************************/
+
// Shared preStroke for channels, exchanges and queues: implements
// hover-highlighting and mouse dragging. Called with the shape's outline
// as the current path; fills it (highlight colour when hovered/dragged,
// white otherwise), maintains the hoveringOver/dragging globals, and
// finally strokes the outline — faded when some other element is hovered
// and this one is not highlighted as related to it.
function draggable(model, ctx) {
    var inPath = ctx.isPointInPath(mousePos[octtree.x], mousePos[octtree.y]);
    if ((inPath && undefined === hoveringOver) || dragging === this || hoveringOver === this) {
        ctx.fillStyle = highlight;
        ctx.fill();

        // Newly hovered: kick off the async details fetch.
        if (hoveringOver !== this) {
            this.getDetails();
        }

        hoveringOver = this;
        if (mouseDown) {
            dragging = this;
            // Capture the grab offset once so the shape doesn't jump so
            // that its centre sits under the cursor.
            if (undefined === mouseDragOffsetVec) {
                mouseDragOffsetVec = vec3.create(this.pos);
                vec3.subtract(mouseDragOffsetVec, mousePos);
            }
            // Pin next_pos to the cursor (plus offset) and kill velocity.
            vec3.set(mousePos, this.next_pos);
            vec3.add(this.next_pos, mouseDragOffsetVec);
            this.velocity = vec3.create();
            clamp(this);
        } else if (!inPath) {
            // Mouse has left the shape: clear all hover/drag state.
            if (undefined !== hoveringOver) {
                hoveringOver.details = undefined;
            }
            if (detailsInFlight === this) {
                setDetails(undefined);
            }
            dragging = undefined;
            hoveringOver = undefined;
            mouseDragOffsetVec = undefined;
        }
    } else {
        ctx.fillStyle = "white";
        ctx.fill();
    }
    if (undefined !== hoveringOver && hoveringOver !== this && ! model.isHighlighted(this)) {
        ctx.strokeStyle = faded;
    }
    ctx.stroke();
}

// All three shape types share the same hover/drag behaviour.
Channel.prototype.preStroke = draggable;
Exchange.prototype.preStroke = draggable;
Queue.prototype.preStroke = draggable;
+
// Stroke a binding edge between source and destination. Colour-codes the
// edge when an endpoint is hovered (blue when the source is hovered,
// green for the destination, faded otherwise) and, when appropriate,
// overlays the comma-joined binding keys at the curve's midpoint.
Binding.prototype.preStroke = function (source, destination, model, ctx) {
    var drawBindingKeys, xMid, yMid, bindingKey, k, dim;
    drawBindingKeys = false;
    if (undefined === hoveringOver) {
        // Nothing hovered: show keys only when the cursor is on the edge.
        drawBindingKeys = ctx.isPointInPath(mousePos[octtree.x], mousePos[octtree.y]);
    } else {
        if (hoveringOver === source) {
            ctx.strokeStyle = "#0000a0";
            drawBindingKeys = true;
        } else if (hoveringOver === destination) {
            ctx.strokeStyle = "#00a000";
            drawBindingKeys = true;
        } else {
            ctx.strokeStyle = faded;
        }
    }
    ctx.stroke();

    if (drawBindingKeys) {
        xMid = (source.xMax + destination.xMin) / 2;
        // Self-binding: place the label above the loop.
        // NOTE(review): bare `fontSize` here resolves to the global while
        // the fillRect below uses this.fontSize — confirm both intended.
        yMid = source === destination ? source.pos[octtree.y] - this.loopOffset + fontSize
                : (source.pos[octtree.y] + destination.pos[octtree.y]) / 2;
        // Join the keys with ", " then drop the leading separator.
        bindingKey = "";
        for (k in this.keys) {
            bindingKey += ", " + k;
        }
        bindingKey = bindingKey.slice(2);
        dim = ctx.measureText(bindingKey);

        ctx.textBaseline = "middle";
        ctx.textAlign = "center";
        // Translucent white backing keeps the label readable over edges.
        ctx.fillStyle = "rgba(255, 255, 255, 0.67)";
        ctx.fillRect(xMid - (dim.width / 2), yMid - (this.fontSize / 2),
                     dim.width, this.fontSize);
        ctx.fillStyle = ctx.strokeStyle;
        ctx.fillText(bindingKey, xMid, yMid);
    }
};
+
// True when the given bounding box lies entirely outside the visible
// viewport (scrollLeft/scrollTop .. +clientWidth/clientHeight), meaning
// the element need not be drawn.
function frustumCull(xMin, yMin, width, height) {
    var xMax = xMin + width,
        yMax = yMin + height,
        viewRight = scrollLeft + clientWidth,
        viewBottom = scrollTop + clientHeight;
    return (yMin > viewBottom) || (yMax < scrollTop) ||
        (xMin > viewRight) || (xMax < scrollLeft);
}
Model.prototype.cull = frustumCull;
+
+
+/******************************************************************************
+ * Showing / hiding / removing resources *
+ ******************************************************************************/
+
// Insert optionElem into selectElem keeping options sorted by text.
// We look for the first existing option that sorts after the new one and
// insert before it; when none exists, `successor` stays undefined and
// select.add() appends at the end. Returns the (live) options list.
function selectInsertAlphabetical(selectElem, optionElem) {
    var options = selectElem.options;
    var successor;
    for (var idx = 0; idx < options.length && successor === undefined; idx += 1) {
        if (optionElem.text < options[idx].text) {
            successor = options[idx];
        }
    }
    selectElem.add(optionElem, successor);
    return selectElem.options;
}
+
// Re-enable every option currently selected in the given "hidden"
// <select>, removing it from the list. No-op while this resource type
// is globally disabled.
function show(hiddenElemId, model, type) {
    if (!model.rendering[type].enabled) {
        return;
    }
    var hidden = document.getElementById(hiddenElemId);
    var i = 0;
    while (i < hidden.options.length) {
        var e = hidden.options[i];
        if (e.selected) {
            model.enable(model[type][e.value], tree);
            // Removal shifts later options down, so do not advance i.
            hidden.remove(i);
        } else {
            i += 1;
        }
    }
}
+
// Button handlers: restore the selected entries of each "hidden" list.
function showChannels() {
    show("hidden_channels", model, 'channel');
}

function showExchanges() {
    show("hidden_exchanges", model, 'exchange');
}

function showQueues() {
    show("hidden_queues", model, 'queue');
}
+
// Called when the resource is enabled from being hidden
// Builds an enable implementation for the given type: clears the
// auto-re-enable marker, restores the normal remove handler, then chains
// to the wrapped prototype enable.
function enable_fun(type, postFun) {
    return function (model, tree) {
        if (model.rendering[type].enabled) {
            delete model.rendering[type].on_enable[this.name];
        }
        this.remove = Object.getPrototypeOf(this).remove;
        // Assign postFun onto the instance so the wrapped implementation
        // runs with `this` bound to the resource.
        this.postFun = postFun;
        this.postFun(model, tree);
    };
}

// Each wraps the class's previous prototype enable.
Channel.prototype.enable = enable_fun('channel', Channel.prototype.enable);
Exchange.prototype.enable = enable_fun('exchange', Exchange.prototype.enable);
Queue.prototype.enable = enable_fun('queue', Queue.prototype.enable);
+
+
// Called when the item is removed and the item is disabled
// Wraps a remove implementation so that a resource deleted while hidden
// is also dropped from its "hidden" <select> and re-enabled in the model
// before the real removal runs.
function remove_disabled_fun(hiddenElemId, postFun) {
    return function (tree, model) {
        var hidden, i;
        hidden = document.getElementById(hiddenElemId);
        for (i = 0; i < hidden.options.length; i += 1) {
            if (hidden.options[i].value === this.name) {
                hidden.remove(i);
                break;
            }
        }
        model.enable(this, tree);
        // Instance assignment so postFun runs with `this` = the resource.
        this.postFun = postFun;
        this.postFun(tree, model);
    };
}
+
// Builds a disable implementation for the given resource type: adds the
// resource to its "hidden" <select> (a trailing ' *' marks entries that
// were auto-hidden by toggleRendering), swaps in the disabled-remove
// handler, then chains to the wrapped prototype disable.
function disable_fun(hiddenElemId, type, postFun) {
    return function (model) {
        if (detailsInFlight === this) {
            setDetails(undefined);
        }
        var optionElem = document.createElement('option');
        optionElem.text = '"' + this.name + '"';
        if (undefined !== model.rendering[type].on_enable[this.name]) {
            optionElem.text += ' *';
        }
        optionElem.value = this.name;
        selectInsertAlphabetical(document.getElementById(hiddenElemId), optionElem);
        this.remove = remove_disabled_fun(hiddenElemId, this.remove);
        // Instance assignment so postFun runs with `this` = the resource.
        this.postFun = postFun;
        this.postFun(model);
    };
}

Channel.prototype.disable =
    disable_fun("hidden_channels", 'channel', Channel.prototype.disable);
Exchange.prototype.disable =
    disable_fun("hidden_exchanges", 'exchange', Exchange.prototype.disable);
Queue.prototype.disable =
    disable_fun("hidden_queues", 'queue', Queue.prototype.disable);
+
// Called when the resource is deleted / vanishes on the broker
// Wraps a remove implementation: clears any hover/drag/details state
// still pointing at the dead resource, then chains to the wrapped
// prototype remove.
function remove_fun(postFun, type) {
    return function (tree, model) {
        if (undefined !== hoveringOver && this === hoveringOver) {
            hoveringOver = undefined;
            dragging = undefined;
        }
        delete model.rendering[type].on_enable[this.name];
        if (this === detailsInFlight) {
            setDetails(undefined);
        }
        // Instance assignment so postFun runs with `this` = the resource.
        this.postFun = postFun;
        this.postFun(tree, model);
    };
}

Channel.prototype.remove = remove_fun(Channel.prototype.remove, 'channel');
Queue.prototype.remove = remove_fun(Queue.prototype.remove, 'queue');
Exchange.prototype.remove = remove_fun(Exchange.prototype.remove, 'exchange');
+
// Toggle whole-type rendering (e.g. "hide all channels"). On disable,
// every visible resource of the type is hidden and remembered in
// on_enable; on re-enable, only those remembered resources come back —
// individually-hidden ones stay hidden. Always returns true.
function toggleRendering(hiddenElemId, showButtonElemId, type) {
    var hidden, i, e;
    model.rendering[type].enabled = !model.rendering[type].enabled;
    if (model.rendering[type].enabled) {
        hidden = document.getElementById(hiddenElemId);
        for (i = 0; i < hidden.options.length; i += 1) {
            e = hidden.options[i].value;
            if (undefined !== model.rendering[type].on_enable[e]) {
                model.enable(model[type][e], tree);
                hidden.remove(i);
                // Compensate for the removed <option> shifting the rest.
                i -= 1;
            }
        }
        document.getElementById(showButtonElemId).disabled = false;
    } else {
        for (i in model[type]) {
            if (! model[type][i].disabled) {
                model.rendering[type].on_enable[model[type][i].name] = true;
                model.disable(model[type][i], tree);
            }
        }
        document.getElementById(showButtonElemId).disabled = true;
    }
    return true;
}
+
// Show the help overlay.
function displayHelp() {
    var helpElem = document.getElementById('help');
    helpElem.style.display = 'block';
}

// Hide the help overlay.
function hideHelp() {
    var helpElem = document.getElementById('help');
    helpElem.style.display = 'none';
}
+
+
+/******************************************************************************
+ * VHost *
+ ******************************************************************************/
+
// Add a vhost to the selector, keeping it alphabetical. The very first
// vhost to arrive becomes the selected one.
Model.prototype.vhost_add = function (vhost) {
    var optionElem = document.createElement('option');
    optionElem.text = vhost.name;
    optionElem.value = vhost.name;
    var options = selectInsertAlphabetical(document.getElementById('vhosts'),
                                           optionElem);
    if (options.length === 1) {
        selectedVhost = options[0].value;
        options[0].selected = true;
    }
};
+
// Drop a vhost from the selector (first matching entry only).
Model.prototype.vhost_remove = function (vhost) {
    var elem = document.getElementById('vhosts');
    for (var i = 0; i < elem.options.length; i += 1) {
        if (elem.options[i].value === vhost.name) {
            elem.remove(i);
            return;
        }
    }
};
+
// Handler for the vhost <select>: when the selection changes, record the
// new vhost and purge every channel/exchange/queue belonging to the old
// one; the next update() repopulates from the new vhost.
function vhostChanged() {
    var elem, i, types, t, type, j;
    elem = document.getElementById('vhosts');
    for (i = 0; i < elem.options.length; i += 1) {
        if (elem.options[i].selected && selectedVhost !== elem.options[i].value) {
            selectedVhost = elem.options[i].value;
            // BUG FIX: the original wrote `for (e in ['channel', ...])`,
            // but for..in over an array iterates its *indices* ("0","1",
            // "2"), so model[e] was always undefined and nothing was ever
            // removed. Iterate the values instead.
            types = ['channel', 'exchange', 'queue'];
            for (t = 0; t < types.length; t += 1) {
                type = types[t];
                for (j in model[type]) {
                    model[type][j].remove(tree, model);
                }
            }
            break;
        }
    }
}
--- /dev/null
+/*global octtree, vec3, Spring */
+
// Slide elem rightwards in xIncr steps until either its neighbourhood in
// the octtree is empty or the next step would pass xMax. Returns true
// when a free spot was found (neighbourhood empty), false otherwise.
function searchX(elem, tree, xIncr, xMax) {
    var radius = xIncr / 2;
    var occupied = tree.findInRadius(elem.pos, radius, 1).length > 0;
    while (occupied && elem.pos[octtree.x] + xIncr < xMax) {
        elem.pos[octtree.x] += xIncr;
        occupied = tree.findInRadius(elem.pos, radius, 1).length > 0;
    }
    return !occupied;
}
+
// Slide elem downwards in yIncr steps until its neighbourhood in the
// octtree is empty. Unlike searchX there is no lower bound, so this is
// guaranteed to terminate eventually.
function searchY(elem, tree, yIncr) {
    var radius = yIncr / 2;
    while (tree.findInRadius(elem.pos, radius, 1).length > 0) {
        elem.pos[octtree.y] += yIncr;
    }
}
+
// Midpoint (t = 0.5) of a cubic bezier via de Casteljau subdivision:
// average the control polygon edges, then average the averages.
// Returns [x, y].
function bezierMid(startX, startY, ctl1X, ctl1Y, ctl2X, ctl2Y, endX, endY) {
    function avg(a, b) { return (a + b) / 2; }
    var p01X = avg(startX, ctl1X), p01Y = avg(startY, ctl1Y),
        p12X = avg(ctl1X, ctl2X),  p12Y = avg(ctl1Y, ctl2Y),
        p23X = avg(ctl2X, endX),   p23Y = avg(ctl2Y, endY),
        aX = avg(p01X, p12X), aY = avg(p01Y, p12Y),
        bX = avg(p12X, p23X), bY = avg(p12Y, p23Y);
    return [avg(aX, bX), avg(aY, bY)];
}
+
// Deep-copy an object converting every leaf value to a string (for the
// details table). Only own enumerable properties are copied.
// BUG FIX: typeof null === "object", so the original recursed into null
// and produced an empty {} instead of the string "null"; null is now
// treated as a leaf like any other primitive.
function stringifyObject(a) {
    var b, e;
    b = {};
    for (e in a) {
        if (a.hasOwnProperty(e)) {
            if (null !== a[e] && "object" === typeof a[e]) {
                b[e] = stringifyObject(a[e]);
            } else {
                b[e] = "" + a[e];
            }
        }
    }
    return b;
}
+
// Convert an underscore_separated identifier to Title Case, e.g.
// "client_flow_blocked" -> "Client Flow Blocked".
String.prototype.toTitleCase = function () {
    return this.replace(/(^|_)([a-z])/g, function (match, sep, letter) {
        // sep is "" at the start of the string or "_" between words.
        return (sep === "_" ? " " : "") + letter.toUpperCase();
    });
};
+
// Renderer for a consumer link: a bezier from the bottom of a channel to
// just above a queue, labelled with the consumer tag and finished with a
// small arrowhead at the channel end.
var Consumer = {};
Consumer.render = function (channel, queue, ctx, consumerTag) {
    var yMid, xCtl, dim, mid;
    ctx.beginPath();
    yMid = (channel.yMax + queue.pos[octtree.y]) / 2;
    xCtl = queue.pos[octtree.x];
    ctx.moveTo(channel.pos[octtree.x], channel.yMax);
    ctx.bezierCurveTo(channel.pos[octtree.x], yMid,
                      xCtl, queue.pos[octtree.y] - channel.yInit,
                      xCtl, queue.pos[octtree.y] - queue.fontSize);
    ctx.moveTo(channel.pos[octtree.x], channel.yMax);
    ctx.closePath();
    ctx.stroke();

    // Label the curve's midpoint with the consumer tag, on a translucent
    // white backing rectangle for readability.
    dim = ctx.measureText(consumerTag);
    mid = bezierMid(channel.pos[octtree.x], channel.yMax,
                    channel.pos[octtree.x], yMid,
                    xCtl, queue.pos[octtree.y] - channel.yInit,
                    xCtl, queue.pos[octtree.y] - queue.fontSize);
    ctx.textBaseline = "middle";
    ctx.textAlign = "center";
    ctx.fillStyle = "rgba(255, 255, 255, 0.67)";
    ctx.fillRect(mid[0] - (dim.width / 2), mid[1] - (channel.fontSize / 2),
                 dim.width, channel.fontSize);
    ctx.fillStyle = ctx.strokeStyle;
    ctx.fillText(consumerTag, mid[0], mid[1]);

    // Arrowhead (triangle) at the channel end of the curve.
    ctx.beginPath();
    ctx.moveTo(channel.pos[octtree.x], channel.yMax);
    ctx.lineTo(channel.pos[octtree.x] - (channel.fontSize / 2),
               channel.yMax + channel.fontSize);
    ctx.lineTo(channel.pos[octtree.x] + (channel.fontSize / 2),
               channel.yMax + channel.fontSize);
    ctx.closePath();
    ctx.fillStyle = ctx.strokeStyle;
    ctx.fill();
};
+
// Renderer for a publisher link: a bezier from the bottom of a channel
// to just above an exchange, finished with an arrowhead at the exchange
// end (no label, unlike Consumer.render).
var Publisher = {};
Publisher.render = function (channel, exchange, ctx) {
    var yMid, xCtl;
    ctx.beginPath();
    yMid = (channel.yMax + exchange.pos[octtree.y]) / 2;
    xCtl = exchange.pos[octtree.x];
    ctx.moveTo(channel.pos[octtree.x], channel.yMax);
    ctx.bezierCurveTo(channel.pos[octtree.x], yMid,
                      xCtl, exchange.pos[octtree.y] - channel.yInit,
                      xCtl, exchange.pos[octtree.y] - exchange.fontSize);
    ctx.moveTo(channel.pos[octtree.x], channel.yMax);
    ctx.closePath();
    ctx.stroke();

    // Arrowhead (triangle) pointing down at the exchange.
    ctx.beginPath();
    ctx.moveTo(exchange.pos[octtree.x],
               exchange.pos[octtree.y] - exchange.fontSize);
    ctx.lineTo(exchange.pos[octtree.x] - exchange.fontSize / 2,
               exchange.pos[octtree.y] - 2 * exchange.fontSize);
    ctx.lineTo(exchange.pos[octtree.x] + exchange.fontSize / 2,
               exchange.pos[octtree.y] - 2 * exchange.fontSize);
    ctx.closePath();
    ctx.fillStyle = ctx.strokeStyle;
    ctx.fill();

};
+
// A channel node in the topology graph. Picks a free position via the
// octtree, initialises physics state (mass/velocity/ideal position),
// copies the broker attributes from `elem` and registers with the tree.
function Channel(tree, elem, model) {
    this.name = elem.name;
    this.pos = vec3.create();
    this.findNewPosition(model, tree);

    this.next_pos = vec3.create(this.pos);
    this.mass = 0.1;
    this.velocity = vec3.create();
    this.ideal = { pos : vec3.create() };
    this.disabled = false;
    this.update(elem);
    tree.add(this);
}
+
// Shared layout constants, spring and attribute lists for all channels.
// Channels are laid out along the top edge of the canvas.
Channel.prototype = {
    yInit : 100,       // starting y when placing a new channel
    yIncr : 50,        // row step when the current row is full
    xInit : 100,       // starting x
    xIncr : 50,        // horizontal probe step while searching for space
    xMax : 200,        // right placement limit (reset on canvas resize)
    yBoundary : 200,   // above this line the spring pulls back to yInit
    // Attributes copied verbatim from the broker JSON by update().
    attributes : [ 'acks_uncommitted', 'client_flow_blocked', 'confirm', 'connection_details',
                   'consumer_count', 'message_stats', 'messages_unacknowledged',
                   'messages_unconfirmed', 'node', 'number', 'prefetch_count', 'transactional',
                   'user', 'vhost' ],
    pos : vec3.create(),
    fontSize : 12,
    spring : new Spring(),
    details : undefined,      // filled in by the async details fetch
    object_type : 'channel',
    // Attributes shown, in this order, in the details table.
    detail_attributes : [ 'name', 'user', 'transactional', 'confirm', 'node', 'vhost',
                          'prefetch_count', 'messages_unacknowledged', 'messages_unconfirmed',
                          'consumer_count', 'client_flow_blocked' ]
};
// Note: the spring object lives on the prototype and is therefore shared
// by every channel instance.
Channel.prototype.spring.octtreeLimit = 10;
Channel.prototype.spring.octtreeRadius = 500;
Channel.prototype.spring.equilibriumLength = 0;
Channel.prototype.spring.dampingFactor = 0.1;
Channel.prototype.spring.pull = true;
Channel.prototype.spring.push = false;
+
// Find an unoccupied spot: scan right along the current row; when the
// row is full, drop down a row (x drifts right by y/10 — presumably to
// fan the rows out diagonally; confirm) and scan again.
Channel.prototype.findNewPosition = function (model, tree) {
    this.pos[octtree.x] = this.xInit;
    this.pos[octtree.y] = this.yInit;
    this.pos[octtree.z] = 0;

    while (! searchX(this, tree, this.xIncr, this.xMax)) {
        this.pos[octtree.y] += this.yIncr;
        this.pos[octtree.x] = this.xInit + (this.pos[octtree.y] / 10);
    }

    // Vertical extent is refined later by render(); start degenerate.
    this.yMin = this.pos[octtree.y];
    this.yMax = this.pos[octtree.y];
};
// Channel placement may use the full canvas width.
Channel.prototype.canvasResized = function (canvas) {
    Channel.prototype.xMax = canvas.width;
};
// Copy the whitelisted broker attributes onto this instance.
Channel.prototype.update = function (elem) {
    var attr, i;
    for (i = 0; i < this.attributes.length; i += 1) {
        attr = this.attributes[i];
        this[attr] = elem[attr];
    }
};
// Base removal: drop this channel from the spatial index.
Channel.prototype.remove = function (tree, model) {
    tree.del(this);
};
// Draw this channel: a tall rectangle with the name rotated vertically
// inside it. When details are loaded (i.e. the channel is hovered), also
// draw consumer links (green) to queues and publisher links (blue) to
// exchanges, highlighting the linked elements.
Channel.prototype.render = function (model, ctx) {
    var i, dim, consumer, queue, publisher, exchange;
    if (this.disabled) {
        return;
    }
    dim = ctx.measureText(this.name);
    // The label is rotated 90 degrees, so its width extends along y.
    if (model.cull(this.pos[octtree.x] - this.fontSize,
                   this.pos[octtree.y] - (dim.width / 2) - this.fontSize,
                   this.fontSize * 2,
                   dim.width + (this.fontSize * 2))) {
        return;
    }

    // Cache the vertical extent; link arrows anchor on yMax.
    this.yMax = this.pos[octtree.y] + (dim.width / 2) + this.fontSize;
    this.yMin = this.pos[octtree.y] - (dim.width / 2) - this.fontSize;

    ctx.beginPath();
    ctx.textAlign = "center";
    ctx.textBaseline = "middle";

    ctx.lineWidth = 2.0;
    ctx.strokeStyle = "black";
    ctx.moveTo(this.pos[octtree.x] - this.fontSize, this.yMin);
    ctx.lineTo(this.pos[octtree.x] + this.fontSize, this.yMin);
    ctx.lineTo(this.pos[octtree.x] + this.fontSize, this.yMax);
    ctx.lineTo(this.pos[octtree.x] - this.fontSize, this.yMax);
    ctx.closePath();
    // Hover/drag handling plus fill and stroke of the outline.
    this.preStroke(model, ctx);

    // Name, rotated to run vertically up the rectangle.
    ctx.save();
    ctx.translate(this.pos[octtree.x], this.pos[octtree.y]);
    ctx.rotate(3 * Math.PI / 2);
    ctx.fillStyle = ctx.strokeStyle;
    ctx.fillText(this.name, 0, 0);
    ctx.restore();

    if (undefined !== this.details) {
        model.resetHighlighted();
        ctx.lineWidth = 2.0;
        if (undefined !== this.details.consumer_details) {
            // Green: queues this channel consumes from.
            ctx.strokeStyle = "#00a000";
            for (i = 0; i < this.details.consumer_details.length; i += 1) {
                consumer = this.details.consumer_details[i];
                queue = consumer.queue_details.name;
                if (undefined !== model.queue[queue] && ! model.queue[queue].disabled) {
                    model.setHighlighted(model.queue[queue]);
                    Consumer.render(this, model.queue[queue], ctx, consumer.consumer_tag);
                }
            }
        }

        if (undefined !== this.details.publishes) {
            // Blue: exchanges this channel publishes to.
            ctx.strokeStyle = "#0000a0";
            for (i = 0; i < this.details.publishes.length; i += 1) {
                publisher = this.details.publishes[i];
                exchange = publisher.exchange.name;
                if (undefined !== model.exchange[exchange] &&
                    ! model.exchange[exchange].disabled) {
                    model.setHighlighted(model.exchange[exchange]);
                    Publisher.render(this, model.exchange[exchange], ctx);
                }
            }
        }
    }
};
// Default no-op preStroke; replaced with the shared `draggable` handler
// elsewhere in this file.
Channel.prototype.preStroke = function (model, ctx) {
};
// While the channel sits above the yBoundary line, spring it back toward
// its home row (x unchanged, y -> yInit).
Channel.prototype.animate = function (elapsed) {
    if (this.yBoundary > this.pos[octtree.y]) {
        this.ideal.pos[octtree.x] = this.pos[octtree.x];
        this.ideal.pos[octtree.y] = this.yInit;
        this.spring.apply(elapsed, this, this.ideal);
    }
};
// Book-keeping for the model's visible-channel counter.
Channel.prototype.disable = function (model) {
    model.channels_visible -= 1;
};
Channel.prototype.enable = function (model, tree) {
    model.channels_visible += 1;
    this.findNewPosition(model, tree);
};
// Placeholder; presumably replaced with the real async fetch elsewhere
// (draggable calls this on hover) — confirm.
Channel.prototype.getDetails = function () {
};
// Build the key/value map for the details pane: a 'Channel' header, each
// detail attribute (title-cased key, stringified value), plus whichever
// message rates are available. attributeOrder fixes the display order.
Channel.prototype.stringAttributes = function () {
    var obj, i, attName, attNameTitle;
    obj = { Channel : '',
            attributeOrder : ['Channel'] };
    for (i in this.detail_attributes) {
        attName = this.detail_attributes[i];
        attNameTitle = attName.toTitleCase();
        obj.attributeOrder.push(attNameTitle);
        if ("object" === typeof this[attName]) {
            obj[attNameTitle] = stringifyObject(this[attName]);
        } else {
            obj[attNameTitle] = "" + this[attName];
        }
    }

    // Optional rate statistics, only when the broker supplied them.
    if (undefined !== this.message_stats) {
        if (undefined !== this.message_stats.publish_details) {
            obj.attributeOrder.push('Publish Rate (msgs/sec)');
            obj['Publish Rate (msgs/sec)'] = "" + Math.round(this.message_stats.publish_details.rate);
        }

        if (undefined !== this.message_stats.deliver_get_details) {
            obj.attributeOrder.push('Delivery and Get Rate (msgs/sec)');
            obj['Delivery and Get Rate (msgs/sec)'] = "" + Math.round(this.message_stats.deliver_get_details.rate);
        }

        if (undefined !== this.message_stats.ack_details) {
            obj.attributeOrder.push('Delivery Acknowledgement Rate (acks/sec)');
            obj['Delivery Acknowledgement Rate (acks/sec)'] = "" + Math.round(this.message_stats.ack_details.rate);
        }
    }

    return obj;
};
// Management-UI path for this channel.
Channel.prototype.url = function () {
    return "/channels/" + encodeURIComponent(this.name);
};
// Jump to the channel's page in the management UI.
Channel.prototype.navigateTo = function () {
    document.location = "../#" + this.url();
};
+
// An exchange node in the topology graph. Like Channel, but also tracks
// its bindings: outbound split by destination type, plus inbound.
function Exchange(tree, elem, model) {
    this.name = elem.name;
    this.pos = vec3.create();
    this.findNewPosition(model, tree);
    this.next_pos = vec3.create(this.pos);
    this.mass = 0.1;
    this.velocity = vec3.create();
    this.ideal = { pos : vec3.create() };
    this.disabled = false;
    this.bindings_outbound = { exchange : {}, queue : {} };
    this.bindings_inbound = {};
    this.update(elem);
    tree.add(this);
}
+
// Shared layout constants, spring and attribute lists for all exchanges.
// Exchanges occupy the left portion of the canvas.
Exchange.prototype = {
    yInit : 250,       // starting y when placing a new exchange
    yIncr : 50,        // vertical probe step while searching for space
    xInit : 100,       // starting x (reset on canvas resize)
    xBoundary : 200,   // right of this the spring stops pulling back left
    // Attributes copied verbatim from the broker JSON by update().
    attributes : [ 'arguments', 'auto_delete', 'durable', 'internal', 'type',
                   'message_stats_out', 'message_stats_in', 'vhost' ],
    pos : vec3.create(),
    fontSize : 12,
    spring : new Spring(),
    details : undefined,      // filled in by the async details fetch
    object_type : 'exchange',
    // Attributes shown, in this order, in the details table.
    detail_attributes : [ 'name', 'type', 'durable', 'auto_delete', 'internal', 'arguments', 'vhost' ]
};
// Note: the spring object lives on the prototype and is shared.
Exchange.prototype.spring.octtreeLimit = 10;
Exchange.prototype.spring.octtreeRadius = 500;
Exchange.prototype.spring.equilibriumLength = 0;
Exchange.prototype.spring.dampingFactor = 0.1;
Exchange.prototype.spring.pull = true;
Exchange.prototype.spring.push = false;
+
// Place a new exchange: start at (xInit, yInit) and move straight down
// until a free slot is found.
Exchange.prototype.findNewPosition = function (model, tree) {
    this.pos[octtree.x] = this.xInit;
    this.pos[octtree.y] = this.yInit;
    this.pos[octtree.z] = 0;

    searchY(this, tree, this.yIncr);

    // Horizontal extent is refined later by render(); start degenerate.
    this.xMin = this.pos[octtree.x];
    this.xMax = this.pos[octtree.x];
};
// Exchanges live around the left sixth of the canvas, springing back
// once they stray past a third of the width.
Exchange.prototype.canvasResized = function (canvas) {
    Exchange.prototype.xInit = canvas.width / 6;
    Exchange.prototype.xBoundary = 2 * canvas.width / 6;
};
// Copy the whitelisted broker attributes onto this instance.
Exchange.prototype.update = function (elem) {
    var attr, i;
    for (i = 0; i < this.attributes.length; i += 1) {
        attr = this.attributes[i];
        this[attr] = elem[attr];
    }
};
// Base removal: drop this exchange from the spatial index.
Exchange.prototype.remove = function (tree, model) {
    tree.del(this);
};
// Draw this exchange: first its outbound binding edges (so they sit
// under the node), then a rounded "pill" (two arcs joined by lines) with
// the name inside. When details are loaded, highlight publishing
// channels (green links) and all bound exchanges/queues.
Exchange.prototype.render = function (model, ctx) {
    var i, dim, channel;
    if (this.disabled) {
        return;
    }
    for (i in this.bindings_outbound.exchange) {
        this.bindings_outbound.exchange[i].render(model, ctx);
    }
    // Edges to queues only make sense while queues are being rendered.
    if (model.rendering.queue.enabled) {
        for (i in this.bindings_outbound.queue) {
            this.bindings_outbound.queue[i].render(model, ctx);
        }
    }
    dim = ctx.measureText(this.name);
    if (model.cull(this.pos[octtree.x] - (dim.width / 2) - this.fontSize,
                   this.pos[octtree.y] - this.fontSize,
                   dim.width + (2 * this.fontSize),
                   2 * this.fontSize)) {
        return;
    }

    ctx.beginPath();
    ctx.textAlign = "center";
    ctx.textBaseline = "middle";

    ctx.lineWidth = 2.0;
    ctx.strokeStyle = "black";

    // Pill outline: left semicircle, top edge, right semicircle;
    // closePath supplies the bottom edge.
    ctx.arc(this.pos[octtree.x] - (dim.width / 2), this.pos[octtree.y],
            this.fontSize, Math.PI / 2, 3 * Math.PI / 2, false);
    ctx.lineTo(this.pos[octtree.x] + (dim.width / 2), this.pos[octtree.y] -
               this.fontSize);

    ctx.arc(this.pos[octtree.x] + (dim.width / 2), this.pos[octtree.y],
            this.fontSize, 3 * Math.PI / 2, Math.PI / 2, false);
    ctx.closePath();

    // Hover/drag handling plus fill and stroke of the outline.
    this.preStroke(model, ctx);

    ctx.fillStyle = ctx.strokeStyle;
    ctx.fillText(this.name, this.pos[octtree.x], this.pos[octtree.y]);

    // Cache horizontal extent; binding labels anchor on xMin/xMax.
    this.xMin = this.pos[octtree.x] - (dim.width / 2) - this.fontSize;
    this.xMax = this.pos[octtree.x] + (dim.width / 2) + this.fontSize;

    if (undefined !== this.details) {
        model.resetHighlighted();
        ctx.lineWidth = 2.0;
        ctx.strokeStyle = "#00a000";
        if (undefined !== this.details.incoming) {
            // Channels publishing into this exchange.
            for (i = 0; i < this.details.incoming.length; i += 1) {
                channel = this.details.incoming[i].channel_details.name;
                if (undefined !== model.channel[channel] && ! model.channel[channel].disabled) {
                    model.setHighlighted(model.channel[channel]);
                    Publisher.render(model.channel[channel], this, ctx);
                }
            }
        }

        // Highlight everything bound to or from this exchange.
        for (i in this.bindings_outbound.queue) {
            model.setHighlighted(model.queue[this.bindings_outbound.queue[i].destination]);
        }
        for (i in this.bindings_outbound.exchange) {
            model.setHighlighted(model.exchange[this.bindings_outbound.exchange[i].destination]);
        }
        for (i in this.bindings_inbound) {
            model.setHighlighted(model.exchange[this.bindings_inbound[i].source]);
        }

    }
};
// Default no-op preStroke; replaced with the shared `draggable` handler
// elsewhere in this file.
Exchange.prototype.preStroke = function (model, ctx) {
};
// While the exchange is left of xBoundary, spring it back toward its
// home column (y unchanged, x -> xInit).
Exchange.prototype.animate = function (elapsed) {
    if (this.xBoundary > this.pos[octtree.x]) {
        this.ideal.pos[octtree.x] = this.xInit;
        this.ideal.pos[octtree.y] = this.pos[octtree.y];
        this.spring.apply(elapsed, this, this.ideal);
    }
};
// Book-keeping for the model's visible-exchange counter.
Exchange.prototype.disable = function (model) {
    model.exchanges_visible -= 1;
};
Exchange.prototype.enable = function (model, tree) {
    model.exchanges_visible += 1;
    this.findNewPosition(model, tree);
};
// Placeholder; presumably replaced with the real async fetch elsewhere
// (draggable calls this on hover) — confirm.
Exchange.prototype.getDetails = function () {
};
// Build the key/value map for the details pane: an 'Exchange' header,
// each detail attribute (title-cased, stringified), binding counts, and
// whichever message rates the broker supplied.
Exchange.prototype.stringAttributes = function () {
    var obj, i, attName, attNameTitle;
    obj = { Exchange : '',
            attributeOrder : ['Exchange'] };
    for (i in this.detail_attributes) {
        attName = this.detail_attributes[i];
        attNameTitle = attName.toTitleCase();
        obj.attributeOrder.push(attNameTitle);
        if ("object" === typeof this[attName]) {
            obj[attNameTitle] = stringifyObject(this[attName]);
        } else {
            obj[attNameTitle] = "" + this[attName];
        }
    }

    obj.attributeOrder.push('Outgoing Queue Bindings');
    obj['Outgoing Queue Bindings'] = "" + Object.keys(this.bindings_outbound.queue).length;

    obj.attributeOrder.push('Outgoing Exchange Bindings');
    obj['Outgoing Exchange Bindings'] = "" + Object.keys(this.bindings_outbound.exchange).length;

    obj.attributeOrder.push('Incoming Exchange Bindings');
    obj['Incoming Exchange Bindings'] = "" + Object.keys(this.bindings_inbound).length;

    if (undefined !== this.message_stats_in &&
        undefined !== this.message_stats_in.publish_details) {
        obj.attributeOrder.push('Message Incoming Rate (msgs/sec)');
        obj['Message Incoming Rate (msgs/sec)'] = "" + Math.round(this.message_stats_in.publish_details.rate);
    }

    if (undefined !== this.message_stats_out &&
        undefined !== this.message_stats_out.publish_details) {
        obj.attributeOrder.push('Message Outgoing Rate (msgs/sec)');
        obj['Message Outgoing Rate (msgs/sec)'] = "" + Math.round(this.message_stats_out.publish_details.rate);
    }

    return obj;
};
// Management-UI path for this exchange. The nameless default exchange is
// addressed as "amq.default" in the UI.
Exchange.prototype.url = function () {
    var name = (this.name === "") ? "amq.default" : this.name;
    return "/exchanges/" + encodeURIComponent(this.vhost) +
        "/" + encodeURIComponent(name);
};
// Jump to the exchange's page in the management UI.
Exchange.prototype.navigateTo = function () {
    document.location = "../#" + this.url();
};
+
// A queue node in the topology graph. Like Channel, but also tracks its
// inbound bindings from exchanges.
function Queue(tree, elem, model) {
    this.name = elem.name;
    this.pos = vec3.create();
    this.findNewPosition(model, tree);
    this.next_pos = vec3.create(this.pos);
    this.mass = 0.1;
    this.velocity = vec3.create();
    this.ideal = { pos : vec3.create() };
    this.disabled = false;
    this.bindings_inbound = {};
    this.update(elem);
    tree.add(this);
}
+
// Shared layout constants, spring and attribute lists for all queues.
// Queues occupy the right portion of the canvas.
Queue.prototype = {
    yInit : 250,       // starting y when placing a new queue
    yIncr : 50,        // vertical probe step while searching for space
    xInit : 400,       // starting x (reset on canvas resize)
    xBoundary : 300,   // left of this the spring stops pulling back right
    // Attributes copied verbatim from the broker JSON by update().
    attributes : [ 'arguments', 'auto_delete', 'durable', 'messages',
                   'messages_ready', 'messages_unacknowledged', 'message_stats',
                   'node', 'owner_pid_details', 'vhost', 'memory', 'consumers' ],
    pos : vec3.create(),
    fontSize : 12,
    spring : new Spring(),
    details : undefined,      // filled in by the async details fetch
    object_type : 'queue',
    // Attributes shown, in this order, in the details table.
    detail_attributes : [ 'name', 'durable', 'auto_delete', 'arguments', 'node', 'vhost',
                          'messages_ready', 'messages_unacknowledged', 'consumers', 'memory' ]
};
// Note: the spring object lives on the prototype and is shared.
Queue.prototype.spring.octtreeLimit = 10;
Queue.prototype.spring.octtreeRadius = 500;
Queue.prototype.spring.equilibriumLength = 0;
Queue.prototype.spring.dampingFactor = 0.1;
Queue.prototype.spring.pull = true;
Queue.prototype.spring.push = false;
+
// Place a new queue: start at (xInit, yInit) and move straight down
// until a free slot is found.
Queue.prototype.findNewPosition = function (model, tree) {
    this.pos[octtree.x] = this.xInit;
    this.pos[octtree.y] = this.yInit;
    this.pos[octtree.z] = 0;

    searchY(this, tree, this.yIncr);

    // Horizontal extent is refined later by render(); start degenerate.
    this.xMin = this.pos[octtree.x];
    this.xMax = this.pos[octtree.x];
};
// Queues live around the right sixth of the canvas, springing back once
// they stray left past two-thirds of the width.
Queue.prototype.canvasResized = function (canvas) {
    Queue.prototype.xInit = 5 * canvas.width / 6;
    Queue.prototype.xBoundary = 4 * canvas.width / 6;
};
// Copy the whitelisted broker attributes onto this instance.
Queue.prototype.update = function (elem) {
    var attr, i;
    for (i = 0; i < this.attributes.length; i += 1) {
        attr = this.attributes[i];
        this[attr] = elem[attr];
    }
};
// Base removal: drop this queue from the spatial index.
Queue.prototype.remove = function (tree, model) {
    tree.del(this);
};
// Draw this queue: a rectangle labelled "name (ready, unacked)". When
// details are loaded, draw consumer links (blue) to the consuming
// channels and highlight the exchanges bound into this queue.
Queue.prototype.render = function (model, ctx) {
    var text, dim, i, channel;
    if (this.disabled) {
        return;
    }
    text = this.name + " (" + this.messages_ready + ", " +
        this.messages_unacknowledged + ")";
    dim = ctx.measureText(text);
    if (model.cull(this.pos[octtree.x] - (dim.width / 2) - this.fontSize,
                   this.pos[octtree.y] - this.fontSize,
                   dim.width + (2 * this.fontSize),
                   2 * this.fontSize)) {
        return;
    }
    ctx.beginPath();
    ctx.textAlign = "center";
    ctx.textBaseline = "middle";

    ctx.lineWidth = 2.0;
    ctx.strokeStyle = "black";
    ctx.moveTo(this.pos[octtree.x] - (dim.width / 2) - this.fontSize,
               this.pos[octtree.y] - this.fontSize);
    ctx.lineTo(this.pos[octtree.x] + (dim.width / 2) + this.fontSize,
               this.pos[octtree.y] - this.fontSize);
    ctx.lineTo(this.pos[octtree.x] + (dim.width / 2) + this.fontSize,
               this.pos[octtree.y] + this.fontSize);
    ctx.lineTo(this.pos[octtree.x] - (dim.width / 2) - this.fontSize,
               this.pos[octtree.y] + this.fontSize);
    ctx.closePath();

    // Hover/drag handling plus fill and stroke of the outline.
    this.preStroke(model, ctx);

    ctx.fillStyle = ctx.strokeStyle;
    ctx.fillText(text, this.pos[octtree.x], this.pos[octtree.y]);

    // Cache horizontal extent; binding labels anchor on xMin/xMax.
    this.xMin = this.pos[octtree.x] - (dim.width / 2) - this.fontSize;
    this.xMax = this.pos[octtree.x] + (dim.width / 2) + this.fontSize;

    if (undefined !== this.details && undefined !== this.details.consumer_details) {
        model.resetHighlighted();
        ctx.lineWidth = 2.0;
        ctx.strokeStyle = "#0000a0";
        for (i = 0; i < this.details.consumer_details.length; i += 1) {
            channel = this.details.consumer_details[i].channel_details.name;
            if (undefined !== model.channel[channel] && ! model.channel[channel].disabled) {
                model.setHighlighted(model.channel[channel]);
                Consumer.render(model.channel[channel], this, ctx,
                                this.details.consumer_details[i].consumer_tag);
            }
        }
        // Highlight the exchanges bound into this queue.
        for (i in this.bindings_inbound) {
            model.setHighlighted(model.exchange[this.bindings_inbound[i].source]);
        }
    }
};
+// Hook invoked by render() after the box path is traced and before the
+// label is filled; the base implementation does nothing and exists to be
+// overridden (e.g. to stroke or recolour the box).
+Queue.prototype.preStroke = function (model, ctx) {
+};
+Queue.prototype.animate = function (elapsed) {
+ if (this.xBoundary < this.pos[octtree.x]) {
+ this.ideal.pos[octtree.x] = this.xInit;
+ this.ideal.pos[octtree.y] = this.pos[octtree.y];
+ this.spring.apply(elapsed, this, this.ideal);
+ }
+};
+// Bookkeeping when this queue is hidden; Model.disable does the octtree
+// removal and flag-setting.
+Queue.prototype.disable = function (model) {
+ model.queues_visible -= 1;
+};
+// Bookkeeping when this queue is shown again: bump the visible count and
+// find a fresh free slot, since its old position may now be occupied.
+Queue.prototype.enable = function (model, tree) {
+ model.queues_visible += 1;
+ this.findNewPosition(model, tree);
+};
+// Hook for fetching per-queue detail data (consumer list etc.); the base
+// implementation is a no-op, overridden by the UI layer.
+Queue.prototype.getDetails = function () {
+};
+// Build the detail-pane contents: a flat map of display-title -> string
+// value, plus 'attributeOrder' listing the titles in render order.
+// Objects are serialised via stringifyObject (defined elsewhere);
+// toTitleCase is a String extension -- presumably "auto_delete" ->
+// "Auto Delete"; confirm against the util that defines it.
+Queue.prototype.stringAttributes = function () {
+ var obj, i, attName, attNameTitle;
+ obj = { Queue : '',
+ attributeOrder : ['Queue'] };
+ for (i in this.detail_attributes) {
+ attName = this.detail_attributes[i];
+ attNameTitle = attName.toTitleCase();
+ obj.attributeOrder.push(attNameTitle);
+ if ("object" === typeof this[attName]) {
+ obj[attNameTitle] = stringifyObject(this[attName]);
+ } else {
+ obj[attNameTitle] = "" + this[attName];
+ }
+ }
+
+ obj.attributeOrder.push('Incoming Exchange Bindings');
+ obj['Incoming Exchange Bindings'] = "" + Object.keys(this.bindings_inbound).length;
+
+ // Rates are only present when the management API reported stats.
+ if (undefined !== this.message_stats) {
+ if (undefined !== this.message_stats.publish_details) {
+ obj.attributeOrder.push('Message Incoming Rate (msgs/sec)');
+ obj['Message Incoming Rate (msgs/sec)'] = "" + Math.round(this.message_stats.publish_details.rate);
+ }
+
+ if (undefined !== this.message_stats.deliver_get_details) {
+ obj.attributeOrder.push('Delivery and Get Rate (msgs/sec)');
+ obj['Delivery and Get Rate (msgs/sec)'] = "" + Math.round(this.message_stats.deliver_get_details.rate);
+ }
+
+ if (undefined !== this.message_stats.ack_details) {
+ obj.attributeOrder.push('Delivery Acknowledgement Rate (acks/sec)');
+ obj['Delivery Acknowledgement Rate (acks/sec)'] = "" + Math.round(this.message_stats.ack_details.rate);
+ }
+ }
+
+ return obj;
+};
+Queue.prototype.url = function () {
+ return "/queues/" + encodeURIComponent(this.vhost) +
+ "/" + encodeURIComponent(this.name);
+};
+// Jump the browser to this queue's page in the management UI (hash-based
+// navigation one directory up from the visualiser).
+Queue.prototype.navigateTo = function () {
+ document.location = "../#" + this.url();
+};
+
+// A Binding aggregates every routing key between one source exchange and
+// one destination (exchange or queue).  'elems' is a non-empty array of
+// raw binding elements sharing the same endpoints: set() records all of
+// their routing keys first, then the first element supplies the shared
+// source/destination identity.
+// NOTE(review): elems.shift() mutates the caller's array -- callers
+// appear not to reuse it afterwards; confirm before changing call sites.
+function Binding(elems) {
+ this.keys = {};
+ this.set(elems);
+ var elem = elems.shift();
+ this.source = elem.source;
+ this.destination_type = elem.destination_type;
+ this.destination = elem.destination;
+}
+// Shared Binding defaults: 'attributes' are the fields kept per routing
+// key; 'offset' bends the bezier control points horizontally; 'loopOffset'
+// lifts self-bindings (exchange bound to itself) above the node.
+Binding.prototype = {
+ attributes : [ 'arguments' ],
+ offset : 150,
+ fontSize : 12,
+ loopOffset : 50,
+ object_type : 'binding'
+};
+Binding.prototype.set = function (elems) {
+ var i, elem, attr, j;
+ this.keys = {};
+ for (i = 0; i < elems.length; i += 1) {
+ elem = elems[i];
+ this.keys[elem.routing_key] = {};
+ for (j = 0; j < this.attributes.length; j += 1) {
+ attr = this.attributes[j];
+ this.keys[elem.routing_key][attr] = elem[attr];
+ }
+ }
+};
+// Draw this binding as a bezier curve from the source exchange's right
+// edge to the destination's left edge, finished with a filled arrow head
+// at the destination.  Nothing is drawn if either endpoint is missing,
+// disabled, or the curve's bounding box is culled.
+Binding.prototype.render = function (model, ctx) {
+ var source, destination, xMid, xCtl1, xCtl2, yCtl1, yCtl2, xMin, yMin, xMax, yMax;
+ source = model.exchange[this.source];
+ if (this.destination_type === "exchange") {
+ destination = model.exchange[this.destination];
+ } else {
+ destination = model.queue[this.destination];
+ }
+ if (undefined === source || undefined === destination) {
+ return;
+ }
+ if (source.disabled || destination.disabled) {
+ return;
+ }
+ // Control points: bow the curve out by at least 'offset' on each side;
+ // a self-binding is lifted by loopOffset so the loop is visible.
+ xMid = (source.xMax + destination.xMin) / 2;
+ xCtl1 = xMid > (source.xMax + this.offset) ? xMid : source.xMax + this.offset;
+ xCtl2 = xMid < (destination.xMin - this.offset) ? xMid
+ : destination.xMin - this.offset;
+ yCtl1 = destination === source ? source.pos[octtree.y] - this.loopOffset : source.pos[octtree.y];
+ yCtl2 = destination === source ? destination.pos[octtree.y] - this.loopOffset : destination.pos[octtree.y];
+ xMin = Math.min(source.xMax, xCtl2);
+ yMin = Math.min(yCtl1, yCtl2);
+ xMax = Math.max(destination.xMin, xCtl1);
+ yMax = Math.max(source.pos[octtree.y], destination.pos[octtree.y]);
+ if (model.cull(xMin, yMin, xMax - xMin, yMax - yMin)) {
+ return;
+ }
+
+ // Trace the curve out and back (1px apart) so preStroke can stroke a
+ // closed path; the base preStroke is a no-op hook.
+ ctx.beginPath();
+ ctx.lineWidth = 1.0;
+ ctx.strokeStyle = "black";
+ ctx.moveTo(source.xMax, source.pos[octtree.y]);
+ ctx.bezierCurveTo(xCtl1, yCtl1, xCtl2, yCtl2, destination.xMin,
+ destination.pos[octtree.y]);
+ ctx.moveTo(destination.xMin, destination.pos[octtree.y] + 1);
+ ctx.bezierCurveTo(xCtl2, yCtl2 + 1, xCtl1, yCtl1 + 1, source.xMax,
+ source.pos[octtree.y] + 1);
+ ctx.moveTo(source.xMax, source.pos[octtree.y]);
+ this.preStroke(source, destination, model, ctx);
+
+ // draw an arrow head
+ ctx.beginPath();
+ ctx.moveTo(destination.xMin, destination.pos[octtree.y]);
+ ctx.lineTo(destination.xMin - this.fontSize, destination.pos[octtree.y] +
+ (this.fontSize / 2));
+ ctx.lineTo(destination.xMin - this.fontSize, destination.pos[octtree.y] -
+ (this.fontSize / 2));
+ ctx.closePath();
+ ctx.fillStyle = ctx.strokeStyle;
+ ctx.fill();
+};
+// Hook invoked by render() with the traced curve path; the base
+// implementation does nothing and exists to be overridden.
+Binding.prototype.preStroke = function (source, destination, model, ctx) {
+};
+
+function Model() {
+ this.exchange = {};
+ this.exchanges_visible = 0;
+ this.queue = {};
+ this.queues_visible = 0;
+ this.channel = {};
+ this.channels_visible = 0;
+ this.connection = {};
+ this.vhost = {};
+ this.rendering = { exchange : { enabled : true,
+ on_enable : {} },
+ queue : { enabled : true,
+ on_enable : {} },
+ channel : { enabled : true,
+ on_enable : {} },
+ connection : { enabled : true,
+ on_enable : {} }
+ };
+ this.highlighted = { exchange : {},
+ queue : {},
+ channel : {},
+ connection : {} };
+}
+
+// Caps on how many objects of each type are shown at once; rebuild()
+// auto-disables newly discovered objects beyond these limits.
+Model.prototype.permitted_exchanges_visible = 10;
+Model.prototype.permitted_queues_visible = 10;
+Model.prototype.permitted_channels_visible = 10;
+
+// Reconcile the model with a freshly fetched broker configuration:
+// create or update channels, exchanges, queues, bindings and vhosts;
+// drop anything no longer reported; and keep the per-type visibility
+// counters consistent with the permitted_*_visible caps.  'tree' is the
+// octtree used for spatial placement of new objects.  Each object-type
+// section follows the same pattern: upsert everything reported, then
+// sweep the local map for entries that were not matched.
+Model.prototype.rebuild = function (tree, configuration) {
+ var elem, matched, i, binding, bindings, source, src, destination_type, j, src1, destination, dest, dest_type;
+
+ // Channels
+ matched = {};
+ for (i = 0; i < configuration.channels.length; i += 1) {
+ elem = configuration.channels[i];
+ if (undefined === this.channel[elem.name]) {
+ this.channel[elem.name] = new Channel(tree, elem, this);
+ this.channels_visible += 1;
+ // New channels beyond the cap (or while channel rendering is
+ // off) start out disabled.
+ if ((this.channels_visible >
+ this.permitted_channels_visible) ||
+ ! this.rendering.channel.enabled) {
+ this.disable(this.channel[elem.name], tree);
+ }
+ } else {
+ this.channel[elem.name].update(elem);
+ }
+ matched[elem.name] = true;
+ }
+ for (i in this.channel) {
+ if (undefined === matched[i]) {
+ elem = this.channel[i];
+ delete this.channel[i];
+ elem.remove(tree, this);
+ if (! elem.disabled) {
+ this.channels_visible -= 1;
+ }
+ }
+ }
+
+ // Exchanges
+ matched = {};
+ for (i = 0; i < configuration.exchanges.length; i += 1) {
+ elem = configuration.exchanges[i];
+ if (undefined === this.exchange[elem.name]) {
+ this.exchange[elem.name] = new Exchange(tree, elem, this);
+ this.exchanges_visible += 1;
+ // Built-in amq.* exchanges are hidden by default, as are
+ // exchanges beyond the cap.
+ if (elem.name.slice(0, 4) === "amq." ||
+ (this.exchanges_visible >
+ this.permitted_exchanges_visible) ||
+ ! this.rendering.exchange.enabled) {
+ this.disable(this.exchange[elem.name], tree);
+ }
+ } else {
+ this.exchange[elem.name].update(elem);
+ }
+ matched[elem.name] = true;
+ }
+ for (i in this.exchange) {
+ if (undefined === matched[i]) {
+ elem = this.exchange[i];
+ delete this.exchange[i];
+ elem.remove(tree, this);
+ if (! elem.disabled) {
+ this.exchanges_visible -= 1;
+ }
+ }
+ }
+
+ // Queues
+ matched = {};
+ for (i = 0; i < configuration.queues.length; i += 1) {
+ elem = configuration.queues[i];
+ if (undefined === this.queue[elem.name]) {
+ this.queue[elem.name] = new Queue(tree, elem, this);
+ this.queues_visible += 1;
+ if ((this.queues_visible >
+ this.permitted_queues_visible) ||
+ ! this.rendering.queue.enabled) {
+ this.disable(this.queue[elem.name], tree);
+ delete this.rendering.queue.on_enable[elem.name];
+ }
+ } else {
+ this.queue[elem.name].update(elem);
+ }
+ matched[elem.name] = true;
+ }
+ for (i in this.queue) {
+ if (undefined === matched[i]) {
+ elem = this.queue[i];
+ delete this.queue[i];
+ elem.remove(tree, this);
+ if (! elem.disabled) {
+ this.queues_visible -= 1;
+ }
+ }
+ }
+
+ // Bindings
+ // First group the raw binding elements by source exchange, then by
+ // destination type and destination name; elements whose endpoints are
+ // not (or no longer) in the model are ignored.
+ bindings = {};
+ for (i = 0; i < configuration.bindings.length; i += 1) {
+ elem = configuration.bindings[i];
+ if (undefined === this.exchange[elem.source] ||
+ undefined === this[elem.destination_type][elem.destination]) {
+ continue;
+ }
+ if (undefined === bindings[elem.source]) {
+ bindings[elem.source] = { exchange : {}, queue : {} };
+ }
+ source = bindings[elem.source];
+ if (undefined === source[elem.destination_type][elem.destination]) {
+ source[elem.destination_type][elem.destination] = new Array(elem);
+ } else {
+ source[elem.destination_type][elem.destination].push(elem);
+ }
+ }
+
+ // Upsert a Binding per (source, destination) pair and mirror it into
+ // the destination's bindings_inbound map.
+ for (source in bindings) {
+ src = this.exchange[source].bindings_outbound;
+ i = bindings[source];
+ for (destination_type in i) {
+ j = i[destination_type];
+ src1 = src[destination_type];
+ for (destination in j) {
+ dest = this[destination_type][destination].bindings_inbound;
+ if (undefined === src1[destination]) {
+ src1[destination] = new Binding(j[destination]);
+ } else {
+ src1[destination].set(j[destination]);
+ }
+ binding = src1[destination];
+ if (undefined === dest[source]) {
+ dest[source] = binding;
+ }
+ }
+ }
+ }
+ // Sweep: drop outbound/inbound Binding objects that the broker no
+ // longer reports.
+ for (src in this.exchange) {
+ for (dest_type in this.exchange[src].bindings_outbound) {
+ for (dest in this.exchange[src].bindings_outbound[dest_type]) {
+ binding = this.exchange[src].bindings_outbound[dest_type][dest];
+ if (undefined === bindings[binding.source] ||
+ undefined === bindings[binding.source][binding.destination_type] ||
+ undefined === bindings[binding.source][binding.destination_type][binding.destination]) {
+ delete this.exchange[src].bindings_outbound[dest_type][dest];
+ if (undefined !== this[binding.destination_type][binding.destination]) {
+ delete this[binding.destination_type][binding.destination].bindings_inbound[binding.source];
+ }
+ }
+ }
+ }
+ }
+ bindings = undefined;
+
+ // vhosts
+ matched = {};
+ for (i = 0; i < configuration.vhosts.length; i += 1) {
+ elem = configuration.vhosts[i];
+ if (undefined === this.vhost[elem.name]) {
+ this.vhost[elem.name] = elem;
+ this.vhost_add(elem);
+ }
+ matched[elem.name] = true;
+ }
+ for (i in this.vhost) {
+ if (undefined === matched[i]) {
+ this.vhost_remove(this.vhost[i]);
+ delete this.vhost[i];
+ }
+ }
+
+ matched = undefined;
+};
+// Hide an object: let it fix its own counter via its disable() hook,
+// pull it out of the octtree, and drop any cached detail data.
+Model.prototype.disable = function (elem, tree) {
+ elem.disable(this);
+ tree.del(elem);
+ elem.disabled = true;
+ elem.details = undefined;
+};
+// Show an object again: its enable() hook fixes the counter and finds a
+// fresh position, then it rejoins the octtree with stale details cleared.
+Model.prototype.enable = function (elem, tree) {
+ elem.enable(this, tree);
+ tree.add(elem);
+ elem.disabled = false;
+ elem.details = undefined;
+};
+Model.prototype.render = function (ctx) {
+ var i;
+ if (this.rendering.exchange.enabled) {
+ for (i in this.exchange) {
+ this.exchange[i].render(this, ctx);
+ }
+ }
+ if (this.rendering.queue.enabled) {
+ for (i in this.queue) {
+ this.queue[i].render(this, ctx);
+ }
+ }
+ if (this.rendering.channel.enabled) {
+ for (i in this.channel) {
+ this.channel[i].render(this, ctx);
+ }
+ }
+};
+// Visibility test hook: return true to skip drawing the given rectangle.
+// The base implementation culls nothing; the UI layer overrides this
+// with a real viewport test.
+Model.prototype.cull = function (xMin, yMin, width, height) {
+ return false;
+};
+Model.prototype.vhost_add = function (elem) {
+};
+Model.prototype.vhost_del = function (elem) {
+};
+Model.prototype.resetHighlighted = function () {
+ this.highlighted = { exchange : {},
+ queue : {},
+ channel : {},
+ connection : {} };
+};
+Model.prototype.setHighlighted = function (elem) {
+ if (undefined !== elem) {
+ this.highlighted[elem.object_type][elem.name] = elem;
+ }
+};
+Model.prototype.isHighlighted = function (elem) {
+ return ((undefined !== elem) && (undefined !== this.highlighted[elem.object_type][elem.name]));
+};
--- /dev/null
+/*global vec3 */
+
+// Namespace and constants for the octtree.  Child octants are indexed
+// 0..7: top/bot is the y split, n/s the z split, w/e the x split.  The
+// x/y/z constants index vec3 components.  'randoms' is a fixed pool of
+// pre-generated random numbers cycled by nextRandom() so that random
+// decisions are cheap (and repeat with period 100).
+var octtree = {};
+octtree.top_nw = 0;
+octtree.top_ne = 1;
+octtree.top_se = 2;
+octtree.top_sw = 3;
+octtree.bot_nw = 4;
+octtree.bot_ne = 5;
+octtree.bot_se = 6;
+octtree.bot_sw = 7;
+octtree.children = [ octtree.top_nw, octtree.top_ne, octtree.top_se,
+ octtree.top_sw, octtree.bot_nw, octtree.bot_ne, octtree.bot_se,
+ octtree.bot_sw ];
+octtree.firstChildId = 0;
+octtree.lastChildId = 7;
+octtree.x = 0;
+octtree.y = 1;
+octtree.z = 2;
+octtree.randoms = [];
+octtree.randomIndex = 0;
+octtree.i = 0;
+
+// Fill the random pool once at load time.
+for (octtree.i = 0; octtree.i < 100; octtree.i += 1) {
+ octtree.randoms.push(Math.random());
+}
+
+// One octtree node covering the half-open box [xMin,xMax) x [yMin,yMax)
+// x [zMin,zMax).  'parent' and 'childId' locate it within its parent;
+// nextSiblingId (absent on the last child and on the root) lets
+// octtree.next iterate siblings in order.  Children, when present, are
+// stored under the integer keys 0..7; a leaf may hold one 'value'.
+function Octtree(xMin, xMax, yMin, yMax, zMin, zMax, parent, childId) {
+ this.xMin = xMin;
+ this.xMax = xMax;
+ this.yMin = yMin;
+ this.yMax = yMax;
+ this.zMin = zMin;
+ this.zMax = zMax;
+ this.parent = parent;
+ this.childId = childId;
+ if (undefined !== childId && childId !== octtree.lastChildId &&
+ undefined !== parent) {
+ this.nextSiblingId = childId + 1;
+ }
+
+ // Midpoints used to pick the octant when descending.
+ this.xMid = xMin + (xMax - xMin) / 2;
+ this.yMid = yMin + (yMax - yMin) / 2;
+ this.zMid = zMin + (zMax - zMin) / 2;
+}
+
+// A node is empty when it has neither children nor a stored value.
+Octtree.prototype.isEmpty = function () {
+ return (undefined === this[octtree.firstChildId]) &&
+ (undefined === this.value);
+};
+
+// Children are created as a complete set, so checking slot 0 suffices.
+Octtree.prototype.hasChildren = function () {
+ return undefined !== this[octtree.firstChildId];
+};
+
+Octtree.prototype.hasValue = function () {
+ return undefined !== this.value;
+};
+
+// The remaining methods are thin delegations to the octtree namespace
+// functions below, which contain the real implementations.
+Octtree.prototype.add = function (value) {
+ return octtree.add(this, value);
+};
+
+Octtree.prototype.del = function (value) {
+ return octtree.del(this, value);
+};
+
+Octtree.prototype.update = function () {
+ return octtree.update(this);
+};
+
+Octtree.prototype.findInRadius = function (pos, radius, limit) {
+ return octtree.findInRadius(this, pos, radius, limit);
+};
+
+Octtree.prototype.size = function () {
+ return octtree.size(this);
+};
+
+// Find the leaf node whose box contains 'pos', starting the search at
+// 'tree'.  While pos lies outside the current node, climb toward the
+// root (returning undefined if even the root does not contain it); once
+// inside, descend through the matching octant until a leaf is reached.
+octtree.findNode = function (tree, pos) {
+ while (true) {
+ if (pos[octtree.x] < tree.xMin || tree.xMax <= pos[octtree.x] ||
+ pos[octtree.y] < tree.yMin || tree.yMax <= pos[octtree.y] ||
+ pos[octtree.z] < tree.zMin || tree.zMax <= pos[octtree.z]) {
+ if (undefined === tree.parent) {
+ return undefined;
+ } else {
+ tree = tree.parent;
+ continue;
+ }
+ }
+
+ if (tree.hasChildren()) {
+ // Pick the octant: w/e by x, bot/top by y, s/n by z.
+ if (pos[octtree.x] < tree.xMid) {
+ if (pos[octtree.y] < tree.yMid) {
+ if (pos[octtree.z] < tree.zMid) {
+ tree = tree[octtree.bot_sw];
+ } else {
+ tree = tree[octtree.bot_nw];
+ }
+ } else {
+ if (pos[octtree.z] < tree.zMid) {
+ tree = tree[octtree.top_sw];
+ } else {
+ tree = tree[octtree.top_nw];
+ }
+ }
+ } else {
+ if (pos[octtree.y] < tree.yMid) {
+ if (pos[octtree.z] < tree.zMid) {
+ tree = tree[octtree.bot_se];
+ } else {
+ tree = tree[octtree.bot_ne];
+ }
+ } else {
+ if (pos[octtree.z] < tree.zMid) {
+ tree = tree[octtree.top_se];
+ } else {
+ tree = tree[octtree.top_ne];
+ }
+ }
+ }
+ } else {
+ return tree;
+ }
+ }
+};
+
+// Insert 'value' (anything with a .pos vec3) at its position's leaf.
+// Returns the leaf it ended up in, or undefined if pos is outside the
+// whole tree.  If the leaf already holds a value at a *different*
+// position, the leaf is split into eight children and both occupants
+// are re-placed ('displaced' defers the new value so the old one is
+// re-inserted first); a value at the *same* position simply replaces
+// the old one.
+octtree.add = function (tree, value) {
+ tree = octtree.findNode(tree, value.pos);
+ if (undefined === tree) {
+ return undefined;
+ } else {
+ var displaced;
+ while (undefined !== value) {
+ if (tree.hasValue()) {
+ if (tree.value.pos[octtree.x] === value.pos[octtree.x] &&
+ tree.value.pos[octtree.y] === value.pos[octtree.y] &&
+ tree.value.pos[octtree.z] === value.pos[octtree.z]) {
+ tree.value = value;
+ value = undefined;
+ } else {
+ displaced = value; // make sure we add our new value last
+ value = tree.value;
+ tree.value = undefined;
+
+ // Split the leaf: create all eight child octants.
+ tree[octtree.top_nw] = new Octtree(tree.xMin, tree.xMid,
+ tree.yMid, tree.yMax, tree.zMid, tree.zMax, tree,
+ octtree.top_nw);
+ tree[octtree.top_ne] = new Octtree(tree.xMid, tree.xMax,
+ tree.yMid, tree.yMax, tree.zMid, tree.zMax, tree,
+ octtree.top_ne);
+ tree[octtree.top_se] = new Octtree(tree.xMid, tree.xMax,
+ tree.yMid, tree.yMax, tree.zMin, tree.zMid, tree,
+ octtree.top_se);
+ tree[octtree.top_sw] = new Octtree(tree.xMin, tree.xMid,
+ tree.yMid, tree.yMax, tree.zMin, tree.zMid, tree,
+ octtree.top_sw);
+
+ tree[octtree.bot_nw] = new Octtree(tree.xMin, tree.xMid,
+ tree.yMin, tree.yMid, tree.zMid, tree.zMax, tree,
+ octtree.bot_nw);
+ tree[octtree.bot_ne] = new Octtree(tree.xMid, tree.xMax,
+ tree.yMin, tree.yMid, tree.zMid, tree.zMax, tree,
+ octtree.bot_ne);
+ tree[octtree.bot_se] = new Octtree(tree.xMid, tree.xMax,
+ tree.yMin, tree.yMid, tree.zMin, tree.zMid, tree,
+ octtree.bot_se);
+ tree[octtree.bot_sw] = new Octtree(tree.xMin, tree.xMid,
+ tree.yMin, tree.yMid, tree.zMin, tree.zMid, tree,
+ octtree.bot_sw);
+ tree = octtree.findNode(tree, value.pos);
+ }
+ } else {
+ tree.value = value;
+ value = displaced;
+ displaced = undefined;
+ if (undefined !== value) {
+ tree = octtree.findNode(tree, value.pos);
+ }
+ }
+ }
+ return tree;
+ }
+};
+
+// Remove the value stored at 'value.pos' (matched by position, not by
+// identity).  After clearing the leaf, walk upwards collapsing nodes:
+// a parent whose children are all empty loses its children; a parent
+// with exactly one occupied child *leaf* absorbs that child's value.
+// Returns the last node visited (or the unchanged node on a miss).
+octtree.del = function (tree, value) {
+ tree = octtree.findNode(tree, value.pos);
+ if (undefined === tree || (!tree.hasValue())) {
+ return tree;
+ }
+ if (tree.value.pos[octtree.x] === value.pos[octtree.x] &&
+ tree.value.pos[octtree.y] === value.pos[octtree.y] &&
+ tree.value.pos[octtree.z] === value.pos[octtree.z]) {
+ tree.value = undefined;
+ tree = tree.parent;
+ var valCount, nonEmptyChild, child, i;
+ while (undefined !== tree) {
+ valCount = 0;
+ for (i = 0; i < octtree.children.length; i += 1) {
+ child = octtree.children[i];
+ if (!tree[child].isEmpty()) {
+ valCount += 1;
+ nonEmptyChild = tree[child];
+ }
+ }
+ if (0 === valCount) {
+ // All children empty: drop them and keep collapsing upwards.
+ for (i = 0; i < octtree.children.length; i += 1) {
+ child = octtree.children[i];
+ tree[child] = undefined;
+ }
+ tree = tree.parent;
+ } else if (1 === valCount) {
+ if (nonEmptyChild.hasValue()) {
+ // Single occupied leaf child: pull its value up.
+ for (i = 0; i < octtree.children.length; i += 1) {
+ child = octtree.children[i];
+ tree[child] = undefined;
+ }
+ tree.value = nonEmptyChild.value;
+ tree = tree.parent;
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ return tree;
+};
+
+octtree.next = function (tree) {
+ while (undefined !== tree) {
+ if (undefined !== tree.nextSiblingId) {
+ return tree.parent[tree.nextSiblingId];
+ } else {
+ tree = tree.parent;
+ }
+ }
+ return undefined;
+};
+
+octtree.defined = function (a, b) {
+ if (undefined === a) {
+ return b;
+ } else {
+ return a;
+ }
+};
+
+// Commit every stored value's next_pos.  Pass one walks the subtree:
+// values whose next_pos stays inside their current leaf are updated in
+// place (vec3.set copies next_pos into pos); values that would leave
+// their leaf are collected.  Pass two deletes and re-adds the collected
+// values at their new positions.  The root's parent pointer is
+// temporarily cleared so octtree.next cannot escape above 'tree'.
+octtree.update = function (tree) {
+ var root, parent, v, movedValues, i;
+ root = tree;
+ parent = root.parent;
+ root.parent = undefined; // do this to stop next going up past tree
+
+ movedValues = [];
+ while (undefined !== tree) {
+ if (tree.hasValue()) {
+ v = tree.value;
+ if (v.next_pos[octtree.x] < tree.xMin ||
+ tree.xMax <= v.next_pos[octtree.x] ||
+ v.next_pos[octtree.y] < tree.yMin ||
+ tree.yMax <= v.next_pos[octtree.y] ||
+ v.next_pos[octtree.z] < tree.zMin ||
+ tree.zMax <= v.next_pos[octtree.z]) {
+ movedValues.push(tree.value);
+ } else {
+ vec3.set(v.next_pos, v.pos);
+ }
+ tree = octtree.next(tree);
+ } else if (tree.hasChildren()) {
+ tree = tree[octtree.firstChildId];
+ } else {
+ tree = octtree.next(tree);
+ }
+ }
+
+ root.parent = parent;
+ tree = root;
+ for (i = 0; i < movedValues.length; i += 1) {
+ v = movedValues[i];
+ // del/add may return undefined (e.g. pos outside the tree);
+ // 'defined' keeps the last good node as the re-entry point.
+ tree = octtree.defined(tree.del(v), tree);
+ vec3.set(v.next_pos, v.pos);
+ tree = octtree.defined(tree.add(v), tree);
+ }
+
+ return root;
+};
+
+// Collect up to 'limit' leaf nodes whose stored value lies within
+// 'radius' of 'pos' (limit undefined means unbounded).  Works a queue of
+// candidate nodes, pruning whole octants whose boxes cannot intersect
+// the bounding cube of the sphere; randomPush randomises front/back
+// insertion so, under a limit, the survivors are not biased toward one
+// octant.  Returns an array of *nodes* (callers read .value).
+octtree.findInRadius = function (tree, pos, radius, limit) {
+ var acc, radiusSq, worklist, x_p_r, x_m_r, y_p_r, y_m_r, z_p_r, z_m_r, xd, yd, zd;
+ acc = [];
+ radiusSq = radius * radius;
+ worklist = [tree];
+ tree = undefined;
+
+ x_p_r = 0;
+ x_m_r = 0;
+ y_p_r = 0;
+ y_m_r = 0;
+ z_p_r = 0;
+ z_m_r = 0;
+
+ while (0 < worklist.length && (undefined === limit || limit > acc.length)) {
+ tree = worklist.shift();
+
+ if (tree.isEmpty()) {
+ continue;
+ }
+
+ if (tree.hasValue()) {
+ // Leaf: exact squared-distance test against the sphere.
+ xd = Math.abs(tree.value.pos[octtree.x] - pos[octtree.x]);
+ yd = Math.abs(tree.value.pos[octtree.y] - pos[octtree.y]);
+ zd = Math.abs(tree.value.pos[octtree.z] - pos[octtree.z]);
+ xd *= xd;
+ yd *= yd;
+ zd *= zd;
+ if ((xd + yd + zd) <= radiusSq) {
+ acc.push(tree);
+ }
+ continue;
+ }
+
+ // Bounding cube of the search sphere.
+ x_p_r = pos[octtree.x] + radius;
+ x_m_r = pos[octtree.x] - radius;
+ y_p_r = pos[octtree.y] + radius;
+ y_m_r = pos[octtree.y] - radius;
+ z_p_r = pos[octtree.z] + radius;
+ z_m_r = pos[octtree.z] - radius;
+
+ if (x_p_r < tree.xMin || tree.xMax <= x_m_r || y_p_r < tree.yMin ||
+ tree.yMax <= y_m_r || z_p_r < tree.zMin || tree.zMax <= z_m_r) {
+ continue;
+ }
+
+ // Enqueue only the octants the cube overlaps.
+ if (x_m_r < tree.xMid) {
+ if (y_m_r < tree.yMid) {
+ if (z_m_r < tree.zMid) {
+ octtree.randomPush(worklist, tree[octtree.bot_sw]);
+ }
+ if (tree.zMid <= z_p_r) {
+ octtree.randomPush(worklist, tree[octtree.bot_nw]);
+ }
+ }
+ if (tree.yMid <= y_p_r) {
+ if (z_m_r < tree.zMid) {
+ octtree.randomPush(worklist, tree[octtree.top_sw]);
+ }
+ if (tree.zMid <= z_p_r) {
+ octtree.randomPush(worklist, tree[octtree.top_nw]);
+ }
+ }
+ }
+ if (tree.xMid <= x_p_r) {
+ if (y_m_r < tree.yMid) {
+ if (z_m_r < tree.zMid) {
+ octtree.randomPush(worklist, tree[octtree.bot_se]);
+ }
+ if (tree.zMid <= z_p_r) {
+ octtree.randomPush(worklist, tree[octtree.bot_ne]);
+ }
+ }
+ if (tree.yMid <= y_p_r) {
+ if (z_m_r < tree.zMid) {
+ octtree.randomPush(worklist, tree[octtree.top_se]);
+ }
+ if (tree.zMid <= z_p_r) {
+ octtree.randomPush(worklist, tree[octtree.top_ne]);
+ }
+ }
+ }
+ }
+ return acc;
+};
+
+octtree.size = function (tree) {
+ var count, root, parent;
+ root = 0;
+ root = tree;
+ parent = root.parent;
+ root.parent = undefined; // stop the traversal going above us.
+
+ while (undefined !== tree) {
+ if (tree.hasValue()) {
+ count += 1;
+ tree = octtree.next(tree);
+ } else if (tree.hasChildren()) {
+ tree = tree[octtree.firstChildId];
+ } else {
+ tree = octtree.next(tree);
+ }
+ }
+
+ root.parent = parent;
+ return count;
+};
+
+// Construct a root node covering the given box (no parent, no childId).
+octtree.create = function (xMin, xMax, yMin, yMax, zMin, zMax) {
+ return new Octtree(xMin, xMax, yMin, yMax, zMin, zMax, undefined, undefined);
+};
+
+octtree.randomPush = function (ary, e) {
+ if (octtree.nextRandom() > 0.5) {
+ ary.push(e);
+ } else {
+ ary.unshift(e);
+ }
+ return ary;
+};
+
+octtree.nextRandom = function () {
+ var r = octtree.randoms[octtree.randomIndex];
+ octtree.randomIndex += 1;
+ if (octtree.randomIndex === octtree.randoms.length) {
+ octtree.randomIndex = 0;
+ }
+ return r;
+};
--- /dev/null
+/*global octtree, vec3 */
+
+// Simple friction integrator: decays an object's velocity and advances
+// its position into next_pos (see update below).
+function Newton() {
+}
+// Velocity decay coefficient, applied as (1 - friction * elapsed).
+Newton.prototype.friction = 100;
+
+// Decay obj.velocity in place, then write obj.pos + velocity * elapsed
+// into obj.next_pos (octtree.update later commits next_pos into pos).
+// NOTE(review): for elapsed > 1/friction the scale factor goes negative
+// and reverses the velocity -- presumably elapsed is always a small
+// frame delta; confirm the caller's units.
+Newton.prototype.update = function (elapsed, obj) {
+ var incr;
+ vec3.scale(obj.velocity, 1 - (this.friction * elapsed));
+ incr = vec3.create(obj.velocity);
+ vec3.scale(incr, elapsed);
+ vec3.add(obj.pos, incr, obj.next_pos);
+};
+
+// Hooke-style spring force.  'push'/'pull' gate which side of the rest
+// length acts; octtreeRadius/octtreeLimit bound the neighbour search
+// used by update().  Instances typically override these defaults (see
+// Queue.prototype.spring).
+function Spring() {
+}
+Spring.prototype.k = 1;
+Spring.prototype.equilibriumLength = 2;
+Spring.prototype.push = true;
+Spring.prototype.pull = true;
+Spring.prototype.dampingFactor = 0.5;
+Spring.prototype.octtreeRadius = 4;
+Spring.prototype.octtreeLimit = 40;
+
+// Apply the spring between two specific objects, accelerating only obj1
+// toward/away from obj2 (obj2 is not moved).  Skips degenerate cases
+// (zero or NaN separation) and, per the push/pull switches, separations
+// on the inactive side of the rest length.  Mirrors the per-neighbour
+// body of update() below.
+Spring.prototype.apply = function (elapsed, obj1, obj2) {
+ var damper, vecOP, distanceOP, x;
+ damper = this.dampingFactor * elapsed * 100000;
+ vecOP = vec3.create();
+ distanceOP = 0;
+ x = 0;
+ vec3.subtract(obj2.pos, obj1.pos, vecOP);
+ distanceOP = vec3.length(vecOP);
+ if (!isNaN(distanceOP) && 0 !== distanceOP) {
+ x = distanceOP - this.equilibriumLength;
+ if (distanceOP > this.equilibriumLength && !this.pull) {
+ return;
+ }
+ if (distanceOP < this.equilibriumLength && !this.push) {
+ return;
+ }
+ // Acceleration along the unit direction, scaled by displacement
+ // from rest length and inversely by mass.
+ vec3.scale(vecOP, (damper * (((1 / distanceOP) * x) / obj1.mass)));
+ vec3.add(obj1.velocity, vecOP);
+ }
+};
+// Apply the spring between 'obj' and up to octtreeLimit neighbours found
+// within octtreeRadius in the octtree, accumulating acceleration into
+// obj.velocity.  Per-neighbour logic matches apply() above.
+Spring.prototype.update = function (elapsed, tree, obj) {
+ var damper, vecOP, distanceOP, x, found, i, obj1;
+ damper = this.dampingFactor * elapsed * 100000;
+ vecOP = vec3.create();
+ distanceOP = 0;
+ x = 0;
+ found = tree.findInRadius(obj.pos, this.octtreeRadius, this.octtreeLimit);
+ for (i = 0; i < found.length; i += 1) {
+ obj1 = found[i].value;
+ if (obj1 !== obj) {
+ // F = -k x where x is difference from equilibriumLength
+ // a = F / m
+ vec3.subtract(obj1.pos, obj.pos, vecOP);
+ distanceOP = vec3.length(vecOP);
+ if (!isNaN(distanceOP) && 0 !== distanceOP) {
+ x = distanceOP - this.equilibriumLength;
+ if (distanceOP > this.equilibriumLength && !this.pull) {
+ continue;
+ }
+ if (distanceOP < this.equilibriumLength && !this.push) {
+ continue;
+ }
+ vec3.scale(vecOP,
+ (damper * (((1 / distanceOP) * x) / obj.mass)));
+ vec3.add(obj.velocity, vecOP);
+ }
+ }
+ }
+};
+
+// Inverse-square attraction (or repulsion when 'repel' is set) between
+// an object and its octtree neighbours.
+function Gravity() {
+}
+Gravity.prototype.bigG = 1 / 20;
+Gravity.prototype.octtreeRadius = 5;
+Gravity.prototype.octtreeLimit = 20;
+Gravity.prototype.repel = false;
+
+// Accumulate gravitational acceleration from up to octtreeLimit
+// neighbours within octtreeRadius into obj.velocity; direction is
+// toward each neighbour, or away when 'repel' is set.
+Gravity.prototype.update = function (elapsed, tree, obj) {
+ var vecOP, distanceOP, found, i, obj1;
+ vecOP = vec3.create();
+ distanceOP = 0;
+ found = tree.findInRadius(obj.pos, this.octtreeRadius, this.octtreeLimit);
+ for (i = 0; i < found.length; i += 1) {
+ obj1 = found[i].value;
+ if (obj1 !== obj) {
+ // F = G.m1.m2 / (d.d)
+ // a = F / m1
+ // thus a = G.m2/(d.d)
+ vec3.subtract(obj1.pos, obj.pos, vecOP);
+ distanceOP = vec3.length(vecOP);
+ if ((!(isNaN(distanceOP))) && 0 !== distanceOP) {
+ vec3.scale(vecOP, (this.bigG * obj1.mass) /
+ (distanceOP * distanceOP));
+ if (this.repel) {
+ vec3.subtract(obj.velocity, vecOP);
+ } else {
+ vec3.add(obj.velocity, vecOP);
+ }
+ }
+ }
+ }
+};
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Visualiser.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+-module(rabbit_mgmt_wm_all).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+%% Webmachine entry point: no per-request setup needed beyond an empty
+%% management context record.
+init(_Config) -> {ok, #context{}}.
+
+%% This resource only renders JSON, via to_json/2.
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+%% The resource exists iff the vhost named in the request (if any) is
+%% known to the broker.
+resource_exists(ReqData, Context) ->
+ Exists = case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> false;
+ _ -> true
+ end,
+ {Exists, ReqData, Context}.
+
+%% Reply with a single JSON document aggregating every object type the
+%% visualiser needs; each section is produced by the corresponding
+%% management resource module's augmented/2.
+to_json(ReqData, Context) ->
+ Sections = [{queues, rabbit_mgmt_wm_queues},
+ {exchanges, rabbit_mgmt_wm_exchanges},
+ {bindings, rabbit_mgmt_wm_bindings},
+ {channels, rabbit_mgmt_wm_channels},
+ {connections, rabbit_mgmt_wm_connections},
+ {vhosts, rabbit_mgmt_wm_vhosts}],
+ Payload = [{Key, Mod:augmented(ReqData, Context)} || {Key, Mod} <- Sections],
+ rabbit_mgmt_util:reply(Payload, ReqData, Context).
+
+%% Delegate authorization to the standard management-plugin check.
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Visualiser.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_visualiser_mgmt).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+%% Webmachine routes under the management API prefix: /all and
+%% /all/<vhost> both serve the aggregated topology document.
+dispatcher() -> [{["all"], rabbit_mgmt_wm_all, []},
+ {["all", vhost], rabbit_mgmt_wm_all, []}].
+%% Register the visualiser's JavaScript with the management web UI.
+web_ui() -> [{javascript, <<"visualiser.js">>}].
--- /dev/null
+{application, rabbitmq_management_visualiser,
+ [{description, "RabbitMQ Visualiser"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {applications, [kernel, stdlib, rabbit, rabbitmq_management]}]}.
--- /dev/null
+This package, the RabbitMQ Management Plugin is licensed under the MPL. For the
+MPL, please see LICENSE-MPL-RabbitMQ.
+
+This package makes use of the following third party libraries:
+jQuery - http://jquery.com/ - MIT license, see LICENSE-MIT-jQuery164
+EJS - http://embeddedjs.com/ - MIT license, see LICENSE-MIT-EJS10
+Sammy - http://code.quirkey.com/sammy/ - MIT license, see LICENSE-MIT-Sammy060
+webmachine - http://webmachine.basho.com/ - Apache license, 2.0
+mochiweb - http://github.com/mochi/mochiweb/ - MIT license
+base64.js - http://code.google.com/p/stringencoders/ - BSD license, see LICENSE-BSD-base64js
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * http://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
--- /dev/null
+EJS - Embedded JavaScript
+
+Copyright (c) 2007 Edward Benson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
--- /dev/null
+Copyright (c) 2007-2013 IOLA and Ole Laursen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+
+
--- /dev/null
+Copyright (c) 2011 John Resig, http://jquery.com/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ Management Plugin.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+include ../umbrella.mk
+
+# Path to rabbitmqctl in the sibling rabbitmq-server checkout.
+RABBITMQCTL=../rabbitmq-server/scripts/rabbitmqctl
+TEST_TMPDIR=$(TMPDIR)/rabbitmq-test
+# Node name and port of the secondary test broker; callers are expected to
+# override both, e.g. `make start-other-node OTHER_NODE=hare OTHER_PORT=5673`.
+OTHER_NODE=undefined
+OTHER_PORT=undefined
+
+# Launch a second RabbitMQ node in the background with its own mnesia dir,
+# pid file and config, then block (via `rabbitmqctl wait`) until it is up.
+start-other-node:
+	rm -f $(TEST_TMPDIR)/rabbitmq-$(OTHER_NODE)-pid
+	RABBITMQ_MNESIA_BASE=$(TEST_TMPDIR)/rabbitmq-$(OTHER_NODE)-mnesia \
+	RABBITMQ_PID_FILE=$(TEST_TMPDIR)/rabbitmq-$(OTHER_NODE)-pid \
+	RABBITMQ_LOG_BASE=$(TEST_TMPDIR)/log \
+	RABBITMQ_NODENAME=$(OTHER_NODE) \
+	RABBITMQ_NODE_PORT=$(OTHER_PORT) \
+	RABBITMQ_CONFIG_FILE=etc/$(OTHER_NODE) \
+	RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+	RABBITMQ_PLUGINS_EXPAND_DIR=$(TEST_TMPDIR)/$(OTHER_NODE)-plugins-expand \
+	../rabbitmq-server/scripts/rabbitmq-server >/tmp/$(OTHER_NODE).out 2>/tmp/$(OTHER_NODE).err &
+	$(RABBITMQCTL) -n $(OTHER_NODE) wait $(TEST_TMPDIR)/rabbitmq-$(OTHER_NODE)-pid
+
+# Join the secondary node to the rabbit-test@<host> cluster (wipes its state
+# via reset first).
+cluster-other-node:
+	$(RABBITMQCTL) -n $(OTHER_NODE) stop_app
+	$(RABBITMQCTL) -n $(OTHER_NODE) reset
+	$(RABBITMQCTL) -n $(OTHER_NODE) join_cluster rabbit-test@`hostname -s`
+	$(RABBITMQCTL) -n $(OTHER_NODE) start_app
+
+# Shut the secondary node down completely.
+stop-other-node:
+	$(RABBITMQCTL) -n $(OTHER_NODE) stop
--- /dev/null
+Generic build instructions are at:
+ http://www.rabbitmq.com/plugin-development.html
+
+When installed, point your browser at:
+
+http://<server>:15672/
+
+and log in with AMQP credentials (guest/guest by default).
+
+Documentation for the HTTP API can be found at
+
+http://<server>:15672/api/
--- /dev/null
+#!/usr/bin/env python
+
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ Management Plugin.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+
+# Refuse to run on Pythons older than 2.6 (the script relies on str.format
+# and other 2.6+ features).
+import sys
+# NOTE(review): this condition would also reject Python 3.0-3.5 (major=3,
+# minor<6), but the `print` statement below is a syntax error under Python 3
+# anyway, so only the intended "Python < 2.6" rejection is reachable.
+if sys.version_info[0] < 2 or sys.version_info[1] < 6:
+    print "Sorry, rabbitmqadmin requires at least Python 2.6."
+    sys.exit(1)
+
+# Python 2 module names (renamed to configparser / http.client / urllib.parse
+# etc. in Python 3).
+from ConfigParser import ConfigParser, NoSectionError
+from optparse import OptionParser, TitledHelpFormatter
+import httplib
+import urllib
+import urlparse
+import base64
+import json
+import os
+import socket
+
+# Placeholder substituted with the real plugin version at build/package time
+# -- presumably by the RabbitMQ release tooling; TODO confirm.
+VERSION = '%%VSN%%'
+
+# Objects `rabbitmqadmin list <thing>` can enumerate.  'vhost': True means
+# the listing can be narrowed to one vhost (-V); 'json' names fields whose
+# values are JSON documents rather than flat strings.
+LISTABLE = {'connections': {'vhost': False},
+            'channels': {'vhost': False},
+            'exchanges': {'vhost': True},
+            'queues': {'vhost': True},
+            'bindings': {'vhost': True},
+            'users': {'vhost': False},
+            'vhosts': {'vhost': False},
+            'permissions': {'vhost': False},
+            'nodes': {'vhost': False},
+            'parameters': {'vhost': False,
+                           'json': ['value']},
+            'policies': {'vhost': False,
+                         'json': ['definition']}}
+
+# Objects supported by `rabbitmqadmin show`.
+SHOWABLE = {'overview': {'vhost': False}}
+
+# Columns displayed first, in this order, when listing; all other columns
+# follow sorted alphabetically (see column_sort_key below).
+PROMOTE_COLUMNS = ['vhost', 'name', 'type',
+                   'source', 'destination', 'destination_type', 'routing_key']
+
+# Management HTTP API URI templates, filled in via str.format with
+# percent-encoded arguments (see Management.parse_args).
+URIS = {
+    'exchange': '/exchanges/{vhost}/{name}',
+    'queue': '/queues/{vhost}/{name}',
+    'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}',
+    'binding_del':'/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}',
+    'vhost': '/vhosts/{name}',
+    'user': '/users/{name}',
+    'permission': '/permissions/{vhost}/{user}',
+    'parameter': '/parameters/{component}/{vhost}/{name}',
+    'policy': '/policies/{vhost}/{name}'
+    }
+
+# Verb tables: for each object type, 'mandatory' lists required name=value
+# arguments, 'optional' maps optional arguments to their defaults (None means
+# "omit unless given"), and 'json' lists arguments parsed as JSON.  The 'uri'
+# entries are filled in from URIS by the loops below.
+DECLARABLE = {
+    'exchange': {'mandatory': ['name', 'type'],
+                 'json': ['arguments'],
+                 'optional': {'auto_delete': 'false', 'durable': 'true',
+                              'internal': 'false', 'arguments': {}}},
+    'queue': {'mandatory': ['name'],
+              'json': ['arguments'],
+              'optional': {'auto_delete': 'false', 'durable': 'true',
+                           'arguments': {}, 'node': None}},
+    'binding': {'mandatory': ['source', 'destination'],
+                'json': ['arguments'],
+                'optional': {'destination_type': 'queue',
+                             'routing_key': '', 'arguments': {}}},
+    'vhost': {'mandatory': ['name'],
+              'optional': {'tracing': None}},
+    'user': {'mandatory': ['name', 'password', 'tags'],
+             'optional': {}},
+    'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'],
+                   'optional': {}},
+    'parameter': {'mandatory': ['component', 'name', 'value'],
+                  'json': ['value'],
+                  'optional': {}},
+    # Priority is 'json' to convert to int
+    'policy': {'mandatory': ['name', 'pattern', 'definition'],
+               'json': ['definition', 'priority'],
+               'optional': {'priority' : 0, 'apply-to': None}}
+    }
+
+DELETABLE = {
+    'exchange': {'mandatory': ['name']},
+    'queue': {'mandatory': ['name']},
+    'binding': {'mandatory': ['source', 'destination_type', 'destination',
+                              'properties_key']},
+    'vhost': {'mandatory': ['name']},
+    'user': {'mandatory': ['name']},
+    'permission': {'mandatory': ['vhost', 'user']},
+    'parameter': {'mandatory': ['component', 'name']},
+    'policy': {'mandatory': ['name']}
+    }
+
+CLOSABLE = {
+    'connection': {'mandatory': ['name'],
+                   'optional': {},
+                   'uri': '/connections/{name}'}
+    }
+
+PURGABLE = {
+    'queue': {'mandatory': ['name'],
+              'optional': {},
+              'uri': '/queues/{vhost}/{name}/contents'}
+    }
+
+# Verbs that are neither declare/delete-style nor list/show-style.
+EXTRA_VERBS = {
+    'publish': {'mandatory': ['routing_key'],
+                'optional': {'payload': None,
+                             'exchange': 'amq.default',
+                             'payload_encoding': 'string'},
+                'uri': '/exchanges/{vhost}/{exchange}/publish'},
+    'get': {'mandatory': ['queue'],
+            'optional': {'count': '1', 'requeue': 'true',
+                         'payload_file': None, 'encoding': 'auto'},
+            'uri': '/queues/{vhost}/{queue}/get'}
+}
+
+# Attach the shared URI templates to the declare table...
+for k in DECLARABLE:
+    DECLARABLE[k]['uri'] = URIS[k]
+
+# ...and to the delete table (deletes take no optional arguments).  Binding
+# deletion uses the longer template that includes the properties_key.
+for k in DELETABLE:
+    DELETABLE[k]['uri'] = URIS[k]
+    DELETABLE[k]['optional'] = {}
+DELETABLE['binding']['uri'] = URIS['binding_del']
+
+def short_usage():
+    # One-line usage string shown by optparse and in error messages.
+    return "rabbitmqadmin [options] subcommand"
+
+def title(name):
+    # Render `name` as an '='-underlined section heading for help output.
+    return "\n%s\n%s\n\n" % (name, '=' * len(name))
+
+def subcommands_usage():
+    # Build the `help subcommands` text from the verb tables above so the
+    # help text stays in sync with what the tool actually supports.
+    usage = """Usage
+=====
+  """ + short_usage() + """
+
+  where subcommand is one of:
+""" + title("Display")
+
+    for l in LISTABLE:
+        usage += "  list {0} [<column>...]\n".format(l)
+    for s in SHOWABLE:
+        usage += "  show {0} [<column>...]\n".format(s)
+    usage += title("Object Manipulation")
+    usage += fmt_usage_stanza(DECLARABLE, 'declare')
+    usage += fmt_usage_stanza(DELETABLE, 'delete')
+    usage += fmt_usage_stanza(CLOSABLE, 'close')
+    usage += fmt_usage_stanza(PURGABLE, 'purge')
+    usage += title("Broker Definitions")
+    usage += """  export <file>
+  import <file>
+"""
+    usage += title("Publishing and Consuming")
+    usage += fmt_usage_stanza(EXTRA_VERBS, '')
+    usage += """
+  * If payload is not specified on publish, standard input is used
+
+  * If payload_file is not specified on get, the payload will be shown on
+    standard output along with the message metadata
+
+  * If payload_file is specified on get, count must not be set
+"""
+    return usage
+
+def config_usage():
+    # Build the `help config` text: explains and demonstrates the
+    # ~/.rabbitmqadmin.conf INI-style configuration file.
+    usage = "Usage\n=====\n" + short_usage()
+    usage += "\n" + title("Configuration File")
+    usage += """  It is possible to specify a configuration file from the command line.
+  Hosts can be configured easily in a configuration file and called
+  from the command line.
+"""
+    usage += title("Example")
+    usage += """  # rabbitmqadmin.conf.example START
+
+  [host_normal]
+  hostname = localhost
+  port = 15672
+  username = guest
+  password = guest
+  declare_vhost = / # Used as default for declare / delete only
+  vhost = / # Used as default for declare / delete / list
+
+  [host_ssl]
+  hostname = otherhost
+  port = 15672
+  username = guest
+  password = guest
+  ssl = True
+  ssl_key_file = /path/to/key.pem
+  ssl_cert_file = /path/to/cert.pem
+
+  # rabbitmqadmin.conf.example END
+"""
+    usage += title("Use")
+    usage += """  rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ..."""
+    return usage
+
+def more_help():
+    # Epilog appended to --help output (see MyFormatter.format_epilog).
+    return """
+More Help
+=========
+
+For more help use the help subcommand:
+
+  rabbitmqadmin help subcommands  # For a list of available subcommands
+  rabbitmqadmin help config       # For help with the configuration file
+"""
+
+def fmt_usage_stanza(root, verb):
+    # Format one help stanza for a verb table: one line per object type,
+    # showing mandatory args as `name=...` and optional args in [brackets].
+    def fmt_args(args):
+        res = " ".join(["{0}=...".format(a) for a in args['mandatory']])
+        opts = " ".join("{0}=...".format(o) for o in args['optional'].keys())
+        if opts != "":
+            res += " [{0}]".format(opts)
+        return res
+
+    text = ""
+    if verb != "":
+        verb = " " + verb
+    for k in root.keys():
+        text += "  {0} {1} {2}\n".format(verb, k, fmt_args(root[k]))
+    return text
+
+# Fallback values applied for any option the user did not supply on the
+# command line or in the configuration file (see make_configuration).
+default_options = { "hostname" : "localhost",
+                    "port" : "15672",
+                    "declare_vhost" : "/",
+                    "username" : "guest",
+                    "password" : "guest",
+                    "ssl" : False,
+                    "verbose" : True,
+                    "format" : "table",
+                    "depth" : 1,
+                    "bash_completion" : False }
+
+
+class MyFormatter(TitledHelpFormatter):
+    # Pass the epilog through verbatim; optparse's default formatter would
+    # re-wrap it and destroy the hand-formatted help text.
+    def format_epilog(self, epilog):
+        return epilog
+
+# Module-level parser, shared by make_parser() and Management.invoke_help().
+parser = OptionParser(usage=short_usage(),
+                      formatter=MyFormatter(),
+                      epilog=more_help())
+
+def make_parser():
+    # Populate the module-level `parser` with all command-line options.
+    def add(*args, **kwargs):
+        # Append " [default: ...]" to the help text for options that have an
+        # entry in default_options.
+        key = kwargs['dest']
+        if key in default_options:
+            default = " [default: %s]" % default_options[key]
+            kwargs['help'] = kwargs['help'] + default
+        parser.add_option(*args, **kwargs)
+
+    add("-c", "--config", dest="config",
+        help="configuration file [default: ~/.rabbitmqadmin.conf]",
+        metavar="CONFIG")
+    add("-N", "--node", dest="node",
+        help="node described in the configuration file [default: 'default'" + \
+             " only if configuration file is specified]",
+        metavar="NODE")
+    add("-H", "--host", dest="hostname",
+        help="connect to host HOST" ,
+        metavar="HOST")
+    add("-P", "--port", dest="port",
+        help="connect to port PORT",
+        metavar="PORT")
+    add("-V", "--vhost", dest="vhost",
+        help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]",
+        metavar="VHOST")
+    add("-u", "--username", dest="username",
+        help="connect using username USERNAME",
+        metavar="USERNAME")
+    add("-p", "--password", dest="password",
+        help="connect using password PASSWORD",
+        metavar="PASSWORD")
+    add("-q", "--quiet", action="store_false", dest="verbose",
+        help="suppress status messages")
+    add("-s", "--ssl", action="store_true", dest="ssl",
+        help="connect with ssl")
+    add("--ssl-key-file", dest="ssl_key_file",
+        help="PEM format key file for SSL")
+    add("--ssl-cert-file", dest="ssl_cert_file",
+        help="PEM format certificate file for SSL")
+    add("-f", "--format", dest="format",
+        help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]")
+    add("-S", "--sort", dest="sort", help="sort key for listing queries")
+    add("-R", "--sort-reverse", action="store_true", dest="sort_reverse",
+        help="reverse the sort order")
+    add("-d", "--depth", dest="depth",
+        help="maximum depth to recurse for listing tables")
+    add("--bash-completion", action="store_true",
+        dest="bash_completion",
+        help="Print bash completion script")
+    add("--version", action="store_true",
+        dest="version",
+        help="Display version and exit")
+
+def default_config():
+    # Return the path of ~/.rabbitmqadmin.conf if it exists, else None.
+    # USERPROFILE is checked first so this also works on Windows.
+    home = os.getenv('USERPROFILE') or os.getenv('HOME')
+    if home is not None:
+        config_file = home + os.sep + ".rabbitmqadmin.conf"
+        if os.path.isfile(config_file):
+            return config_file
+    return None
+
+def make_configuration():
+    # Parse command-line options, then layer in (1) built-in defaults for
+    # anything unset and (2) values from the config-file section named by
+    # --node, which override everything.  Returns (options, args).
+    make_parser()
+    (options, args) = parser.parse_args()
+    # declare_vhost is deliberately cleared so the defaults loop below fills
+    # it from default_options (or the config file overrides it).
+    setattr(options, "declare_vhost", None)
+    if options.version:
+        print_version()
+    if options.config is None:
+        config_file = default_config()
+        if config_file is not None:
+            setattr(options, "config", config_file)
+    else:
+        if not os.path.isfile(options.config):
+            assert_usage(False,
+                         "Could not read config file '%s'" % options.config)
+
+    if options.node is None and options.config:
+        options.node = "default"
+    else:
+        # NOTE(review): this branch is a no-op (`options.node = options.node`);
+        # it only documents that an explicit --node is kept as-is.
+        options.node = options.node
+    for (key, val) in default_options.items():
+        if getattr(options, key) is None:
+            setattr(options, key, val)
+
+    if options.config is not None:
+        config = ConfigParser()
+        try:
+            config.read(options.config)
+            new_conf = dict(config.items(options.node))
+        except NoSectionError, error:
+            # A missing [default] section is tolerated (the config file was
+            # found implicitly); a missing explicit section is a usage error.
+            if options.node == "default":
+                pass
+            else:
+                assert_usage(False, ("Could not read section '%s' in config file" +
+                                     "  '%s':\n   %s") %
+                             (options.node, options.config, error))
+        else:
+            for key, val in new_conf.items():
+                setattr(options, key, val)
+
+    return (options, args)
+
+def assert_usage(expr, error):
+    # Exit with a usage error message unless `expr` is truthy.
+    if not expr:
+        output("\nERROR: {0}\n".format(error))
+        output("{0} --help for help\n".format(os.path.basename(sys.argv[0])))
+        sys.exit(1)
+
+def print_version():
+    output("rabbitmqadmin {0}".format(VERSION))
+    sys.exit(0)
+
+def column_sort_key(col):
+    # Sort key that puts PROMOTE_COLUMNS first (in their listed order) and
+    # all other columns after them, alphabetically.
+    if col in PROMOTE_COLUMNS:
+        return (1, PROMOTE_COLUMNS.index(col))
+    else:
+        return (2, col)
+
+def main():
+    # Entry point: dispatch `rabbitmqadmin <verb> ...` to the matching
+    # Management.invoke_<verb> method.
+    (options, args) = make_configuration()
+    if options.bash_completion:
+        print_bash_completion()
+        exit(0)
+    assert_usage(len(args) > 0, 'Action not specified')
+    mgmt = Management(options, args[1:])
+    mode = "invoke_" + args[0]
+    assert_usage(hasattr(mgmt, mode),
+                 'Action {0} not understood'.format(args[0]))
+    # NOTE(review): this recomputes the same name as `mode` above; the two
+    # expressions are equivalent.
+    method = getattr(mgmt, "invoke_%s" % args[0])
+    method()
+
+def output(s):
+    print maybe_utf8(s, sys.stdout)
+
+def die(s):
+    # Print an error to stderr and terminate with a non-zero status.
+    sys.stderr.write(maybe_utf8("*** {0}\n".format(s), sys.stderr))
+    exit(1)
+
+def maybe_utf8(s, stream):
+    # Encode to UTF-8 only when the stream is not a terminal.
+    if stream.isatty():
+        # It will have an encoding, which Python will respect
+        return s
+    else:
+        # It won't have an encoding, and Python will pick ASCII by default
+        return s.encode('utf-8')
+
+class Management:
+    # Thin client for the RabbitMQ management HTTP API plus the
+    # `invoke_<verb>` command handlers dispatched from main().
+    def __init__(self, options, args):
+        self.options = options
+        self.args = args
+
+    def get(self, path):
+        return self.http("GET", "/api%s" % path, "")
+
+    def put(self, path, body):
+        return self.http("PUT", "/api%s" % path, body)
+
+    def post(self, path, body):
+        return self.http("POST", "/api%s" % path, body)
+
+    def delete(self, path):
+        return self.http("DELETE", "/api%s" % path, "")
+
+    def http(self, method, path, body):
+        # Perform one HTTP(S) request with basic auth.  Exits the process
+        # (die) on connection failure and common 4xx statuses, follows a 301
+        # redirect, and returns the raw response body on success.
+        if self.options.ssl:
+            conn = httplib.HTTPSConnection(self.options.hostname,
+                                           self.options.port,
+                                           self.options.ssl_key_file,
+                                           self.options.ssl_cert_file)
+        else:
+            conn = httplib.HTTPConnection(self.options.hostname,
+                                          self.options.port)
+        headers = {"Authorization":
+                       "Basic " + base64.b64encode(self.options.username + ":" +
+                                                   self.options.password)}
+        if body != "":
+            headers["Content-Type"] = "application/json"
+        try:
+            conn.request(method, path, body, headers)
+        except socket.error, e:
+            die("Could not connect: {0}".format(e))
+        resp = conn.getresponse()
+        if resp.status == 400:
+            # The management API reports 400 with a JSON {'reason': ...} body.
+            die(json.loads(resp.read())['reason'])
+        if resp.status == 401:
+            die("Access refused: {0}".format(path))
+        if resp.status == 404:
+            die("Not found: {0}".format(path))
+        if resp.status == 301:
+            # Follow a permanent redirect, remembering the new host/port so
+            # later requests go straight there.  NOTE(review): only 301 is
+            # followed; 302/307/308 fall through to the success/error check.
+            url = urlparse.urlparse(resp.getheader('location'))
+            [host, port] = url.netloc.split(':')
+            self.options.hostname = host
+            self.options.port = int(port)
+            return self.http(method, url.path + '?' + url.query, body)
+        if resp.status < 200 or resp.status > 400:
+            # NOTE(review): any status in 200..400 is treated as success here
+            # (400 itself was already handled above).
+            raise Exception("Received %d %s for path %s\n%s"
+                            % (resp.status, resp.reason, path, resp.read()))
+        return resp.read()
+
+    def verbose(self, string):
+        # Print status output unless -q/--quiet was given.
+        if self.options.verbose:
+            output(string)
+
+    def get_arg(self):
+        assert_usage(len(self.args) == 1, 'Exactly one argument required')
+        return self.args[0]
+
+    def invoke_help(self):
+        # `rabbitmqadmin help [subcommands|config]`, or full option help when
+        # no topic is given.  Always exits.
+        if len(self.args) == 0:
+            parser.print_help()
+        else:
+            help_cmd = self.get_arg()
+            if help_cmd == 'subcommands':
+                usage = subcommands_usage()
+            elif help_cmd == 'config':
+                usage = config_usage()
+            else:
+                assert_usage(False, """help topic must be one of:
+  subcommands
+  config""")
+            print usage
+        exit(0)
+
+    def invoke_publish(self):
+        # Publish a message; reads the payload from stdin (base64-encoded so
+        # arbitrary bytes survive the JSON round trip) when none is given.
+        (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
+        upload['properties'] = {} # TODO do we care here?
+        if not 'payload' in upload:
+            data = sys.stdin.read()
+            upload['payload'] = base64.b64encode(data)
+            upload['payload_encoding'] = 'base64'
+        resp = json.loads(self.post(uri, json.dumps(upload)))
+        if resp['routed']:
+            self.verbose("Message published")
+        else:
+            self.verbose("Message published but NOT routed")
+
+    def invoke_get(self):
+        # Fetch message(s) from a queue; optionally write a single message's
+        # payload to payload_file (count must then be 1).
+        (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get'])
+        payload_file = 'payload_file' in upload and upload['payload_file'] or None
+        assert_usage(not payload_file or upload['count'] == '1',
+                     'Cannot get multiple messages using payload_file')
+        result = self.post(uri, json.dumps(upload))
+        if payload_file:
+            write_payload_file(payload_file, result)
+            columns = ['routing_key', 'exchange', 'message_count',
+                       'payload_bytes', 'redelivered']
+            format_list(result, columns, {}, self.options)
+        else:
+            format_list(result, [], {}, self.options)
+
+    def invoke_export(self):
+        # Dump all broker definitions (as served by /api/definitions) to a file.
+        path = self.get_arg()
+        definitions = self.get("/definitions")
+        f = open(path, 'w')
+        f.write(definitions)
+        f.close()
+        self.verbose("Exported definitions for %s to \"%s\""
+                     % (self.options.hostname, path))
+
+    def invoke_import(self):
+        # Upload previously exported broker definitions from a file.
+        path = self.get_arg()
+        f = open(path, 'r')
+        definitions = f.read()
+        f.close()
+        self.post("/definitions", definitions)
+        self.verbose("Imported definitions for %s from \"%s\""
+                     % (self.options.hostname, path))
+
+    def invoke_list(self):
+        cols = self.args[1:]
+        (uri, obj_info) = self.list_show_uri(LISTABLE, 'list', cols)
+        format_list(self.get(uri), cols, obj_info, self.options)
+
+    def invoke_show(self):
+        cols = self.args[1:]
+        (uri, obj_info) = self.list_show_uri(SHOWABLE, 'show', cols)
+        # `show` endpoints return a single object; wrap in [] so the list
+        # formatters can handle it.
+        format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)
+
+    def list_show_uri(self, obj_types, verb, cols):
+        # Build the query URI for list/show: optional vhost path segment plus
+        # columns/sort/sort_reverse query parameters.
+        obj_type = self.args[0]
+        assert_usage(obj_type in obj_types,
+                     "Don't know how to {0} {1}".format(verb, obj_type))
+        obj_info = obj_types[obj_type]
+        uri = "/%s" % obj_type
+        query = []
+        if obj_info['vhost'] and self.options.vhost:
+            uri += "/%s" % urllib.quote_plus(self.options.vhost)
+        if cols != []:
+            query.append("columns=" + ",".join(cols))
+        sort = self.options.sort
+        if sort:
+            query.append("sort=" + sort)
+        if self.options.sort_reverse:
+            query.append("sort_reverse=true")
+        query = "&".join(query)
+        if query != "":
+            uri += "?" + query
+        return (uri, obj_info)
+
+    def invoke_declare(self):
+        (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
+        # Bindings are created with POST (the server assigns the properties
+        # key); everything else is an idempotent PUT.
+        if obj_type == 'binding':
+            self.post(uri, json.dumps(upload))
+        else:
+            self.put(uri, json.dumps(upload))
+        self.verbose("{0} declared".format(obj_type))
+
+    def invoke_delete(self):
+        (obj_type, uri, upload) = self.declare_delete_parse(DELETABLE)
+        self.delete(uri)
+        self.verbose("{0} deleted".format(obj_type))
+
+    def invoke_close(self):
+        (obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE)
+        self.delete(uri)
+        self.verbose("{0} closed".format(obj_type))
+
+    def invoke_purge(self):
+        (obj_type, uri, upload) = self.declare_delete_parse(PURGABLE)
+        self.delete(uri)
+        self.verbose("{0} purged".format(obj_type))
+
+    def declare_delete_parse(self, root):
+        # Shared front end for declare/delete/close/purge: validate the
+        # object type then parse the remaining name=value arguments.
+        assert_usage(len(self.args) > 0, 'Type not specified')
+        obj_type = self.args[0]
+        assert_usage(obj_type in root,
+                     'Type {0} not recognised'.format(obj_type))
+        obj = root[obj_type]
+        (uri, upload) = self.parse_args(self.args[1:], obj)
+        return (obj_type, uri, upload)
+
+    def parse_args(self, args, obj):
+        # Convert name=value arguments into (uri, upload-dict) according to
+        # the verb's mandatory/optional/json specification.
+        mandatory = obj['mandatory']
+        optional = obj['optional']
+        uri_template = obj['uri']
+        upload = {}
+        # Seed with truthy optional defaults (None/''/{} defaults are omitted
+        # so the server applies its own defaults).
+        for k in optional.keys():
+            if optional[k]:
+                upload[k] = optional[k]
+        for arg in args:
+            assert_usage("=" in arg,
+                         'Argument "{0}" not in format name=value'.format(arg))
+            (name, value) = arg.split("=", 1)
+            assert_usage(name in mandatory or name in optional.keys(),
+                         'Argument "{0}" not recognised'.format(name))
+            if 'json' in obj and name in obj['json']:
+                upload[name] = self.parse_json(value)
+            else:
+                upload[name] = value
+        for m in mandatory:
+            assert_usage(m in upload.keys(),
+                         'mandatory argument "{0}" required'.format(m))
+        if 'vhost' not in mandatory:
+            upload['vhost'] = self.options.vhost or self.options.declare_vhost
+        # Percent-encode string values for substitution into the URI template;
+        # destination_type also contributes its first letter ('q'/'e') as
+        # destination_char for the binding URIs.
+        uri_args = {}
+        for k in upload:
+            v = upload[k]
+            if v and isinstance(v, basestring):
+                uri_args[k] = urllib.quote_plus(v)
+                if k == 'destination_type':
+                    uri_args['destination_char'] = v[0]
+        uri = uri_template.format(**uri_args)
+        return (uri, upload)
+
+    def parse_json(self, text):
+        try:
+            return json.loads(text)
+        except ValueError:
+            print "Could not parse JSON:\n  {0}".format(text)
+            sys.exit(1)
+
+def format_list(json_list, columns, args, options):
+    # Render a JSON array (as returned by the API) using the output format
+    # selected by -f: the two JSON formats short-circuit here; the rest
+    # dispatch to a Lister subclass from FORMATS.
+    format = options.format
+    formatter = None
+    if format == "raw_json":
+        output(json_list)
+        return
+    elif format == "pretty_json":
+        # Positional args are (skipkeys=False, ensure_ascii=False,
+        # check_circular=True, allow_nan=True, sort_keys=True) with indent=2.
+        enc = json.JSONEncoder(False, False, True, True, True, 2)
+        output(enc.encode(json.loads(json_list)))
+        return
+    else:
+        # NOTE(review): an unrecognised format raises KeyError here before
+        # the friendlier assert_usage below can fire; the assert only guards
+        # the None placeholders for raw_json/pretty_json.
+        formatter = FORMATS[format]
+    assert_usage(formatter != None,
+                 "Format {0} not recognised".format(format))
+    formatter_instance = formatter(columns, args, options)
+    formatter_instance.display(json_list)
+
+class Lister:
+    # Base class for the tabular output formats: flattens a JSON array of
+    # objects into (columns, rows) and hands them to a subclass's
+    # display_list.  Subclasses set self.columns/self.obj_info/self.options.
+    def verbose(self, string):
+        if self.options.verbose:
+            output(string)
+
+    def display(self, json_list):
+        # With explicit columns we recurse without limit (only the named
+        # columns are emitted); otherwise recursion is capped by --depth.
+        depth = sys.maxint
+        if len(self.columns) == 0:
+            depth = int(self.options.depth)
+        (columns, table) = self.list_to_table(json.loads(json_list), depth)
+        if len(table) > 0:
+            self.display_list(columns, table)
+        else:
+            self.verbose("No items")
+
+    def list_to_table(self, items, max_depth):
+        # Flatten nested dicts into dotted column names ('a.b.c'), two-pass:
+        # first collect the column set, then fill one row per item.
+        columns = {}
+        column_ix = {}
+        row = None
+        table = []
+
+        def add(prefix, depth, item, fun):
+            # Walk one object, calling fun(column_name, scalar_value) for
+            # every leaf reachable within max_depth.
+            for key in item:
+                column = prefix == '' and key or (prefix + '.' + key)
+                subitem = item[key]
+                if type(subitem) == dict:
+                    # Fields declared as 'json' in the verb table are kept as
+                    # serialised JSON rather than flattened.
+                    if self.obj_info.has_key('json') and key in self.obj_info['json']:
+                        fun(column, json.dumps(subitem))
+                    else:
+                        if depth < max_depth:
+                            add(column, depth + 1, subitem, fun)
+                elif type(subitem) == list:
+                    # The first branch has slave nodes in queues in
+                    # mind (which come out looking decent); the second
+                    # one has applications in nodes (which look less
+                    # so, but what would look good?).
+                    if [x for x in subitem if type(x) != unicode] == []:
+                        serialised = " ".join(subitem)
+                    else:
+                        serialised = json.dumps(subitem)
+                    fun(column, serialised)
+                else:
+                    fun(column, subitem)
+
+        def add_to_columns(col, val):
+            columns[col] = True
+
+        def add_to_row(col, val):
+            # `row` is rebound per item in the loop below.
+            if col in column_ix:
+                row[column_ix[col]] = unicode(val)
+
+        if len(self.columns) == 0:
+            # No explicit columns: derive them from the data, promoted
+            # columns first (see column_sort_key).
+            for item in items:
+                add('', 1, item, add_to_columns)
+            columns = columns.keys()
+            columns.sort(key=column_sort_key)
+        else:
+            columns = self.columns
+
+        for i in xrange(0, len(columns)):
+            column_ix[columns[i]] = i
+        for item in items:
+            row = len(columns) * ['']
+            add('', 1, item, add_to_row)
+            table.append(row)
+
+        return (columns, table)
+
+class TSVList(Lister):
+    # Tab-separated output (-f tsv); the header row is suppressed by -q.
+    def __init__(self, columns, obj_info, options):
+        self.columns = columns
+        self.obj_info = obj_info
+        self.options = options
+
+    def display_list(self, columns, table):
+        head = "\t".join(columns)
+        self.verbose(head)
+
+        for row in table:
+            line = "\t".join(row)
+            output(line)
+
+class LongList(Lister):
+    # One "column: value" line per field, records separated by dashed rules
+    # (-f long).
+    def __init__(self, columns, obj_info, options):
+        self.columns = columns
+        self.obj_info = obj_info
+        self.options = options
+
+    def display_list(self, columns, table):
+        sep = "\n" + "-" * 80 + "\n"
+        # Right-align the column labels to the widest column name.
+        max_width = 0
+        for col in columns:
+            max_width = max(max_width, len(col))
+        fmt = "{0:>" + unicode(max_width) + "}: {1}"
+        output(sep)
+        for i in xrange(0, len(table)):
+            for j in xrange(0, len(columns)):
+                output(fmt.format(columns[j], table[i][j]))
+            output(sep)
+
+class TableList(Lister):
+    # ASCII-art table with +---+ borders (-f table, the default format).
+    def __init__(self, columns, obj_info, options):
+        self.columns = columns
+        self.obj_info = obj_info
+        self.options = options
+
+    def display_list(self, columns, table):
+        # Treat the header as row 0 so it participates in width calculation.
+        total = [columns]
+        total.extend(table)
+        self.ascii_table(total)
+
+    def ascii_table(self, rows):
+        table = ""
+        # Width of each column = widest cell in that column (header included).
+        col_widths = [0] * len(rows[0])
+        for i in xrange(0, len(rows[0])):
+            for j in xrange(0, len(rows)):
+                col_widths[i] = max(col_widths[i], len(rows[j][i]))
+        self.ascii_bar(col_widths)
+        # Header row is centred ('^'); data rows are left-aligned ('<').
+        self.ascii_row(col_widths, rows[0], "^")
+        self.ascii_bar(col_widths)
+        for row in rows[1:]:
+            self.ascii_row(col_widths, row, "<")
+        self.ascii_bar(col_widths)
+
+    def ascii_row(self, col_widths, row, align):
+        txt = "|"
+        for i in xrange(0, len(col_widths)):
+            fmt = " {0:" + align + unicode(col_widths[i]) + "} "
+            txt += fmt.format(row[i]) + "|"
+        output(txt)
+
+    def ascii_bar(self, col_widths):
+        # Horizontal rule: +----+----+ sized to the column widths.
+        txt = "+"
+        for w in col_widths:
+            txt += ("-" * (w + 2)) + "+"
+        output(txt)
+
+class KeyValueList(Lister):
+    # One record per line as space-separated key="value" pairs (-f kvp).
+    def __init__(self, columns, obj_info, options):
+        self.columns = columns
+        self.obj_info = obj_info
+        self.options = options
+
+    def display_list(self, columns, table):
+        for i in xrange(0, len(table)):
+            row = []
+            for j in xrange(0, len(columns)):
+                row.append("{0}=\"{1}\"".format(columns[j], table[i][j]))
+            output(" ".join(row))
+
+# TODO handle spaces etc in completable names
+class BashList(Lister):
+    # Emit only the 'name' column, space-separated, for consumption by the
+    # generated bash completion script (-f bash).  Prints nothing when no
+    # 'name' column exists.
+    def __init__(self, columns, obj_info, options):
+        self.columns = columns
+        self.obj_info = obj_info
+        self.options = options
+
+    def display_list(self, columns, table):
+        ix = None
+        for i in xrange(0, len(columns)):
+            if columns[i] == 'name':
+                ix = i
+        if ix is not None:
+            res = []
+            for row in table:
+                res.append(row[ix])
+            output(" ".join(res))
+
+# Map of -f format names to Lister subclasses; the two JSON formats are
+# handled directly in format_list and so map to None.
+FORMATS = {
+    'raw_json' : None, # Special cased
+    'pretty_json' : None, # Ditto
+    'tsv' : TSVList,
+    'long' : LongList,
+    'table' : TableList,
+    'kvp' : KeyValueList,
+    'bash' : BashList
+}
+
+def write_payload_file(payload_file, json_list):
+    # Write the payload of the first message in a `get` response to a file,
+    # decoding base64 payloads back to raw bytes first.
+    result = json.loads(json_list)[0]
+    payload = result['payload']
+    payload_encoding = result['payload_encoding']
+    f = open(payload_file, 'w')
+    if payload_encoding == 'base64':
+        data = base64.b64decode(payload)
+    else:
+        data = payload
+    f.write(data)
+    f.close()
+
+def print_bash_completion():
+ # Print (via output) a bash programmable-completion script for
+ # rabbitmqadmin. The completable words are generated from the
+ # LISTABLE / SHOWABLE / DECLARABLE / DELETABLE / CLOSABLE / PURGABLE
+ # and FORMATS tables, so the script stays in sync with the commands
+ # this tool actually supports. Some completions (vhosts, users,
+ # object names) shell out to a live broker via
+ # `rabbitmqadmin -q -f bash list ...`.
+ script = """# This is a bash completion script for rabbitmqadmin.
+# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
+# to get tab completion. rabbitmqadmin must be on your PATH for this to work.
+_rabbitmqadmin()
+{
+ local cur prev opts base
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+
+ opts="list show declare delete close purge import export get publish help"
+ fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse"
+
+ case "${prev}" in
+ list)
+ COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ show)
+ COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ declare)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ delete)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ close)
+ COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ purge)
+ COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ export)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ import)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ help)
+ opts="subcommands config"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -H)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ --host)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ -V)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --vhost)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -u)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --username)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -f)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+ --format)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+
+"""
+ # Add a completion case for the singular form of each listable type
+ # (e.g. 'queue' completes from 'list queues') by stripping the final
+ # character of the plural name; queries a live broker for the names.
+ for l in LISTABLE:
+ key = l[0:len(l) - 1]
+ script += " " + key + """)
+ opts="$(rabbitmqadmin -q -f bash list """ + l + """)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+"""
+ # Close the case statement; anything unmatched falls through to
+ # completing top-level subcommands and global flags.
+ script += """ *)
+ ;;
+ esac
+
+ COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
+ return 0
+}
+complete -F _rabbitmqadmin rabbitmqadmin
+"""
+ output(script)
+
+# Script entry point when executed directly (not imported as a module).
+if __name__ == "__main__":
+ main()
--- /dev/null
+[{rabbitmq_management, [{listener,[{port, 15674}]}]}].
--- /dev/null
+[{rabbitmq_management, [{listener,[{port, 15673}]}]}].
--- /dev/null
+%% We test sample retention separately in rabbit_mgmt_test_db_unit,
+%% but for rabbit_mgmt_test_db we want to make sure samples never
+%% expire.
+[{rabbitmq_management, [{sample_retention_policies,
+ %% List of {MaxAgeSecs, IfTimestampDivisibleBySecs}
+ %% (the huge MaxAgeSecs means "keep samples effectively forever")
+ [{global, [{10000000000000, 1}]},
+ {basic, [{10000000000000, 1}]},
+ {detailed, [{10000000000000, 1}]}]}
+ ]}
+].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Per-request user context; the password field defaults to 'none'.
+-record(context, {user, password = none}).
+%% Query range (first/last/incr) - presumably sample timestamps and the
+%% step between them; confirm against the stats DB code.
+-record(range, {first, last, incr}).
+%% Statistics held as a set of diffs over a base value.
+-record(stats, {diffs, base}).
+
+%% Realm string sent in WWW-Authenticate headers for HTTP basic auth.
+-define(AUTH_REALM, "Basic realm=\"RabbitMQ Management\"").
--- /dev/null
+%% Shared test header: EUnit assertions, the AMQP client records, and
+%% the HTTP status-code / endpoint constants used by the management
+%% plugin's HTTP API tests.
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(OK, 200).
+-define(CREATED, 201).
+-define(NO_CONTENT, 204).
+-define(BAD_REQUEST, 400).
+-define(NOT_AUTHORISED, 401).
+%%-define(NOT_FOUND, 404). Defined for AMQP by amqp_client.hrl (as 404)
+-define(PREFIX, "http://localhost:15672/api").
+%% httpc seems to get racy when using HTTP 1.1
+-define(HTTPC_OPTS, [{version, "HTTP/1.0"}]).
--- /dev/null
+jQuery is "Copyright (c) 2010 John Resig" and is covered by the MIT
+license. It was downloaded from http://jquery.com/
+
+EJS is "Copyright (c) 2007 Edward Benson" and is covered by the MIT
+license. It was downloaded from http://embeddedjs.com/
+
+Sammy is "Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC" and is
+covered by the MIT license. It was downloaded from
+http://code.quirkey.com/sammy/
+
+ExplorerCanvas is "Copyright 2006 Google Inc" and is covered by the
+Apache License version 2.0. It was downloaded from
+http://code.google.com/p/explorercanvas/
+
+Flot is "Copyright (c) 2007-2013 IOLA and Ole Laursen" and is covered
+by the MIT license. It was downloaded from
+http://www.flotcharts.org/
--- /dev/null
+# Umbrella-build settings for the rabbitmq-management plugin package.
+RELEASABLE:=true
+DEPS:=rabbitmq-web-dispatch webmachine-wrapper rabbitmq-server rabbitmq-erlang-client rabbitmq-management-agent rabbitmq-test
+# Test selection and coverage knobs (overridable on the make command line).
+FILTER:=all
+COVER:=false
+# Commands and config the umbrella test harness runs with / without a broker.
+WITH_BROKER_TEST_COMMANDS:=rabbit_test_runner:run_in_broker(\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\")
+WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/etc/rabbit-test
+STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),\"/tmp/rabbitmq-multi-node/plugins\")
+WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/rabbitmqadmin-test.py
+
+# At app-construction time, copy the web UI assets and stamp the plugin
+# version into the bundled rabbitmqadmin script.
+CONSTRUCT_APP_PREREQS:=$(shell find $(PACKAGE_DIR)/priv -type f) $(PACKAGE_DIR)/bin/rabbitmqadmin
+define construct_app_commands
+ cp -r $(PACKAGE_DIR)/priv $(APP_DIR)
+ sed 's/%%VSN%%/$(VERSION)/' $(PACKAGE_DIR)/bin/rabbitmqadmin > $(APP_DIR)/priv/www/cli/rabbitmqadmin
+endef
+
+# The tests require erlang/OTP R14 (httpc issue)
+$(PACKAGE_DIR)+pre-test::
+ if [ "`erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,8]), halt().'`" != true ] ; then \
+ echo "Need Erlang/OTP R14A or higher" ; \
+ exit 1 ; \
+ fi
+ rm -rf /tmp/rabbitmq-multi-node/plugins
+ mkdir -p /tmp/rabbitmq-multi-node/plugins/plugins
+ cp -p $(UMBRELLA_BASE_DIR)/rabbitmq-management/dist/*.ez /tmp/rabbitmq-multi-node/plugins/plugins
+
--- /dev/null
+<html>
+ <head>
+ <title>RabbitMQ Management HTTP API</title>
+ <style>
+ body { font: 12px Verdana,sans-serif; color: #444; padding: 8px 35px; }
+ td, th { font: 12px Verdana,sans-serif; color: #444; }
+ h1 { font-size: 2em; }
+ h2 { font-size: 1.5em; }
+ td.path { font-family: monospace; }
+ th { font-size: 1em; font-weight: bold; }
+ table { border-collapse: collapse; }
+ table th, table td { vertical-align: top; border: 1px solid #bbb; padding: 5px; }
+ code { background: #ffa; }
+ pre { background: black; color: #0f0; padding: 10px; word-wrap: break-word;}
+ table pre { background: #ffa; color: black; }
+ </style>
+ </head>
+ <body>
+ <h1>RabbitMQ Management HTTP API</h1>
+
+ <h2>Introduction</h2>
+
+ <p>Apart from this help page, all URIs will serve only resources
+ of type <code>application/json</code>, and will require HTTP basic
+ authentication (using the standard RabbitMQ user database). The
+ default user is guest/guest.</p>
+
+ <p>Many URIs require the name of a virtual host as part of the
+ path, since names only uniquely identify objects within a virtual
+ host. As the default virtual host is called "<code>/</code>", this
+ will need to be encoded as "<code>%2f</code>".</p>
+
+ <p>PUTing a resource creates it. The JSON object you upload must
+ have certain mandatory keys (documented below) and may have
+ optional keys. Other keys are ignored. Missing mandatory keys
+ constitute an error.</p>
+
+ <p>Since bindings do not have names or IDs in AMQP we synthesise
+ one based on all its properties. Since predicting this name is
+ hard in the general case, you can also create bindings by POSTing
+ to a factory URI. See the example below.</p>
+
+ <p>Many URIs return lists. Such URIs can have the query string
+ parameters <code>sort</code> and <code>sort_reverse</code>
+ added. <code>sort</code> allows you to select a primary field to
+ sort by, and <code>sort_reverse</code> will reverse the sort order
+ if set to <code>true</code>. The <code>sort</code> parameter can
+ contain subfields separated by dots. This allows you to sort by a
+ nested component of the listed items; it does not allow you to
+ sort by more than one field. See the example below.</p>
+
+ <p>You can also restrict what information is returned per item
+ with the <code>columns</code> parameter. This is a comma-separated
+ list of subfields separated by dots. See the example below.</p>
+
+ <p>Most of the GET queries return many fields per
+ object. See <a href="/doc/stats.html">the separate stats
+ documentation</a>.</p>
+
+ <h2>Examples</h2>
+
+ <p>A few quick examples for Windows and Unix, using the command line
+ tool <code>curl</code>:</p>
+
+ <ul>
+ <li>
+ Get a list of vhosts:
+<pre>:: Windows
+C:\> curl -i -u guest:guest http://localhost:15672/api/vhosts
+
+# Unix
+$ curl -i -u guest:guest http://localhost:15672/api/vhosts
+
+HTTP/1.1 200 OK
+Server: MochiWeb/1.1 WebMachine/1.10.0 (never breaks eye contact)
+Date: Mon, 16 Sep 2013 12:00:02 GMT
+Content-Type: application/json
+Content-Length: 30
+
+[{"name":"/","tracing":false}]
+</pre>
+ </li>
+ <li>
+ Get a list of channels, fast publishers first, restricting the info
+ items we get back:
+<pre>:: Windows
+C:\> curl -i -u guest:guest "http://localhost:15672/api/channels?sort=message_stats.publish_details.rate&sort_reverse=true&columns=name,message_stats.publish_details.rate,message_stats.deliver_get_details.rate"
+
+# Unix
+$ curl -i -u guest:guest 'http://localhost:15672/api/channels?sort=message_stats.publish_details.rate&sort_reverse=true&columns=name,message_stats.publish_details.rate,message_stats.deliver_get_details.rate'
+
+HTTP/1.1 200 OK
+Server: MochiWeb/1.1 WebMachine/1.10.0 (never breaks eye contact)
+Date: Mon, 16 Sep 2013 12:01:17 GMT
+Content-Type: application/json
+Content-Length: 219
+Cache-Control: no-cache
+
+[{"message_stats":{"publish_details":{"rate" <i>... (remainder elided)</i></pre>
+ </li>
+ <li>
+ Create a new vhost:
+<pre>:: Windows
+C:\> curl -i -u guest:guest -H "content-type:application/json" ^
+ -XPUT http://localhost:15672/api/vhosts/foo
+
+# Unix
+$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT http://localhost:15672/api/vhosts/foo
+
+HTTP/1.1 204 No Content
+Server: MochiWeb/1.1 WebMachine/1.10.0 (never breaks eye contact)
+Date: Mon, 16 Sep 2013 12:03:00 GMT
+Content-Type: application/json
+Content-Length: 0</pre>
+ <p>Note: you must specify <code>application/json</code> as the
+ mime type.</p>
+ <p>Note: the name of the object is not needed in the JSON
+ object uploaded, since it is in the URI. As a virtual host
+ has no properties apart from its name, this means you do not
+ need to specify a body at all!</p>
+ </li>
+ <li>
+ Create a new exchange in the default virtual host:
+<pre>:: Windows
+C:\> curl -i -u guest:guest -H "content-type:application/json" ^
+ -XPUT -d"{""type"":""direct"",""durable"":true}" ^
+ http://localhost:15672/api/exchanges/%2f/my-new-exchange
+
+# Unix
+$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT -d'{"type":"direct","durable":true}' \
+ http://localhost:15672/api/exchanges/%2f/my-new-exchange
+
+HTTP/1.1 204 No Content
+Server: MochiWeb/1.1 WebMachine/1.10.0 (never breaks eye contact)
+Date: Mon, 16 Sep 2013 12:04:00 GMT
+Content-Type: application/json
+Content-Length: 0</pre>
+ <p>Note: we never return a body in response to a PUT or
+ DELETE, unless it fails.</p>
+ </li>
+ <li>
+ And delete it again:
+<pre>:: Windows
+C:\> curl -i -u guest:guest -H "content-type:application/json" ^
+ -XDELETE http://localhost:15672/api/exchanges/%2f/my-new-exchange
+
+# Unix
+$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XDELETE http://localhost:15672/api/exchanges/%2f/my-new-exchange
+
+HTTP/1.1 204 No Content
+Server: MochiWeb/1.1 WebMachine/1.10.0 (never breaks eye contact)
+Date: Mon, 16 Sep 2013 12:05:30 GMT
+Content-Type: application/json
+Content-Length: 0</pre>
+ </li>
+ </ul>
+
+ <h2>Reference</h2>
+
+ <table>
+ <tr>
+ <th>GET</th>
+ <th>PUT</th>
+ <th>DELETE</th>
+ <th>POST</th>
+ <th>Path</th>
+ <th>Description</th>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/overview</td>
+ <td>Various random bits of information that describe the whole
+ system.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/cluster-name</td>
+ <td>Name identifying this RabbitMQ cluster.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/nodes</td>
+ <td>A list of nodes in the RabbitMQ cluster.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/nodes/<i>name</i></td>
+ <td>
+ An individual node in the RabbitMQ cluster. Add
+ "?memory=true" to get memory statistics.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/extensions</td>
+ <td>A list of extensions to the management plugin.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/definitions<br/>
+ /api/all-configuration <em>(deprecated)</em>
+ </td>
+ <td>
+ The server definitions - exchanges, queues, bindings, users,
+ virtual hosts, permissions and parameters. Everything apart from
+ messages. POST to upload an existing set of definitions. Note
+ that:
+ <ul>
+ <li>The definitions are merged. Anything already existing is
+ untouched.</li>
+ <li>Conflicts will cause an error.</li>
+ <li>In the event of an error you will be left with a
+ part-applied set of definitions.</li>
+ </ul>
+ For convenience you may upload a file from a browser to this
+ URI (i.e. you can use <code>multipart/form-data</code> as
+ well as <code>application/json</code>) in which case the
+ definitions should be uploaded as a form field named
+ "file".
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/connections</td>
+ <td>A list of all open connections.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/connections/<i>name</i></td>
+ <td>
+ An individual connection. DELETEing it will close the
+ connection. Optionally set the "X-Reason" header when
+ DELETEing to provide a reason.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/connections/<i>name</i>/channels</td>
+ <td>
+ List of all channels for a given connection.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/channels</td>
+ <td>A list of all open channels.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/channels/<i>channel</i></td>
+ <td>Details about an individual channel.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges</td>
+ <td>A list of all exchanges.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i></td>
+ <td>A list of all exchanges in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i></td>
+ <td>An individual exchange. To PUT an exchange, you will need a body looking something like this:
+<pre>{"type":"direct","auto_delete":false,"durable":true,"internal":false,"arguments":[]}</pre>
+ The <code>type</code> key is mandatory; other keys are optional.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i>/bindings/source</td>
+ <td>A list of all bindings in which a given exchange is the source.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i>/bindings/destination</td>
+ <td>A list of all bindings in which a given exchange is the destination.</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i>/publish</td>
+ <td>
+ Publish a message to a given exchange. You will need a body
+ looking something like:
+ <pre>{"properties":{},"routing_key":"my key","payload":"my body","payload_encoding":"string"}</pre>
+ All keys are mandatory. The <code>payload_encoding</code>
+ key should be either "string" (in which case the payload
+ will be taken to be the UTF-8 encoding of the payload field)
+ or "base64" (in which case the payload field is taken to be
+ base64 encoded).<br/>
+ If the message is published successfully, the response will
+ look like:
+ <pre>{"routed": true}</pre>
+ <code>routed</code> will be true if the message was sent to
+ at least one queue.
+ <p>Please note that the publish / get paths in the HTTP API are
+ intended for injecting test messages, diagnostics etc - they do not
+ implement reliable delivery and so should be treated as a sysadmin's
+ tool rather than a general API for messaging.</p>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/queues</td>
+ <td>A list of all queues.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i></td>
+ <td>A list of all queues in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i></td>
+ <td>An individual queue. To PUT a queue, you will need a body looking something like this:
+<pre>{"auto_delete":false,"durable":true,"arguments":[],"node":"rabbit@smacmullen"}</pre>
+ All keys are optional.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/bindings</td>
+ <td>A list of all bindings on a given queue.</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/contents</td>
+ <td>Contents of a queue. DELETE to purge. Note you can't GET this.</td>
+ </tr>
+
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/actions</td>
+ <td>
+ Actions that can be taken on a queue. POST a body like:
+ <pre>{"action":"sync"}</pre> Currently the actions which are
+ supported are <code>sync</code> and <code>cancel_sync</code>.
+ </td>
+ </tr>
+
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/get</td>
+ <td>
+ Get messages from a queue. (This is not an HTTP GET as it
+ will alter the state of the queue.) You should post a body looking like:
+ <pre>{"count":5,"requeue":true,"encoding":"auto","truncate":50000}</pre>
+ <ul>
+ <li><code>count</code> controls the maximum number of
+ messages to get. You may get fewer messages than this if
+ the queue cannot immediately provide them.</li>
+ <li><code>requeue</code> determines whether the messages will be
+ removed from the queue. If requeue is true they will be requeued -
+ but their <code>redelivered</code> flag will be set.</li>
+ <li><code>encoding</code> must be either "auto" (in which case the
+ payload will be returned as a string if it is valid UTF-8, and
+ base64 encoded otherwise), or "base64" (in which case the payload
+ will always be base64 encoded).</li>
+ <li>If <code>truncate</code> is present it will truncate the
+ message payload if it is larger than the size given (in bytes).</li>
+ </ul>
+ <p><code>truncate</code> is optional; all other keys are mandatory.</p>
+ <p>Please note that the publish / get paths in the HTTP API are
+ intended for injecting test messages, diagnostics etc - they do not
+ implement reliable delivery and so should be treated as a sysadmin's
+ tool rather than a general API for messaging.</p>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/bindings</td>
+ <td>A list of all bindings.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/bindings/<i>vhost</i></td>
+ <td>A list of all bindings in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>exchange</i>/q/<i>queue</i></td>
+ <td>A list of all bindings between an exchange and a
+ queue. Remember, an exchange and a queue can be bound
+ together many times! To create a new binding, POST to this
+ URI. You will need a body looking something like this:
+ <pre>{"routing_key":"my_routing_key","arguments":[]}</pre>
+ All keys are optional.
+ The response will contain a <code>Location</code> header
+ telling you the URI of your new binding.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>exchange</i>/q/<i>queue</i>/<i>props</i></td>
+ <td>An individual binding between an exchange and a queue.
+ The <i>props</i> part of the URI is a "name" for the binding
+ composed of its routing key and a hash of its arguments.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>source</i>/e/<i>destination</i></td>
+ <td>
+ A list of all bindings between two exchanges. Similar to
+ the list of all bindings between an exchange and a queue,
+ above.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>source</i>/e/<i>destination</i>/<i>props</i></td>
+ <td>
+ An individual binding between two exchanges. Similar to
+ the individual binding between an exchange and a queue,
+ above.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts</td>
+ <td>A list of all vhosts.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>name</i></td>
+ <td>An individual virtual host. As a virtual host usually only
+ has a name, you do not need an HTTP body when PUTing one of
+ these. To enable / disable tracing, provide a body looking like:
+ <pre>{"tracing":true}</pre></td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>name</i>/permissions</td>
+ <td>A list of all permissions for a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/users</td>
+ <td>A list of all users.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/users/<i>name</i></td>
+ <td>An individual user. To PUT a user, you will need a body looking something like this:
+<pre>{"password":"secret","tags":"administrator"}</pre>
+or:
+<pre>{"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=", "tags":"administrator"}</pre>
+ The <code>tags</code> key is mandatory. Either
+ <code>password</code> or <code>password_hash</code>
+ must be set. Setting <code>password_hash</code> to "" will ensure the
+ user cannot use a password to log in. <code>tags</code> is a
+ comma-separated list of tags for the user. Currently recognised tags
+ are "administrator", "monitoring" and "management".
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/users/<i>user</i>/permissions</td>
+ <td>A list of all permissions for a given user.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/whoami</td>
+ <td>Details of the currently authenticated user.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/permissions</td>
+ <td>A list of all permissions for all users.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/permissions/<i>vhost</i>/<i>user</i></td>
+ <td>An individual permission of a user and virtual host. To PUT a permission, you will need a body looking something like this:
+<pre>{"configure":".*","write":".*","read":".*"}</pre>
+ All keys are mandatory.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/parameters</td>
+ <td>A list of all parameters.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/parameters/<i>component</i></td>
+ <td>A list of all parameters for a given component.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/parameters/<i>component</i>/<i>vhost</i></td>
+ <td>A list of all parameters for a given component and virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/parameters/<i>component</i>/<i>vhost</i>/<i>name</i></td>
+ <td>An individual parameter. To PUT a parameter, you will need a body looking something like this:
+<pre>{"vhost": "/","component":"federation","name":"local_username","value":"guest"}</pre>
+</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/policies</td>
+ <td>A list of all policies.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/policies/<i>vhost</i></td>
+ <td>A list of all policies in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/policies/<i>vhost</i>/<i>name</i></td>
+ <td>
+ An individual policy. To PUT a policy, you will need a body looking something like this:
+<pre>{"pattern":"^amq.", "definition": {"federation-upstream-set":"all"}, "priority":0, "apply-to": "all"}</pre>
+ <code>pattern</code> and <code>definition</code> are mandatory, <code>priority</code> and <code>apply-to</code> are optional.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/aliveness-test/<i>vhost</i></td>
+ <td>
+ Declares a test queue, then publishes and consumes a
+ message. Intended for use by monitoring tools. If everything
+ is working correctly, will return HTTP status 200 with
+ body: <pre>{"status":"ok"}</pre> Note: the test queue will
+ not be deleted (to prevent queue churn if this is
+ repeatedly pinged).
+ </td>
+ </tr>
+ </table>
+ </body>
+</html>
--- /dev/null
+<html>
+ <head>
+ <title>rabbitmqadmin</title>
+ <style>
+ body { font: 12px Verdana,sans-serif; color: #444; padding: 8px 35px; }
+ h1 { font-size: 2em; }
+ code { background: #ffa; }
+ </style>
+ </head>
+ <body>
+ <h1>rabbitmqadmin</h1>
+
+ <p>
+ Download it from <a href="rabbitmqadmin">here</a> (Right click,
+ Save as), make executable, and drop it in your path. Note that
+ many browsers will rename the
+ file to <code>rabbitmqadmin.txt</code>. You will need Python
+ 2.x, 2.6 or later (i.e. not Python 3).
+ </p>
+
+ <p>
+ See <a href="http://www.rabbitmq.com/management-cli.html">the
+ rabbitmqadmin page on the website</a> for more information on
+ its use, or invoke <code>rabbitmqadmin --help</code> for usage
+ instructions. Windows users will need to ensure Python is on
+ their path, and invoke rabbitmqadmin as <code>python.exe
+ rabbitmqadmin</code>.
+ </p>
+
+ </body>
+</html>
--- /dev/null
+#login { text-align: center; }
--- /dev/null
+body { font: 12px Verdana,sans-serif; color: #484848; padding: 8px 35px; }
+
+#login table { margin: auto; }
+#login p { text-align: center; }
+
+#logo { margin-bottom: 20px; }
+#logo img { border: none; }
+#login-version { float: right; color: #444; }
+#login-version form { float: left; display: block; margin-left: 10px; }
+#login-version form input { padding: 5px; border: 1px solid #ddd !important; color: #666 !important; background: none; }
+#login-details { float: left; }
+#login-details p { padding: 0 0 0.2em 0; margin: 0; text-align: right; }
+#login-details b { color: black; font-weight: normal; }
+#menu ul { float: left; padding: 0; margin: 0; }
+#menu li { float: left; list-style-type: none; padding: 0 2em 0 0; }
+#menu li a { display: block; padding: 0.5em; }
+#menu { border-bottom: 1px solid #FF8C00; overflow: auto; width: 100%; }
+#menu a { color: #444; font-weight: bold; text-decoration: none; }
+#main a { color: black; font-weight: bold; text-decoration: none; }
+#main a:hover { color: #222; }
+#menu a.selected { background-color: #F60; color:white; -moz-border-radius: 8px 8px 0 0; border-radius: 8px 8px 0 0; }
+#menu a:hover { color: black; }
+#menu a.selected:hover { color: white; }
+#login-details a { color: black; }
+#vhost-form { float: right; padding: 0; margin: 0; }
+
+#main.with-rhs { margin-right: 210px; }
+#rhs { float: right; width: 200px; padding-top: 10px; }
+#rhs ul { padding: 0; margin: 0; }
+#rhs li { list-style-type: none; padding: 0; }
+#rhs a { display: block; padding: 0.5em; color: #444; font-weight: bold; text-decoration: none; }
+#rhs a.selected { background-color: #F60; color:white; -moz-border-radius: 8px 0 0 8px; border-radius: 8px 0 0 8px; }
+
+h1 { font-size: 2em; font-weight: normal; padding: 0; }
+b, dt { color: black; font-weight: normal; }
+dd { margin-bottom: 5px; }
+div.box, div.section, div.section-hidden { overflow: auto; width: 100%; }
+
+.left { float: left; }
+.right { float: right; }
+.clear { clear: both; }
+
+.help, .rate-options { color: #888; cursor: pointer; }
+.help:hover, .rate-options:hover { color: #444; }
+
+.tag-link { color: #444; cursor: pointer; }
+.tag-link:hover { color: #888; }
+
+.filter { overflow: auto; width: 100%; margin-bottom: 10px; }
+.filter table { float: left; }
+.filter label { margin-top: 4px;}
+.filter input#filter-regex-mode { vertical-align: middle; }
+.filter p#filter-truncate { float: right; padding: 4px; margin: 0; }
+.filter p.filter-warning { border-radius: 5px; background: #ff8; }
+.filter-active { background: #99ebff; border-radius: 5px; }
+.filter-highlight { background: #99ebff; }
+
+input#truncate { width: 50px; text-align: right; }
+
+table { border-collapse: collapse; }
+table th { font-weight: normal; color: black; }
+table th, table td { font: 12px/17px Verdana,sans-serif; padding: 4px; }
+table.list th, table.list td { vertical-align: top; min-width: 5em; width: auto; }
+
+table.list { border-width: 1px; border-bottom: 1px solid #ccc; margin-bottom: 1em; }
+table.list th, table.list td { border-left: 1px solid #ccc; border-right: 1px solid #ccc; }
+table.list th { text-align: center; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; }
+table.list td a { display: block; width: 100%; }
+table.list th a.sort { display: block; width: 100%; cursor: pointer; }
+table.list th a.sort .arrow { color: #888; }
+table.list td p { margin: 0; padding: 1px 0 0 0; }
+table.list td p.warning { margin: 0; padding: 5px; }
+
+#main .internal-purpose, #main .internal-purpose * { color: #aaa; }
+
+div.section table.list, div.section-hidden table.list { margin-bottom: 0; }
+
+div.memory-bar { margin: 10px 0 5px 0; border-radius: 5px; border: 1px solid #ddd; float: left; }
+div.memory-section { float: left; height: 30px; }
+div.colour-key { float: left; width: 10px; height: 10px; margin: 3px 5px 0 0;}
+div.memory-info { float: left; padding: 10px 10px 0 0; }
+button.memory-button { margin-top: 10px; }
+
+div.memory_connection_procs { background: #955300; }
+div.memory_queue_procs { background: #da7900; }
+div.memory_plugins { background: #ffc884; }
+div.memory_other_proc { background: #fff4e7; }
+div.memory_mnesia { background: #005395; }
+div.memory_msg_index { background: #0079da; }
+div.memory_mgmt_db { background: #84c8ff; }
+div.memory_other_ets { background: #e7f4ff; }
+div.memory_binary { background: #666; }
+div.memory_code { background: #999; }
+div.memory_atom { background: #bbb; }
+div.memory_other_system { background: #ddd; }
+
+sub { display: block; font-size: 0.8em; color: #888; }
+small { font-size: 0.8em; color: #888; }
+#main sub a { color: #888; }
+#main sub a:hover { color: #444; }
+.unknown { color: #888; }
+
+table.facts { float: left; margin-right: 50px; }
+table.facts th { color: black; text-align: right; border-right: 1px solid #ccc; }
+table.facts th, table.facts td { vertical-align: top; padding: 0 10px 10px 10px; }
+
+table.facts-long th { text-align: right; font-weight: bold; }
+table.facts-long th, table.facts-long td { vertical-align: top; }
+
+table.facts-fixed-width th, table.facts-fixed-width td { width: 130px; }
+
+table.mini th { border: none; padding: 0 2px 2px 2px; text-align: right; }
+table.mini td { border: none; padding: 0 2px 2px 2px; }
+
+tr.alt1>td { background: #eee; }
+tr.alt2>td { background: #fff; }
+
+div.status-bar, div.status-red, div.status-yellow, div.status-green, div.status-grey { text-align: center; }
+div.status-bar-main, div.status-red, div.status-yellow, div.status-green, div.status-grey { border-radius: 3px; -moz-border-radius: 3px; padding: 3px; }
+div.status-bar sub { white-space: nowrap; }
+
+div.status-bar .grey, div.status-grey { background: #ddd; }
+div.status-bar .red, div.status-red { background: #ff7a7a; color: white; }
+div.status-bar .yellow, div.status-yellow { background: #ffff7b; }
+div.status-bar .green, div.status-green { background: #98f898; }
+div.status-bar .red-dark { background: #e24545; color: white; }
+/* yellow-dark and green-dark can never happen */
+div.status-bar .red *, div.status-bar .red-dark *, div.status-red * { color: white; }
+
+div.status-key-grey { background: #ddd; }
+div.status-key-red { background: #ff7a7a; color: white; }
+div.status-key-yellow { background: #ffff7b; }
+div.status-key-green { background: #98f898; }
+
+.l { text-align: left !important; }
+.c { text-align: center !important; }
+.r { text-align: right !important; }
+.t { vertical-align: top !important; }
+
+p.status-ok { color: #888; text-align: right; }
+p.status-error { background: #ff7a7a; color: white; margin-top: 50px !important; }
+p.status-error th { background: white; }
+p.warning, div.form-popup-warn { background: #ff8; }
+div.form-popup-info { background: #8f8; }
+div.form-popup-help { text-align: left !important; background: #f8f8f8; border: 1px solid #ccc; }
+div.form-popup-warn, div.form-popup-info, div.form-popup-help { margin: 20px; padding: 15px; border-radius: 10px; -moz-border-radius: 10px; text-align: center; max-width: 600px; z-index: 1; display: none; position: fixed; min-width: 500px; }
+div.form-popup-warn span, div.form-popup-info span, div.form-popup-help span, div.form-popup-rate-options span { color: black; font-weight: bold; cursor: pointer; }
+
+div.form-popup-rate-options {
+ z-index: 1; position: absolute; right: 35px; padding: 15px; background: white; border-left: 1px solid #ccc; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; border-radius: 10px 0 0 10px; -moz-border-radius: 10px 0 0 10px;
+}
+
+.popup-owner { background: #eee; padding: 5px; border-radius: 3px; -moz-border-radius: 3px; }
+
+p.status-error, p.warning { margin: 20px; padding: 15px; border-radius: 10px; -moz-border-radius: 10px; text-align: center; }
+
+.highlight { min-width: 120px; font-size: 120%; text-align:center; padding:10px; background-color: #ddd; margin: 0 20px 0 0; color: #888; border-radius: 10px; -moz-border-radius: 10px; }
+.highlight strong { font-size: 2em; display: block; color: #444; font-weight: normal; }
+.highlight, .micro-highlight { float: left; }
+
+.chart { margin: 0 20px 20px 0; float: left; }
+.chart-small { width: 400px; height: 100px; }
+.chart-medium { width: 600px; height: 200px; }
+.chart-large { width: 800px; height: 300px; }
+
+.chart-legend { float: left; }
+
+.micro-highlight { min-width: 120px; font-size: 100%; text-align:center; padding:10px; background-color: #ddd; margin: 0 20px 0 0; color: #888; border-radius: 10px; -moz-border-radius: 10px; }
+.micro-highlight a { font-weight: normal !important; color: #888 !important; }
+.micro-highlight strong { font-size: 120%; color: #444; font-weight: normal; }
+
+div.section, div.section-hidden { margin: 0 0 1em 0; }
+div.section-invisible div.hider { display: none; }
+div.section div.hider, div.section-hidden div.hider { padding: 0.5em 0; }
+div.section h2, div.section-hidden h2 { font-size: 1em; padding: 5px 5px 5px 25px; cursor: pointer; margin: 0; }
+div.section h2:hover, div.section-hidden h2:hover { color: black; }
+div.section-invisible h2 { background: white; border-bottom: 1px solid #ddd; background-image: url(../img/collapse.png); background-repeat:no-repeat; background-position:4px 4px; }
+div.section-visible h2 { background: #f8f8f8; border-bottom: 1px solid #ddd; background-image: url(../img/expand.png); background-repeat:no-repeat; background-position:4px 4px; }
+
+form { margin: 0; }
+form.inline-form { float: left; }
+form.inline-form-right { float: right; }
+input, select { padding: 0.2em; }
+input[type=text], input[type=password] { font: 1.1em Andale Mono, Lucidatypewriter, Courier New, Courier, monospace; border: 1px solid #ccc; }
+textarea { width: 600px; height: 200px; border: 1px solid #ccc; }
+.mand { color: #f88; padding: 0 5px;}
+input[type=submit].wait { cursor: wait; }
+
+table.form { margin-bottom: 0.5em; }
+table.form th { text-align: right; vertical-align: top; }
+table.form input[type=text], table.form input[type=password] { width: 200px; }
+table.form input[type=text].wide, table.form input[type=password].wide { width: 300px; }
+table.form select { width: 200px; }
+table.form select.narrow { width: 110px; }
+table.form .multifield { margin: 0; padding: 0; }
+table.form .multifield td { margin: 0; padding: 0; vertical-align: top; }
+table.form .multifield td.equals { padding: 3px; }
+table.form .multifield td input { float: left; }
+table.form .multifield td select { width: 70px; display: block; float: left; margin-left: 5px; }
+table.form label { margin-top: 5px; display: block; }
+
+table.form table.subform { margin-bottom: 5px; }
+table.form table.subform th { text-align: left; }
+table.form table.subform th, table.form table.subform td { padding: 0; }
+
+.multifield-sub { border: 1px solid #ddd; background: #f8f8f8; padding: 10px; border-radius: 10px; -moz-border-radius: 10px; float: left; margin-bottom: 10px; }
+
+label.radio { padding: 5px; border: 1px solid #eee; cursor: pointer; border-radius: 5px; -moz-border-radius: 5px; }
+
+table.two-col-layout { width: 100%; }
+table.two-col-layout > tbody > tr > td { width: 50%; vertical-align: top; }
+
+input[type=submit], button { padding: 8px; border-radius: 5px; -moz-border-radius: 5px; color: black !important; text-decoration: none; cursor: pointer; font-weight: normal; }
+table.list input[type=submit], table.list button { padding: 4px; }
+
+input[type=submit], button {
+ background: #ddf;
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #ddf),color-stop(1, #bbf));
+ border: 1px solid #88d;
+}
+
+input[type=submit]:hover, button:hover {
+ background: #bbf;
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #bbf),color-stop(1, #99d));
+ border: 1px solid #66b;
+}
+
+input[type=submit][disabled], button[disabled] { background: #aac; }
+input[type=submit][disabled]:hover, button[disabled]:hover { background: #aac; }
+
+h3 { padding: 0 0 2px 0; margin: 1em 0 1em 0; font-size: 1em; border-bottom: 1px solid #E4E4E4; font-weight: normal; }
+
+acronym { background: #add; color: #222; padding: 2px 4px; border-radius: 2px; -moz-border-radius: 2px; border: none; cursor: default; }
+
+acronym.warning { background: #daa; }
+
+.status-red acronym, .status-yellow acronym, .status-green acronym, .status-grey acronym, small acronym, acronym.normal { background: none; color: inherit; padding: 0; border-bottom: 1px dotted; cursor: default; }
+
+acronym.type { background: none; color: inherit; padding: 0; border-bottom: 1px dotted #ddd; cursor: default; }
+
+div.bindings-wrapper { display: inline-block; }
+div.bindings-wrapper table { margin: auto; }
+div.bindings-wrapper p { margin: 10px; text-align: center; }
+div.bindings-wrapper span.exchange { border: 1px solid #bbb; padding: 10px; border-radius: 10px; -moz-border-radius: 10px; }
+div.bindings-wrapper span.queue { border: 1px solid #666; padding: 10px; }
+div.bindings-wrapper td span.exchange, div.bindings-wrapper td span.queue { background: white; display: block; }
+div.bindings-wrapper span.exchange a, div.bindings-wrapper span.queue a { font-weight: normal !important; }
+div.bindings-wrapper p.arrow { font-size: 200%; }
+
+#footer { overflow: auto; width: 100%; }
+#footer-nav { padding-top: 5px; }
+#footer-nav ul { float: left; list-style-type: none; padding: 0; margin: 0;}
+#footer-nav ul li { float: left; border-right: 1px solid #ccc; padding: 2px 5px 2px 0;}
+#footer-nav ul li + li { border-right: none; padding: 2px 5px;}
+#footer-nav ul li a { color: #888; text-decoration: none; }
+#footer-nav ul li a:hover { color: #444; }
+
+#update-form { float: right; margin: 0; padding: 0; }
+
+#status { clear: both; }
+#scratch { display: none; }
+
+tr.alt1>td {
+ background: -moz-linear-gradient(center top, #f0f0f0 0%,#e0e0e0 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #f0f0f0),color-stop(1, #e0e0e0));
+}
+
+tr.alt2>td {
+ background: -moz-linear-gradient(center top, #f8f8f8 0%,#ffffff 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #f8f8f8),color-stop(1, #ffffff));
+}
+
+.highlight, .mini-highlight, .micro-highlight {
+ background: -moz-linear-gradient(center top, #f0f0f0 0%,#e0e0e0 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #f0f0f0),color-stop(1, #e0e0e0));
+ border: 1px solid #e0e0e0;
+}
--- /dev/null
+<html>
+ <head>
+ <title>RabbitMQ Management HTTP Stats</title>
+ <style>
+ body { font: 12px Verdana,sans-serif; color: #444; padding: 8px 35px; }
+ td, th { font: 12px Verdana,sans-serif; color: #444; }
+ h1 { font-size: 2em; }
+ h2 { font-size: 1.5em; }
+ td.path { font-family: monospace; }
+      th { font-size: 1em; font-weight: bold; }
+ table { border-collapse: collapse; }
+ table th, table td { vertical-align: top; border: 1px solid #bbb; padding: 5px; }
+ code { background: #ffa; }
+ pre { background: black; color: #0f0; padding: 10px; word-wrap: break-word;}
+ table pre { background: #ffa; color: black; }
+ </style>
+ </head>
+ <body>
+ <h1>RabbitMQ Management HTTP Stats</h1>
+ <h2>Introduction</h2>
+ <p>
+ Most of the GET requests you can issue to the HTTP API return
+ JSON objects with a large number of keys. While a few of these
+ keys represent things you set yourself in a PUT request or AMQP
+ command (e.g. queue durability or arguments), most of them
+ represent statistics to do with the object in question. This
+ page attempts to document them.
+ </p>
+
+ <p>
+ It should be read in conjunction with the manual page
+ for <code>rabbitmqctl</code> (see your installation if on Unix / Linux,
+ or <a href="http://www.rabbitmq.com/man/rabbitmqctl.1.man.html">the
+ RabbitMQ website</a> for the latest version). Any field which can
+ be returned by a command of the form <code>rabbitmqctl
+ list_<i>something</i></code> will also be returned in the
+ equivalent part of the HTTP API, so all those keys are not
+ documented here. However, the HTTP API also adds a lot of extra
+ fields which are not available in <code>rabbitmqctl</code>.
+ </p>
+
+ <h2>_details objects</h2>
+ <p>
+ Many fields represent a count of some kind: queue length,
+ messages acknowledged, bytes received and so on. Such absolute
+ counts returned by the HTTP API will often have a
+ corresponding <code>_details</code> object which offers
+ information on how this count has changed. So for example, from
+ a queue:
+ </p>
+<pre> "messages": 123619,
+ "messages_details": {
+ "avg": 41206.333333333336,
+ "avg_rate": 1030.1583333333333,
+ "rate": 24723.8,
+ "samples": [
+ {
+ "sample": 123619,
+ "timestamp": 1400680560000
+ },
+ {
+ "sample": 0,
+ "timestamp": 1400680500000
+ },
+ {
+ "sample": 0,
+ "timestamp": 1400680440000
+ }
+ ]
+ }</pre>
+
+ <p>
+ Here we have a <code>messages</code> count (the total messages
+ in the queue), with some additional data:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>avg</code></td>
+ <td>
+ The average value for the requested time period (see below).
+ </td>
+ </tr>
+ <tr>
+ <td><code>avg_rate</code></td>
+ <td>
+ The average rate for the requested time period.
+ </td>
+ </tr>
+ <tr>
+ <td><code>rate</code></td>
+ <td>
+ How much the count has changed per second in the most recent
+ sampling interval.
+ </td>
+ </tr>
+ <tr>
+ <td><code>samples</code></td>
+ <td>
+ Snapshots showing how the value has changed over the
+ requested time period.
+ </td>
+ </tr>
+ </table>
+
+ <p>
+ <code>avg</code>, <code>avg_rate</code> and <code>samples</code>
+ will only appear if you request a specific time period by
+ appending query parameters to the URL. To do this you need to
+ set an age and an increment for the samples you want. The end of
+ the range returned will always correspond to the present.
+ </p>
+ <p>
+ Use <code>msg_rates_age</code>
+ and <code>msg_rates_incr</code> to return samples for messages
+ sent and received, <code>data_rates_age</code>
+ and <code>data_rates_incr</code> to return samples for bytes
+ sent and received, and <code>lengths_age</code>
+ and <code>lengths_incr</code> to return samples for queue
+ lengths. For example,
+ appending <code>?lengths_age=3600&lengths_incr=60</code> will
+ return the last hour's data on queue lengths, with a sample for
+ every minute.
+ </p>
+
+ <h2>message_stats objects</h2>
+ <p>
+ Many objects (including queues, exchanges and channels) will
+ return counts of messages passing through them. These are
+ included in a <code>message_stats</code> object (which in turn
+ will contain <code>_details</code> objects for each count, as
+ described above).
+ </p>
+ <p>
+ These can contain:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>publish</code></td>
+ <td>
+ Count of messages published.
+ </td>
+ </tr>
+ <tr>
+ <td><code>publish_in</code></td>
+ <td>
+ Count of messages published "in" to an exchange, i.e. not
+ taking account of routing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>publish_out</code></td>
+ <td>
+ Count of messages published "out" of an exchange,
+ i.e. taking account of routing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>confirm</code></td>
+ <td>
+ Count of messages confirmed.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliver</code></td>
+ <td>
+ Count of messages delivered in acknowledgement mode to consumers.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliver_noack</code></td>
+ <td>
+ Count of messages delivered in no-acknowledgement mode to consumers.
+ </td>
+ </tr>
+ <tr>
+ <td><code>get</code></td>
+ <td>
+ Count of messages delivered in acknowledgement mode in
+ response to basic.get.
+ </td>
+ </tr>
+ <tr>
+ <td><code>get_noack</code></td>
+ <td>
+ Count of messages delivered in no-acknowledgement mode in
+ response to basic.get.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliver_get</code></td>
+ <td>
+ Sum of all four of the above.
+ </td>
+ </tr>
+ <tr>
+ <td><code>redeliver</code></td>
+ <td>
+ Count of subset of messages in <code>deliver_get</code>
+ which had the redelivered flag set.
+ </td>
+ </tr>
+ <tr>
+ <td><code>return</code></td>
+ <td>
+ Count of messages returned to publisher as unroutable.
+ </td>
+ </tr>
+ </table>
+
+ <p>
+ Only fields for which some activity has taken place will appear.
+ </p>
+
+ <h2>Detailed message stats objects</h2>
+ <p>
+ In addition, queues, exchanges and channels will return a
+ breakdown of message stats for each of their neighbours
+ (i.e. adjacent objects in the chain: channel -> exchange ->
+ queue -> channel).
+ </p>
+ <p>
+ As this possibly constitutes a large quantity of data, it is
+ only returned when querying a single channel, queue or exchange
+ rather than a list. Note also that the default sample retention
+ policy means that these detailed message stats do not retain
+ historical data for more than a few seconds.
+ </p>
+ <p>
+ The detailed message stats objects have different names
+ depending on where they are (documented below). Each set of
+ detailed stats consists of a list of objects with two fields,
+ one identifying the partner object and one <code>stats</code>
+ which is a message_stats object as described above.
+ </p>
+ <p>
+ For example, from a queue:
+ </p>
+ <pre> "incoming": [
+ {
+ "stats": {
+ "publish": 352593,
+ "publish_details": {
+ "rate": 100.2
+ }
+ },
+ "exchange": {
+ "name": "my-exchange",
+ "vhost": "/"
+ }
+      },
+ {
+ "stats": {
+ "publish": 543784,
+ "publish_details": {
+ "rate": 54.6
+ }
+ },
+ "exchange": {
+ "name": "amq.topic",
+ "vhost": "/"
+ }
+ }
+ ],</pre>
+
+ <p>
+ This queue is currently receiving messages from two exchanges:
+ 100.2 msg/s from "my-exchange" and 54.6 msg/s from "amq.topic".
+ </p>
+
+ <h2>/api/overview</h2>
+
+ <p>
+ This has the following fields:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>cluster_name</code></td>
+ <td>
+ The name of the entire cluster, as set with <code>rabbitmqctl
+ set_cluster_name</code>.
+ </td>
+ </tr>
+ <tr>
+ <td><code>erlang_full_version</code></td>
+ <td>
+ A string with extended detail about the Erlang VM and how it
+ was compiled, for the node connected to.
+ </td>
+ </tr>
+ <tr>
+ <td><code>erlang_version</code></td>
+ <td>
+ A string with the Erlang version of the node connected
+ to. As clusters should all run the same version this can be
+ taken as representing the cluster.
+ </td>
+ </tr>
+ <tr>
+ <td><code>exchange_types</code></td>
+ <td>
+ A list of all exchange types available.
+ </td>
+ </tr>
+ <tr>
+ <td><code>listeners</code></td>
+ <td>
+ All (non-HTTP) network listeners for all nodes in the
+ cluster. (See <code>contexts</code>
+ in <code>/api/nodes</code> for HTTP).
+ </td>
+ </tr>
+ <tr>
+ <td><code>management_version</code></td>
+ <td>
+ Version of the management plugin in use.
+ </td>
+ </tr>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ A message_stats object for everything the user can see - for
+ all vhosts regardless of permissions in the case
+ of <code>monitoring</code> and <code>administrator</code>
+ users, and for all vhosts the user has access to for other
+ users.
+ </td>
+ </tr>
+ <tr>
+ <td><code>node</code></td>
+ <td>
+ The name of the cluster node this management plugin instance
+ is running on.
+ </td>
+ </tr>
+ <tr>
+ <td><code>object_totals</code></td>
+ <td>
+ An object containing global counts of all connections,
+ channels, exchanges, queues and consumers, subject to the
+ same visibility rules as for <code>message_stats</code>.
+ </td>
+ </tr>
+ <tr>
+ <td><code>queue_totals</code></td>
+ <td>
+ An object containing sums of
+ the <code>messages</code>, <code>messages_ready</code>
+ and <code>messages_unacknowledged</code> fields for all
+ queues, again subject to the same visibility rules as
+ for <code>message_stats</code>.
+ </td>
+ </tr>
+ <tr>
+ <td><code>rabbitmq_version</code></td>
+ <td>
+ Version of RabbitMQ on the node which processed this request.
+ </td>
+ </tr>
+ <tr>
+ <td><code>statistics_db_node</code></td>
+ <td>
+ Name of the cluster node hosting the management statistics database.
+ </td>
+ </tr>
+ <tr>
+ <td><code>statistics_level</code></td>
+ <td>
+ Whether the node is running fine or coarse statistics.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/nodes</h2>
+
+ <p>
+ This has the following fields:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>applications</code></td>
+ <td>
+ List of all Erlang applications running on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>auth_mechanisms</code></td>
+ <td>
+ List of all SASL authentication mechanisms installed on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>contexts</code></td>
+ <td>
+ List of all HTTP listeners on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>disk_free</code></td>
+ <td>
+ Disk free space in bytes.
+ </td>
+ </tr>
+ <tr>
+ <td><code>disk_free_alarm</code></td>
+ <td>
+ Whether the disk alarm has gone off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>disk_free_limit</code></td>
+ <td>
+ Point at which the disk alarm will go off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>exchange_types</code></td>
+ <td>
+ Exchange types available on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>fd_total</code></td>
+ <td>
+ File descriptors available.
+ </td>
+ </tr>
+ <tr>
+ <td><code>fd_used</code></td>
+ <td>
+ Used file descriptors.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mem_used</code></td>
+ <td>
+ Memory used in bytes.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mem_alarm</code></td>
+ <td>
+ Whether the memory alarm has gone off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mem_limit</code></td>
+ <td>
+ Point at which the memory alarm will go off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>name</code></td>
+ <td>
+ Node name.
+ </td>
+ </tr>
+ <tr>
+ <td><code>os_pid</code></td>
+ <td>
+ Process identifier for the Operating System under which this
+ node is running.
+ </td>
+ </tr>
+ <tr>
+ <td><code>partitions</code></td>
+ <td>
+ List of network partitions this node is seeing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>proc_total</code></td>
+ <td>
+ Maximum number of Erlang processes.
+ </td>
+ </tr>
+ <tr>
+ <td><code>proc_used</code></td>
+ <td>
+ Number of Erlang processes in use.
+ </td>
+ </tr>
+ <tr>
+ <td><code>processors</code></td>
+ <td>
+ Number of cores detected and usable by Erlang.
+ </td>
+ </tr>
+ <tr>
+ <td><code>run_queue</code></td>
+ <td>
+ Average number of Erlang processes waiting to run.
+ </td>
+ </tr>
+ <tr>
+ <td><code>running</code></td>
+ <td>
+ Boolean for whether this node is up. Obviously if this is
+ false, most other stats will be missing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>sockets_total</code></td>
+ <td>
+ File descriptors available for use as sockets.
+ </td>
+ </tr>
+ <tr>
+ <td><code>sockets_used</code></td>
+ <td>
+ File descriptors used as sockets.
+ </td>
+ </tr>
+ <tr>
+ <td><code>statistics_level</code></td>
+ <td>
+ 'fine' or 'coarse'.
+ </td>
+ </tr>
+ <tr>
+ <td><code>type</code></td>
+ <td>
+ 'disc' or 'ram'.
+ </td>
+ </tr>
+ <tr>
+ <td><code>uptime</code></td>
+ <td>
+ Time since the Erlang VM started, in milliseconds.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/nodes/(name)</h2>
+
+ <p>
+ All of the above, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>memory</code></td>
+ <td>
+ Detailed memory use statistics. Only appears
+ if <code>?memory=true</code> is appended to the URL.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/connections</h2>
+ <h2>/api/connections/(name)</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl
+ list_connections</code>. No additional fields,
+ although <code>pid</code> is replaced by <code>node</code>.
+ </p>
+
+ <p>
+ Note also that while non-AMQP connections will appear in this
+ list (unlike <code>rabbitmqctl list_connections</code>), they
+ will omit many of the connection-level statistics.
+ </p>
+
+ <h2>/api/connections/(name)/channels</h2>
+ <h2>/api/channels</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl list_channels</code>,
+ with <code>pid</code> replaced by <code>node</code>, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>connection_details</code></td>
+ <td>
+ Some basic details about the owning connection.
+ </td>
+ </tr>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ See the section on message_stats above.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/channels/(name)</h2>
+
+ <p>
+ All the above, plus
+ </p>
+
+ <table>
+ <tr>
+ <td><code>publishes</code></td>
+ <td>
+ Detailed message stats (see section above) for publishes to
+ exchanges.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliveries</code></td>
+ <td>
+ Detailed message stats for deliveries from queues.
+ </td>
+ </tr>
+ <tr>
+ <td><code>consumer_details</code></td>
+ <td>
+ List of consumers on this channel, with some details on each.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/exchanges</h2>
+ <h2>/api/exchanges/(vhost)</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl list_exchanges</code>, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ See the section on message_stats above.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/exchanges/(vhost)/(name)</h2>
+
+ <p>
+ All the above, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>incoming</code></td>
+ <td>
+ Detailed message stats (see section above) for publishes
+ from channels into this exchange.
+ </td>
+ </tr>
+ <tr>
+ <td><code>outgoing</code></td>
+ <td>
+ Detailed message stats for publishes from this exchange into
+ queues.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/queues</h2>
+ <h2>/api/queues/(vhost)</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl list_queues</code>, with
+ all references to <code>pid</code>s replaced by <code>node</code>s
+ plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ See the section on message_stats above.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/queues/(vhost)/(name)</h2>
+
+ <p>
+ All the above, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>incoming</code></td>
+ <td>
+ Detailed message stats (see section above) for
+ publishes from exchanges into this queue.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliveries</code></td>
+ <td>
+ Detailed message stats for deliveries from this queue into
+ channels.
+ </td>
+ </tr>
+ <tr>
+ <td><code>consumer_details</code></td>
+ <td>
+          List of consumers on this queue, with some details on each.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/vhosts/</h2>
+ <h2>/api/vhosts/(name)</h2>
+
+ <p>
+ All the fields from <code>rabbitmqctl list_vhosts</code>
+ (i.e. <code>name</code> and <code>tracing</code>) plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ Global message_stats for this vhost. Note that activity for
+ other users in this vhost <b>is</b> shown, even for users
+ without the <code>monitoring</code> tag.
+ </td>
+ </tr>
+ <tr>
+ <td><code>messages</code> <code>messages_ready</code> <code>messages_acknowledged</code></td>
+ <td>
+ Sum of these fields for all queues in the vhost.
+ </td>
+ </tr>
+ <tr>
+ <td><code>recv_oct</code> <code>send_oct</code></td>
+ <td>
+ Sum of these fields for all connections to the vhost.
+ </td>
+ </tr>
+ </table>
+ </body>
+</html>
--- /dev/null
+<html>
+ <head>
+ <title>RabbitMQ Management</title>
+ <script src="js/ejs.min.js" type="text/javascript"></script>
+ <script src="js/jquery-1.6.4.min.js" type="text/javascript"></script>
+ <script src="js/jquery.flot.min.js" type="text/javascript"></script>
+ <script src="js/jquery.flot.time.min.js" type="text/javascript"></script>
+ <script src="js/sammy-0.6.0.min.js" type="text/javascript"></script>
+ <script src="js/json2.js" type="text/javascript"></script>
+ <script src="js/base64.js" type="text/javascript"></script>
+ <script src="js/global.js" type="text/javascript"></script>
+ <script src="js/main.js" type="text/javascript"></script>
+ <script src="js/prefs.js" type="text/javascript"></script>
+ <script src="js/help.js" type="text/javascript"></script>
+ <script src="js/formatters.js" type="text/javascript"></script>
+ <script src="js/charts.js" type="text/javascript"></script>
+
+ <link href="css/main.css" rel="stylesheet" type="text/css"/>
+ <link href="favicon.ico" rel="shortcut icon" type="image/x-icon"/>
+
+<!--[if lte IE 8]>
+ <script src="js/excanvas.min.js" type="text/javascript"></script>
+ <link href="css/evil.css" rel="stylesheet" type="text/css"/>
+<![endif]-->
+ </head>
+ <body>
+ <div id="outer"></div>
+ <div id="debug"></div>
+ <div id="scratch"></div>
+ </body>
+</html>
--- /dev/null
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * http://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* base64 encode/decode compatible with window.btoa/atob
+ *
+ * window.atob/btoa is a Firefox extension to convert binary data (the "b")
+ * to base64 (ascii, the "a").
+ *
+ * It is also found in Safari and Chrome. It is not available in IE.
+ *
+ * if (!window.btoa) window.btoa = base64.encode
+ * if (!window.atob) window.atob = base64.decode
+ *
+ * The original spec's for atob/btoa are a bit lacking
+ * https://developer.mozilla.org/en/DOM/window.atob
+ * https://developer.mozilla.org/en/DOM/window.btoa
+ *
+ * window.btoa and base64.encode takes a string where charCodeAt is [0,255]
+ * If any character is not [0,255], then an exception is thrown.
+ *
+ * window.atob and base64.decode take a base64-encoded string
+ * If the input length is not a multiple of 4, or contains invalid characters
+ * then an exception is thrown.
+ */
/* Namespace for the base64 codec (API-compatible with window.btoa/atob). */
var base64 = {};
base64.PADCHAR = '=';
base64.ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';

// Return the 6-bit value of the base64 character at position i of s.
// Throws (a string, matching this library's original contract) for any
// character outside the standard alphabet.
base64.getbyte64 = function(s, i) {
    // indexOf over the alphabet string is oddly fast, except on Chrome/V8;
    // a char -> value lookup object gives minimal or no improvement.
    var v = base64.ALPHA.indexOf(s.charAt(i));
    if (v === -1) {
        throw "Cannot decode base64";
    }
    return v;
};

// Decode a base64-encoded string into a "binary" string (char codes 0..255).
// The input length must be a multiple of 4; invalid characters throw.
base64.decode = function(s) {
    s = "" + s;  // coerce to string
    var byte64 = base64.getbyte64;
    var len = s.length;
    if (len === 0) {
        return s;
    }
    if (len % 4 !== 0) {
        throw "Cannot decode base64";
    }

    // Count trailing '=' padding characters; the final 4-char group is
    // decoded separately after the main loop.
    var pads = 0;
    if (s.charAt(len - 1) === base64.PADCHAR) {
        pads = (s.charAt(len - 2) === base64.PADCHAR) ? 2 : 1;
        len -= 4;
    }

    var out = [];
    var i;
    var word;
    for (i = 0; i < len; i += 4) {
        // Pack four 6-bit values into a 24-bit word, then split into 3 bytes.
        word = (byte64(s, i) << 18) | (byte64(s, i + 1) << 12) |
               (byte64(s, i + 2) << 6) | byte64(s, i + 3);
        out.push(String.fromCharCode(word >> 16, (word >> 8) & 0xff, word & 0xff));
    }

    if (pads === 1) {
        // "xxx=" -> two bytes
        word = (byte64(s, i) << 18) | (byte64(s, i + 1) << 12) | (byte64(s, i + 2) << 6);
        out.push(String.fromCharCode(word >> 16, (word >> 8) & 0xff));
    } else if (pads === 2) {
        // "xx==" -> one byte
        word = (byte64(s, i) << 18) | (byte64(s, i + 1) << 12);
        out.push(String.fromCharCode(word >> 16));
    }
    return out.join('');
};

// Return the char code at position i of s, rejecting anything above 255:
// the encoder operates on "binary" strings only.
base64.getbyte = function(s, i) {
    var code = s.charCodeAt(i);
    if (code > 255) {
        throw "INVALID_CHARACTER_ERR: DOM Exception 5";
    }
    return code;
};

// Encode a "binary" string (char codes 0..255) as base64, '='-padded.
base64.encode = function(s) {
    if (arguments.length !== 1) {
        throw "SyntaxError: Not enough arguments";
    }
    s = "" + s;  // coerce to string
    if (s.length === 0) {
        return s;
    }

    var pad = base64.PADCHAR;
    var alpha = base64.ALPHA;
    var byteAt = base64.getbyte;
    var out = [];
    var i;
    var word;

    // Input length rounded down to a whole number of 3-byte groups.
    var whole = s.length - (s.length % 3);
    for (i = 0; i < whole; i += 3) {
        // Pack three bytes into a 24-bit word, emit four 6-bit characters.
        word = (byteAt(s, i) << 16) | (byteAt(s, i + 1) << 8) | byteAt(s, i + 2);
        out.push(alpha.charAt(word >> 18));
        out.push(alpha.charAt((word >> 12) & 0x3f));
        out.push(alpha.charAt((word >> 6) & 0x3f));
        out.push(alpha.charAt(word & 0x3f));
    }

    // Encode the 1- or 2-byte remainder with '=' padding.
    switch (s.length - whole) {
    case 1:
        word = byteAt(s, i) << 16;
        out.push(alpha.charAt(word >> 18) + alpha.charAt((word >> 12) & 0x3f) +
                 pad + pad);
        break;
    case 2:
        word = (byteAt(s, i) << 16) | (byteAt(s, i + 1) << 8);
        out.push(alpha.charAt(word >> 18) + alpha.charAt((word >> 12) & 0x3f) +
                 alpha.charAt((word >> 6) & 0x3f) + pad);
        break;
    }
    return out.join('');
};
--- /dev/null
+function render_charts() {
+ $('.chart').map(function() {
+ render_chart($(this));
+ });
+}
+
+// Palette cycled through by render_chart(), one colour per data series.
+var chart_colors = ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed', '#666666', '#aaaaaa'];
+
+// Shared Flot options applied to every chart. NOTE(review): render_chart()
+// mutates this object (yaxis.tickFormatter), so it is page-level state
+// rather than a constant.
+var chart_chrome = {
+ series: { lines: { show: true } },
+ grid: { borderWidth: 2, borderColor: "#aaa" },
+ xaxis: { tickColor: "#fff", mode: "time", timezone: "browser" },
+ yaxis: { tickColor: "#eee", min: 0 },
+ legend: { show: false }
+};
+
+// Draw one Flot chart into `div`. The div id encodes the statistics key
+// ("chart-<id>"); class "chart-rates" selects rate mode, where each plotted
+// point is the per-second delta between adjacent samples instead of the raw
+// sample. Consumes (and then clears) chart_data[id].
+function render_chart(div) {
+ var id = div.attr('id').substring('chart-'.length);
+ var rate_mode = div.hasClass('chart-rates');
+
+ var out_data = [];
+ var i = 0;
+ var data = chart_data[id]['data'];
+ var fmt = chart_data[id]['fmt'];
+ for (var name in data) {
+ var series = data[name];
+ var samples = series.samples;
+ var d = [];
+ // Start at 1 so each point can look back at samples[j - 1].
+ for (var j = 1; j < samples.length; j++) {
+ var x = samples[j].timestamp;
+ var y;
+ if (rate_mode) {
+ // TODO This doesn't work well if you are looking at
+ // stuff in the browser that is finer granularity than
+ // the data we have in the DB (and thus we get
+ // duplicated entries). Do we care? We should just
+ // never allow that...
+ // Per-second rate: delta(sample) / delta(timestamp), scaled by
+ // 1000 — assumes timestamps are in milliseconds (TODO confirm).
+ y = (samples[j - 1].sample - samples[j].sample) * 1000 /
+ (samples[j - 1].timestamp - samples[j].timestamp);
+ }
+ else {
+ y = samples[j].sample;
+ }
+ d.push([x, y]);
+ }
+ // NOTE(review): chart_colors[i] is undefined once i exceeds the
+ // palette length — confirm the series count is always small enough.
+ out_data.push({data: d, color: chart_colors[i], shadowSize: 0});
+ i++;
+ }
+ // The data is single-use: clear it so a stale chart is not redrawn.
+ chart_data[id] = {};
+
+ // NOTE(review): chart_chrome is shared, so this formatter remains set for
+ // whichever chart is drawn next — confirm that is intended.
+ chart_chrome.yaxis.tickFormatter = fmt_y_axis(fmt);
+ $.plot(div, out_data, chart_chrome);
+}
+
+function fmt_y_axis(fmt) {
+ return function (val, axis) {
+ // axis.ticks seems to include the bottom value but not the top
+ if (axis.max == 1 && axis.ticks.length > 1) {
+ var newTicks = [axis.ticks[0]];
+ axis.ticks = newTicks;
+ }
+ return fmt(val, axis.max);
+ }
+}
+
+function update_rate_options(sammy) {
+ var id = sammy.params['id'];
+ store_pref('rate-mode-' + id, sammy.params['mode']);
+ store_pref('chart-size-' + id, sammy.params['size']);
+ store_pref('chart-range-' + id, sammy.params['range']);
+ partial_update();
+}
--- /dev/null
+// Registers every client-side Sammy.js route for the management UI:
+// overview, nodes, connections, channels, exchanges, queues, bindings,
+// vhosts, users, permissions, policies, login/logout and chart options.
+dispatcher_add(function(sammy) {
+ // Shorthand: GET route `p` that renders request-map `r` with template `t`.
+ function path(p, r, t) {
+ sammy.get(p, function() {
+ render(r, t, p);
+ });
+ }
+ // Overview page; monitoring users additionally get the nodes listing.
+ sammy.get('#/', function() {
+ var reqs = {'overview': {path: '/overview',
+ options: {ranges: ['lengths-over',
+ 'msg-rates-over']}}};
+ if (user_monitor) {
+ reqs['nodes'] = '/nodes';
+ }
+ render(reqs, 'overview', '#/');
+ });
+ sammy.get('#/login/:username/:password', login_route);
+
+ path('#/cluster-name', {'cluster_name': '/cluster-name'}, 'cluster-name');
+ sammy.put('#/cluster-name', function() {
+ if (sync_put(this, '/cluster-name')) {
+ // The cluster name appears in global chrome, so refresh that too.
+ setup_global_vars();
+ update();
+ }
+ return false;
+ });
+
+ sammy.get('#/nodes/:name', function() {
+ var name = esc(this.params['name']);
+ render({'node': '/nodes/' + name},
+ 'node', '');
+ });
+
+ // Connections: list, detail (with its channels), and forced close.
+ path('#/connections',
+ {'connections': {path: '/connections', options: {sort:true}}},
+ 'connections');
+ sammy.get('#/connections/:name', function() {
+ var name = esc(this.params['name']);
+ render({'connection': {path: '/connections/' + name,
+ options: {ranges: ['data-rates-conn']}},
+ 'channels': '/connections/' + name + '/channels'},
+ 'connection', '#/connections');
+ });
+ sammy.del('#/connections', function() {
+ // The close reason is passed to the broker via the X-Reason header.
+ var options = {headers: {
+ 'X-Reason': this.params['reason']
+ }};
+ if (sync_delete(this, '/connections/:name', options)) {
+ go_to('#/connections');
+ }
+
+ return false;
+ });
+
+ path('#/channels', {'channels': {path: '/channels', options: {sort:true}}},
+ 'channels');
+ sammy.get('#/channels/:name', function() {
+ render({'channel': {path: '/channels/' + esc(this.params['name']),
+ options:{ranges:['msg-rates-ch']}}},
+ 'channel', '#/channels');
+ });
+
+ // Exchanges: list, detail (with bindings both ways), create, delete,
+ // and test-message publishing.
+ path('#/exchanges', {'exchanges': {path: '/exchanges',
+ options: {sort:true,vhost:true}},
+ 'vhosts': '/vhosts'}, 'exchanges');
+ sammy.get('#/exchanges/:vhost/:name', function() {
+ var path = '/exchanges/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
+ render({'exchange': {path: path,
+ options: {ranges:['msg-rates-x']}},
+ 'bindings_source': path + '/bindings/source',
+ 'bindings_destination': path + '/bindings/destination'},
+ 'exchange', '#/exchanges');
+ });
+ sammy.put('#/exchanges', function() {
+ if (sync_put(this, '/exchanges/:vhost/:name'))
+ update();
+ return false;
+ });
+ sammy.del('#/exchanges', function() {
+ if (sync_delete(this, '/exchanges/:vhost/:name'))
+ go_to('#/exchanges');
+ return false;
+ });
+ sammy.post('#/exchanges/publish', function() {
+ publish_msg(this.params);
+ return false;
+ });
+
+ // Queues: list, detail, create, delete/purge, message get and actions.
+ path('#/queues', {'queues': {path: '/queues',
+ options: {sort:true,vhost:true}},
+ 'vhosts': '/vhosts'}, 'queues');
+ sammy.get('#/queues/:vhost/:name', function() {
+ var path = '/queues/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
+ render({'queue': {path: path,
+ options: {ranges:['lengths-q', 'msg-rates-q']}},
+ 'bindings': path + '/bindings'}, 'queue', '#/queues');
+ });
+ sammy.put('#/queues', function() {
+ if (sync_put(this, '/queues/:vhost/:name'))
+ update();
+ return false;
+ });
+ sammy.del('#/queues', function() {
+ // One DELETE route serves both full deletion and purge, selected by
+ // the 'mode' form parameter.
+ if (this.params['mode'] == 'delete') {
+ if (sync_delete(this, '/queues/:vhost/:name'))
+ go_to('#/queues');
+ }
+ else if (this.params['mode'] == 'purge') {
+ if (sync_delete(this, '/queues/:vhost/:name/contents')) {
+ show_popup('info', "Queue purged");
+ // NOTE(review): this calls update_partial(), but charts.js calls
+ // partial_update() — confirm which helper is actually defined;
+ // one of the two names looks like a typo.
+ update_partial();
+ }
+ }
+ return false;
+ });
+ sammy.post('#/queues/get', function() {
+ get_msgs(this.params);
+ return false;
+ });
+ sammy.post('#/queues/actions', function() {
+ if (sync_post(this, '/queues/:vhost/:name/actions'))
+ // We can't refresh fast enough, it's racy. So grey
+ // the button and wait for a normal refresh.
+ $('#action-button').addClass('wait').prop('disabled', true);
+ return false;
+ });
+ sammy.post('#/bindings', function() {
+ if (sync_post(this, '/bindings/:vhost/e/:source/:destination_type/:destination'))
+ update();
+ return false;
+ });
+ sammy.del('#/bindings', function() {
+ if (sync_delete(this, '/bindings/:vhost/e/:source/:destination_type/:destination/:properties_key'))
+ update();
+ return false;
+ });
+
+ // Vhosts: list, detail (with permissions), create and delete.
+ path('#/vhosts', {'vhosts': {path: '/vhosts',
+ options: {sort:true}},
+ 'permissions': '/permissions'}, 'vhosts');
+ sammy.get('#/vhosts/:id', function() {
+ render({'vhost': {path: '/vhosts/' + esc(this.params['id']),
+ options: {ranges: ['lengths-vhost',
+ 'msg-rates-vhost',
+ 'data-rates-vhost']}},
+ 'permissions': '/vhosts/' + esc(this.params['id']) + '/permissions',
+ 'users': '/users/'},
+ 'vhost', '#/vhosts');
+ });
+ sammy.put('#/vhosts', function() {
+ if (sync_put(this, '/vhosts/:name')) {
+ // The vhost selector in the chrome must reflect the new vhost.
+ update_vhosts();
+ update();
+ }
+ return false;
+ });
+ sammy.del('#/vhosts', function() {
+ if (sync_delete(this, '/vhosts/:name')) {
+ update_vhosts();
+ go_to('#/vhosts');
+ }
+ return false;
+ });
+
+ // Users: list, detail, add/modify (separate routes), delete.
+ path('#/users', {'users': {path: '/users',
+ options: {sort:true}},
+ 'permissions': '/permissions'}, 'users');
+ sammy.get('#/users/:id', function() {
+ render({'user': '/users/' + esc(this.params['id']),
+ 'permissions': '/users/' + esc(this.params['id']) + '/permissions',
+ 'vhosts': '/vhosts/'}, 'user',
+ '#/users');
+ });
+ sammy.put('#/users-add', function() {
+ if (sync_put(this, '/users/:username'))
+ update();
+ return false;
+ });
+ sammy.put('#/users-modify', function() {
+ if (sync_put(this, '/users/:username'))
+ go_to('#/users');
+ return false;
+ });
+ sammy.del('#/users', function() {
+ if (sync_delete(this, '/users/:username'))
+ go_to('#/users');
+ return false;
+ });
+
+ sammy.put('#/permissions', function() {
+ if (sync_put(this, '/permissions/:vhost/:username'))
+ update();
+ return false;
+ });
+ sammy.del('#/permissions', function() {
+ if (sync_delete(this, '/permissions/:vhost/:username'))
+ update();
+ return false;
+ });
+ // Policies: list, detail, create (required/optional/JSON field split
+ // handled by put_policy), delete.
+ path('#/policies', {'policies': '/policies',
+ 'vhosts': '/vhosts'}, 'policies');
+ sammy.get('#/policies/:vhost/:id', function() {
+ render({'policy': '/policies/' + esc(this.params['vhost'])
+ + '/' + esc(this.params['id'])},
+ 'policy', '#/policies');
+ });
+ sammy.put('#/policies', function() {
+ put_policy(this, ['name', 'pattern', 'policy'], ['priority'], []);
+ return false;
+ });
+ sammy.del('#/policies', function() {
+ if (sync_delete(this, '/policies/:vhost/:name'))
+ go_to('#/policies');
+ return false;
+ });
+
+ // Logout: expire the auth cookie and reload so the login page appears.
+ sammy.put('#/logout', function() {
+ document.cookie = 'auth=; expires=Thu, 01 Jan 1970 00:00:00 GMT';
+ location.reload();
+ });
+
+ sammy.get('#/import-succeeded', function() {
+ render({}, 'import-succeeded', '#/overview');
+ });
+ sammy.put('#/rate-options', function() {
+ update_rate_options(this);
+ });
+});
--- /dev/null
+(function(){
+
+
+var rsplit = function(string, regex) {
+ var result = regex.exec(string),retArr = new Array(), first_idx, last_idx, first_bit;
+ while (result != null)
+ {
+ first_idx = result.index; last_idx = regex.lastIndex;
+ if ((first_idx) != 0)
+ {
+ first_bit = string.substring(0,first_idx);
+ retArr.push(string.substring(0,first_idx));
+ string = string.slice(first_idx);
+ }
+ retArr.push(result[0]);
+ string = string.slice(result[0].length);
+ result = regex.exec(string);
+ }
+ if (! string == '')
+ {
+ retArr.push(string);
+ }
+ return retArr;
+},
+chop = function(string){
+ return string.substr(0, string.length - 1);
+},
+extend = function(d, s){
+ for(var n in s){
+ if(s.hasOwnProperty(n)) d[n] = s[n]
+ }
+}
+
+
+// EJS template constructor. `options` may be a view-path string or an
+// object carrying one of three template sources:
+//   precompiled - a ready-made process() function,
+//   element     - a DOM node (or element id) whose value/innerHTML is used,
+//   url         - a path fetched via synchronous XHR, with cache lookup.
+EJS = function( options ){
+ options = typeof options == "string" ? {view: options} : options
+ this.set_options(options);
+ // Precompiled templates skip scanning and compiling entirely.
+ if(options.precompiled){
+ this.template = {};
+ this.template.process = options.precompiled;
+ EJS.update(this.name, this);
+ return;
+ }
+ // Template embedded in the page; '[' delimiters so '<%' in HTML is inert.
+ if(options.element)
+ {
+ if(typeof options.element == 'string'){
+ var name = options.element
+ options.element = document.getElementById( options.element )
+ if(options.element == null) throw name+'does not exist!'
+ }
+ if(options.element.value){
+ this.text = options.element.value
+ }else{
+ this.text = options.element.innerHTML
+ }
+ this.name = options.element.id
+ this.type = '['
+ }else if(options.url){
+ // Template fetched from a URL, served from the cache when possible.
+ options.url = EJS.endExt(options.url, this.extMatch);
+ this.name = this.name ? this.name : options.url;
+ var url = options.url
+ //options.view = options.absolute_url || options.view || options.;
+ var template = EJS.get(this.name /*url*/, this.cache);
+ if (template) return template;
+ if (template == EJS.INVALID_PATH) return null;
+ // Random query parameter defeats browser caching when cache is off.
+ try{
+ this.text = EJS.request( url+(this.cache ? '' : '?'+Math.random() ));
+ }catch(e){}
+
+ if(this.text == null){
+ throw( {type: 'EJS', message: 'There is no template at '+url} );
+ }
+ //this.name = url;
+ }
+ // Compile the template text and register the result under its name.
+ var template = new EJS.Compiler(this.text, this.type);
+
+ template.compile(options, this.name);
+
+
+ EJS.update(this.name, this);
+ this.template = template;
+};
+/* @Prototype*/
+/* @Prototype*/
+EJS.prototype = {
+ /**
+ * Renders an object with extra view helpers attached to the view.
+ * @param {Object} object data to be rendered
+ * @param {Object} extra_helpers an object with additional view helpers
+ * @return {String} returns the result of the string
+ */
+ render : function(object, extra_helpers){
+ object = object || {};
+ this._extra_helpers = extra_helpers;
+ var v = new EJS.Helpers(object, extra_helpers || {});
+ return this.template.process.call(object, object,v);
+ },
+ /**
+ * Renders into a DOM element (or element id). With no options, returns a
+ * one-argument updater function; with a string, treats it as a URL whose
+ * eval'd response text becomes the render data; otherwise renders
+ * `options` directly into element.innerHTML.
+ * NOTE(review): `_template` and `params` are assigned without var and so
+ * leak as globals; eval of the response also assumes trusted input.
+ */
+ update : function(element, options){
+ if(typeof element == 'string'){
+ element = document.getElementById(element)
+ }
+ if(options == null){
+ _template = this;
+ return function(object){
+ EJS.prototype.update.call(_template, element, object)
+ }
+ }
+ if(typeof options == 'string'){
+ params = {}
+ params.url = options
+ _template = this;
+ params.onComplete = function(request){
+ var object = eval( request.responseText )
+ EJS.prototype.update.call(_template, element, object)
+ }
+ EJS.ajax_request(params)
+ }else
+ {
+ element.innerHTML = this.render(options)
+ }
+ },
+ // Returns the generated JavaScript source of the compiled template.
+ out : function(){
+ return this.template.out;
+ },
+ /**
+ * Sets options on this view to be rendered with.
+ * @param {Object} options
+ */
+ set_options : function(options){
+ this.type = options.type || EJS.type;
+ this.cache = options.cache != null ? options.cache : EJS.cache;
+ this.text = options.text || null;
+ this.name = options.name || null;
+ this.ext = options.ext || EJS.ext;
+ // NOTE(review): '\.' inside a string literal is just '.', so the dot
+ // is NOT escaped here and this pattern matches any character.
+ this.extMatch = new RegExp(this.ext.replace(/\./, '\.'));
+ }
+};
+EJS.endExt = function(path, match){
+ if(!path) return null;
+ match.lastIndex = 0
+ return path+ (match.test(path) ? '' : this.ext )
+}
+
+
+
+
+/* @Static*/
+EJS.Scanner = function(source, left, right) {
+
+ extend(this,
+ {left_delimiter: left +'%',
+ right_delimiter: '%'+right,
+ double_left: left+'%%',
+ double_right: '%%'+right,
+ left_equal: left+'%=',
+ left_comment: left+'%#'})
+
+ this.SplitRegexp = left=='[' ? /(\[%%)|(%%\])|(\[%=)|(\[%#)|(\[%)|(%\]\n)|(%\])|(\n)/ : new RegExp('('+this.double_left+')|(%%'+this.double_right+')|('+this.left_equal+')|('+this.left_comment+')|('+this.left_delimiter+')|('+this.right_delimiter+'\n)|('+this.right_delimiter+')|(\n)') ;
+
+ this.source = source;
+ this.stag = null;
+ this.lines = 0;
+};
+
+EJS.Scanner.to_text = function(input){
+ if(input == null || input === undefined)
+ return '';
+ if(input instanceof Date)
+ return input.toDateString();
+ if(input.toString)
+ return input.toString();
+ return '';
+};
+
+EJS.Scanner.prototype = {
+ scan: function(block) {
+ scanline = this.scanline;
+ regex = this.SplitRegexp;
+ if (! this.source == '')
+ {
+ var source_split = rsplit(this.source, /\n/);
+ for(var i=0; i<source_split.length; i++) {
+ var item = source_split[i];
+ this.scanline(item, regex, block);
+ }
+ }
+ },
+ scanline: function(line, regex, block) {
+ this.lines++;
+ var line_split = rsplit(line, regex);
+ for(var i=0; i<line_split.length; i++) {
+ var token = line_split[i];
+ if (token != null) {
+ try{
+ block(token, this);
+ }catch(e){
+ throw {type: 'EJS.Scanner', line: this.lines};
+ }
+ }
+ }
+ }
+};
+
+
+EJS.Buffer = function(pre_cmd, post_cmd) {
+ this.line = new Array();
+ this.script = "";
+ this.pre_cmd = pre_cmd;
+ this.post_cmd = post_cmd;
+ for (var i=0; i<this.pre_cmd.length; i++)
+ {
+ this.push(pre_cmd[i]);
+ }
+};
+EJS.Buffer.prototype = {
+
+ push: function(cmd) {
+ this.line.push(cmd);
+ },
+
+ cr: function() {
+ this.script = this.script + this.line.join('; ');
+ this.line = new Array();
+ this.script = this.script + "\n";
+ },
+
+ close: function() {
+ if (this.line.length > 0)
+ {
+ for (var i=0; i<this.post_cmd.length; i++){
+ this.push(pre_cmd[i]);
+ }
+ this.script = this.script + this.line.join('; ');
+ line = null;
+ }
+ }
+
+};
+
+
+EJS.Compiler = function(source, left) {
+ this.pre_cmd = ['var ___ViewO = [];'];
+ this.post_cmd = new Array();
+ this.source = ' ';
+ if (source != null)
+ {
+ if (typeof source == 'string')
+ {
+ source = source.replace(/\r\n/g, "\n");
+ source = source.replace(/\r/g, "\n");
+ this.source = source;
+ }else if (source.innerHTML){
+ this.source = source.innerHTML;
+ }
+ if (typeof this.source != 'string'){
+ this.source = "";
+ }
+ }
+ left = left || '<';
+ var right = '>';
+ switch(left) {
+ case '[':
+ right = ']';
+ break;
+ case '<':
+ break;
+ default:
+ throw left+' is not a supported deliminator';
+ break;
+ }
+ this.scanner = new EJS.Scanner(this.source, left, right);
+ this.out = '';
+};
+EJS.Compiler.prototype = {
+ /**
+ * Translates the template source into a JavaScript
+ * process(_CONTEXT,_VIEW) function and installs it on this compiler via
+ * eval. Literal text is pushed onto ___ViewO; <% %> code runs verbatim;
+ * <%= %> pushes the to_text()-converted expression; <%# %> is discarded.
+ */
+ compile: function(options, name) {
+ options = options || {};
+ this.out = '';
+ var put_cmd = "___ViewO.push(";
+ var insert_cmd = put_cmd;
+ var buff = new EJS.Buffer(this.pre_cmd, this.post_cmd);
+ var content = '';
+ // Escape literal text so it can sit inside a double-quoted JS string.
+ var clean = function(content)
+ {
+ content = content.replace(/\\/g, '\\\\');
+ content = content.replace(/\n/g, '\\n');
+ content = content.replace(/"/g, '\\"');
+ return content;
+ };
+ // State machine: scanner.stag is null while in literal text, or holds
+ // the open tag ('<%', '<%=' or '<%#') while inside a tag.
+ this.scanner.scan(function(token, scanner) {
+ if (scanner.stag == null)
+ {
+ switch(token) {
+ case '\n':
+ content = content + "\n";
+ buff.push(put_cmd + '"' + clean(content) + '");');
+ buff.cr();
+ content = '';
+ break;
+ case scanner.left_delimiter:
+ case scanner.left_equal:
+ case scanner.left_comment:
+ // Entering a tag: flush any pending literal text first.
+ scanner.stag = token;
+ if (content.length > 0)
+ {
+ buff.push(put_cmd + '"' + clean(content) + '")');
+ }
+ content = '';
+ break;
+ case scanner.double_left:
+ // Escaped delimiter ('<%%') emits a literal '<%'.
+ content = content + scanner.left_delimiter;
+ break;
+ default:
+ content = content + token;
+ break;
+ }
+ }
+ else {
+ switch(token) {
+ case scanner.right_delimiter:
+ switch(scanner.stag) {
+ case scanner.left_delimiter:
+ // Plain code tag: emit the statement verbatim.
+ if (content[content.length - 1] == '\n')
+ {
+ content = chop(content);
+ buff.push(content);
+ buff.cr();
+ }
+ else {
+ buff.push(content);
+ }
+ break;
+ case scanner.left_equal:
+ // Expression tag: push its text conversion.
+ buff.push(insert_cmd + "(EJS.Scanner.to_text(" + content + ")))");
+ break;
+ }
+ // (Comment tags reach here too and are simply dropped.)
+ scanner.stag = null;
+ content = '';
+ break;
+ case scanner.double_right:
+ content = content + scanner.right_delimiter;
+ break;
+ default:
+ content = content + token;
+ break;
+ }
+ }
+ });
+ if (content.length > 0)
+ {
+ // Could be content.dump in Ruby
+ buff.push(put_cmd + '"' + clean(content) + '")');
+ }
+ buff.close();
+ this.out = buff.script + ";";
+ // Wrap the generated statements in a function that uses with() so
+ // template code can reference context and helper properties directly,
+ // then install it on `this` via eval.
+ var to_be_evaled = '/*'+name+'*/this.process = function(_CONTEXT,_VIEW) { try { with(_VIEW) { with (_CONTEXT) {'+this.out+" return ___ViewO.join('');}}}catch(e){e.lineNumber=null;throw e;}};";
+
+ try{
+ eval(to_be_evaled);
+ }catch(e){
+ // On failure, run JSLint (when available) over the generated source
+ // to surface a more precise error line for the template author.
+ if(typeof JSLINT != 'undefined'){
+ JSLINT(this.out);
+ for(var i = 0; i < JSLINT.errors.length; i++){
+ var error = JSLINT.errors[i];
+ if(error.reason != "Unnecessary semicolon."){
+ error.line++;
+ var e = new Error();
+ e.lineNumber = error.line;
+ e.message = error.reason;
+ if(options.view)
+ e.fileName = options.view;
+ throw e;
+ }
+ }
+ }else{
+ throw e;
+ }
+ }
+ }
+};
+
+
+//type, cache, folder
+/**
+ * Sets default options for all views
+ * @param {Object} options Set view with the following options
+ * <table class="options">
+ <tbody><tr><th>Option</th><th>Default</th><th>Description</th></tr>
+ <tr>
+ <td>type</td>
+ <td>'<'</td>
+ <td>type of magic tags. Options are '<' or '['
+ </td>
+ </tr>
+ <tr>
+ <td>cache</td>
+ <td>true in production mode, false in other modes</td>
+ <td>true to cache template.
+ </td>
+ </tr>
+ </tbody></table>
+ *
+ */
+EJS.config = function(options){
+ EJS.cache = options.cache != null ? options.cache : EJS.cache;
+ EJS.type = options.type != null ? options.type : EJS.type;
+ EJS.ext = options.ext != null ? options.ext : EJS.ext;
+
+ var templates_directory = EJS.templates_directory || {}; //nice and private container
+ EJS.templates_directory = templates_directory;
+ EJS.get = function(path, cache){
+ if(cache == false) return null;
+ if(templates_directory[path]) return templates_directory[path];
+ return null;
+ };
+
+ EJS.update = function(path, template) {
+ if(path == null) return;
+ templates_directory[path] = template ;
+ };
+
+ EJS.INVALID_PATH = -1;
+};
+EJS.config( {cache: true, type: '<', ext: '.ejs' } );
+
+
+
+/**
+ * @constructor
+ * By adding functions to EJS.Helpers.prototype, those functions will be available in the
+ * views.
+ * @init Creates a view helper. This function is called internally. You should never call it.
+ * @param {Object} data The data passed to the view. Helpers have access to it through this._data
+ */
+// Container for view helper functions; `extras` are merged onto the
+// instance so templates can call them directly.
+EJS.Helpers = function(data, extras){
+ this._data = data;       // the render data, reachable from helpers
+ this._extras = extras;   // the extra helper functions themselves
+ extend(this, extras );
+};
+/* @prototype*/
+EJS.Helpers.prototype = {
+ /**
+ * Renders a new view. If data is passed in, uses that to render the view.
+ * @param {Object} options standard options passed to a new view.
+ * @param {optional:Object} data
+ * @return {String}
+ */
+ view: function(options, data, helpers){
+ if(!helpers) helpers = this._extras
+ if(!data) data = this._data;
+ return new EJS(options).render(data, helpers);
+ },
+ /**
+ * For a given value, tries to create a human representation.
+ * @param {Object} input the value being converted.
+ * @param {Object} null_text what text should be present if input == null or undefined, defaults to ''
+ * @return {String}
+ */
+ to_text: function(input, null_text) {
+ if(input == null || input === undefined) return null_text || '';
+ if(input instanceof Date) return input.toDateString();
+ if(input.toString) return input.toString().replace(/\n/g, '<br />').replace(/''/g, "'");
+ return '';
+ }
+};
+ EJS.newRequest = function(){
+ var factories = [function() { return new ActiveXObject("Msxml2.XMLHTTP"); },function() { return new XMLHttpRequest(); },function() { return new ActiveXObject("Microsoft.XMLHTTP"); }];
+ for(var i = 0; i < factories.length; i++) {
+ try {
+ var request = factories[i]();
+ if (request != null) return request;
+ }
+ catch(e) { continue;}
+ }
+ }
+
+ EJS.request = function(path){
+ var request = new EJS.newRequest()
+ request.open("GET", path, false);
+
+ try{request.send(null);}
+ catch(e){return null;}
+
+ if ( request.status == 404 || request.status == 2 ||(request.status == 0 && request.responseText == '') ) return null;
+
+ return request.responseText
+ }
+ EJS.ajax_request = function(params){
+ params.method = ( params.method ? params.method : 'GET')
+
+ var request = new EJS.newRequest();
+ request.onreadystatechange = function(){
+ if(request.readyState == 4){
+ if(request.status == 200){
+ params.onComplete(request)
+ }else
+ {
+ params.onComplete(request)
+ }
+ }
+ }
+ request.open(params.method, params.url)
+ request.send(null)
+ }
+
+
+})();
\ No newline at end of file
--- /dev/null
+(function(){var rsplit=function(string,regex){var result=regex.exec(string),retArr=new Array(),first_idx,last_idx,first_bit;while(result!=null){first_idx=result.index;last_idx=regex.lastIndex;if((first_idx)!=0){first_bit=string.substring(0,first_idx);retArr.push(string.substring(0,first_idx));string=string.slice(first_idx)}retArr.push(result[0]);string=string.slice(result[0].length);result=regex.exec(string)}if(!string==""){retArr.push(string)}return retArr},chop=function(string){return string.substr(0,string.length-1)},extend=function(d,s){for(var n in s){if(s.hasOwnProperty(n)){d[n]=s[n]}}};EJS=function(options){options=typeof options=="string"?{view:options}:options;this.set_options(options);if(options.precompiled){this.template={};this.template.process=options.precompiled;EJS.update(this.name,this);return }if(options.element){if(typeof options.element=="string"){var name=options.element;options.element=document.getElementById(options.element);if(options.element==null){throw name+"does not exist!"}}if(options.element.value){this.text=options.element.value}else{this.text=options.element.innerHTML}this.name=options.element.id;this.type="["}else{if(options.url){options.url=EJS.endExt(options.url,this.extMatch);this.name=this.name?this.name:options.url;var url=options.url;var template=EJS.get(this.name,this.cache);if(template){return template}if(template==EJS.INVALID_PATH){return null}try{this.text=EJS.request(url+(this.cache?"":"?"+Math.random()))}catch(e){}if(this.text==null){throw ({type:"EJS",message:"There is no template at "+url})}}}var template=new EJS.Compiler(this.text,this.type);template.compile(options,this.name);EJS.update(this.name,this);this.template=template};EJS.prototype={render:function(object,extra_helpers){object=object||{};this._extra_helpers=extra_helpers;var v=new EJS.Helpers(object,extra_helpers||{});return this.template.process.call(object,object,v)},update:function(element,options){if(typeof 
element=="string"){element=document.getElementById(element)}if(options==null){_template=this;return function(object){EJS.prototype.update.call(_template,element,object)}}if(typeof options=="string"){params={};params.url=options;_template=this;params.onComplete=function(request){var object=eval(request.responseText);EJS.prototype.update.call(_template,element,object)};EJS.ajax_request(params)}else{element.innerHTML=this.render(options)}},out:function(){return this.template.out},set_options:function(options){this.type=options.type||EJS.type;this.cache=options.cache!=null?options.cache:EJS.cache;this.text=options.text||null;this.name=options.name||null;this.ext=options.ext||EJS.ext;this.extMatch=new RegExp(this.ext.replace(/\./,"."))}};EJS.endExt=function(path,match){if(!path){return null}match.lastIndex=0;return path+(match.test(path)?"":this.ext)};EJS.Scanner=function(source,left,right){extend(this,{left_delimiter:left+"%",right_delimiter:"%"+right,double_left:left+"%%",double_right:"%%"+right,left_equal:left+"%=",left_comment:left+"%#"});this.SplitRegexp=left=="["?/(\[%%)|(%%\])|(\[%=)|(\[%#)|(\[%)|(%\]\n)|(%\])|(\n)/:new RegExp("("+this.double_left+")|(%%"+this.double_right+")|("+this.left_equal+")|("+this.left_comment+")|("+this.left_delimiter+")|("+this.right_delimiter+"\n)|("+this.right_delimiter+")|(\n)");this.source=source;this.stag=null;this.lines=0};EJS.Scanner.to_text=function(input){if(input==null||input===undefined){return""}if(input instanceof Date){return input.toDateString()}if(input.toString){return input.toString()}return""};EJS.Scanner.prototype={scan:function(block){scanline=this.scanline;regex=this.SplitRegexp;if(!this.source==""){var source_split=rsplit(this.source,/\n/);for(var i=0;i<source_split.length;i++){var item=source_split[i];this.scanline(item,regex,block)}}},scanline:function(line,regex,block){this.lines++;var line_split=rsplit(line,regex);for(var i=0;i<line_split.length;i++){var 
token=line_split[i];if(token!=null){try{block(token,this)}catch(e){throw {type:"EJS.Scanner",line:this.lines}}}}}};EJS.Buffer=function(pre_cmd,post_cmd){this.line=new Array();this.script="";this.pre_cmd=pre_cmd;this.post_cmd=post_cmd;for(var i=0;i<this.pre_cmd.length;i++){this.push(pre_cmd[i])}};EJS.Buffer.prototype={push:function(cmd){this.line.push(cmd)},cr:function(){this.script=this.script+this.line.join("; ");this.line=new Array();this.script=this.script+"\n"},close:function(){if(this.line.length>0){for(var i=0;i<this.post_cmd.length;i++){this.push(pre_cmd[i])}this.script=this.script+this.line.join("; ");line=null}}};EJS.Compiler=function(source,left){this.pre_cmd=["var ___ViewO = [];"];this.post_cmd=new Array();this.source=" ";if(source!=null){if(typeof source=="string"){source=source.replace(/\r\n/g,"\n");source=source.replace(/\r/g,"\n");this.source=source}else{if(source.innerHTML){this.source=source.innerHTML}}if(typeof this.source!="string"){this.source=""}}left=left||"<";var right=">";switch(left){case"[":right="]";break;case"<":break;default:throw left+" is not a supported deliminator";break}this.scanner=new EJS.Scanner(this.source,left,right);this.out=""};EJS.Compiler.prototype={compile:function(options,name){options=options||{};this.out="";var put_cmd="___ViewO.push(";var insert_cmd=put_cmd;var buff=new EJS.Buffer(this.pre_cmd,this.post_cmd);var content="";var clean=function(content){content=content.replace(/\\/g,"\\\\");content=content.replace(/\n/g,"\\n");content=content.replace(/"/g,'\\"');return content};this.scanner.scan(function(token,scanner){if(scanner.stag==null){switch(token){case"\n":content=content+"\n";buff.push(put_cmd+'"'+clean(content)+'");');buff.cr();content="";break;case scanner.left_delimiter:case scanner.left_equal:case scanner.left_comment:scanner.stag=token;if(content.length>0){buff.push(put_cmd+'"'+clean(content)+'")')}content="";break;case 
scanner.double_left:content=content+scanner.left_delimiter;break;default:content=content+token;break}}else{switch(token){case scanner.right_delimiter:switch(scanner.stag){case scanner.left_delimiter:if(content[content.length-1]=="\n"){content=chop(content);buff.push(content);buff.cr()}else{buff.push(content)}break;case scanner.left_equal:buff.push(insert_cmd+"(EJS.Scanner.to_text("+content+")))");break}scanner.stag=null;content="";break;case scanner.double_right:content=content+scanner.right_delimiter;break;default:content=content+token;break}}});if(content.length>0){buff.push(put_cmd+'"'+clean(content)+'")')}buff.close();this.out=buff.script+";";var to_be_evaled="/*"+name+"*/this.process = function(_CONTEXT,_VIEW) { try { with(_VIEW) { with (_CONTEXT) {"+this.out+" return ___ViewO.join('');}}}catch(e){e.lineNumber=null;throw e;}};";try{eval(to_be_evaled)}catch(e){if(typeof JSLINT!="undefined"){JSLINT(this.out);for(var i=0;i<JSLINT.errors.length;i++){var error=JSLINT.errors[i];if(error.reason!="Unnecessary semicolon."){error.line++;var e=new Error();e.lineNumber=error.line;e.message=error.reason;if(options.view){e.fileName=options.view}throw e}}}else{throw e}}}};EJS.config=function(options){EJS.cache=options.cache!=null?options.cache:EJS.cache;EJS.type=options.type!=null?options.type:EJS.type;EJS.ext=options.ext!=null?options.ext:EJS.ext;var templates_directory=EJS.templates_directory||{};EJS.templates_directory=templates_directory;EJS.get=function(path,cache){if(cache==false){return null}if(templates_directory[path]){return templates_directory[path]}return null};EJS.update=function(path,template){if(path==null){return }templates_directory[path]=template};EJS.INVALID_PATH=-1};EJS.config({cache:true,type:"<",ext:".ejs"});EJS.Helpers=function(data,extras){this._data=data;this._extras=extras;extend(this,extras)};EJS.Helpers.prototype={view:function(options,data,helpers){if(!helpers){helpers=this._extras}if(!data){data=this._data}return new 
EJS(options).render(data,helpers)},to_text:function(input,null_text){if(input==null||input===undefined){return null_text||""}if(input instanceof Date){return input.toDateString()}if(input.toString){return input.toString().replace(/\n/g,"<br />").replace(/''/g,"'")}return""}};EJS.newRequest=function(){var factories=[function(){return new ActiveXObject("Msxml2.XMLHTTP")},function(){return new XMLHttpRequest()},function(){return new ActiveXObject("Microsoft.XMLHTTP")}];for(var i=0;i<factories.length;i++){try{var request=factories[i]();if(request!=null){return request}}catch(e){continue}}};EJS.request=function(path){var request=new EJS.newRequest();request.open("GET",path,false);try{request.send(null)}catch(e){return null}if(request.status==404||request.status==2||(request.status==0&&request.responseText=="")){return null}return request.responseText};EJS.ajax_request=function(params){params.method=(params.method?params.method:"GET");var request=new EJS.newRequest();request.onreadystatechange=function(){if(request.readyState==4){if(request.status==200){params.onComplete(request)}else{params.onComplete(request)}}};request.open(params.method,params.url);request.send(null)}})();EJS.Helpers.prototype.date_tag=function(C,O,A){if(!(O instanceof Date)){O=new Date()}var B=["January","February","March","April","May","June","July","August","September","October","November","December"];var G=[],D=[],P=[];var J=O.getFullYear();var H=O.getMonth();var N=O.getDate();for(var M=J-15;M<J+15;M++){G.push({value:M,text:M})}for(var E=0;E<12;E++){D.push({value:(E),text:B[E]})}for(var I=0;I<31;I++){P.push({value:(I+1),text:(I+1)})}var L=this.select_tag(C+"[year]",J,G,{id:C+"[year]"});var F=this.select_tag(C+"[month]",H,D,{id:C+"[month]"});var K=this.select_tag(C+"[day]",N,P,{id:C+"[day]"});return L+F+K};EJS.Helpers.prototype.form_tag=function(B,A){A=A||{};A.action=B;if(A.multipart==true){A.method="post";A.enctype="multipart/form-data"}return 
this.start_tag_for("form",A)};EJS.Helpers.prototype.form_tag_end=function(){return this.tag_end("form")};EJS.Helpers.prototype.hidden_field_tag=function(A,C,B){return this.input_field_tag(A,C,"hidden",B)};EJS.Helpers.prototype.input_field_tag=function(A,D,C,B){B=B||{};B.id=B.id||A;B.value=D||"";B.type=C||"text";B.name=A;return this.single_tag_for("input",B)};EJS.Helpers.prototype.is_current_page=function(A){return(window.location.href==A||window.location.pathname==A?true:false)};EJS.Helpers.prototype.link_to=function(B,A,C){if(!B){var B="null"}if(!C){var C={}}if(C.confirm){C.onclick=' var ret_confirm = confirm("'+C.confirm+'"); if(!ret_confirm){ return false;} ';C.confirm=null}C.href=A;return this.start_tag_for("a",C)+B+this.tag_end("a")};EJS.Helpers.prototype.submit_link_to=function(B,A,C){if(!B){var B="null"}if(!C){var C={}}C.onclick=C.onclick||"";if(C.confirm){C.onclick=' var ret_confirm = confirm("'+C.confirm+'"); if(!ret_confirm){ return false;} ';C.confirm=null}C.value=B;C.type="submit";C.onclick=C.onclick+(A?this.url_for(A):"")+"return false;";return this.start_tag_for("input",C)};EJS.Helpers.prototype.link_to_if=function(F,B,A,D,C,E){return this.link_to_unless((F==false),B,A,D,C,E)};EJS.Helpers.prototype.link_to_unless=function(E,B,A,C,D){C=C||{};if(E){if(D&&typeof D=="function"){return D(B,A,C,D)}else{return B}}else{return this.link_to(B,A,C)}};EJS.Helpers.prototype.link_to_unless_current=function(B,A,C,D){C=C||{};return this.link_to_unless(this.is_current_page(A),B,A,C,D)};EJS.Helpers.prototype.password_field_tag=function(A,C,B){return this.input_field_tag(A,C,"password",B)};EJS.Helpers.prototype.select_tag=function(D,G,H,F){F=F||{};F.id=F.id||D;F.value=G;F.name=D;var B="";B+=this.start_tag_for("select",F);for(var E=0;E<H.length;E++){var C=H[E];var A={value:C.value};if(C.value==G){A.selected="selected"}B+=this.start_tag_for("option",A)+C.text+this.tag_end("option")}B+=this.tag_end("select");return 
B};EJS.Helpers.prototype.single_tag_for=function(A,B){return this.tag(A,B,"/>")};EJS.Helpers.prototype.start_tag_for=function(A,B){return this.tag(A,B)};EJS.Helpers.prototype.submit_tag=function(A,B){B=B||{};B.type=B.type||"submit";B.value=A||"Submit";return this.single_tag_for("input",B)};EJS.Helpers.prototype.tag=function(C,E,D){if(!D){var D=">"}var B=" ";for(var A in E){if(E[A]!=null){var F=E[A].toString()}else{var F=""}if(A=="Class"){A="class"}if(F.indexOf("'")!=-1){B+=A+'="'+F+'" '}else{B+=A+"='"+F+"' "}}return"<"+C+B+D};EJS.Helpers.prototype.tag_end=function(A){return"</"+A+">"};EJS.Helpers.prototype.text_area_tag=function(A,C,B){B=B||{};B.id=B.id||A;B.name=B.name||A;C=C||"";if(B.size){B.cols=B.size.split("x")[0];B.rows=B.size.split("x")[1];delete B.size}B.cols=B.cols||50;B.rows=B.rows||4;return this.start_tag_for("textarea",B)+C+this.tag_end("textarea")};EJS.Helpers.prototype.text_tag=EJS.Helpers.prototype.text_area_tag;EJS.Helpers.prototype.text_field_tag=function(A,C,B){return this.input_field_tag(A,C,"text",B)};EJS.Helpers.prototype.url_for=function(A){return'window.location="'+A+'";'};EJS.Helpers.prototype.img_tag=function(B,C,A){A=A||{};A.src=B;A.alt=C;return this.single_tag_for("img",A)}
--- /dev/null
+// Copyright 2006 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+// Known Issues:
+//
+// * Patterns only support repeat.
+// * Radial gradients are not implemented. The VML version of these looks
+// very different from the canvas one.
+// * Clipping paths are not implemented.
+// * Coordsize. The width and height attributes have higher priority than the
+// width and height style values, which isn't correct.
+// * Painting mode isn't implemented.
+// * Canvas width/height uses content-box by default. IE in
+// Quirks mode will draw the canvas using border-box. Either change your
+// doctype to HTML5
+// (http://www.whatwg.org/specs/web-apps/current-work/#the-doctype)
+// or use Box Sizing Behavior from WebFX
+// (http://webfx.eae.net/dhtml/boxsizing/boxsizing.html)
+// * Non uniform scaling does not correctly scale strokes.
+// * Filling very large shapes (above 5000 points) is buggy.
+// * Optimize. There is always room for speed improvements.
+
+// Only add this code if we do not already have a canvas implementation
+if (!document.createElement('canvas').getContext) {
+
+(function() {
+
+ // alias some functions to make (compiled) code shorter
+ var m = Math;
+ var mr = m.round;
+ var ms = m.sin;
+ var mc = m.cos;
+ var abs = m.abs;
+ var sqrt = m.sqrt;
+
+ // this is used for sub pixel precision
+ // (logical coordinates are scaled by Z when written into VML path strings)
+ var Z = 10;
+ var Z2 = Z / 2;
+
+ // IE version number parsed from the user agent string.
+ // NOTE(review): assumes an "MSIE x.y" token is present; this whole file is
+ // only executed when the native canvas check above fails (i.e. old IE).
+ var IE_VERSION = +navigator.userAgent.match(/MSIE ([\d.]+)?/)[1];
+
+ /**
+ * This function is assigned to the <canvas> elements as element.getContext().
+ * Lazily creates one CanvasRenderingContext2D_ per element and caches it on
+ * the element as context_, so repeated calls return the same context.
+ * @this {HTMLElement}
+ * @return {CanvasRenderingContext2D_}
+ */
+ function getContext() {
+ return this.context_ ||
+ (this.context_ = new CanvasRenderingContext2D_(this));
+ }
+
var slice = Array.prototype.slice;

/**
 * Binds a function to an object. The returned function will always use the
 * passed in {@code obj} as {@code this}.
 *
 * Example:
 *
 *   g = bind(f, obj, a, b)
 *   g(c, d) // will do f.call(obj, a, b, c, d)
 *
 * @param {Function} f The function to bind the object to
 * @param {Object} obj The object that should act as this when the function
 *     is called
 * @param {*} var_args Rest arguments that will be used as the initial
 *     arguments when the function is called
 * @return {Function} A new function that has bound this
 */
function bind(f, obj, var_args) {
  // Capture the extra arguments supplied at bind time.
  var prefixArgs = slice.call(arguments, 2);
  return function() {
    var allArgs = prefixArgs.concat(slice.call(arguments));
    return f.apply(obj, allArgs);
  };
}
+
/**
 * Escapes a value for safe interpolation into a double-quoted HTML/VML
 * attribute.
 *
 * BUG FIX: the entity references had been decoded (likely during a
 * copy/extraction pass), turning both replacements into no-ops
 * ('&' -> '&' and '"' -> '"'), which allowed raw '&' and '"' through into
 * generated markup. Restored the intended entities from upstream.
 *
 * @param {*} s Value to escape; coerced to string first.
 * @return {string} s with '&' and '"' replaced by their HTML entities.
 */
function encodeHtmlAttribute(s) {
  // '&' must be escaped first so the '&' introduced by '&quot;' survives.
  return String(s).replace(/&/g, '&amp;').replace(/"/g, '&quot;');
}
+
/**
 * Registers a VML namespace on the document exactly once; calling again with
 * an already-registered prefix is a no-op.
 * @param {Document} doc Document whose namespaces collection to extend.
 * @param {string} prefix Namespace prefix (e.g. 'g_vml_').
 * @param {string} urn Namespace URN.
 */
function addNamespace(doc, prefix, urn) {
  if (doc.namespaces[prefix]) {
    return;
  }
  doc.namespaces.add(prefix, urn, '#default#VML');
}
+
+ // Registers the VML namespaces this shim emits (g_vml_ and g_o_) on the
+ // given document, and installs the default <canvas> CSS exactly once per
+ // document (keyed by the 'ex_canvas_' owning-element id).
+ function addNamespacesAndStylesheet(doc) {
+ addNamespace(doc, 'g_vml_', 'urn:schemas-microsoft-com:vml');
+ addNamespace(doc, 'g_o_', 'urn:schemas-microsoft-com:office:office');
+
+ // Setup default CSS. Only add one style sheet per document
+ if (!doc.styleSheets['ex_canvas_']) {
+ var ss = doc.createStyleSheet();
+ ss.owningElement.id = 'ex_canvas_';
+ ss.cssText = 'canvas{display:inline-block;overflow:hidden;' +
+ // default size is 300x150 in Gecko and Opera
+ 'text-align:left;width:300px;height:150px}';
+ }
+ }
+
+ // Add namespaces and stylesheet at startup.
+ addNamespacesAndStylesheet(document);
+
+ // Manager object that bootstraps canvas emulation: registers the <canvas>
+ // tag with IE, scans the document for canvas elements once it is ready, and
+ // wires each one up with getContext() and resize/propertychange handlers.
+ var G_vmlCanvasManager_ = {
+ init: function(opt_doc) {
+ var doc = opt_doc || document;
+ // Create a dummy element so that IE will allow canvas elements to be
+ // recognized.
+ doc.createElement('canvas');
+ doc.attachEvent('onreadystatechange', bind(this.init_, this, doc));
+ },
+
+ init_: function(doc) {
+ // find all canvas elements
+ var els = doc.getElementsByTagName('canvas');
+ for (var i = 0; i < els.length; i++) {
+ this.initElement(els[i]);
+ }
+ },
+
+ /**
+ * Public initializes a canvas element so that it can be used as canvas
+ * element from now on. This is called automatically before the page is
+ * loaded but if you are creating elements using createElement you need to
+ * make sure this is called on the element.
+ * @param {HTMLElement} el The canvas element to initialize.
+ * @return {HTMLElement} the element that was created.
+ */
+ initElement: function(el) {
+ // Guard on getContext so that initializing twice is harmless.
+ if (!el.getContext) {
+ el.getContext = getContext;
+
+ // Add namespaces and stylesheet to document of the element.
+ addNamespacesAndStylesheet(el.ownerDocument);
+
+ // Remove fallback content. There is no way to hide text nodes so we
+ // just remove all childNodes. We could hide all elements and remove
+ // text nodes but who really cares about the fallback content.
+ el.innerHTML = '';
+
+ // do not use inline function because that will leak memory
+ el.attachEvent('onpropertychange', onPropertyChange);
+ el.attachEvent('onresize', onResize);
+
+ // Mirror the width/height attributes into style (or vice versa) so
+ // the element gets a concrete pixel size either way.
+ var attrs = el.attributes;
+ if (attrs.width && attrs.width.specified) {
+ // TODO: use runtimeStyle and coordsize
+ // el.getContext().setWidth_(attrs.width.nodeValue);
+ el.style.width = attrs.width.nodeValue + 'px';
+ } else {
+ el.width = el.clientWidth;
+ }
+ if (attrs.height && attrs.height.specified) {
+ // TODO: use runtimeStyle and coordsize
+ // el.getContext().setHeight_(attrs.height.nodeValue);
+ el.style.height = attrs.height.nodeValue + 'px';
+ } else {
+ el.height = el.clientHeight;
+ }
+ //el.getContext().setCoordsize_()
+ }
+ return el;
+ }
+ };
+
+ // Reacts to script-driven changes of the width/height properties: clears
+ // the drawing surface (per canvas spec behavior) and resyncs the element
+ // and inner VML container sizes.
+ function onPropertyChange(e) {
+ var el = e.srcElement;
+
+ switch (e.propertyName) {
+ case 'width':
+ el.getContext().clearRect();
+ el.style.width = el.attributes.width.nodeValue + 'px';
+ // In IE8 this does not trigger onresize.
+ el.firstChild.style.width = el.clientWidth + 'px';
+ break;
+ case 'height':
+ el.getContext().clearRect();
+ el.style.height = el.attributes.height.nodeValue + 'px';
+ el.firstChild.style.height = el.clientHeight + 'px';
+ break;
+ }
+ }
+
/**
 * Resize handler: keeps the inner VML surface element (the canvas element's
 * first child) the same pixel size as the canvas element itself.
 * @param {Event} e IE event object; e.srcElement is the canvas element.
 */
function onResize(e) {
  var canvasEl = e.srcElement;
  var surface = canvasEl.firstChild;
  if (!surface) {
    return;
  }
  surface.style.width = canvasEl.clientWidth + 'px';
  surface.style.height = canvasEl.clientHeight + 'px';
}
+
+ G_vmlCanvasManager_.init();
+
// Precomputed lookup table mapping 0..255 to two-digit lowercase hex
// strings ("00".."ff"), used when assembling '#rrggbb' color values.
var decToHex = [];
for (var i = 0; i < 256; i++) {
  decToHex[i] = (i < 16 ? '0' : '') + i.toString(16);
}
+
/**
 * @return {!Array} A fresh 3x3 identity matrix in row-major nested-array
 *     form, as used for the context's transform state.
 */
function createMatrixIdentity() {
  var identity = [];
  for (var row = 0; row < 3; row++) {
    identity.push([0, 0, 0]);
    identity[row][row] = 1;
  }
  return identity;
}
+
/**
 * Multiplies two 3x3 row-major matrices.
 * @param {!Array} m1 Left operand.
 * @param {!Array} m2 Right operand.
 * @return {!Array} A new matrix equal to m1 * m2.
 */
function matrixMultiply(m1, m2) {
  var product = [[0, 0, 0], [0, 0, 0], [0, 0, 0]];

  for (var row = 0; row < 3; row++) {
    for (var col = 0; col < 3; col++) {
      var acc = 0;
      for (var k = 0; k < 3; k++) {
        acc += m1[row][k] * m2[k][col];
      }
      product[row][col] = acc;
    }
  }
  return product;
}
+
/**
 * Copies the drawing-state properties tracked by this context implementation
 * from o1 onto o2 (used to snapshot/restore state for save()/restore()).
 * @param {Object} o1 Source state object.
 * @param {Object} o2 Destination state object (mutated in place).
 */
function copyState(o1, o2) {
  var props = ['fillStyle', 'lineCap', 'lineJoin', 'lineWidth', 'miterLimit',
               'shadowBlur', 'shadowColor', 'shadowOffsetX', 'shadowOffsetY',
               'strokeStyle', 'globalAlpha', 'font', 'textAlign',
               'textBaseline', 'arcScaleX_', 'arcScaleY_', 'lineScale_'];
  for (var i = 0; i < props.length; i++) {
    var name = props[i];
    o2[name] = o1[name];
  }
}
+
+ // CSS extended color keyword -> '#rrggbb' lookup used by processStyle().
+ // Names not present here (e.g. the basic keywords 'red', 'white', 'blue')
+ // fall through processStyle() unchanged and are handed to VML as-is.
+ var colorData = {
+ aliceblue: '#F0F8FF',
+ antiquewhite: '#FAEBD7',
+ aquamarine: '#7FFFD4',
+ azure: '#F0FFFF',
+ beige: '#F5F5DC',
+ bisque: '#FFE4C4',
+ black: '#000000',
+ blanchedalmond: '#FFEBCD',
+ blueviolet: '#8A2BE2',
+ brown: '#A52A2A',
+ burlywood: '#DEB887',
+ cadetblue: '#5F9EA0',
+ chartreuse: '#7FFF00',
+ chocolate: '#D2691E',
+ coral: '#FF7F50',
+ cornflowerblue: '#6495ED',
+ cornsilk: '#FFF8DC',
+ crimson: '#DC143C',
+ cyan: '#00FFFF',
+ darkblue: '#00008B',
+ darkcyan: '#008B8B',
+ darkgoldenrod: '#B8860B',
+ darkgray: '#A9A9A9',
+ darkgreen: '#006400',
+ darkgrey: '#A9A9A9',
+ darkkhaki: '#BDB76B',
+ darkmagenta: '#8B008B',
+ darkolivegreen: '#556B2F',
+ darkorange: '#FF8C00',
+ darkorchid: '#9932CC',
+ darkred: '#8B0000',
+ darksalmon: '#E9967A',
+ darkseagreen: '#8FBC8F',
+ darkslateblue: '#483D8B',
+ darkslategray: '#2F4F4F',
+ darkslategrey: '#2F4F4F',
+ darkturquoise: '#00CED1',
+ darkviolet: '#9400D3',
+ deeppink: '#FF1493',
+ deepskyblue: '#00BFFF',
+ dimgray: '#696969',
+ dimgrey: '#696969',
+ dodgerblue: '#1E90FF',
+ firebrick: '#B22222',
+ floralwhite: '#FFFAF0',
+ forestgreen: '#228B22',
+ gainsboro: '#DCDCDC',
+ ghostwhite: '#F8F8FF',
+ gold: '#FFD700',
+ goldenrod: '#DAA520',
+ grey: '#808080',
+ greenyellow: '#ADFF2F',
+ honeydew: '#F0FFF0',
+ hotpink: '#FF69B4',
+ indianred: '#CD5C5C',
+ indigo: '#4B0082',
+ ivory: '#FFFFF0',
+ khaki: '#F0E68C',
+ lavender: '#E6E6FA',
+ lavenderblush: '#FFF0F5',
+ lawngreen: '#7CFC00',
+ lemonchiffon: '#FFFACD',
+ lightblue: '#ADD8E6',
+ lightcoral: '#F08080',
+ lightcyan: '#E0FFFF',
+ lightgoldenrodyellow: '#FAFAD2',
+ lightgreen: '#90EE90',
+ lightgrey: '#D3D3D3',
+ lightpink: '#FFB6C1',
+ lightsalmon: '#FFA07A',
+ lightseagreen: '#20B2AA',
+ lightskyblue: '#87CEFA',
+ lightslategray: '#778899',
+ lightslategrey: '#778899',
+ lightsteelblue: '#B0C4DE',
+ lightyellow: '#FFFFE0',
+ limegreen: '#32CD32',
+ linen: '#FAF0E6',
+ magenta: '#FF00FF',
+ mediumaquamarine: '#66CDAA',
+ mediumblue: '#0000CD',
+ mediumorchid: '#BA55D3',
+ mediumpurple: '#9370DB',
+ mediumseagreen: '#3CB371',
+ mediumslateblue: '#7B68EE',
+ mediumspringgreen: '#00FA9A',
+ mediumturquoise: '#48D1CC',
+ mediumvioletred: '#C71585',
+ midnightblue: '#191970',
+ mintcream: '#F5FFFA',
+ mistyrose: '#FFE4E1',
+ moccasin: '#FFE4B5',
+ navajowhite: '#FFDEAD',
+ oldlace: '#FDF5E6',
+ olivedrab: '#6B8E23',
+ orange: '#FFA500',
+ orangered: '#FF4500',
+ orchid: '#DA70D6',
+ palegoldenrod: '#EEE8AA',
+ palegreen: '#98FB98',
+ paleturquoise: '#AFEEEE',
+ palevioletred: '#DB7093',
+ papayawhip: '#FFEFD5',
+ peachpuff: '#FFDAB9',
+ peru: '#CD853F',
+ pink: '#FFC0CB',
+ plum: '#DDA0DD',
+ powderblue: '#B0E0E6',
+ rosybrown: '#BC8F8F',
+ royalblue: '#4169E1',
+ saddlebrown: '#8B4513',
+ salmon: '#FA8072',
+ sandybrown: '#F4A460',
+ seagreen: '#2E8B57',
+ seashell: '#FFF5EE',
+ sienna: '#A0522D',
+ skyblue: '#87CEEB',
+ slateblue: '#6A5ACD',
+ slategray: '#708090',
+ slategrey: '#708090',
+ snow: '#FFFAFA',
+ springgreen: '#00FF7F',
+ steelblue: '#4682B4',
+ tan: '#D2B48C',
+ thistle: '#D8BFD8',
+ tomato: '#FF6347',
+ turquoise: '#40E0D0',
+ violet: '#EE82EE',
+ wheat: '#F5DEB3',
+ whitesmoke: '#F5F5F5',
+ yellowgreen: '#9ACD32'
+ };
+
+
/**
 * Extracts the comma-separated arguments of an rgb()/rgba()/hsl()/hsla()
 * color function string. Always returns at least 4 entries: for the
 * non-alpha forms a numeric alpha of 1 is appended.
 * @param {string} styleString e.g. 'rgba(255, 0, 0, 0.5)'.
 * @return {Array} The raw argument strings (alpha may be the number 1).
 */
function getRgbHslContent(styleString) {
  var open = styleString.indexOf('(', 3);
  var close = styleString.indexOf(')', open + 1);
  var parts = styleString.substring(open + 1, close).split(',');
  // Keep an explicit alpha only when the 4-char prefix ends in 'a' AND four
  // components were actually supplied; otherwise default alpha to 1.
  var hasExplicitAlpha = styleString.charAt(3) == 'a' && parts.length == 4;
  if (!hasExplicitAlpha) {
    parts[3] = 1;
  }
  return parts;
}
+
/** Converts a CSS percentage string such as '50%' to a 0-1 fraction. */
function percent(s) {
  var numeric = parseFloat(s);
  return numeric / 100;
}
+
/** Clamps v into the inclusive range [min, max]. */
function clamp(v, min, max) {
  // Same Math.max/Math.min composition as before (preserves NaN behavior),
  // just split across named steps.
  var atLeastMin = Math.max(min, v);
  return Math.min(max, atLeastMin);
}
+
+ // Converts parsed hsl()/hsla() components (hue in degrees, saturation and
+ // lightness as percentage strings) into a '#rrggbb' hex string using the
+ // standard HSL->RGB algorithm.
+ function hslToRgb(parts){
+ var r, g, b, h, s, l;
+ // Scale hue to a 0..1 fraction; a negative fraction is wrapped up once.
+ h = parseFloat(parts[0]) / 360 % 360;
+ if (h < 0)
+ h++;
+ s = clamp(percent(parts[1]), 0, 1);
+ l = clamp(percent(parts[2]), 0, 1);
+ if (s == 0) {
+ r = g = b = l; // achromatic
+ } else {
+ var q = l < 0.5 ? l * (1 + s) : l + s - l * s;
+ var p = 2 * l - q;
+ r = hueToRgb(p, q, h + 1 / 3);
+ g = hueToRgb(p, q, h);
+ b = hueToRgb(p, q, h - 1 / 3);
+ }
+
+ return '#' + decToHex[Math.floor(r * 255)] +
+ decToHex[Math.floor(g * 255)] +
+ decToHex[Math.floor(b * 255)];
+ }
+
/**
 * HSL->RGB helper: computes a single channel value from the two
 * intermediate values (m1, m2) and a hue offset h, wrapping h into [0, 1].
 */
function hueToRgb(m1, m2, h) {
  if (h < 0) {
    h += 1;
  }
  if (h > 1) {
    h -= 1;
  }

  var channel;
  if (6 * h < 1) {
    channel = m1 + (m2 - m1) * 6 * h;
  } else if (2 * h < 1) {
    channel = m2;
  } else if (3 * h < 2) {
    channel = m1 + (m2 - m1) * (2 / 3 - h) * 6;
  } else {
    channel = m1;
  }
  return channel;
}
+
+ // Memoization cache for processStyle(), keyed by the raw style string.
+ var processStyleCache = {};
+
+ // Normalizes a canvas fillStyle/strokeStyle string into
+ // {color: '#rrggbb' (or pass-through name), alpha: number}. Handles '#hex',
+ // rgb()/rgba(), hsl()/hsla() and the extended color keywords in colorData;
+ // anything else is passed through unchanged with alpha 1.
+ function processStyle(styleString) {
+ if (styleString in processStyleCache) {
+ return processStyleCache[styleString];
+ }
+
+ var str, alpha = 1;
+
+ styleString = String(styleString);
+ if (styleString.charAt(0) == '#') {
+ str = styleString;
+ } else if (/^rgb/.test(styleString)) {
+ var parts = getRgbHslContent(styleString);
+ var str = '#', n;
+ // Each channel may be a percentage or a 0-255 number.
+ for (var i = 0; i < 3; i++) {
+ if (parts[i].indexOf('%') != -1) {
+ n = Math.floor(percent(parts[i]) * 255);
+ } else {
+ n = +parts[i];
+ }
+ str += decToHex[clamp(n, 0, 255)];
+ }
+ alpha = +parts[3];
+ } else if (/^hsl/.test(styleString)) {
+ var parts = getRgbHslContent(styleString);
+ str = hslToRgb(parts);
+ alpha = parts[3];
+ } else {
+ str = colorData[styleString] || styleString;
+ }
+ return processStyleCache[styleString] = {color: str, alpha: alpha};
+ }
+
+ // Fallback values for the parts of a CSS font shorthand that the browser
+ // leaves empty when parsing this.font.
+ var DEFAULT_STYLE = {
+ style: 'normal',
+ variant: 'normal',
+ weight: 'normal',
+ size: 10,
+ family: 'sans-serif'
+ };
+
+ // Internal text style cache
+ var fontStyleCache = {};
+
+ // Parses a CSS font shorthand by assigning it to a detached element's
+ // style and reading back the individual font-* properties, falling back to
+ // DEFAULT_STYLE for anything the browser rejected. Results are memoized.
+ function processFontStyle(styleString) {
+ if (fontStyleCache[styleString]) {
+ return fontStyleCache[styleString];
+ }
+
+ var el = document.createElement('div');
+ var style = el.style;
+ try {
+ style.font = styleString;
+ } catch (ex) {
+ // Ignore failures to set to invalid font.
+ }
+
+ return fontStyleCache[styleString] = {
+ style: style.fontStyle || DEFAULT_STYLE.style,
+ variant: style.fontVariant || DEFAULT_STYLE.variant,
+ weight: style.fontWeight || DEFAULT_STYLE.weight,
+ size: style.fontSize || DEFAULT_STYLE.size,
+ family: style.fontFamily || DEFAULT_STYLE.family
+ };
+ }
+
+ // Resolves a parsed font style against the canvas element's own computed
+ // font size, converting relative units (em/%/pt) to pixels. NOTE: shadows
+ // the global getComputedStyle inside this closure (as in upstream).
+ function getComputedStyle(style, element) {
+ var computedStyle = {};
+
+ for (var p in style) {
+ computedStyle[p] = style[p];
+ }
+
+ // Compute the size
+ var canvasFontSize = parseFloat(element.currentStyle.fontSize),
+ fontSize = parseFloat(style.size);
+
+ if (typeof style.size == 'number') {
+ computedStyle.size = style.size;
+ } else if (style.size.indexOf('px') != -1) {
+ computedStyle.size = fontSize;
+ } else if (style.size.indexOf('em') != -1) {
+ computedStyle.size = canvasFontSize * fontSize;
+ } else if(style.size.indexOf('%') != -1) {
+ computedStyle.size = (canvasFontSize / 100) * fontSize;
+ } else if (style.size.indexOf('pt') != -1) {
+ computedStyle.size = fontSize / .75;
+ } else {
+ computedStyle.size = canvasFontSize;
+ }
+
+ // Different scaling between normal text and VML text. This was found using
+ // trial and error to get the same size as non VML text.
+ computedStyle.size *= 0.981;
+
+ return computedStyle;
+ }
+
/**
 * Serializes a parsed/computed font-style object back into a CSS font
 * shorthand string with a pixel size.
 */
function buildStyle(style) {
  var pieces = [style.style, style.variant, style.weight,
                style.size + 'px', style.family];
  return pieces.join(' ');
}
+
// Maps canvas lineCap values to their VML endcap equivalents. Any
// unrecognized value (including 'square') maps to VML's 'square'.
var lineCapMap = {
  'butt': 'flat',
  'round': 'round'
};

function processLineCap(lineCap) {
  var vmlCap = lineCapMap[lineCap];
  return vmlCap ? vmlCap : 'square';
}
+
+ /**
+ * This class implements CanvasRenderingContext2D interface as described by
+ * the WHATWG. State lives on the instance; drawing output is VML markup
+ * inserted into a container <div> (element_) inside the canvas element.
+ * @param {HTMLElement} canvasElement The element that the 2D context should
+ * be associated with
+ */
+ function CanvasRenderingContext2D_(canvasElement) {
+ // Current transform matrix plus matrix/attribute stacks for save/restore.
+ this.m_ = createMatrixIdentity();
+
+ this.mStack_ = [];
+ this.aStack_ = [];
+ this.currentPath_ = [];
+
+ // Canvas context properties
+ this.strokeStyle = '#000';
+ this.fillStyle = '#000';
+
+ this.lineWidth = 1;
+ this.lineJoin = 'miter';
+ this.lineCap = 'butt';
+ this.miterLimit = Z * 1;
+ this.globalAlpha = 1;
+ this.font = '10px sans-serif';
+ this.textAlign = 'left';
+ this.textBaseline = 'alphabetic';
+ this.canvas = canvasElement;
+
+ // The div that receives all generated VML markup.
+ var cssText = 'width:' + canvasElement.clientWidth + 'px;height:' +
+ canvasElement.clientHeight + 'px;overflow:hidden;position:absolute';
+ var el = canvasElement.ownerDocument.createElement('div');
+ el.style.cssText = cssText;
+ canvasElement.appendChild(el);
+
+ // Invisible overlay on top of the drawing surface.
+ var overlayEl = el.cloneNode(false);
+ // Use a non transparent background.
+ overlayEl.style.backgroundColor = 'red';
+ overlayEl.style.filter = 'alpha(opacity=0)';
+ canvasElement.appendChild(overlayEl);
+
+ this.element_ = el;
+ this.arcScaleX_ = 1;
+ this.arcScaleY_ = 1;
+ this.lineScale_ = 1;
+ }
+
+ var contextPrototype = CanvasRenderingContext2D_.prototype;
+ // Clears the whole surface by wiping the VML container (arguments, if any,
+ // are ignored: this implementation always clears everything).
+ contextPrototype.clearRect = function() {
+ if (this.textMeasureEl_) {
+ this.textMeasureEl_.removeNode(true);
+ this.textMeasureEl_ = null;
+ }
+ this.element_.innerHTML = '';
+ };
+
+ contextPrototype.beginPath = function() {
+ // TODO: Branch current matrix so that save/restore has no effect
+ // as per safari docs.
+ this.currentPath_ = [];
+ };
+
+ // Appends a moveTo segment (in device coordinates) to the current path and
+ // records the new current point.
+ contextPrototype.moveTo = function(aX, aY) {
+ var p = getCoords(this, aX, aY);
+ this.currentPath_.push({type: 'moveTo', x: p.x, y: p.y});
+ this.currentX_ = p.x;
+ this.currentY_ = p.y;
+ };
+
+ contextPrototype.lineTo = function(aX, aY) {
+ var p = getCoords(this, aX, aY);
+ this.currentPath_.push({type: 'lineTo', x: p.x, y: p.y});
+
+ this.currentX_ = p.x;
+ this.currentY_ = p.y;
+ };
+
+ contextPrototype.bezierCurveTo = function(aCP1x, aCP1y,
+ aCP2x, aCP2y,
+ aX, aY) {
+ var p = getCoords(this, aX, aY);
+ var cp1 = getCoords(this, aCP1x, aCP1y);
+ var cp2 = getCoords(this, aCP2x, aCP2y);
+ bezierCurveTo(this, cp1, cp2, p);
+ };
+
+ // Helper function that takes the already fixed cordinates.
+ function bezierCurveTo(self, cp1, cp2, p) {
+ self.currentPath_.push({
+ type: 'bezierCurveTo',
+ cp1x: cp1.x,
+ cp1y: cp1.y,
+ cp2x: cp2.x,
+ cp2y: cp2.y,
+ x: p.x,
+ y: p.y
+ });
+ self.currentX_ = p.x;
+ self.currentY_ = p.y;
+ }
+
+ // Quadratic curves are emitted as equivalent cubic beziers since VML (via
+ // the ' c ' path command) only supports cubics.
+ contextPrototype.quadraticCurveTo = function(aCPx, aCPy, aX, aY) {
+ // the following is lifted almost directly from
+ // http://developer.mozilla.org/en/docs/Canvas_tutorial:Drawing_shapes
+
+ var cp = getCoords(this, aCPx, aCPy);
+ var p = getCoords(this, aX, aY);
+
+ var cp1 = {
+ x: this.currentX_ + 2.0 / 3.0 * (cp.x - this.currentX_),
+ y: this.currentY_ + 2.0 / 3.0 * (cp.y - this.currentY_)
+ };
+ var cp2 = {
+ x: cp1.x + (p.x - this.currentX_) / 3.0,
+ y: cp1.y + (p.y - this.currentY_) / 3.0
+ };
+
+ bezierCurveTo(this, cp1, cp2, p);
+ };
+
+ // Records an arc segment as a VML 'at' (clockwise) or 'wa' (anticlockwise)
+ // command, precomputing the start/end points on the circle.
+ contextPrototype.arc = function(aX, aY, aRadius,
+ aStartAngle, aEndAngle, aClockwise) {
+ aRadius *= Z;
+ var arcType = aClockwise ? 'at' : 'wa';
+
+ var xStart = aX + mc(aStartAngle) * aRadius - Z2;
+ var yStart = aY + ms(aStartAngle) * aRadius - Z2;
+
+ var xEnd = aX + mc(aEndAngle) * aRadius - Z2;
+ var yEnd = aY + ms(aEndAngle) * aRadius - Z2;
+
+ // IE won't render arches drawn counter clockwise if xStart == xEnd.
+ if (xStart == xEnd && !aClockwise) {
+ xStart += 0.125; // Offset xStart by 1/80 of a pixel. Use something
+ // that can be represented in binary
+ }
+
+ var p = getCoords(this, aX, aY);
+ var pStart = getCoords(this, xStart, yStart);
+ var pEnd = getCoords(this, xEnd, yEnd);
+
+ this.currentPath_.push({type: arcType,
+ x: p.x,
+ y: p.y,
+ radius: aRadius,
+ xStart: pStart.x,
+ yStart: pStart.y,
+ xEnd: pEnd.x,
+ yEnd: pEnd.y});
+
+ };
+
+ // Adds a closed rectangle subpath to the current path.
+ contextPrototype.rect = function(aX, aY, aWidth, aHeight) {
+ this.moveTo(aX, aY);
+ this.lineTo(aX + aWidth, aY);
+ this.lineTo(aX + aWidth, aY + aHeight);
+ this.lineTo(aX, aY + aHeight);
+ this.closePath();
+ };
+
+ // Strokes a rectangle immediately, preserving the caller's current path.
+ contextPrototype.strokeRect = function(aX, aY, aWidth, aHeight) {
+ var oldPath = this.currentPath_;
+ this.beginPath();
+
+ this.moveTo(aX, aY);
+ this.lineTo(aX + aWidth, aY);
+ this.lineTo(aX + aWidth, aY + aHeight);
+ this.lineTo(aX, aY + aHeight);
+ this.closePath();
+ this.stroke();
+
+ this.currentPath_ = oldPath;
+ };
+
+ // Fills a rectangle immediately, preserving the caller's current path.
+ contextPrototype.fillRect = function(aX, aY, aWidth, aHeight) {
+ var oldPath = this.currentPath_;
+ this.beginPath();
+
+ this.moveTo(aX, aY);
+ this.lineTo(aX + aWidth, aY);
+ this.lineTo(aX + aWidth, aY + aHeight);
+ this.lineTo(aX, aY + aHeight);
+ this.closePath();
+ this.fill();
+
+ this.currentPath_ = oldPath;
+ };
+
+ // Gradient factories: record the geometry on a CanvasGradient_ object;
+ // the actual VML fill is generated later by appendFill().
+ contextPrototype.createLinearGradient = function(aX0, aY0, aX1, aY1) {
+ var gradient = new CanvasGradient_('gradient');
+ gradient.x0_ = aX0;
+ gradient.y0_ = aY0;
+ gradient.x1_ = aX1;
+ gradient.y1_ = aY1;
+ return gradient;
+ };
+
+ contextPrototype.createRadialGradient = function(aX0, aY0, aR0,
+ aX1, aY1, aR1) {
+ var gradient = new CanvasGradient_('gradientradial');
+ gradient.x0_ = aX0;
+ gradient.y0_ = aY0;
+ gradient.r0_ = aR0;
+ gradient.x1_ = aX1;
+ gradient.y1_ = aY1;
+ gradient.r1_ = aR1;
+ return gradient;
+ };
+
+ // Draws an image by emitting a VML group containing a g_vml_:image element,
+ // supporting the 3/5/9-argument canvas drawImage overloads. Rotation/skew is
+ // applied via a DXImageTransform Matrix filter (slow, so only when needed).
+ contextPrototype.drawImage = function(image, var_args) {
+ var dx, dy, dw, dh, sx, sy, sw, sh;
+
+ // to find the original width we overide the width and height
+ var oldRuntimeWidth = image.runtimeStyle.width;
+ var oldRuntimeHeight = image.runtimeStyle.height;
+ image.runtimeStyle.width = 'auto';
+ image.runtimeStyle.height = 'auto';
+
+ // get the original size
+ var w = image.width;
+ var h = image.height;
+
+ // and remove overides
+ image.runtimeStyle.width = oldRuntimeWidth;
+ image.runtimeStyle.height = oldRuntimeHeight;
+
+ // Decode the overload: (img, dx, dy), (img, dx, dy, dw, dh) or
+ // (img, sx, sy, sw, sh, dx, dy, dw, dh).
+ if (arguments.length == 3) {
+ dx = arguments[1];
+ dy = arguments[2];
+ sx = sy = 0;
+ sw = dw = w;
+ sh = dh = h;
+ } else if (arguments.length == 5) {
+ dx = arguments[1];
+ dy = arguments[2];
+ dw = arguments[3];
+ dh = arguments[4];
+ sx = sy = 0;
+ sw = w;
+ sh = h;
+ } else if (arguments.length == 9) {
+ sx = arguments[1];
+ sy = arguments[2];
+ sw = arguments[3];
+ sh = arguments[4];
+ dx = arguments[5];
+ dy = arguments[6];
+ dw = arguments[7];
+ dh = arguments[8];
+ } else {
+ throw Error('Invalid number of arguments');
+ }
+
+ var d = getCoords(this, dx, dy);
+
+ var w2 = sw / 2;
+ var h2 = sh / 2;
+
+ var vmlStr = [];
+
+ var W = 10;
+ var H = 10;
+
+ // For some reason that I've now forgotten, using divs didn't work
+ vmlStr.push(' <g_vml_:group',
+ ' coordsize="', Z * W, ',', Z * H, '"',
+ ' coordorigin="0,0"' ,
+ ' style="width:', W, 'px;height:', H, 'px;position:absolute;');
+
+ // If filters are necessary (rotation exists), create them
+ // filters are bog-slow, so only create them if abbsolutely necessary
+ // The following check doesn't account for skews (which don't exist
+ // in the canvas spec (yet) anyway.
+
+ if (this.m_[0][0] != 1 || this.m_[0][1] ||
+ this.m_[1][1] != 1 || this.m_[1][0]) {
+ var filter = [];
+
+ // Note the 12/21 reversal
+ filter.push('M11=', this.m_[0][0], ',',
+ 'M12=', this.m_[1][0], ',',
+ 'M21=', this.m_[0][1], ',',
+ 'M22=', this.m_[1][1], ',',
+ 'Dx=', mr(d.x / Z), ',',
+ 'Dy=', mr(d.y / Z), '');
+
+ // Bounding box calculation (need to minimize displayed area so that
+ // filters don't waste time on unused pixels.
+ var max = d;
+ var c2 = getCoords(this, dx + dw, dy);
+ var c3 = getCoords(this, dx, dy + dh);
+ var c4 = getCoords(this, dx + dw, dy + dh);
+
+ max.x = m.max(max.x, c2.x, c3.x, c4.x);
+ max.y = m.max(max.y, c2.y, c3.y, c4.y);
+
+ vmlStr.push('padding:0 ', mr(max.x / Z), 'px ', mr(max.y / Z),
+ 'px 0;filter:progid:DXImageTransform.Microsoft.Matrix(',
+ filter.join(''), ", sizingmethod='clip');");
+
+ } else {
+ vmlStr.push('top:', mr(d.y / Z), 'px;left:', mr(d.x / Z), 'px;');
+ }
+
+ // Source-rect cropping is expressed via VML's fractional crop attributes.
+ vmlStr.push(' ">' ,
+ '<g_vml_:image src="', image.src, '"',
+ ' style="width:', Z * dw, 'px;',
+ ' height:', Z * dh, 'px"',
+ ' cropleft="', sx / w, '"',
+ ' croptop="', sy / h, '"',
+ ' cropright="', (w - sx - sw) / w, '"',
+ ' cropbottom="', (h - sy - sh) / h, '"',
+ ' />',
+ '</g_vml_:group>');
+
+ this.element_.insertAdjacentHTML('BeforeEnd', vmlStr.join(''));
+ };
+
+ // Renders the current path as VML shape markup. With aFill truthy the shape
+ // is filled (used by fill()), otherwise stroked. Long paths are split into
+ // 5000-segment chunks to stay under IE's VML path-length limit.
+ contextPrototype.stroke = function(aFill) {
+ var W = 10;
+ var H = 10;
+ // Divide the shape into chunks if it's too long because IE has a limit
+ // somewhere for how long a VML shape can be. This simple division does
+ // not work with fills, only strokes, unfortunately.
+ var chunkSize = 5000;
+
+ // Bounding box of the whole path, needed for gradient/pattern fills.
+ var min = {x: null, y: null};
+ var max = {x: null, y: null};
+
+ for (var j = 0; j < this.currentPath_.length; j += chunkSize) {
+ var lineStr = [];
+ var lineOpen = false;
+
+ lineStr.push('<g_vml_:shape',
+ ' filled="', !!aFill, '"',
+ ' style="position:absolute;width:', W, 'px;height:', H, 'px;"',
+ ' coordorigin="0,0"',
+ ' coordsize="', Z * W, ',', Z * H, '"',
+ ' stroked="', !aFill, '"',
+ ' path="');
+
+ var newSeq = false;
+
+ for (var i = j; i < Math.min(j + chunkSize, this.currentPath_.length); i++) {
+ if (i % chunkSize == 0 && i > 0) { // move into position for next chunk
+ lineStr.push(' m ', mr(this.currentPath_[i-1].x), ',', mr(this.currentPath_[i-1].y));
+ }
+
+ var p = this.currentPath_[i];
+ var c;
+
+ // Translate each recorded segment into its VML path command.
+ switch (p.type) {
+ case 'moveTo':
+ c = p;
+ lineStr.push(' m ', mr(p.x), ',', mr(p.y));
+ break;
+ case 'lineTo':
+ lineStr.push(' l ', mr(p.x), ',', mr(p.y));
+ break;
+ case 'close':
+ lineStr.push(' x ');
+ p = null;
+ break;
+ case 'bezierCurveTo':
+ lineStr.push(' c ',
+ mr(p.cp1x), ',', mr(p.cp1y), ',',
+ mr(p.cp2x), ',', mr(p.cp2y), ',',
+ mr(p.x), ',', mr(p.y));
+ break;
+ case 'at':
+ case 'wa':
+ lineStr.push(' ', p.type, ' ',
+ mr(p.x - this.arcScaleX_ * p.radius), ',',
+ mr(p.y - this.arcScaleY_ * p.radius), ' ',
+ mr(p.x + this.arcScaleX_ * p.radius), ',',
+ mr(p.y + this.arcScaleY_ * p.radius), ' ',
+ mr(p.xStart), ',', mr(p.yStart), ' ',
+ mr(p.xEnd), ',', mr(p.yEnd));
+ break;
+ }
+
+
+ // TODO: Following is broken for curves due to
+ // move to proper paths.
+
+ // Figure out dimensions so we can do gradient fills
+ // properly
+ if (p) {
+ if (min.x == null || p.x < min.x) {
+ min.x = p.x;
+ }
+ if (max.x == null || p.x > max.x) {
+ max.x = p.x;
+ }
+ if (min.y == null || p.y < min.y) {
+ min.y = p.y;
+ }
+ if (max.y == null || p.y > max.y) {
+ max.y = p.y;
+ }
+ }
+ }
+ lineStr.push(' ">');
+
+ if (!aFill) {
+ appendStroke(this, lineStr);
+ } else {
+ appendFill(this, lineStr, min, max);
+ }
+
+ lineStr.push('</g_vml_:shape>');
+
+ this.element_.insertAdjacentHTML('beforeEnd', lineStr.join(''));
+ }
+ };
+
+ // Appends a g_vml_:stroke element describing the context's current stroke
+ // style (color, opacity, join, cap, width) onto the open shape markup.
+ function appendStroke(ctx, lineStr) {
+ var a = processStyle(ctx.strokeStyle);
+ var color = a.color;
+ var opacity = a.alpha * ctx.globalAlpha;
+ var lineWidth = ctx.lineScale_ * ctx.lineWidth;
+
+ // VML cannot correctly render a line if the width is less than 1px.
+ // In that case, we dilute the color to make the line look thinner.
+ if (lineWidth < 1) {
+ opacity *= lineWidth;
+ }
+
+ lineStr.push(
+ '<g_vml_:stroke',
+ ' opacity="', opacity, '"',
+ ' joinstyle="', ctx.lineJoin, '"',
+ ' miterlimit="', ctx.miterLimit, '"',
+ ' endcap="', processLineCap(ctx.lineCap), '"',
+ ' weight="', lineWidth, 'px"',
+ ' color="', color, '" />'
+ );
+ }
+
+ // Appends a g_vml_:fill element for the context's current fillStyle onto
+ // the open shape markup. Handles three cases: CanvasGradient_ (linear and
+ // radial), CanvasPattern_ (tiled image), and plain colors. min/max is the
+ // path's bounding box, used to position gradients/patterns.
+ function appendFill(ctx, lineStr, min, max) {
+ var fillStyle = ctx.fillStyle;
+ var arcScaleX = ctx.arcScaleX_;
+ var arcScaleY = ctx.arcScaleY_;
+ var width = max.x - min.x;
+ var height = max.y - min.y;
+ if (fillStyle instanceof CanvasGradient_) {
+ // TODO: Gradients transformed with the transformation matrix.
+ var angle = 0;
+ var focus = {x: 0, y: 0};
+
+ // additional offset
+ var shift = 0;
+ // scale factor for offset
+ var expansion = 1;
+
+ if (fillStyle.type_ == 'gradient') {
+ // Linear gradient: derive the VML fill angle from the two endpoints.
+ var x0 = fillStyle.x0_ / arcScaleX;
+ var y0 = fillStyle.y0_ / arcScaleY;
+ var x1 = fillStyle.x1_ / arcScaleX;
+ var y1 = fillStyle.y1_ / arcScaleY;
+ var p0 = getCoords(ctx, x0, y0);
+ var p1 = getCoords(ctx, x1, y1);
+ var dx = p1.x - p0.x;
+ var dy = p1.y - p0.y;
+ angle = Math.atan2(dx, dy) * 180 / Math.PI;
+
+ // The angle should be a non-negative number.
+ if (angle < 0) {
+ angle += 360;
+ }
+
+ // Very small angles produce an unexpected result because they are
+ // converted to a scientific notation string.
+ if (angle < 1e-6) {
+ angle = 0;
+ }
+ } else {
+ // Radial gradient: express the focus point and radii as fractions of
+ // the path's bounding box.
+ var p0 = getCoords(ctx, fillStyle.x0_, fillStyle.y0_);
+ focus = {
+ x: (p0.x - min.x) / width,
+ y: (p0.y - min.y) / height
+ };
+
+ width /= arcScaleX * Z;
+ height /= arcScaleY * Z;
+ var dimension = m.max(width, height);
+ shift = 2 * fillStyle.r0_ / dimension;
+ expansion = 2 * fillStyle.r1_ / dimension - shift;
+ }
+
+ // We need to sort the color stops in ascending order by offset,
+ // otherwise IE won't interpret it correctly.
+ var stops = fillStyle.colors_;
+ stops.sort(function(cs1, cs2) {
+ return cs1.offset - cs2.offset;
+ });
+
+ var length = stops.length;
+ var color1 = stops[0].color;
+ var color2 = stops[length - 1].color;
+ var opacity1 = stops[0].alpha * ctx.globalAlpha;
+ var opacity2 = stops[length - 1].alpha * ctx.globalAlpha;
+
+ var colors = [];
+ for (var i = 0; i < length; i++) {
+ var stop = stops[i];
+ colors.push(stop.offset * expansion + shift + ' ' + stop.color);
+ }
+
+ // When colors attribute is used, the meanings of opacity and o:opacity2
+ // are reversed.
+ lineStr.push('<g_vml_:fill type="', fillStyle.type_, '"',
+ ' method="none" focus="100%"',
+ ' color="', color1, '"',
+ ' color2="', color2, '"',
+ ' colors="', colors.join(','), '"',
+ ' opacity="', opacity2, '"',
+ ' g_o_:opacity2="', opacity1, '"',
+ ' angle="', angle, '"',
+ ' focusposition="', focus.x, ',', focus.y, '" />');
+ } else if (fillStyle instanceof CanvasPattern_) {
+ if (width && height) {
+ var deltaLeft = -min.x;
+ var deltaTop = -min.y;
+ lineStr.push('<g_vml_:fill',
+ ' position="',
+ deltaLeft / width * arcScaleX * arcScaleX, ',',
+ deltaTop / height * arcScaleY * arcScaleY, '"',
+ ' type="tile"',
+ // TODO: Figure out the correct size to fit the scale.
+ //' size="', w, 'px ', h, 'px"',
+ ' src="', fillStyle.src_, '" />');
+ }
+ } else {
+ // Plain color fill.
+ var a = processStyle(ctx.fillStyle);
+ var color = a.color;
+ var opacity = a.alpha * ctx.globalAlpha;
+ lineStr.push('<g_vml_:fill color="', color, '" opacity="', opacity,
+ '" />');
+ }
+ }
+
+ // fill() is implemented as stroke() in fill mode (see stroke(aFill)).
+ contextPrototype.fill = function() {
+ this.stroke(true);
+ };
+
+ contextPrototype.closePath = function() {
+ this.currentPath_.push({type: 'close'});
+ };
+
+ // Applies the context's current transform matrix to a user-space point and
+ // scales into the Z-based VML coordinate space (Z2 recenters sub-pixels).
+ function getCoords(ctx, aX, aY) {
+ var m = ctx.m_;
+ return {
+ x: Z * (aX * m[0][0] + aY * m[1][0] + m[2][0]) - Z2,
+ y: Z * (aX * m[0][1] + aY * m[1][1] + m[2][1]) - Z2
+ };
+ };
+
+ // Pushes a snapshot of drawing state and the transform matrix; the multiply
+ // by identity clones m_ so later mutations don't alias the saved copy.
+ contextPrototype.save = function() {
+ var o = {};
+ copyState(this, o);
+ this.aStack_.push(o);
+ this.mStack_.push(this.m_);
+ this.m_ = matrixMultiply(createMatrixIdentity(), this.m_);
+ };
+
+ // Pops the most recent save(); silently does nothing on an empty stack.
+ contextPrototype.restore = function() {
+ if (this.aStack_.length) {
+ copyState(this.aStack_.pop(), this);
+ this.m_ = this.mStack_.pop();
+ }
+ };
+
/**
 * Returns true when the affine part of a 3x3 transform matrix (the first
 * two entries of each row) contains only finite numbers.
 */
function matrixIsFinite(m) {
  for (var row = 0; row < 3; row++) {
    for (var col = 0; col < 2; col++) {
      if (!isFinite(m[row][col])) {
        return false;
      }
    }
  }
  return true;
}
+
+ function setM(ctx, m, updateLineScale) {
+ if (!matrixIsFinite(m)) {
+ return;
+ }
+ ctx.m_ = m;
+
+ if (updateLineScale) {
+ // Get the line scale.
+ // Determinant of this.m_ means how much the area is enlarged by the
+ // transformation. So its square root can be used as a scale factor
+ // for width.
+ var det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
+ ctx.lineScale_ = sqrt(abs(det));
+ }
+ }
+
+ // translate(): left-multiply the current matrix by a translation.
+ // Pure translation cannot change area, so the line scale is left alone.
+ contextPrototype.translate = function(aX, aY) {
+ var m1 = [
+ [1, 0, 0],
+ [0, 1, 0],
+ [aX, aY, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), false);
+ };
+
+ // rotate(): left-multiply by a rotation of aRot radians. mc/ms are
+ // the Math.cos/Math.sin aliases defined earlier in this file.
+ // Rotation preserves area, so the line scale is not updated.
+ contextPrototype.rotate = function(aRot) {
+ var c = mc(aRot);
+ var s = ms(aRot);
+
+ var m1 = [
+ [c, s, 0],
+ [-s, c, 0],
+ [0, 0, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), false);
+ };
+
+ // scale(): left-multiply by a scale; also accumulate per-axis factors
+ // used by arc() / gradient code, and refresh the line-width scale.
+ contextPrototype.scale = function(aX, aY) {
+ this.arcScaleX_ *= aX;
+ this.arcScaleY_ *= aY;
+ var m1 = [
+ [aX, 0, 0],
+ [0, aY, 0],
+ [0, 0, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), true);
+ };
+
+ // transform(): compose an arbitrary affine matrix with the current one.
+ contextPrototype.transform = function(m11, m12, m21, m22, dx, dy) {
+ var m1 = [
+ [m11, m12, 0],
+ [m21, m22, 0],
+ [dx, dy, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), true);
+ };
+
+ // setTransform(): replace (not compose) the current matrix outright.
+ contextPrototype.setTransform = function(m11, m12, m21, m22, dx, dy) {
+ var m = [
+ [m11, m12, 0],
+ [m21, m22, 0],
+ [dx, dy, 1]
+ ];
+
+ setM(this, m, true);
+ };
+
+ /**
+ * The text drawing function.
+ * The maxWidth argument isn't taken in account, since no browser supports
+ * it yet.
+ */
+ contextPrototype.drawText_ = function(text, x, y, maxWidth, stroke) {
+ var m = this.m_,
+ delta = 1000,
+ left = 0,
+ right = delta,
+ offset = {x: 0, y: 0},
+ lineStr = [];
+
+ var fontStyle = getComputedStyle(processFontStyle(this.font),
+ this.element_);
+
+ var fontStyleString = buildStyle(fontStyle);
+
+ var elementStyle = this.element_.currentStyle;
+ var textAlign = this.textAlign.toLowerCase();
+ switch (textAlign) {
+ case 'left':
+ case 'center':
+ case 'right':
+ break;
+ case 'end':
+ textAlign = elementStyle.direction == 'ltr' ? 'right' : 'left';
+ break;
+ case 'start':
+ textAlign = elementStyle.direction == 'rtl' ? 'right' : 'left';
+ break;
+ default:
+ textAlign = 'left';
+ }
+
+ // 1.75 is an arbitrary number, as there is no info about the text baseline
+ switch (this.textBaseline) {
+ case 'hanging':
+ case 'top':
+ offset.y = fontStyle.size / 1.75;
+ break;
+ case 'middle':
+ break;
+ default:
+ case null:
+ case 'alphabetic':
+ case 'ideographic':
+ case 'bottom':
+ offset.y = -fontStyle.size / 2.25;
+ break;
+ }
+
+ switch(textAlign) {
+ case 'right':
+ left = delta;
+ right = 0.05;
+ break;
+ case 'center':
+ left = right = delta / 2;
+ break;
+ }
+
+ var d = getCoords(this, x + offset.x, y + offset.y);
+
+ lineStr.push('<g_vml_:line from="', -left ,' 0" to="', right ,' 0.05" ',
+ ' coordsize="100 100" coordorigin="0 0"',
+ ' filled="', !stroke, '" stroked="', !!stroke,
+ '" style="position:absolute;width:1px;height:1px;">');
+
+ if (stroke) {
+ appendStroke(this, lineStr);
+ } else {
+ // TODO: Fix the min and max params.
+ appendFill(this, lineStr, {x: -left, y: 0},
+ {x: right, y: fontStyle.size});
+ }
+
+ var skewM = m[0][0].toFixed(3) + ',' + m[1][0].toFixed(3) + ',' +
+ m[0][1].toFixed(3) + ',' + m[1][1].toFixed(3) + ',0,0';
+
+ var skewOffset = mr(d.x / Z) + ',' + mr(d.y / Z);
+
+ lineStr.push('<g_vml_:skew on="t" matrix="', skewM ,'" ',
+ ' offset="', skewOffset, '" origin="', left ,' 0" />',
+ '<g_vml_:path textpathok="true" />',
+ '<g_vml_:textpath on="true" string="',
+ encodeHtmlAttribute(text),
+ '" style="v-text-align:', textAlign,
+ ';font:', encodeHtmlAttribute(fontStyleString),
+ '" /></g_vml_:line>');
+
+ this.element_.insertAdjacentHTML('beforeEnd', lineStr.join(''));
+ };
+
+ // Public canvas text API: both entry points funnel into drawText_,
+ // differing only in the stroke flag.
+ contextPrototype.fillText = function(text, x, y, maxWidth) {
+ this.drawText_(text, x, y, maxWidth, false);
+ };
+
+ contextPrototype.strokeText = function(text, x, y, maxWidth) {
+ this.drawText_(text, x, y, maxWidth, true);
+ };
+
+ // measureText(): measure by rendering into a hidden off-screen <span>
+ // (created lazily, reused across calls) and reading its offsetWidth.
+ contextPrototype.measureText = function(text) {
+ if (!this.textMeasureEl_) {
+ var s = '<span style="position:absolute;' +
+ 'top:-20000px;left:0;padding:0;margin:0;border:none;' +
+ 'white-space:pre;"></span>';
+ this.element_.insertAdjacentHTML('beforeEnd', s);
+ this.textMeasureEl_ = this.element_.lastChild;
+ }
+ var doc = this.element_.ownerDocument;
+ this.textMeasureEl_.innerHTML = '';
+ this.textMeasureEl_.style.font = this.font;
+ // Don't use innerHTML or innerText because they allow markup/whitespace.
+ this.textMeasureEl_.appendChild(doc.createTextNode(text));
+ return {width: this.textMeasureEl_.offsetWidth};
+ };
+
+ /******** STUBS ********/
+ // clip() and arcTo() are unimplemented no-ops in this emulation.
+ contextPrototype.clip = function() {
+ // TODO: Implement
+ };
+
+ contextPrototype.arcTo = function() {
+ // TODO: Implement
+ };
+
+ contextPrototype.createPattern = function(image, repetition) {
+ return new CanvasPattern_(image, repetition);
+ };
+
+ // Gradient / Pattern Stubs
+ // Gradient state holder; aType is the VML fill type ('gradient' for
+ // linear, 'gradientradial' for radial). Coordinates/radii are filled
+ // in by createLinearGradient / createRadialGradient elsewhere.
+ function CanvasGradient_(aType) {
+ this.type_ = aType;
+ this.x0_ = 0;
+ this.y0_ = 0;
+ this.r0_ = 0;
+ this.x1_ = 0;
+ this.y1_ = 0;
+ this.r1_ = 0;
+ this.colors_ = [];
+ }
+
+ // Record a color stop; processStyle() normalizes the CSS color into
+ // a {color, alpha} pair before it is stored.
+ CanvasGradient_.prototype.addColorStop = function(aOffset, aColor) {
+ aColor = processStyle(aColor);
+ this.colors_.push({offset: aOffset,
+ color: aColor.color,
+ alpha: aColor.alpha});
+ };
+
+ // Pattern source wrapper. Only complete <img> elements are accepted;
+ // null/'' repetition defaults to 'repeat' per the canvas spec, and an
+ // unrecognized repetition raises SYNTAX_ERR.
+ function CanvasPattern_(image, repetition) {
+ assertImageIsValid(image);
+ switch (repetition) {
+ case 'repeat':
+ case null:
+ case '':
+ this.repetition_ = 'repeat';
+ break
+ case 'repeat-x':
+ case 'repeat-y':
+ case 'no-repeat':
+ this.repetition_ = repetition;
+ break;
+ default:
+ throwException('SYNTAX_ERR');
+ }
+
+ this.src_ = image.src;
+ this.width_ = image.width;
+ this.height_ = image.height;
+ }
+
+ // Raise a named DOM exception (see DOMException_ below for codes).
+ function throwException(s) {
+ throw new DOMException_(s);
+ }
+
+ // Validate a pattern source: must be an <img> element node that has
+ // finished loading (IE's readyState), mirroring the spec's
+ // TYPE_MISMATCH_ERR / INVALID_STATE_ERR conditions.
+ function assertImageIsValid(img) {
+ if (!img || img.nodeType != 1 || img.tagName != 'IMG') {
+ throwException('TYPE_MISMATCH_ERR');
+ }
+ if (img.readyState != 'complete') {
+ throwException('INVALID_STATE_ERR');
+ }
+ }
+
+ // Minimal DOMException shim for browsers lacking one. The constructor
+ // looks the numeric code up by name on the prototype (codes below).
+ function DOMException_(s) {
+ this.code = this[s];
+ this.message = s +': DOM Exception ' + this.code;
+ }
+ // Standard DOM Level 2/3 exception codes, mirrored onto the prototype
+ // so instances resolve them via this[name] above.
+ var p = DOMException_.prototype = new Error;
+ p.INDEX_SIZE_ERR = 1;
+ p.DOMSTRING_SIZE_ERR = 2;
+ p.HIERARCHY_REQUEST_ERR = 3;
+ p.WRONG_DOCUMENT_ERR = 4;
+ p.INVALID_CHARACTER_ERR = 5;
+ p.NO_DATA_ALLOWED_ERR = 6;
+ p.NO_MODIFICATION_ALLOWED_ERR = 7;
+ p.NOT_FOUND_ERR = 8;
+ p.NOT_SUPPORTED_ERR = 9;
+ p.INUSE_ATTRIBUTE_ERR = 10;
+ p.INVALID_STATE_ERR = 11;
+ p.SYNTAX_ERR = 12;
+ p.INVALID_MODIFICATION_ERR = 13;
+ p.NAMESPACE_ERR = 14;
+ p.INVALID_ACCESS_ERR = 15;
+ p.VALIDATION_ERR = 16;
+ p.TYPE_MISMATCH_ERR = 17;
+
+ // set up externs
+ // Publish the private implementations under the global names the
+ // rest of the page expects (intentionally global assignments).
+ G_vmlCanvasManager = G_vmlCanvasManager_;
+ CanvasRenderingContext2D = CanvasRenderingContext2D_;
+ CanvasGradient = CanvasGradient_;
+ CanvasPattern = CanvasPattern_;
+ DOMException = DOMException_;
+})();
+
+} // if
--- /dev/null
+if(!document.createElement("canvas").getContext){(function(){var z=Math;var K=z.round;var J=z.sin;var U=z.cos;var b=z.abs;var k=z.sqrt;var D=10;var F=D/2;function T(){return this.context_||(this.context_=new W(this))}var O=Array.prototype.slice;function G(i,j,m){var Z=O.call(arguments,2);return function(){return i.apply(j,Z.concat(O.call(arguments)))}}function AD(Z){return String(Z).replace(/&/g,"&").replace(/"/g,""")}function r(i){if(!i.namespaces.g_vml_){i.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml","#default#VML")}if(!i.namespaces.g_o_){i.namespaces.add("g_o_","urn:schemas-microsoft-com:office:office","#default#VML")}if(!i.styleSheets.ex_canvas_){var Z=i.createStyleSheet();Z.owningElement.id="ex_canvas_";Z.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}"}}r(document);var E={init:function(Z){if(/MSIE/.test(navigator.userAgent)&&!window.opera){var i=Z||document;i.createElement("canvas");i.attachEvent("onreadystatechange",G(this.init_,this,i))}},init_:function(m){var j=m.getElementsByTagName("canvas");for(var Z=0;Z<j.length;Z++){this.initElement(j[Z])}},initElement:function(i){if(!i.getContext){i.getContext=T;r(i.ownerDocument);i.innerHTML="";i.attachEvent("onpropertychange",S);i.attachEvent("onresize",w);var Z=i.attributes;if(Z.width&&Z.width.specified){i.style.width=Z.width.nodeValue+"px"}else{i.width=i.clientWidth}if(Z.height&&Z.height.specified){i.style.height=Z.height.nodeValue+"px"}else{i.height=i.clientHeight}}return i}};function S(i){var Z=i.srcElement;switch(i.propertyName){case"width":Z.getContext().clearRect();Z.style.width=Z.attributes.width.nodeValue+"px";Z.firstChild.style.width=Z.clientWidth+"px";break;case"height":Z.getContext().clearRect();Z.style.height=Z.attributes.height.nodeValue+"px";Z.firstChild.style.height=Z.clientHeight+"px";break}}function w(i){var 
Z=i.srcElement;if(Z.firstChild){Z.firstChild.style.width=Z.clientWidth+"px";Z.firstChild.style.height=Z.clientHeight+"px"}}E.init();var I=[];for(var AC=0;AC<16;AC++){for(var AB=0;AB<16;AB++){I[AC*16+AB]=AC.toString(16)+AB.toString(16)}}function V(){return[[1,0,0],[0,1,0],[0,0,1]]}function d(m,j){var i=V();for(var Z=0;Z<3;Z++){for(var AF=0;AF<3;AF++){var p=0;for(var AE=0;AE<3;AE++){p+=m[Z][AE]*j[AE][AF]}i[Z][AF]=p}}return i}function Q(i,Z){Z.fillStyle=i.fillStyle;Z.lineCap=i.lineCap;Z.lineJoin=i.lineJoin;Z.lineWidth=i.lineWidth;Z.miterLimit=i.miterLimit;Z.shadowBlur=i.shadowBlur;Z.shadowColor=i.shadowColor;Z.shadowOffsetX=i.shadowOffsetX;Z.shadowOffsetY=i.shadowOffsetY;Z.strokeStyle=i.strokeStyle;Z.globalAlpha=i.globalAlpha;Z.font=i.font;Z.textAlign=i.textAlign;Z.textBaseline=i.textBaseline;Z.arcScaleX_=i.arcScaleX_;Z.arcScaleY_=i.arcScaleY_;Z.lineScale_=i.lineScale_}var B={aliceblue:"#F0F8FF",antiquewhite:"#FAEBD7",aquamarine:"#7FFFD4",azure:"#F0FFFF",beige:"#F5F5DC",bisque:"#FFE4C4",black:"#000000",blanchedalmond:"#FFEBCD",blueviolet:"#8A2BE2",brown:"#A52A2A",burlywood:"#DEB887",cadetblue:"#5F9EA0",chartreuse:"#7FFF00",chocolate:"#D2691E",coral:"#FF7F50",cornflowerblue:"#6495ED",cornsilk:"#FFF8DC",crimson:"#DC143C",cyan:"#00FFFF",darkblue:"#00008B",darkcyan:"#008B8B",darkgoldenrod:"#B8860B",darkgray:"#A9A9A9",darkgreen:"#006400",darkgrey:"#A9A9A9",darkkhaki:"#BDB76B",darkmagenta:"#8B008B",darkolivegreen:"#556B2F",darkorange:"#FF8C00",darkorchid:"#9932CC",darkred:"#8B0000",darksalmon:"#E9967A",darkseagreen:"#8FBC8F",darkslateblue:"#483D8B",darkslategray:"#2F4F4F",darkslategrey:"#2F4F4F",darkturquoise:"#00CED1",darkviolet:"#9400D3",deeppink:"#FF1493",deepskyblue:"#00BFFF",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1E90FF",firebrick:"#B22222",floralwhite:"#FFFAF0",forestgreen:"#228B22",gainsboro:"#DCDCDC",ghostwhite:"#F8F8FF",gold:"#FFD700",goldenrod:"#DAA520",grey:"#808080",greenyellow:"#ADFF2F",honeydew:"#F0FFF0",hotpink:"#FF69B4",indianred:"#CD5C5C",indigo:"#
4B0082",ivory:"#FFFFF0",khaki:"#F0E68C",lavender:"#E6E6FA",lavenderblush:"#FFF0F5",lawngreen:"#7CFC00",lemonchiffon:"#FFFACD",lightblue:"#ADD8E6",lightcoral:"#F08080",lightcyan:"#E0FFFF",lightgoldenrodyellow:"#FAFAD2",lightgreen:"#90EE90",lightgrey:"#D3D3D3",lightpink:"#FFB6C1",lightsalmon:"#FFA07A",lightseagreen:"#20B2AA",lightskyblue:"#87CEFA",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#B0C4DE",lightyellow:"#FFFFE0",limegreen:"#32CD32",linen:"#FAF0E6",magenta:"#FF00FF",mediumaquamarine:"#66CDAA",mediumblue:"#0000CD",mediumorchid:"#BA55D3",mediumpurple:"#9370DB",mediumseagreen:"#3CB371",mediumslateblue:"#7B68EE",mediumspringgreen:"#00FA9A",mediumturquoise:"#48D1CC",mediumvioletred:"#C71585",midnightblue:"#191970",mintcream:"#F5FFFA",mistyrose:"#FFE4E1",moccasin:"#FFE4B5",navajowhite:"#FFDEAD",oldlace:"#FDF5E6",olivedrab:"#6B8E23",orange:"#FFA500",orangered:"#FF4500",orchid:"#DA70D6",palegoldenrod:"#EEE8AA",palegreen:"#98FB98",paleturquoise:"#AFEEEE",palevioletred:"#DB7093",papayawhip:"#FFEFD5",peachpuff:"#FFDAB9",peru:"#CD853F",pink:"#FFC0CB",plum:"#DDA0DD",powderblue:"#B0E0E6",rosybrown:"#BC8F8F",royalblue:"#4169E1",saddlebrown:"#8B4513",salmon:"#FA8072",sandybrown:"#F4A460",seagreen:"#2E8B57",seashell:"#FFF5EE",sienna:"#A0522D",skyblue:"#87CEEB",slateblue:"#6A5ACD",slategray:"#708090",slategrey:"#708090",snow:"#FFFAFA",springgreen:"#00FF7F",steelblue:"#4682B4",tan:"#D2B48C",thistle:"#D8BFD8",tomato:"#FF6347",turquoise:"#40E0D0",violet:"#EE82EE",wheat:"#F5DEB3",whitesmoke:"#F5F5F5",yellowgreen:"#9ACD32"};function g(i){var m=i.indexOf("(",3);var Z=i.indexOf(")",m+1);var j=i.substring(m+1,Z).split(",");if(j.length==4&&i.substr(3,1)=="a"){alpha=Number(j[3])}else{j[3]=1}return j}function C(Z){return parseFloat(Z)/100}function N(i,j,Z){return Math.min(Z,Math.max(j,i))}function c(AF){var j,i,Z;h=parseFloat(AF[0])/360%360;if(h<0){h++}s=N(C(AF[1]),0,1);l=N(C(AF[2]),0,1);if(s==0){j=i=Z=l}else{var m=l<0.5?l*(1+s):l+s-l*s;var 
AE=2*l-m;j=A(AE,m,h+1/3);i=A(AE,m,h);Z=A(AE,m,h-1/3)}return"#"+I[Math.floor(j*255)]+I[Math.floor(i*255)]+I[Math.floor(Z*255)]}function A(i,Z,j){if(j<0){j++}if(j>1){j--}if(6*j<1){return i+(Z-i)*6*j}else{if(2*j<1){return Z}else{if(3*j<2){return i+(Z-i)*(2/3-j)*6}else{return i}}}}function Y(Z){var AE,p=1;Z=String(Z);if(Z.charAt(0)=="#"){AE=Z}else{if(/^rgb/.test(Z)){var m=g(Z);var AE="#",AF;for(var j=0;j<3;j++){if(m[j].indexOf("%")!=-1){AF=Math.floor(C(m[j])*255)}else{AF=Number(m[j])}AE+=I[N(AF,0,255)]}p=m[3]}else{if(/^hsl/.test(Z)){var m=g(Z);AE=c(m);p=m[3]}else{AE=B[Z]||Z}}}return{color:AE,alpha:p}}var L={style:"normal",variant:"normal",weight:"normal",size:10,family:"sans-serif"};var f={};function X(Z){if(f[Z]){return f[Z]}var m=document.createElement("div");var j=m.style;try{j.font=Z}catch(i){}return f[Z]={style:j.fontStyle||L.style,variant:j.fontVariant||L.variant,weight:j.fontWeight||L.weight,size:j.fontSize||L.size,family:j.fontFamily||L.family}}function P(j,i){var Z={};for(var AF in j){Z[AF]=j[AF]}var AE=parseFloat(i.currentStyle.fontSize),m=parseFloat(j.size);if(typeof j.size=="number"){Z.size=j.size}else{if(j.size.indexOf("px")!=-1){Z.size=m}else{if(j.size.indexOf("em")!=-1){Z.size=AE*m}else{if(j.size.indexOf("%")!=-1){Z.size=(AE/100)*m}else{if(j.size.indexOf("pt")!=-1){Z.size=m/0.75}else{Z.size=AE}}}}}Z.size*=0.981;return Z}function AA(Z){return Z.style+" "+Z.variant+" "+Z.weight+" "+Z.size+"px "+Z.family}function t(Z){switch(Z){case"butt":return"flat";case"round":return"round";case"square":default:return"square"}}function W(i){this.m_=V();this.mStack_=[];this.aStack_=[];this.currentPath_=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=D*1;this.globalAlpha=1;this.font="10px sans-serif";this.textAlign="left";this.textBaseline="alphabetic";this.canvas=i;var 
Z=i.ownerDocument.createElement("div");Z.style.width=i.clientWidth+"px";Z.style.height=i.clientHeight+"px";Z.style.overflow="hidden";Z.style.position="absolute";i.appendChild(Z);this.element_=Z;this.arcScaleX_=1;this.arcScaleY_=1;this.lineScale_=1}var M=W.prototype;M.clearRect=function(){if(this.textMeasureEl_){this.textMeasureEl_.removeNode(true);this.textMeasureEl_=null}this.element_.innerHTML=""};M.beginPath=function(){this.currentPath_=[]};M.moveTo=function(i,Z){var j=this.getCoords_(i,Z);this.currentPath_.push({type:"moveTo",x:j.x,y:j.y});this.currentX_=j.x;this.currentY_=j.y};M.lineTo=function(i,Z){var j=this.getCoords_(i,Z);this.currentPath_.push({type:"lineTo",x:j.x,y:j.y});this.currentX_=j.x;this.currentY_=j.y};M.bezierCurveTo=function(j,i,AI,AH,AG,AE){var Z=this.getCoords_(AG,AE);var AF=this.getCoords_(j,i);var m=this.getCoords_(AI,AH);e(this,AF,m,Z)};function e(Z,m,j,i){Z.currentPath_.push({type:"bezierCurveTo",cp1x:m.x,cp1y:m.y,cp2x:j.x,cp2y:j.y,x:i.x,y:i.y});Z.currentX_=i.x;Z.currentY_=i.y}M.quadraticCurveTo=function(AG,j,i,Z){var AF=this.getCoords_(AG,j);var AE=this.getCoords_(i,Z);var AH={x:this.currentX_+2/3*(AF.x-this.currentX_),y:this.currentY_+2/3*(AF.y-this.currentY_)};var m={x:AH.x+(AE.x-this.currentX_)/3,y:AH.y+(AE.y-this.currentY_)/3};e(this,AH,m,AE)};M.arc=function(AJ,AH,AI,AE,i,j){AI*=D;var AN=j?"at":"wa";var AK=AJ+U(AE)*AI-F;var AM=AH+J(AE)*AI-F;var Z=AJ+U(i)*AI-F;var AL=AH+J(i)*AI-F;if(AK==Z&&!j){AK+=0.125}var m=this.getCoords_(AJ,AH);var AG=this.getCoords_(AK,AM);var AF=this.getCoords_(Z,AL);this.currentPath_.push({type:AN,x:m.x,y:m.y,radius:AI,xStart:AG.x,yStart:AG.y,xEnd:AF.x,yEnd:AF.y})};M.rect=function(j,i,Z,m){this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath()};M.strokeRect=function(j,i,Z,m){var p=this.currentPath_;this.beginPath();this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath();this.stroke();this.currentPath_=p};M.fillRect=function(j,i,Z,m){var 
p=this.currentPath_;this.beginPath();this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath();this.fill();this.currentPath_=p};M.createLinearGradient=function(i,m,Z,j){var p=new v("gradient");p.x0_=i;p.y0_=m;p.x1_=Z;p.y1_=j;return p};M.createRadialGradient=function(m,AE,j,i,p,Z){var AF=new v("gradientradial");AF.x0_=m;AF.y0_=AE;AF.r0_=j;AF.x1_=i;AF.y1_=p;AF.r1_=Z;return AF};M.drawImage=function(AO,j){var AH,AF,AJ,AV,AM,AK,AQ,AX;var AI=AO.runtimeStyle.width;var AN=AO.runtimeStyle.height;AO.runtimeStyle.width="auto";AO.runtimeStyle.height="auto";var AG=AO.width;var AT=AO.height;AO.runtimeStyle.width=AI;AO.runtimeStyle.height=AN;if(arguments.length==3){AH=arguments[1];AF=arguments[2];AM=AK=0;AQ=AJ=AG;AX=AV=AT}else{if(arguments.length==5){AH=arguments[1];AF=arguments[2];AJ=arguments[3];AV=arguments[4];AM=AK=0;AQ=AG;AX=AT}else{if(arguments.length==9){AM=arguments[1];AK=arguments[2];AQ=arguments[3];AX=arguments[4];AH=arguments[5];AF=arguments[6];AJ=arguments[7];AV=arguments[8]}else{throw Error("Invalid number of arguments")}}}var AW=this.getCoords_(AH,AF);var m=AQ/2;var i=AX/2;var AU=[];var Z=10;var AE=10;AU.push(" <g_vml_:group",' coordsize="',D*Z,",",D*AE,'"',' coordorigin="0,0"',' style="width:',Z,"px;height:",AE,"px;position:absolute;");if(this.m_[0][0]!=1||this.m_[0][1]||this.m_[1][1]!=1||this.m_[1][0]){var p=[];p.push("M11=",this.m_[0][0],",","M12=",this.m_[1][0],",","M21=",this.m_[0][1],",","M22=",this.m_[1][1],",","Dx=",K(AW.x/D),",","Dy=",K(AW.y/D),"");var AS=AW;var AR=this.getCoords_(AH+AJ,AF);var AP=this.getCoords_(AH,AF+AV);var AL=this.getCoords_(AH+AJ,AF+AV);AS.x=z.max(AS.x,AR.x,AP.x,AL.x);AS.y=z.max(AS.y,AR.y,AP.y,AL.y);AU.push("padding:0 ",K(AS.x/D),"px ",K(AS.y/D),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",p.join(""),", sizingmethod='clip');")}else{AU.push("top:",K(AW.y/D),"px;left:",K(AW.x/D),"px;")}AU.push(' ">','<g_vml_:image src="',AO.src,'"',' style="width:',D*AJ,"px;"," height:",D*AV,'px"',' 
cropleft="',AM/AG,'"',' croptop="',AK/AT,'"',' cropright="',(AG-AM-AQ)/AG,'"',' cropbottom="',(AT-AK-AX)/AT,'"'," />","</g_vml_:group>");this.element_.insertAdjacentHTML("BeforeEnd",AU.join(""))};M.stroke=function(AM){var m=10;var AN=10;var AE=5000;var AG={x:null,y:null};var AL={x:null,y:null};for(var AH=0;AH<this.currentPath_.length;AH+=AE){var AK=[];var AF=false;AK.push("<g_vml_:shape",' filled="',!!AM,'"',' style="position:absolute;width:',m,"px;height:",AN,'px;"',' coordorigin="0,0"',' coordsize="',D*m,",",D*AN,'"',' stroked="',!AM,'"',' path="');var AO=false;for(var AI=AH;AI<Math.min(AH+AE,this.currentPath_.length);AI++){if(AI%AE==0&&AI>0){AK.push(" m ",K(this.currentPath_[AI-1].x),",",K(this.currentPath_[AI-1].y))}var Z=this.currentPath_[AI];var AJ;switch(Z.type){case"moveTo":AJ=Z;AK.push(" m ",K(Z.x),",",K(Z.y));break;case"lineTo":AK.push(" l ",K(Z.x),",",K(Z.y));break;case"close":AK.push(" x ");Z=null;break;case"bezierCurveTo":AK.push(" c ",K(Z.cp1x),",",K(Z.cp1y),",",K(Z.cp2x),",",K(Z.cp2y),",",K(Z.x),",",K(Z.y));break;case"at":case"wa":AK.push(" ",Z.type," ",K(Z.x-this.arcScaleX_*Z.radius),",",K(Z.y-this.arcScaleY_*Z.radius)," ",K(Z.x+this.arcScaleX_*Z.radius),",",K(Z.y+this.arcScaleY_*Z.radius)," ",K(Z.xStart),",",K(Z.yStart)," ",K(Z.xEnd),",",K(Z.yEnd));break}if(Z){if(AG.x==null||Z.x<AG.x){AG.x=Z.x}if(AL.x==null||Z.x>AL.x){AL.x=Z.x}if(AG.y==null||Z.y<AG.y){AG.y=Z.y}if(AL.y==null||Z.y>AL.y){AL.y=Z.y}}}AK.push(' ">');if(!AM){R(this,AK)}else{a(this,AK,AG,AL)}AK.push("</g_vml_:shape>");this.element_.insertAdjacentHTML("beforeEnd",AK.join(""))}};function R(j,AE){var i=Y(j.strokeStyle);var m=i.color;var p=i.alpha*j.globalAlpha;var Z=j.lineScale_*j.lineWidth;if(Z<1){p*=Z}AE.push("<g_vml_:stroke",' opacity="',p,'"',' joinstyle="',j.lineJoin,'"',' miterlimit="',j.miterLimit,'"',' endcap="',t(j.lineCap),'"',' weight="',Z,'px"',' color="',m,'" />')}function a(AO,AG,Ah,AP){var AH=AO.fillStyle;var AY=AO.arcScaleX_;var AX=AO.arcScaleY_;var Z=AP.x-Ah.x;var 
m=AP.y-Ah.y;if(AH instanceof v){var AL=0;var Ac={x:0,y:0};var AU=0;var AK=1;if(AH.type_=="gradient"){var AJ=AH.x0_/AY;var j=AH.y0_/AX;var AI=AH.x1_/AY;var Aj=AH.y1_/AX;var Ag=AO.getCoords_(AJ,j);var Af=AO.getCoords_(AI,Aj);var AE=Af.x-Ag.x;var p=Af.y-Ag.y;AL=Math.atan2(AE,p)*180/Math.PI;if(AL<0){AL+=360}if(AL<0.000001){AL=0}}else{var Ag=AO.getCoords_(AH.x0_,AH.y0_);Ac={x:(Ag.x-Ah.x)/Z,y:(Ag.y-Ah.y)/m};Z/=AY*D;m/=AX*D;var Aa=z.max(Z,m);AU=2*AH.r0_/Aa;AK=2*AH.r1_/Aa-AU}var AS=AH.colors_;AS.sort(function(Ak,i){return Ak.offset-i.offset});var AN=AS.length;var AR=AS[0].color;var AQ=AS[AN-1].color;var AW=AS[0].alpha*AO.globalAlpha;var AV=AS[AN-1].alpha*AO.globalAlpha;var Ab=[];for(var Ae=0;Ae<AN;Ae++){var AM=AS[Ae];Ab.push(AM.offset*AK+AU+" "+AM.color)}AG.push('<g_vml_:fill type="',AH.type_,'"',' method="none" focus="100%"',' color="',AR,'"',' color2="',AQ,'"',' colors="',Ab.join(","),'"',' opacity="',AV,'"',' g_o_:opacity2="',AW,'"',' angle="',AL,'"',' focusposition="',Ac.x,",",Ac.y,'" />')}else{if(AH instanceof u){if(Z&&m){var AF=-Ah.x;var AZ=-Ah.y;AG.push("<g_vml_:fill",' position="',AF/Z*AY*AY,",",AZ/m*AX*AX,'"',' type="tile"',' src="',AH.src_,'" />')}}else{var Ai=Y(AO.fillStyle);var AT=Ai.color;var Ad=Ai.alpha*AO.globalAlpha;AG.push('<g_vml_:fill color="',AT,'" opacity="',Ad,'" />')}}}M.fill=function(){this.stroke(true)};M.closePath=function(){this.currentPath_.push({type:"close"})};M.getCoords_=function(j,i){var Z=this.m_;return{x:D*(j*Z[0][0]+i*Z[1][0]+Z[2][0])-F,y:D*(j*Z[0][1]+i*Z[1][1]+Z[2][1])-F}};M.save=function(){var Z={};Q(this,Z);this.aStack_.push(Z);this.mStack_.push(this.m_);this.m_=d(V(),this.m_)};M.restore=function(){if(this.aStack_.length){Q(this.aStack_.pop(),this);this.m_=this.mStack_.pop()}};function H(Z){return isFinite(Z[0][0])&&isFinite(Z[0][1])&&isFinite(Z[1][0])&&isFinite(Z[1][1])&&isFinite(Z[2][0])&&isFinite(Z[2][1])}function y(i,Z,j){if(!H(Z)){return }i.m_=Z;if(j){var 
p=Z[0][0]*Z[1][1]-Z[0][1]*Z[1][0];i.lineScale_=k(b(p))}}M.translate=function(j,i){var Z=[[1,0,0],[0,1,0],[j,i,1]];y(this,d(Z,this.m_),false)};M.rotate=function(i){var m=U(i);var j=J(i);var Z=[[m,j,0],[-j,m,0],[0,0,1]];y(this,d(Z,this.m_),false)};M.scale=function(j,i){this.arcScaleX_*=j;this.arcScaleY_*=i;var Z=[[j,0,0],[0,i,0],[0,0,1]];y(this,d(Z,this.m_),true)};M.transform=function(p,m,AF,AE,i,Z){var j=[[p,m,0],[AF,AE,0],[i,Z,1]];y(this,d(j,this.m_),true)};M.setTransform=function(AE,p,AG,AF,j,i){var Z=[[AE,p,0],[AG,AF,0],[j,i,1]];y(this,Z,true)};M.drawText_=function(AK,AI,AH,AN,AG){var AM=this.m_,AQ=1000,i=0,AP=AQ,AF={x:0,y:0},AE=[];var Z=P(X(this.font),this.element_);var j=AA(Z);var AR=this.element_.currentStyle;var p=this.textAlign.toLowerCase();switch(p){case"left":case"center":case"right":break;case"end":p=AR.direction=="ltr"?"right":"left";break;case"start":p=AR.direction=="rtl"?"right":"left";break;default:p="left"}switch(this.textBaseline){case"hanging":case"top":AF.y=Z.size/1.75;break;case"middle":break;default:case null:case"alphabetic":case"ideographic":case"bottom":AF.y=-Z.size/2.25;break}switch(p){case"right":i=AQ;AP=0.05;break;case"center":i=AP=AQ/2;break}var AO=this.getCoords_(AI+AF.x,AH+AF.y);AE.push('<g_vml_:line from="',-i,' 0" to="',AP,' 0.05" ',' coordsize="100 100" coordorigin="0 0"',' filled="',!AG,'" stroked="',!!AG,'" style="position:absolute;width:1px;height:1px;">');if(AG){R(this,AE)}else{a(this,AE,{x:-i,y:0},{x:AP,y:Z.size})}var AL=AM[0][0].toFixed(3)+","+AM[1][0].toFixed(3)+","+AM[0][1].toFixed(3)+","+AM[1][1].toFixed(3)+",0,0";var AJ=K(AO.x/D)+","+K(AO.y/D);AE.push('<g_vml_:skew on="t" matrix="',AL,'" ',' offset="',AJ,'" origin="',i,' 0" />','<g_vml_:path textpathok="true" />','<g_vml_:textpath on="true" string="',AD(AK),'" style="v-text-align:',p,";font:",AD(j),'" 
/></g_vml_:line>');this.element_.insertAdjacentHTML("beforeEnd",AE.join(""))};M.fillText=function(j,Z,m,i){this.drawText_(j,Z,m,i,false)};M.strokeText=function(j,Z,m,i){this.drawText_(j,Z,m,i,true)};M.measureText=function(j){if(!this.textMeasureEl_){var Z='<span style="position:absolute;top:-20000px;left:0;padding:0;margin:0;border:none;white-space:pre;"></span>';this.element_.insertAdjacentHTML("beforeEnd",Z);this.textMeasureEl_=this.element_.lastChild}var i=this.element_.ownerDocument;this.textMeasureEl_.innerHTML="";this.textMeasureEl_.style.font=this.font;this.textMeasureEl_.appendChild(i.createTextNode(j));return{width:this.textMeasureEl_.offsetWidth}};M.clip=function(){};M.arcTo=function(){};M.createPattern=function(i,Z){return new u(i,Z)};function v(Z){this.type_=Z;this.x0_=0;this.y0_=0;this.r0_=0;this.x1_=0;this.y1_=0;this.r1_=0;this.colors_=[]}v.prototype.addColorStop=function(i,Z){Z=Y(Z);this.colors_.push({offset:i,color:Z.color,alpha:Z.alpha})};function u(i,Z){q(i);switch(Z){case"repeat":case null:case"":this.repetition_="repeat";break;case"repeat-x":case"repeat-y":case"no-repeat":this.repetition_=Z;break;default:n("SYNTAX_ERR")}this.src_=i.src;this.width_=i.width;this.height_=i.height}function n(Z){throw new o(Z)}function q(Z){if(!Z||Z.nodeType!=1||Z.tagName!="IMG"){n("TYPE_MISMATCH_ERR")}if(Z.readyState!="complete"){n("INVALID_STATE_ERR")}}function o(Z){this.code=this[Z];this.message=Z+": DOM Exception "+this.code}var x=o.prototype=new Error;x.INDEX_SIZE_ERR=1;x.DOMSTRING_SIZE_ERR=2;x.HIERARCHY_REQUEST_ERR=3;x.WRONG_DOCUMENT_ERR=4;x.INVALID_CHARACTER_ERR=5;x.NO_DATA_ALLOWED_ERR=6;x.NO_MODIFICATION_ALLOWED_ERR=7;x.NOT_FOUND_ERR=8;x.NOT_SUPPORTED_ERR=9;x.INUSE_ATTRIBUTE_ERR=10;x.INVALID_STATE_ERR=11;x.SYNTAX_ERR=12;x.INVALID_MODIFICATION_ERR=13;x.NAMESPACE_ERR=14;x.INVALID_ACCESS_ERR=15;x.VALIDATION_ERR=16;x.TYPE_MISMATCH_ERR=17;G_vmlCanvasManager=E;CanvasRenderingContext2D=W;CanvasGradient=v;CanvasPattern=u;DOMException=o})()};
--- /dev/null
+// Placeholder markup shown wherever a statistic is unavailable.
+UNKNOWN_REPR = '<span class="unknown">?</span>';
+// [ratio, colour] pairs consumed by fmt_color(): first threshold the
+// value meets (highest first) wins; below all thresholds => green.
+FD_THRESHOLDS=[[0.95, 'red'],
+ [0.8, 'yellow']];
+SOCKETS_THRESHOLDS=[[1.0, 'red'],
+ [0.8, 'yellow']];
+PROCESS_THRESHOLDS=[[0.75, 'red'],
+ [0.5, 'yellow']];
+
+// HTML-escape str for display; undefined/null yields the 'unknown'
+// placeholder (defaults to UNKNOWN_REPR). Note == undefined matches
+// null as well, which is relied upon here.
+function fmt_string(str, unknown) {
+ if (unknown == undefined) unknown = UNKNOWN_REPR;
+ if (str == undefined) return unknown;
+ return fmt_escape_html("" + str);
+}
+
+// Byte count with binary (1024-based) SI prefix, e.g. "2.0kB".
+function fmt_bytes(bytes) {
+ if (bytes == undefined) return UNKNOWN_REPR;
+ return fmt_si_prefix(bytes, bytes, 1024, false) + 'B';
+}
+
+function fmt_si_prefix(num0, max0, thousand, allow_fractions) {
+ if (num == 0) return 0;
+
+ function f(n, m, p) {
+ if (m > thousand) return f(n / thousand, m / thousand, p + 1);
+ else return [n, m, p];
+ }
+
+ var num_power = f(num0, max0, 0);
+ var num = num_power[0];
+ var max = num_power[1];
+ var power = num_power[2];
+ var powers = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'];
+ return (((power != 0 || allow_fractions) && max <= 10) ? num.toFixed(1) :
+ num.toFixed(0)) + powers[power];
+}
+
+// Colour-key swatch plus formatted byte count for one slice of the
+// memory breakdown; 'key' doubles as the CSS class suffix.
+function fmt_memory(memory, key) {
+ return '<div class="colour-key memory_' + key + '"></div>' +
+ fmt_bytes(memory[key]);
+}
+
+// Boolean as a filled/hollow dot; undefined/null gets the placeholder.
+function fmt_boolean(b) {
+ if (b == undefined) return UNKNOWN_REPR;
+
+ return b ? "●" : "○";
+}
+
+// Date as "YYYY-MM-DD HH:MM:SS" (local time, zero-padded fields).
+function fmt_date(d) {
+ function f(i) {
+ return i < 10 ? "0" + i : i;
+ }
+
+ return d.getFullYear() + "-" + f(d.getMonth() + 1) + "-" +
+ f(d.getDate()) + " " + f(d.getHours()) + ":" + f(d.getMinutes()) +
+ ":" + f(d.getSeconds());
+}
+
+// Value with a unit suffix; zero/undefined renders as empty string.
+function fmt_time(t, suffix) {
+ if (t == undefined || t == 0) return '';
+ return t + suffix;
+}
+
+// Milliseconds rounded to whole seconds, e.g. 1500 -> "2s".
+function fmt_millis(millis) {
+ return Math.round(millis / 1000) + "s";
+}
+
+// Render an object's parameters/arguments as a short table.
+function fmt_parameters(obj) {
+ return fmt_table_short(args_to_params(obj));
+}
+
+// Compact parameter display: one <acronym> abbreviation per recognised
+// argument (from the ALL_ARGS table, full text in the tooltip), plus a
+// generic "Args" acronym carrying any remaining arguments.
+function fmt_parameters_short(obj) {
+ var res = '';
+ var params = args_to_params(obj);
+
+ for (var k in ALL_ARGS) {
+ if (params[k] != undefined) {
+ res += '<acronym title="' + k + ': ' + fmt_string(params[k]) +
+ '">' + ALL_ARGS[k].short + '</acronym> ';
+ }
+ }
+
+ if (params.arguments) {
+ res += '<acronym title="' + fmt_table_flat(params.arguments) +
+ '">Args</acronym>';
+ }
+ return res;
+}
+
+// Trim a connection name "client->server" down to the client part;
+// names without "->" pass through unchanged.
+function short_conn(name) {
+ var pat = /^(.*)->/;
+ var match = pat.exec(name);
+ return (match != null && match.length == 2) ? match[1] : name;
+}
+
+// Trim a channel name "client->server (N)" down to "client (N)";
+// non-matching names pass through unchanged.
+function short_chan(name) {
+ var pat = /^(.*)->.*( \(.*\))/;
+ var match = pat.exec(name);
+ return (match != null && match.length == 3) ? match[1] + match[2] : name;
+}
+
+// Normalise an AMQP entity: promote arguments listed in KNOWN_ARGS to
+// top-level keys, keep the rest under res.arguments, and surface the
+// durable / auto-delete / internal flags when set.
+function args_to_params(obj) {
+ var res = {};
+ for (var k in obj.arguments) {
+ if (k in KNOWN_ARGS) {
+ res[k] = obj.arguments[k];
+ }
+ else {
+ if (res.arguments == undefined) res.arguments = {};
+ res.arguments[k] = obj.arguments[k];
+ }
+ }
+ if (obj.durable) {
+ res['durable'] = true;
+ }
+ if (obj.auto_delete) {
+ res['auto-delete'] = true;
+ }
+ if (obj.internal != undefined && obj.internal) {
+ res['internal'] = true;
+ }
+ return res;
+}
+
+// Mirror summary for a queue: "+N" acronyms for synchronised and (with
+// a warning style) unsynchronised mirrors. slave_nodes includes the
+// synchronised ones, so they are filtered out to leave only unsynced.
+function fmt_mirrors(queue) {
+ var synced = queue.synchronised_slave_nodes || [];
+ var unsynced = queue.slave_nodes || [];
+ unsynced = jQuery.grep(unsynced,
+ function (node, i) {
+ return jQuery.inArray(node, synced) == -1
+ });
+ var res = '';
+ if (synced.length > 0) {
+ res += ' <acronym title="Synchronised mirrors: ' + synced + '">+' +
+ synced.length + '</acronym>';
+ }
+ if (synced.length == 0 && unsynced.length > 0) {
+ res += ' <acronym title="There are no synchronised mirrors">+0</acronym>';
+ }
+ if (unsynced.length > 0) {
+ res += ' <acronym class="warning" title="Unsynchronised mirrors: ' +
+ unsynced + '">+' + unsynced.length + '</acronym>';
+ }
+ return res;
+}
+
+// Percentage of messages synchronised to mirrors; an empty queue is
+// treated as fully (100%) synced to avoid dividing by zero.
+function fmt_sync_state(queue) {
+ var res = '<p><b>Syncing: ';
+ res += (queue.messages == 0) ? 100 : Math.round(100 * queue.sync_messages /
+ queue.messages);
+ res += '%</b></p>';
+ return res;
+}
+
+// Channel mode badge: T(ransactional) takes precedence over C(onfirm);
+// plain channels render nothing.
+function fmt_channel_mode(ch) {
+ if (ch.transactional) {
+ return '<acronym title="Transactional">T</acronym>';
+ }
+ else if (ch.confirm) {
+ return '<acronym title="Confirm">C</acronym>';
+ }
+ else {
+ return '';
+ }
+}
+
+// Map a ratio onto a colour name using a thresholds table (see the
+// *_THRESHOLDS constants above; ordered highest-first). Falls through
+// to 'green'; undefined input renders nothing.
+function fmt_color(r, thresholds) {
+ if (r == undefined) return '';
+
+ for (var i in thresholds) {
+ var threshold = thresholds[i][0];
+ var color = thresholds[i][1];
+
+ if (r >= threshold) {
+ return color;
+ }
+ }
+ return 'green';
+}
+
+// deliver_get rate, optionally with the redeliver rate as a subscript.
+function fmt_deliver_rate(obj, show_redeliver) {
+ var res = fmt_rate(obj, 'deliver_get');
+ if (show_redeliver) {
+ res += '<sub>' + fmt_rate(obj, 'redeliver') + '</sub>';
+ }
+ return res;
+}
+
+// Rate with precision scaled to magnitude: 2 decimals below 1,
+// 1 decimal below 10, thousands-separated integer otherwise.
+function fmt_rate_num(num) {
+ if (num == undefined) return UNKNOWN_REPR;
+ else if (num < 1) return num.toFixed(2);
+ else if (num < 10) return num.toFixed(1);
+ else return fmt_num_thousands(num.toFixed(0));
+}
+
+// Insert comma thousands separators by recursing on the leading digits
+// (operates on the string form, three characters at a time).
+function fmt_num_thousands(num) {
+ if (num == undefined) return UNKNOWN_REPR;
+ num = '' + num;
+ if (num.length < 4) return num;
+ return fmt_num_thousands(num.slice(0, -3)) + ',' + num.slice(-3);
+}
+
+// Fraction as a whole percentage; '' (strict equality) means no data.
+function fmt_percent(num) {
+ if (num === '') {
+ return 'N/A';
+ } else {
+ return Math.round(num * 100) + '%';
+ }
+}
+
+// Rate as "N/s"; empty string when the statistic is absent.
+function fmt_rate(obj, name, mode) {
+ var raw = fmt_rate0(obj, name, mode, fmt_rate_num);
+ return raw == '' ? '' : (raw + '/s');
+}
+
+// Byte rate as "N/s" with the running total as a subscript.
+function fmt_rate_bytes(obj, name, mode) {
+ var raw = fmt_rate0(obj, name, mode, fmt_bytes);
+ return raw == '' ? '' : (raw + '/s' +
+ '<sub>(' + fmt_bytes(obj[name]) + ' total)</sub>');
+}
+
+// Emphasised variants used in the large summary displays.
+function fmt_rate_large(obj, name, mode) {
+ return '<strong>' + fmt_rate0(obj, name, mode, fmt_rate_num) +
+ '</strong>msg/s';
+}
+
+function fmt_rate_bytes_large(obj, name, mode) {
+ return '<strong>' + fmt_rate0(obj, name, mode, fmt_bytes) + '/s</strong>' +
+ '(' + fmt_bytes(obj[name]) + ' total)';
+}
+
+// Shared core: pull name_details from obj, pick the average or the
+// instantaneous rate per 'mode', and format it with the supplied
+// formatter. Missing statistics yield ''.
+function fmt_rate0(obj, name, mode, fmt) {
+ if (obj == undefined || obj[name] == undefined ||
+ obj[name + '_details'] == undefined) return '';
+ var details = obj[name + '_details'];
+ return fmt(mode == 'avg' ? details.avg_rate : details.rate);
+}
+
+// Message count with ' msg' suffix.
+function fmt_msgs(obj, name, mode) {
+ return fmt_msgs0(obj, name, mode) + ' msg';
+}
+
+// Emphasised message count plus its signed rate of change.
+function fmt_msgs_large(obj, name, mode) {
+ return '<strong>' + fmt_msgs0(obj, name, mode) + '</strong>' +
+ fmt_rate0(obj, name, mode, fmt_msgs_rate);
+}
+
+// Raw count: the average (mode 'avg') or the thousands-separated current
+// value; '' when the stat is absent.
+function fmt_msgs0(obj, name, mode) {
+ if (obj == undefined || obj[name] == undefined ||
+ obj[name + '_details'] == undefined) return '';
+ var details = obj[name + '_details'];
+ return mode == 'avg' ? fmt_rate_num(details.avg) :
+ fmt_num_thousands(obj[name]);
+}
+
+// Signed rate of change of a queue length ('+N msg/s' / '-N msg/s').
+function fmt_msgs_rate(num) {
+ if (num > 0) return '+' + fmt_rate_num(num) + ' msg/s';
+ else if (num < 0) return '-' + fmt_rate_num(-num) + ' msg/s';
+ else return ' ';
+}
+
+// Chart axis label formatters. fmt_si_prefix and fmt_bytes are defined
+// elsewhere; max is the axis maximum, used to pick a consistent SI prefix.
+function fmt_rate_axis(num, max) {
+ return fmt_si_prefix(num, max, 1000, true) + '/s';
+}
+
+function fmt_msgs_axis(num, max) {
+ return fmt_si_prefix(num, max, 1000, true);
+}
+
+function fmt_rate_bytes_axis(num, max) {
+ num = parseInt(num);
+ return fmt_bytes(isNaN(num) ? 0 : num) + '/s';
+}
+
+function is_stat_empty(obj, name) {
+ if (obj == undefined
+ || obj[name] == undefined
+ || obj[name + '_details'] == undefined
+ || obj[name + '_details'].rate < 0.00001) return true;
+ return false;
+}
+
+// True when every object in the listing has an empty/negligible stat
+// called name (default accessor: its message_stats) — used to hide
+// columns with no activity.
+function is_col_empty(objects, name, accessor) {
+ if (accessor == undefined) accessor = function(o) {return o.message_stats;};
+ for (var i = 0; i < objects.length; i++) {
+ var object = objects[i];
+ if (!is_stat_empty(accessor(object), name)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Escaped exchange name, with the nameless default exchange given a
+// readable display name.
+function fmt_exchange(name) {
+ return fmt_escape_html(fmt_exchange0(name));
+}
+
+function fmt_exchange0(name) {
+ return name == '' ? '(AMQP default)' : name;
+}
+
+// Show an exchange type, flagged in red when no loaded plugin declares it
+// (exchange_types is populated by setup_global_vars).
+function fmt_exchange_type(type) {
+ for (var i in exchange_types) {
+ if (exchange_types[i].name == type) {
+ return fmt_escape_html(type);
+ }
+ }
+ return '<div class="status-red"><acronym title="Exchange type not found. ' +
+ 'Publishing to this exchange will fail.">' + fmt_escape_html(type) +
+ '</acronym></div>';
+}
+
+// Exchange name as used in location hashes ('' maps to amq.default).
+function fmt_exchange_url(name) {
+ return name == '' ? 'amq.default' : fmt_escape_html(name);
+}
+
+// Filename for definition downloads: host_YYYY-M-D.json. Note month/day
+// are not zero-padded and only the first '@' in host is replaced.
+function fmt_download_filename(host) {
+ var now = new Date();
+ return host.replace('@', '_') + "_" + now.getFullYear() + "-" +
+ (now.getMonth() + 1) + "-" + now.getDate() + ".json";
+}
+
+function fmt_fd_used(used, total) {
+ if (used == 'install_handle_from_sysinternals') {
+ return '<p class="c">handle.exe missing <span class="help" id="handle-exe"></span><sub>' + total + ' available</sub></p>';
+ }
+ else {
+ return used;
+ }
+}
+
+// AMQP table rendered as a small inline table (keys suffixed with ':').
+function fmt_table_short(table) {
+ return '<table class="mini">' + fmt_table_body(table, ':') + '</table>';
+}
+
+// AMQP table rendered as a full-width facts table.
+function fmt_table_long(table) {
+ return '<table class="facts">' + fmt_table_body(table, '') +
+ '</table>';
+}
+
+function fmt_table_body(table, x) {
+ var res = '';
+ for (k in table) {
+ res += '<tr><th>' + k + x + '</th><td>' + fmt_amqp_value(table[k]) +
+ '</td>';
+ }
+ return res;
+}
+
+// Render one AMQP field value: arrays become <br/>-separated lines
+// (recursively), nested tables become mini tables, and scalars are tagged
+// with their JavaScript type in a tooltip (strings are HTML-escaped;
+// other scalars are concatenated as-is).
+function fmt_amqp_value(val) {
+ if (val instanceof Array) {
+ var val2 = new Array();
+ for (var i = 0; i < val.length; i++) {
+ val2[i] = fmt_amqp_value(val[i]);
+ }
+ return val2.join("<br/>");
+ } else if (val instanceof Object) {
+ return fmt_table_short(val);
+ } else {
+ var t = typeof(val);
+ if (t == 'string') {
+ return '<acronym class="type" title="string">' +
+ fmt_escape_html(val) + '</acronym>';
+ } else {
+ return '<acronym class="type" title="' + t + '">' + val + '</acronym>';
+ }
+ }
+}
+
+function fmt_table_flat(table) {
+ var res = [];
+ for (k in table) {
+ res.push(k + ': ' + fmt_amqp_value_flat(table[k]));
+ }
+ return res.join(', ');
+}
+
+// Inline (single-line) variant of fmt_amqp_value: arrays become
+// '[a,b,...]', nested tables '(k: v, ...)', strings are HTML-escaped and
+// other scalars returned untouched.
+function fmt_amqp_value_flat(val) {
+ if (val instanceof Array) {
+ var val2 = new Array();
+ for (var i = 0; i < val.length; i++) {
+ val2[i] = fmt_amqp_value_flat(val[i]);
+ }
+ return '[' + val2.join(",") + ']';
+ } else if (val instanceof Object) {
+ return '(' + fmt_table_flat(val) + ')';
+ } else if (typeof(val) == 'string') {
+ return fmt_escape_html(val);
+ } else {
+ return val;
+ }
+}
+
+function fmt_uptime(u) {
+ var uptime = Math.floor(u / 1000);
+ var sec = uptime % 60;
+ var min = Math.floor(uptime / 60) % 60;
+ var hour = Math.floor(uptime / 3600) % 24;
+ var day = Math.floor(uptime / 86400);
+
+ if (day > 0)
+ return day + 'd ' + hour + 'h';
+ else if (hour > 0)
+ return hour + 'h ' + min + 'm';
+ else
+ return min + 'm ' + sec + 's';
+}
+
+function fmt_rabbit_version(applications) {
+ for (var i in applications) {
+ if (applications[i].name == 'rabbit') {
+ return applications[i].version;
+ }
+ }
+ return 'unknown';
+}
+
+// HTML-escape, rendering newlines as <br/>.
+function fmt_escape_html(txt) {
+ return fmt_escape_html0(txt).replace(/\n/g, '<br/>');
+}
+
+// HTML-escape, dropping newlines entirely (for single-line contexts).
+function fmt_escape_html_one_line(txt) {
+ return fmt_escape_html0(txt).replace(/\n/g, '');
+}
+
+function fmt_escape_html0(txt) {
+ return txt.replace(/&/g, '&')
+ .replace(/</g, '<')
+ .replace(/>/g, '>')
+ .replace(/\"/g, '"');
+}
+
+// Hard-wrap txt at 120 columns (respecting existing newlines shorter than
+// the wrap width), then HTML-escape the result. Payloads already known to
+// be strings are escaped without wrapping.
+function fmt_maybe_wrap(txt, encoding) {
+ if (encoding == 'string') return fmt_escape_html(txt);
+
+ var WRAP = 120;
+ var res = '';
+ while (txt != '') {
+ var i = txt.indexOf('\n');
+ // No newline within the next WRAP chars: cut at WRAP and insert one.
+ if (i == -1 || i > WRAP) {
+ i = Math.min(WRAP, txt.length);
+ res += txt.substring(0, i) + '\n';
+ txt = txt.substring(i);
+ }
+ // Otherwise keep the existing newline and continue after it.
+ else {
+ res += txt.substring(0, i + 1);
+ txt = txt.substring(i + 1);
+ }
+ }
+ return fmt_escape_html(res);
+}
+
+function fmt_node(node_host) {
+ var both = node_host.split('@');
+ var node = both.slice(0, 1);
+ var host = both.slice(1);
+ return '<small>' + node + '@</small>' + host;
+}
+
+// Coloured state cell for connections/channels/etc. Precedence: idle
+// (grey) beats everything; 'blocked'/'blocking'/'flow' get alarm colours
+// and tooltips; any other state renders green with no tooltip.
+function fmt_object_state(obj) {
+ if (obj.state == undefined) return '';
+
+ var colour = 'green';
+ var text = obj.state;
+ var explanation;
+
+ if (obj.idle_since !== undefined) {
+ colour = 'grey';
+ explanation = 'Idle since ' + obj.idle_since;
+ text = 'idle';
+ }
+ // Only connections can be 'blocked' or 'blocking'
+ else if (obj.state == 'blocked') {
+ colour = 'red';
+ explanation = 'Resource alarm: connection blocked.';
+ }
+ else if (obj.state == 'blocking') {
+ colour = 'yellow';
+ explanation = 'Resource alarm: connection will block on publish.';
+ }
+ else if (obj.state == 'flow') {
+ colour = 'yellow';
+ explanation = 'Publishing rate recently restricted by server.';
+ }
+
+ return fmt_state(colour, text, explanation);
+}
+
+function fmt_state(colour, text, explanation) {
+ var key;
+ if (explanation) {
+ key = '<acronym class="normal" title="' + explanation + '">' +
+ text + '</acronym>';
+ }
+ else {
+ key = text;
+ }
+
+ return '<div class="colour-key status-key-' + colour + '"></div>' + key;
+}
+
+function fmt_resource_bar(used_label, limit_label, ratio, colour, help) {
+ var width = 120;
+
+ var res = '';
+ var other_colour = colour;
+ if (ratio > 1) {
+ ratio = 1 / ratio;
+ inverted = true;
+ colour += '-dark';
+ }
+ else {
+ other_colour += '-dark';
+ }
+ var offset = Math.round(width * (1 - ratio));
+
+ res += '<div class="status-bar" style="width: ' + width + 'px;">';
+ res += '<div class="status-bar-main ' + colour + '" style="background-image: url(img/bg-' + other_colour + '.png); background-position: -' + offset + 'px 0px; background-repeat: no-repeat;">';
+ res += used_label;
+ if (help != null) {
+ res += ' <span class="help" id="' + help + '"></span>';
+ }
+ res += '</div>'; // status-bar-main
+ if (limit_label != null) {
+ res += '<sub>' + limit_label + '</sub>';
+ }
+ res += '</div>'; // status-bar
+ return res;
+}
+
+// used/total as a coloured resource bar when used is numeric; the server
+// may instead send a string sentinel (see fmt_fd_used), which is passed
+// through unchanged.
+function fmt_resource_bar_count(used, total, thresholds) {
+ if (typeof used == 'number') {
+ return fmt_resource_bar(used, total + ' available', used / total,
+ fmt_color(used / total, thresholds));
+ } else {
+ return used;
+ }
+}
+
+function fmt_shortened_uri(uri) {
+ if (typeof uri == 'object') {
+ var res = '';
+ for (i in uri) {
+ res += fmt_shortened_uri(uri[i]) + '<br/>';
+ }
+ return res;
+ }
+ var uri = fmt_escape_html(uri);
+ if (uri.indexOf('?') == -1) {
+ return uri;
+ }
+ else {
+ return '<acronym title="' + uri + '">' +
+ uri.substr(0, uri.indexOf('?')) + '?...</acronym>';
+ }
+}
+
+// Client identification column: 'product / platform', each truncated to
+// 10 characters, with the version as a subscript.
+// NOTE(review): fmt_trunc is called without a max_length for the version,
+// so 'length > undefined' is always false and the version is never
+// truncated — appears deliberate, but confirm.
+function fmt_client_name(properties) {
+ var res = [];
+ if (properties.product != undefined) {
+ res.push(fmt_trunc(properties.product, 10));
+ }
+ if (properties.platform != undefined) {
+ res.push(fmt_trunc(properties.platform, 10));
+ }
+ res = res.join(" / ");
+
+ if (properties.version != undefined) {
+ res += '<sub>' + fmt_trunc(properties.version) + '</sub>';
+ }
+ return res;
+}
+
+function fmt_trunc(str, max_length) {
+ return str.length > max_length ?
+ ('<acronym class="normal" title="' + str + '">' +
+ str.substring(0, max_length) + '...</acronym>') : str;
+}
+
+function alt_rows(i, args) {
+ var css = [(i % 2 == 0) ? 'alt1' : 'alt2'];
+ if (args != undefined && args['x-internal-purpose'] != undefined) {
+ css.push('internal-purpose');
+ }
+ return ' class="' + css.join(' ') + '"';
+}
+
+// URL-encode one path component of a location-hash URL.
+function esc(str) {
+ return encodeURIComponent(str);
+}
+
+// Link to a connection page; with desc given, the link text is desc and
+// filter-highlighting is disabled (short_conn is defined elsewhere).
+function link_conn(name, desc) {
+ if (desc == undefined) {
+ return _link_to(short_conn(name), '#/connections/' + esc(name));
+ }
+ else {
+ return _link_to(desc, '#/connections/' + esc(name), false);
+ }
+}
+
+// Link to a channel page (short_chan is defined elsewhere).
+function link_channel(name) {
+ return _link_to(short_chan(name), '#/channels/' + esc(name))
+}
+
+// Link to an exchange, mapping the nameless default exchange to the
+// 'amq.default' pseudo-name used in URLs.
+function link_exchange(vhost, name, args) {
+ var url = esc(vhost) + '/' + (name == '' ? 'amq.default' : esc(name));
+ return _link_to(fmt_exchange0(name), '#/exchanges/' + url, true, args);
+}
+
+// Link to a queue page.
+function link_queue(vhost, name, args) {
+ return _link_to(name, '#/queues/' + esc(vhost) + '/' + esc(name), true, args);
+}
+
+// Link to a virtual host page.
+function link_vhost(name) {
+ return _link_to(name, '#/vhosts/' + esc(name))
+}
+
+// Link to a user page.
+function link_user(name) {
+ return _link_to(name, '#/users/' + esc(name))
+}
+
+// Link to a node page.
+function link_node(name) {
+ return _link_to(name, '#/nodes/' + esc(name))
+}
+
+// Link to a policy page.
+function link_policy(vhost, name) {
+ return _link_to(name, '#/policies/' + esc(vhost) + '/' + esc(name))
+}
+
+// Shared anchor builder. highlight (default true) runs the link text
+// through the current-filter highlighter; args carrying
+// x-internal-purpose get an explanatory tooltip.
+function _link_to(name, url, highlight, args) {
+ if (highlight == undefined) highlight = true;
+ var title = null;
+ if (args != undefined && args['x-internal-purpose'] != undefined) {
+ var purpose = args['x-internal-purpose'];
+ title = 'This is used internally by the ' + purpose + ' mechanism.';
+ }
+ return '<a href="' + url + '"' +
+ (title ? ' title="' + title + '"' : '') + '>' +
+ (highlight ? fmt_highlight_filter(name) : fmt_escape_html(name)) +
+ '</a>';
+}
+
+// Wrap the portion of text matched by the current filter (plain
+// substring, or regex when enabled) in a highlight span, HTML-escaping
+// all three segments. Matching is case-insensitive.
+function fmt_highlight_filter(text) {
+ if (current_filter == '') return fmt_escape_html(text);
+
+ var text_to_match = current_filter.toLowerCase();
+ if (current_filter_regex) {
+ var potential_match = current_filter_regex.exec(text.toLowerCase());
+ if (potential_match) {
+ text_to_match = potential_match[0];
+ }
+ }
+ var ix = text.toLowerCase().indexOf(text_to_match);
+ var l = text_to_match.length;
+ if (ix == -1) {
+ return fmt_escape_html(text);
+ }
+ else {
+ return fmt_escape_html(text.substring(0, ix)) +
+ '<span class="filter-highlight">' +
+ fmt_escape_html(text.substring(ix, ix + l)) + '</span>' +
+ fmt_escape_html(text.substring(ix + l));
+ }
+}
+
+// Message-rate panel: the per-object rate series shown on the overview,
+// connection, channel, exchange and queue pages.
+function message_rates(id, stats) {
+ var items = [['Publish', 'publish'], ['Confirm', 'confirm'],
+ ['Publish (In)', 'publish_in'],
+ ['Publish (Out)', 'publish_out'],
+ ['Deliver', 'deliver'],
+ ['Redelivered', 'redeliver'],
+ ['Acknowledge', 'ack'],
+ ['Get', 'get'], ['Deliver (noack)', 'deliver_no_ack'],
+ ['Get (noack)', 'get_no_ack'],
+ ['Return', 'return_unroutable']];
+ return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_large, fmt_rate_axis, true, 'Message rates', 'message-rates');
+}
+
+// Queue-length panel (ready / unacknowledged / total).
+function queue_lengths(id, stats) {
+ var items = [['Ready', 'messages_ready'],
+ ['Unacknowledged', 'messages_unacknowledged'],
+ ['Total', 'messages']];
+ return rates_chart_or_text(id, stats, items, fmt_msgs, fmt_msgs_large, fmt_msgs_axis, false, 'Queued messages', 'queued-messages');
+}
+
+// Network throughput panel (octets received from / sent to the client).
+function data_rates(id, stats) {
+ var items = [['From client', 'recv_oct'], ['To client', 'send_oct']];
+ return rates_chart_or_text(id, stats, items, fmt_rate_bytes, fmt_rate_bytes_large, fmt_rate_bytes_axis, true, 'Data rates');
+}
+
+// Shared panel builder: a heading plus either a chart or a text rendering,
+// driven by the per-panel 'rate-mode-<id>' and 'chart-range-<id>'
+// preferences (get_pref and keys are defined elsewhere).
+function rates_chart_or_text(id, stats, items, chart_fmt, text_fmt, axis_fmt, chart_rates,
+ heading, heading_help) {
+ var mode = get_pref('rate-mode-' + id);
+ var range = get_pref('chart-range-' + id);
+ var prefix = '<h3>' + heading +
+ ' <span class="rate-options updatable" title="Click to change" for="'
+ + id + '">(' + prefix_title(mode, range) + ')</span>' +
+ (heading_help == undefined ? '' :
+ ' <span class="help" id="' + heading_help + '"></span>') +
+ '</h3>';
+ var res;
+
+ if (keys(stats).length > 0) {
+ if (mode == 'chart') {
+ res = rates_chart(id, items, stats, chart_fmt, axis_fmt, chart_rates);
+ }
+ else {
+ res = rates_text(items, stats, mode, text_fmt);
+ }
+ if (res == "") res = '<p>Waiting for data...</p>';
+ }
+ else {
+ res = '<p>Currently idle</p>';
+ }
+ return prefix + '<div class="updatable">' + res + '</div>';
+}
+
+// Human description of the current mode/range for the panel heading.
+function prefix_title(mode, range) {
+ var desc = CHART_PERIODS[range];
+ if (mode == 'chart') {
+ return 'chart: ' + desc.toLowerCase();
+ }
+ else if (mode == 'curr') {
+ return 'current value';
+ }
+ else {
+ return 'moving average: ' + desc.toLowerCase();
+ }
+}
+
+// Chart variant: stashes each present series in chart_data[id] for the
+// chart renderer and emits the chart div plus a colour-keyed legend table
+// (chart_colors is defined elsewhere). Returns '' when no series exist.
+function rates_chart(id, items, stats, rate_fmt, axis_fmt, chart_rates) {
+ var size = get_pref('chart-size-' + id);
+ var show = [];
+ chart_data[id] = {};
+ chart_data[id]['data'] = {};
+ chart_data[id]['fmt'] = axis_fmt;
+ for (var i in items) {
+ var name = items[i][0];
+ var key = items[i][1];
+ var key_details = key + '_details';
+ if (key_details in stats) {
+ chart_data[id]['data'][name] = stats[key_details];
+ show.push([name, rate_fmt(stats, key)]);
+ }
+ }
+ var html = '<div class="box"><div id="chart-' + id +
+ '" class="chart chart-' + size +
+ (chart_rates ? ' chart-rates' : '') + '"></div>';
+ html += '<table class="facts facts-fixed-width">';
+ for (var i = 0; i < show.length; i++) {
+ html += '<tr><th>' + show[i][0] + '</th><td>';
+ html += '<div class="colour-key" style="background: ' + chart_colors[i];
+ html += ';"></div>' + show[i][1] + '</td></tr>'
+ }
+ html += '</table></div>';
+ return show.length > 0 ? html : '';
+}
+
+function rates_text(items, stats, mode, rate_fmt) {
+ var res = '';
+ for (var i in items) {
+ var name = items[i][0];
+ var key = items[i][1];
+ var key_details = key + '_details';
+ if (key_details in stats) {
+ var details = stats[key_details];
+ res += '<div class="highlight">' + name;
+ res += rate_fmt(stats, key, mode);
+ res += '</div>';
+ }
+ }
+ return res == '' ? '' : '<div class="box">' + res + '</div>';
+}
+
+// Filter box and truncation control shown above listings. Filters items
+// IN PLACE (plain substring, or regex when enabled), truncates the list
+// to the 'truncate' preference, and returns the control-bar HTML.
+function filter_ui(items) {
+ current_truncate = (current_truncate == null) ?
+ parseInt(get_pref('truncate')) : current_truncate;
+ var total = items.length;
+
+ if (current_filter != '') {
+ var items2 = [];
+ for (var i in items) {
+ var item = items[i];
+ var item_name = item.name.toLowerCase();
+ if ((current_filter_regex_on &&
+ current_filter_regex &&
+ current_filter_regex.test(item_name)) ||
+ item_name.indexOf(current_filter.toLowerCase()) != -1) {
+ items2.push(item);
+ }
+ }
+ // Copy survivors back so the caller's array is mutated in place.
+ items.length = items2.length;
+ for (var i in items2) items[i] = items2[i];
+ }
+
+ var res = '<div class="filter"><table' +
+ (current_filter == '' ? '' : ' class="filter-active"') +
+ '><tr><th>Filter:</th>' +
+ '<td><input id="filter" type="text" value="' +
+ fmt_escape_html(current_filter) + '"/>' +
+ '<input type="checkbox" name="filter-regex-mode" id="filter-regex-mode"' +
+ (current_filter_regex_on ? ' checked' : '') +
+ '/><label for="filter-regex-mode">Regex</label> <span class="help" id="filter-regex">(?)</span>' +
+ '</td></tr></table>';
+
+ // Pluralise the item count.
+ function items_desc(l) {
+ return l == 1 ? (l + ' item') : (l + ' items');
+ }
+
+ var selected = current_filter == '' ? (items_desc(items.length)) :
+ (items.length + ' of ' + items_desc(total) + ' selected');
+
+ var truncate_input = '<input type="text" id="truncate" value="' +
+ current_truncate + '">';
+
+ // Truncation happens after filtering; the warning only shows when
+ // items were actually dropped.
+ if (items.length > current_truncate) {
+ selected += '<span id="filter-warning-show"> ' +
+ '(only showing first</span> ';
+ items.length = current_truncate;
+ }
+ else {
+ selected += ' (show at most ';
+ }
+ res += '<p id="filter-truncate"><span class="updatable">' + selected +
+ '</span>' + truncate_input + ')</p>';
+ res += '</div>';
+
+ return res;
+}
+
+function maybe_truncate(items) {
+ var maximum = 500;
+ var str = '';
+
+ if (items.length > maximum) {
+ str = '<p class="warning">Only ' + maximum + ' of ' +
+ items.length + ' items are shown.</p>';
+ items.length = maximum;
+ }
+
+ return str;
+}
+
+// Sortable column header link; prepends an up/down arrow on the column
+// that is currently sorted (current_sort / current_sort_reverse are
+// module-level state).
+function fmt_sort(display, sort) {
+ var prefix = '';
+ if (current_sort == sort) {
+ prefix = '<span class="arrow">' +
+ (current_sort_reverse ? '&#9650; ' : '&#9660; ') +
+ '</span>';
+ }
+ return '<a class="sort" sort="' + sort + '">' + prefix + display + '</a>';
+}
+
+function fmt_permissions(obj, permissions, lookup, show, warning) {
+ var res = [];
+ for (var i in permissions) {
+ var permission = permissions[i];
+ if (permission[lookup] == obj.name) {
+ res.push(permission[show]);
+ }
+ }
+ return res.length == 0 ? warning : res.join(', ');
+}
+
+// Monotonically increasing id pairing each radio <input> with its <label>.
+var radio_id = 0;
+
+// One radio button with attached label; rendered checked when value
+// equals current.
+function fmt_radio(name, text, value, current) {
+ radio_id++;
+ return '<label class="radio" for="radio-' + radio_id + '">' +
+ '<input type="radio" id="radio-' + radio_id + '" name="' + name +
+ '" value="' + value + '"' +
+ ((value == current) ? ' checked="checked"' : '') +
+ '>' + text + '</label>';
+}
+
+function properties_size(obj) {
+ var count = 0;
+ for (k in obj) {
+ if (obj.hasOwnProperty(k)) count++;
+ }
+ return count;
+}
--- /dev/null
+///////////////////////
+// //
+// Genuine constants //
+// //
+///////////////////////
+
+// Just used below
+function map(list) {
+ var res = {};
+ for (i in list) {
+ res[list[i]] = '';
+ }
+ return res;
+}
+
+// Extension arguments that we know about and present specially in the UI.
+var KNOWN_ARGS = {'alternate-exchange': {'short': 'AE', 'type': 'string'},
+ 'x-message-ttl': {'short': 'TTL', 'type': 'int'},
+ 'x-expires': {'short': 'Exp', 'type': 'int'},
+ 'x-max-length': {'short': 'Lim', 'type': 'int'},
+ 'x-dead-letter-exchange': {'short': 'DLX', 'type': 'string'},
+ 'x-dead-letter-routing-key': {'short': 'DLK', 'type': 'string'}};
+
+// Things that are like arguments that we format the same way in listings.
+var IMPLICIT_ARGS = {'durable': {'short': 'D', 'type': 'boolean'},
+ 'auto-delete': {'short': 'AD', 'type': 'boolean'},
+ 'internal': {'short': 'I', 'type': 'boolean'}};
+
+// Both the above
+var ALL_ARGS = {};
+for (var k in KNOWN_ARGS) ALL_ARGS[k] = KNOWN_ARGS[k];
+for (var k in IMPLICIT_ARGS) ALL_ARGS[k] = IMPLICIT_ARGS[k];
+
+// Top-level tabs: label -> [location hash, user tag]. The Admin entry
+// nests a sub-map of admin tabs with their own tags.
+var NAVIGATION = {'Overview': ['#/', "management"],
+ 'Connections': ['#/connections', "management"],
+ 'Channels': ['#/channels', "management"],
+ 'Exchanges': ['#/exchanges', "management"],
+ 'Queues': ['#/queues', "management"],
+ 'Admin':
+ [{'Users': ['#/users', "administrator"],
+ 'Virtual Hosts': ['#/vhosts', "administrator"],
+ 'Policies': ['#/policies', "policymaker"]},
+ "policymaker"]
+ };
+
+// Chart range choices: key -> human description shown by prefix_title.
+// Keys look like 'seconds|interval' — presumably parsed by the chart
+// code elsewhere; confirm there before changing the format.
+var CHART_PERIODS = {'60|5': 'Last minute',
+ '600|5': 'Last ten minutes',
+ '3600|60': 'Last hour',
+ '28800|600': 'Last eight hours',
+ '86400|1800': 'Last day'};
+
+///////////////////////////////////////////////////////////////////////////
+// //
+// Mostly constant, typically get set once at startup (or rarely anyway) //
+// //
+///////////////////////////////////////////////////////////////////////////
+
+// All these are to do with hiding UI elements if
+// (most are populated by setup_global_vars below)
+var statistics_level; // ...there are no fine stats
+var user_administrator; // ...user is not an admin
+var user_monitor; // ...user cannot monitor
+var nodes_interesting; // ...we are not in a cluster
+var vhosts_interesting; // ...there is only one vhost
+var rabbit_versions_interesting; // ...all cluster nodes run the same version
+
+// Extensions write to this, the dispatcher maker reads it
+var dispatcher_modules = [];
+
+// We need to know when all extension script files have loaded
+var extension_count;
+
+// The dispatcher needs access to the Sammy app
+var app;
+
+// Used for the new exchange form, and to display broken exchange types
+var exchange_types;
+
+// Used for access control
+var user_tags;
+var user;
+
+// Set up the above vars
+// Fetches /overview (synchronously via sync_get), derives the user's
+// effective tags, fills in the login-details header, and decides which
+// UI elements are interesting (cluster nodes, differing node versions,
+// multiple vhosts). Note: 'user' must already be populated.
+function setup_global_vars() {
+ var overview = JSON.parse(sync_get('/overview'));
+ statistics_level = overview.statistics_level;
+ user_tags = expand_user_tags(user.tags.split(","));
+ user_administrator = jQuery.inArray("administrator", user_tags) != -1;
+ user_monitor = jQuery.inArray("monitoring", user_tags) != -1;
+ replace_content('login-details',
+ '<p>User: <b>' + user.name + '</b></p>' +
+ '<p>Cluster: <b>' + overview.cluster_name + '</b> ' +
+ (user_administrator ?
+ '(<a href="#/cluster-name">change</a>)' : '') + '</p>' +
+ '<p>RabbitMQ ' + overview.rabbitmq_version +
+ ', <acronym class="normal" title="' +
+ overview.erlang_full_version + '">Erlang ' +
+ overview.erlang_version + '</acronym></p>');
+ nodes_interesting = false;
+ rabbit_versions_interesting = false;
+ if (user_monitor) {
+ var nodes = JSON.parse(sync_get('/nodes'));
+ if (nodes.length > 1) {
+ nodes_interesting = true;
+ var v = '';
+ for (var i = 0; i < nodes.length; i++) {
+ var v1 = fmt_rabbit_version(nodes[i].applications);
+ if (v1 != 'unknown') {
+ // Any two nodes disagreeing on version makes versions interesting.
+ if (v != '' && v != v1) rabbit_versions_interesting = true;
+ v = v1;
+ }
+ }
+ }
+ }
+ vhosts_interesting = JSON.parse(sync_get('/vhosts')).length > 1;
+ current_vhost = get_pref('vhost');
+ exchange_types = overview.exchange_types;
+}
+
+function expand_user_tags(tags) {
+ var new_tags = [];
+ for (var i = 0; i < tags.length; i++) {
+ var tag = tags[i];
+ new_tags.push(tag);
+ switch (tag) { // Note deliberate fall-through
+ case "administrator": new_tags.push("monitoring");
+ new_tags.push("policymaker");
+ case "monitoring": new_tags.push("management");
+ break;
+ case "policymaker": new_tags.push("management");
+ default: break;
+ }
+ }
+ return new_tags;
+}
+
+////////////////////////////////////////////////////
+// //
+// Change frequently (typically every "new page") //
+// //
+////////////////////////////////////////////////////
+
+// Which top level template we're showing
+var current_template;
+
+// Which JSON requests do we need to populate it
+var current_reqs;
+
+// Which tab is highlighted
+var current_highlight;
+
+// Which vhost are we looking at
+var current_vhost = '';
+
+// What is our current sort order
+var current_sort;
+var current_sort_reverse = false;
+
+// Listing filter state (read by filter_ui and fmt_highlight_filter).
+var current_filter = '';
+var current_filter_regex_on = false;
+var current_filter_regex;
+var current_truncate;
+
+// The timer object for auto-updates, and how often it goes off
+var timer;
+var timer_interval;
+
+// When did we last connect successfully (for the "could not connect" error)
+var last_successful_connect;
+
+// Every 200 updates without user interaction we do a full refresh, to
+// work around memory leaks in browser DOM implementations.
+// TODO: maybe we don't need this any more?
+var update_counter = 0;
+
+// Holds chart data in between writing the div in an ejs and rendering
+// the chart (written by rates_chart, keyed by panel id).
+var chart_data = {};
--- /dev/null
+HELP = {
+ 'exchange-auto-delete':
+ 'If yes, the exchange will delete itself after at least one queue or exchange has been bound to this one, and then all queues or exchanges have been unbound.',
+
+ 'exchange-internal':
+ 'If yes, clients cannot publish to this exchange directly. It can only be used with exchange to exchange bindings.',
+
+ 'exchange-alternate':
+ 'If messages to this exchange cannot otherwise be routed, send them to the alternate exchange named here.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/ae.html">alternate-exchange</a>" argument.)',
+
+ 'queue-message-ttl':
+ 'How long a message published to a queue can live before it is discarded (milliseconds).<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/ttl.html#per-queue-message-ttl">x-message-ttl</a>" argument.)',
+
+ 'queue-expires':
+ 'How long a queue can be unused for before it is automatically deleted (milliseconds).<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/ttl.html#queue-ttl">x-expires</a>" argument.)',
+
+ 'queue-max-length':
+ 'How many (ready) messages a queue can contain before it starts to drop them from its head.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/maxlength.html">x-max-length</a>" argument.)',
+
+ 'queue-auto-delete':
+ 'If yes, the queue will delete itself after at least one consumer has connected, and then all consumers have disconnected.',
+
+ 'queue-dead-letter-exchange':
+ 'Optional name of an exchange to which messages will be republished if they are rejected or expire.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/dlx.html">x-dead-letter-exchange</a>" argument.)',
+
+ 'queue-dead-letter-routing-key':
+ 'Optional replacement routing key to use when a message is dead-lettered. If this is not set, the message\'s original routing key will be used.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/dlx.html">x-dead-letter-routing-key</a>" argument.)',
+
+ 'queue-memory-resident':
+ '<p>Number of messages in the queue which are held in memory. These messages may also be on disc (if they are persistent).</p><p>There may be a limit imposed in order to manage total memory use. If the number of memory-resident messages in the queue exceeds the limit some messages will be paged out.</p>',
+
+ 'queue-persistent':
+ 'Number of messages in the queue which are persistent. These messages will be on disc but may also be available in memory. Note that if a message is published as persistent but routed to a transient queue it is not considered persistent by that queue, so transient queues will always report 0 persistent messages.',
+
+ 'queue-consumer-utilisation':
+ 'Fraction of the time that the queue is able to immediately deliver messages to consumers. If this number is less than 100% you may be able to deliver messages faster if: \
+ <ul> \
+ <li>There were more consumers or</li> \
+ <li>The consumers were faster or</li> \
+ <li>The consumers had a higher prefetch count</li> \
+ </ul>',
+
+ 'internal-users-only':
+ 'Only users within the internal RabbitMQ database are shown here. Other users (e.g. those authenticated over LDAP) will not appear.',
+
+ 'export-definitions':
+ 'The definitions consist of users, virtual hosts, permissions, parameters, exchanges, queues and bindings. They do not include the contents of queues or the cluster name. Exclusive queues will not be exported.',
+
+ 'import-definitions':
+ 'The definitions that are imported will be merged with the current definitions. If an error occurs during import, any changes made will not be rolled back.',
+
+ 'exchange-rates-incoming':
+ 'The incoming rate is the rate at which messages are published directly to this exchange.',
+
+ 'exchange-rates-outgoing':
+ 'The outgoing rate is the rate at which messages enter queues, having been published directly to this exchange.',
+
+ 'channel-mode':
+ 'Channel guarantee mode. Can be one of the following, or neither:<br/> \
+ <dl> \
+ <dt><acronym title="Confirm">C</acronym> – <a target="_blank" href="http://www.rabbitmq.com/confirms.html">confirm</a></dt> \
+ <dd>Channel will send streaming publish confirmations.</dd> \
+ <dt><acronym title="Transactional">T</acronym> – <a target="_blank" href="http://www.rabbitmq.com/amqp-0-9-1-reference.html#class.tx">transactional</a></dt> \
+ <dd>Channel is transactional.</dd> \
+ </dl>',
+
+ 'channel-prefetch':
+ 'Channel prefetch counts. \
+ <p> \
+ Each channel can have two prefetch counts: A per-consumer count, which \
+ will limit each new consumer created on the channel, and a global \
+ count, which is shared between all consumers on the channel.\
+ </p> \
+ <p> \
+ This column shows one, the other, or both limits if they are set. \
+ </p>',
+
+ 'file-descriptors':
+ '<p>File descriptor count and limit, as reported by the operating \
+ system. The count includes network sockets and file handles.</p> \
+ <p>To optimize disk access RabbitMQ uses as many free descriptors as are \
+ available, so the count may safely approach the limit. \
+ However, if most of the file descriptors are used by sockets then \
+ persister performance will be negatively impacted.</p> \
+ <p>To change the limit on Unix / Linux, use "ulimit -n". To change \
+ the limit on Windows, set the ERL_MAX_PORTS environment variable</p> \
+ <p>To report used file handles on Windows, handle.exe from \
+ sysinternals must be installed in your path. You can download it \
+ <a target="_blank" href="http://technet.microsoft.com/en-us/sysinternals/bb896655">here</a>.</p>',
+
+ 'socket-descriptors':
+ 'The network sockets count and limit managed by RabbitMQ.<br/> \
+ When the limit is exhausted RabbitMQ will stop accepting new \
+ network connections.',
+
+ 'memory-alarm':
+ '<p>The <a target="_blank" href="http://www.rabbitmq.com/memory.html#memsup">memory \
+ alarm</a> for this node has gone off. It will block \
+ incoming network traffic until the memory usage drops below \
+ the watermark.</p>\
+ <p>Note that the pale line in this case indicates the high watermark \
+ in relation to how much memory is used in total. </p>',
+
+ 'disk-free-alarm':
+ 'The <a target="_blank" href="http://www.rabbitmq.com/memory.html#diskfreesup">disk \
+ free space alarm</a> for this node has gone off. It will block \
+ incoming network traffic until the amount of free space exceeds \
+ the limit.',
+
+ 'message-get-requeue':
+ '<p>Clicking "Get Message(s)" will consume messages from the queue. \
+ If requeue is set the message will be put back into the queue in place, \
+ but "redelivered" will be set on the message.</p> \
+ <p>If requeue is not set messages will be removed from the queue.</p> \
+ <p>Furthermore, message payloads will be truncated to 50000 bytes.</p>',
+
+ 'message-publish-headers':
+ 'Headers can have any name. Only long string headers can be set here.',
+
+ 'message-publish-properties':
+ '<p>You can set other message properties here (delivery mode and headers \
+ are pulled out as the most common cases).</p>\
+ <p>Invalid properties will be ignored. Valid properties are:</p>\
+ <ul>\
+ <li>content_type</li>\
+ <li>content_encoding</li>\
+ <li>priority</li>\
+ <li>correlation_id</li>\
+ <li>reply_to</li>\
+ <li>expiration</li>\
+ <li>message_id</li>\
+ <li>timestamp</li>\
+ <li>type</li>\
+ <li>user_id</li>\
+ <li>app_id</li>\
+ <li>cluster_id</li>\
+ </ul>',
+
+ 'string-base64':
+ '<p>AMQP message payloads can contain any binary content. They can \
+ therefore be difficult to display in a browser. The options here \
+ have the following meanings:</p> \
+ <dl> \
+ <dt>Auto string / base64</dt> \
+ <dd>If the message payload can be interpreted as a string in UTF-8 \
+ encoding, do so. Otherwise return the payload encoded as \
+ base64.</dd> \
+ <dt>base64</dt> \
+ <dd>Return the payload encoded as base64 unconditionally.</dd> \
+ </dl>',
+
+ 'user-tags':
+ 'Comma-separated list of tags to apply to the user. Currently \
+ <a target="_blank" href="http://www.rabbitmq.com/management.html#permissions">supported \
+ by the management plugin</a>: \
+ <dl> \
+ <dt>management</dt> \
+ <dd> \
+ User can access the management plugin \
+ </dd> \
+ <dt>policymaker</dt> \
+ <dd> \
+ User can access the management plugin and manage policies and \
+ parameters for the vhosts they have access to. \
+ </dd> \
+ <dt>monitoring</dt> \
+ <dd> \
+ User can access the management plugin and see all connections and \
+ channels as well as node-related information. \
+ </dd> \
+ <dt>administrator</dt> \
+ <dd> \
+ User can do everything monitoring can do, manage users, \
+ vhosts and permissions, close other user\'s connections, and manage \
+ policies and parameters for all vhosts. \
+ </dd> \
+ </dl> \
+ <p> \
+ Note that you can set any tag here; the links for the above four \
+ tags are just for convenience. \
+ </p>',
+
+ 'queued-messages':
+ '<dl> \
+ <dt>Ready</dt>\
+ <dd>Number of messages that are available to be delivered now.</dd>\
+ <dt>Unacknowledged</dt>\
+ <dd>Number of messages for which the server is waiting for acknowledgement.</dd>\
+ <dt>Total</dt>\
+ <dd>The total of these two numbers.</dd>\
+ </dl>\
+ Note that the rate of change of total queued messages does \
+ <b>not</b> include messages removed due to queue deletion.',
+
+ 'message-rates':
+ 'Only rates for which some activity is taking place will be shown.\
+ <dl>\
+ <dt>Publish</dt>\
+ <dd>Rate at which messages are entering the server.</dd>\
+ <dt>Confirm</dt>\
+ <dd>Rate at which the server is confirming publishes.</dd>\
+ <dt>Deliver</dt>\
+ <dd>Rate at which messages requiring acknowledgement are being delivered in response to basic.consume.</dd>\
+ <dt>Deliver (noack)</dt>\
+ <dd>Rate at which messages not requiring acknowledgement are being delivered in response to basic.consume.</dd>\
+ <dt>Get</dt>\
+ <dd>Rate at which messages requiring acknowledgement are being delivered in response to basic.get.</dd>\
+ <dt>Get (noack)</dt>\
+ <dd>Rate at which messages not requiring acknowledgement are being delivered in response to basic.get.</dd>\
+ <dt>Acknowledge</dt>\
+ <dd>Rate at which messages are being acknowledged.</dd>\
+ <dt>Redelivered</dt>\
+ <dd>Rate at which messages with the \'redelivered\' flag set are being delivered. Note that these messages will <b>also</b> be counted in one of the delivery rates above.</dd>\
+ <dt>Return</dt>\
+ <dd>Rate at which basic.return is sent to publishers for unroutable messages published with the \'mandatory\' flag set.</dd>\
+ </dl>',
+
+ 'disk-monitoring-no-watermark' : 'There is no <a target="_blank" href="http://www.rabbitmq.com/memory.html#diskfreesup">disk space low watermark</a> set. RabbitMQ will not take any action to avoid running out of disk space.',
+
+ 'resource-counts' : 'Shows total number of objects for all virtual hosts the current user has access to.',
+
+ 'memory-use' : '<p>Note that the memory details shown here are only updated on request - they could be too expensive to calculate every few seconds on a busy server.</p><p><a target="_blank" href="http://www.rabbitmq.com/memory-use.html">Read more</a> on memory use.</p>',
+
+ 'policy-definitions' : '<dl>\
+<dt><code>ha-mode</code></dt>\
+ <dd>\
+ One of <code>all</code>, <code>exactly</code>\
+ or <code>nodes</code>.\
+ </dd>\
+ <dt><code>ha-params</code></dt>\
+ <dd>\
+ Absent if <code>ha-mode</code> is <code>all</code>, a number\
+ if <code>ha-mode</code> is <code>exactly</code>, or a list\
+ of strings if <code>ha-mode</code> is <code>nodes</code>.\
+ </dd>\
+ <dt><code>ha-sync-mode</code></dt>\
+ <dd>\
+ One of <code>manual</code> or <code>automatic</code>.\
+ </dd>\
+ <dt><code>alternate-exchange</code></dt>\
+ <dd>\
+ The name of an alternate exchange.\
+ </dd>\
+ <dt><code>dead-letter-exchange</code></dt>\
+ <dd>\
+ The name of a dead letter exchange.\
+ </dd>\
+ <dt><code>dead-letter-routing-key</code></dt>\
+ <dd>\
+ Key to use when dead-lettering.\
+ </dd>\
+ <dt><code>message-ttl</code></dt>\
+ <dd>\
+ Per-queue message TTL, in milliseconds.\
+ </dd>\
+ <dt><code>expires</code></dt>\
+ <dd>\
+ Queue TTL, in milliseconds.\
+ </dd>\
+ <dt><code>max-length</code></dt>\
+ <dd>\
+ Maximum queue length, in messages.\
+ </dd>\
+ <dt><code>federation-upstream-set</code></dt>\
+ <dd>\
+ A string; only if the federation plugin is enabled.\
+ </dd>\
+</dl>',
+
+ 'handle-exe' : 'In order to monitor the number of file descriptors in use on Windows, RabbitMQ needs the <a href="http://technet.microsoft.com/en-us/sysinternals/bb896655" target="_blank">handle.exe command line tool from Microsoft</a>. Download it and place it in the path (e.g. in C:\\Windows).',
+
+ 'filter-regex' :
+ 'Whether to enable regular expression matching. Both string literals \
+ and regular expressions are matched in a case-insensitive manner.<br/><br/> \
+ (<a href="https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions" target="_blank">Regular expression reference</a>)',
+
+ 'foo': 'foo' // No comma.
+};
+
+function help(id) {
+ show_popup('help', HELP[id]);
+}
--- /dev/null
+/*!
+ * jQuery JavaScript Library v1.6.4
+ * http://jquery.com/
+ *
+ * Copyright 2011, John Resig
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ * Copyright 2011, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ *
+ * Date: Mon Sep 12 18:54:48 2011 -0400
+ */
+(function( window, undefined ) {
+
+// Use the correct document accordingly with window argument (sandbox)
+var document = window.document,
+ navigator = window.navigator,
+ location = window.location;
+var jQuery = (function() {
+
+// Define a local copy of jQuery
+var jQuery = function( selector, context ) {
+ // The jQuery object is actually just the init constructor 'enhanced'
+ return new jQuery.fn.init( selector, context, rootjQuery );
+ },
+
+ // Map over jQuery in case of overwrite
+ _jQuery = window.jQuery,
+
+ // Map over the $ in case of overwrite
+ _$ = window.$,
+
+ // A central reference to the root jQuery(document)
+ rootjQuery,
+
+ // A simple way to check for HTML strings or ID strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ quickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,
+
+ // Check if a string has a non-whitespace character in it
+ rnotwhite = /\S/,
+
+ // Used for trimming whitespace
+ trimLeft = /^\s+/,
+ trimRight = /\s+$/,
+
+ // Check for digits
+ rdigit = /\d/,
+
+ // Match a standalone tag
+ rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>)?$/,
+
+ // JSON RegExp
+ rvalidchars = /^[\],:{}\s]*$/,
+ rvalidescape = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,
+ rvalidtokens = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,
+ rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g,
+
+ // Useragent RegExp
+ rwebkit = /(webkit)[ \/]([\w.]+)/,
+ ropera = /(opera)(?:.*version)?[ \/]([\w.]+)/,
+ rmsie = /(msie) ([\w.]+)/,
+ rmozilla = /(mozilla)(?:.*? rv:([\w.]+))?/,
+
+ // Matches dashed string for camelizing
+ rdashAlpha = /-([a-z]|[0-9])/ig,
+ rmsPrefix = /^-ms-/,
+
+ // Used by jQuery.camelCase as callback to replace()
+ fcamelCase = function( all, letter ) {
+ return ( letter + "" ).toUpperCase();
+ },
+
+ // Keep a UserAgent string for use with jQuery.browser
+ userAgent = navigator.userAgent,
+
+ // For matching the engine and version of the browser
+ browserMatch,
+
+ // The deferred used on DOM ready
+ readyList,
+
+ // The ready event handler
+ DOMContentLoaded,
+
+ // Save a reference to some core methods
+ toString = Object.prototype.toString,
+ hasOwn = Object.prototype.hasOwnProperty,
+ push = Array.prototype.push,
+ slice = Array.prototype.slice,
+ trim = String.prototype.trim,
+ indexOf = Array.prototype.indexOf,
+
+ // [[Class]] -> type pairs
+ class2type = {};
+
+jQuery.fn = jQuery.prototype = {
+ constructor: jQuery,
+ init: function( selector, context, rootjQuery ) {
+ var match, elem, ret, doc;
+
+ // Handle $(""), $(null), or $(undefined)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Handle $(DOMElement)
+ if ( selector.nodeType ) {
+ this.context = this[0] = selector;
+ this.length = 1;
+ return this;
+ }
+
+ // The body element only exists once, optimize finding it
+ if ( selector === "body" && !context && document.body ) {
+ this.context = document;
+ this[0] = document.body;
+ this.selector = selector;
+ this.length = 1;
+ return this;
+ }
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ // Are we dealing with HTML string or an ID?
+ if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) {
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = quickExpr.exec( selector );
+ }
+
+ // Verify a match, and that no context was specified for #id
+ if ( match && (match[1] || !context) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[1] ) {
+ context = context instanceof jQuery ? context[0] : context;
+ doc = (context ? context.ownerDocument || context : document);
+
+ // If a single string is passed in and it's a single tag
+ // just do a createElement and skip the rest
+ ret = rsingleTag.exec( selector );
+
+ if ( ret ) {
+ if ( jQuery.isPlainObject( context ) ) {
+ selector = [ document.createElement( ret[1] ) ];
+ jQuery.fn.attr.call( selector, context, true );
+
+ } else {
+ selector = [ doc.createElement( ret[1] ) ];
+ }
+
+ } else {
+ ret = jQuery.buildFragment( [ match[1] ], [ doc ] );
+ selector = (ret.cacheable ? jQuery.clone(ret.fragment) : ret.fragment).childNodes;
+ }
+
+ return jQuery.merge( this, selector );
+
+ // HANDLE: $("#id")
+ } else {
+ elem = document.getElementById( match[2] );
+
+ // Check parentNode to catch when Blackberry 4.6 returns
+ // nodes that are no longer in the document #6963
+ if ( elem && elem.parentNode ) {
+ // Handle the case where IE and Opera return items
+ // by name instead of ID
+ if ( elem.id !== match[2] ) {
+ return rootjQuery.find( selector );
+ }
+
+ // Otherwise, we inject the element directly into the jQuery object
+ this.length = 1;
+ this[0] = elem;
+ }
+
+ this.context = document;
+ this.selector = selector;
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return (context || rootjQuery).find( selector );
+
+ // HANDLE: $(expr, context)
+ // (which is just equivalent to: $(context).find(expr)
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( jQuery.isFunction( selector ) ) {
+ return rootjQuery.ready( selector );
+ }
+
+ if (selector.selector !== undefined) {
+ this.selector = selector.selector;
+ this.context = selector.context;
+ }
+
+ return jQuery.makeArray( selector, this );
+ },
+
+ // Start with an empty selector
+ selector: "",
+
+ // The current version of jQuery being used
+ jquery: "1.6.4",
+
+ // The default length of a jQuery object is 0
+ length: 0,
+
+ // The number of elements contained in the matched element set
+ size: function() {
+ return this.length;
+ },
+
+ toArray: function() {
+ return slice.call( this, 0 );
+ },
+
+ // Get the Nth element in the matched element set OR
+ // Get the whole matched element set as a clean array
+ get: function( num ) {
+ return num == null ?
+
+ // Return a 'clean' array
+ this.toArray() :
+
+ // Return just the object
+ ( num < 0 ? this[ this.length + num ] : this[ num ] );
+ },
+
+ // Take an array of elements and push it onto the stack
+ // (returning the new matched element set)
+ pushStack: function( elems, name, selector ) {
+ // Build a new jQuery matched element set
+ var ret = this.constructor();
+
+ if ( jQuery.isArray( elems ) ) {
+ push.apply( ret, elems );
+
+ } else {
+ jQuery.merge( ret, elems );
+ }
+
+ // Add the old object onto the stack (as a reference)
+ ret.prevObject = this;
+
+ ret.context = this.context;
+
+ if ( name === "find" ) {
+ ret.selector = this.selector + (this.selector ? " " : "") + selector;
+ } else if ( name ) {
+ ret.selector = this.selector + "." + name + "(" + selector + ")";
+ }
+
+ // Return the newly-formed element set
+ return ret;
+ },
+
+ // Execute a callback for every element in the matched set.
+ // (You can seed the arguments with an array of args, but this is
+ // only used internally.)
+ each: function( callback, args ) {
+ return jQuery.each( this, callback, args );
+ },
+
+ ready: function( fn ) {
+ // Attach the listeners
+ jQuery.bindReady();
+
+ // Add the callback
+ readyList.done( fn );
+
+ return this;
+ },
+
+ eq: function( i ) {
+ return i === -1 ?
+ this.slice( i ) :
+ this.slice( i, +i + 1 );
+ },
+
+ first: function() {
+ return this.eq( 0 );
+ },
+
+ last: function() {
+ return this.eq( -1 );
+ },
+
+ slice: function() {
+ return this.pushStack( slice.apply( this, arguments ),
+ "slice", slice.call(arguments).join(",") );
+ },
+
+ map: function( callback ) {
+ return this.pushStack( jQuery.map(this, function( elem, i ) {
+ return callback.call( elem, i, elem );
+ }));
+ },
+
+ end: function() {
+ return this.prevObject || this.constructor(null);
+ },
+
+ // For internal use only.
+ // Behaves like an Array's method, not like a jQuery method.
+ push: push,
+ sort: [].sort,
+ splice: [].splice
+};
+
+// Give the init function the jQuery prototype for later instantiation
+jQuery.fn.init.prototype = jQuery.fn;
+
+jQuery.extend = jQuery.fn.extend = function() {
+ var options, name, src, copy, copyIsArray, clone,
+ target = arguments[0] || {},
+ i = 1,
+ length = arguments.length,
+ deep = false;
+
+ // Handle a deep copy situation
+ if ( typeof target === "boolean" ) {
+ deep = target;
+ target = arguments[1] || {};
+ // skip the boolean and the target
+ i = 2;
+ }
+
+ // Handle case when target is a string or something (possible in deep copy)
+ if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
+ target = {};
+ }
+
+ // extend jQuery itself if only one argument is passed
+ if ( length === i ) {
+ target = this;
+ --i;
+ }
+
+ for ( ; i < length; i++ ) {
+ // Only deal with non-null/undefined values
+ if ( (options = arguments[ i ]) != null ) {
+ // Extend the base object
+ for ( name in options ) {
+ src = target[ name ];
+ copy = options[ name ];
+
+ // Prevent never-ending loop
+ if ( target === copy ) {
+ continue;
+ }
+
+ // Recurse if we're merging plain objects or arrays
+ if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) {
+ if ( copyIsArray ) {
+ copyIsArray = false;
+ clone = src && jQuery.isArray(src) ? src : [];
+
+ } else {
+ clone = src && jQuery.isPlainObject(src) ? src : {};
+ }
+
+ // Never move original objects, clone them
+ target[ name ] = jQuery.extend( deep, clone, copy );
+
+ // Don't bring in undefined values
+ } else if ( copy !== undefined ) {
+ target[ name ] = copy;
+ }
+ }
+ }
+ }
+
+ // Return the modified object
+ return target;
+};
+
+jQuery.extend({
+ noConflict: function( deep ) {
+ if ( window.$ === jQuery ) {
+ window.$ = _$;
+ }
+
+ if ( deep && window.jQuery === jQuery ) {
+ window.jQuery = _jQuery;
+ }
+
+ return jQuery;
+ },
+
+ // Is the DOM ready to be used? Set to true once it occurs.
+ isReady: false,
+
+ // A counter to track how many items to wait for before
+ // the ready event fires. See #6781
+ readyWait: 1,
+
+ // Hold (or release) the ready event
+ holdReady: function( hold ) {
+ if ( hold ) {
+ jQuery.readyWait++;
+ } else {
+ jQuery.ready( true );
+ }
+ },
+
+ // Handle when the DOM is ready
+ ready: function( wait ) {
+ // Either a released hold or an DOMready/load event and not yet ready
+ if ( (wait === true && !--jQuery.readyWait) || (wait !== true && !jQuery.isReady) ) {
+ // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
+ if ( !document.body ) {
+ return setTimeout( jQuery.ready, 1 );
+ }
+
+ // Remember that the DOM is ready
+ jQuery.isReady = true;
+
+ // If a normal DOM Ready event fired, decrement, and wait if need be
+ if ( wait !== true && --jQuery.readyWait > 0 ) {
+ return;
+ }
+
+ // If there are functions bound, to execute
+ readyList.resolveWith( document, [ jQuery ] );
+
+ // Trigger any bound ready events
+ if ( jQuery.fn.trigger ) {
+ jQuery( document ).trigger( "ready" ).unbind( "ready" );
+ }
+ }
+ },
+
+ bindReady: function() {
+ if ( readyList ) {
+ return;
+ }
+
+ readyList = jQuery._Deferred();
+
+ // Catch cases where $(document).ready() is called after the
+ // browser event has already occurred.
+ if ( document.readyState === "complete" ) {
+ // Handle it asynchronously to allow scripts the opportunity to delay ready
+ return setTimeout( jQuery.ready, 1 );
+ }
+
+ // Mozilla, Opera and webkit nightlies currently support this event
+ if ( document.addEventListener ) {
+ // Use the handy event callback
+ document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false );
+
+ // A fallback to window.onload, that will always work
+ window.addEventListener( "load", jQuery.ready, false );
+
+ // If IE event model is used
+ } else if ( document.attachEvent ) {
+ // ensure firing before onload,
+ // maybe late but safe also for iframes
+ document.attachEvent( "onreadystatechange", DOMContentLoaded );
+
+ // A fallback to window.onload, that will always work
+ window.attachEvent( "onload", jQuery.ready );
+
+ // If IE and not a frame
+ // continually check to see if the document is ready
+ var toplevel = false;
+
+ try {
+ toplevel = window.frameElement == null;
+ } catch(e) {}
+
+ if ( document.documentElement.doScroll && toplevel ) {
+ doScrollCheck();
+ }
+ }
+ },
+
+ // See test/unit/core.js for details concerning isFunction.
+ // Since version 1.3, DOM methods and functions like alert
+ // aren't supported. They return false on IE (#2968).
+ isFunction: function( obj ) {
+ return jQuery.type(obj) === "function";
+ },
+
+ isArray: Array.isArray || function( obj ) {
+ return jQuery.type(obj) === "array";
+ },
+
+ // A crude way of determining if an object is a window
+ isWindow: function( obj ) {
+ return obj && typeof obj === "object" && "setInterval" in obj;
+ },
+
+ isNaN: function( obj ) {
+ return obj == null || !rdigit.test( obj ) || isNaN( obj );
+ },
+
+ type: function( obj ) {
+ return obj == null ?
+ String( obj ) :
+ class2type[ toString.call(obj) ] || "object";
+ },
+
+ isPlainObject: function( obj ) {
+ // Must be an Object.
+ // Because of IE, we also have to check the presence of the constructor property.
+ // Make sure that DOM nodes and window objects don't pass through, as well
+ if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) {
+ return false;
+ }
+
+ try {
+ // Not own constructor property must be Object
+ if ( obj.constructor &&
+ !hasOwn.call(obj, "constructor") &&
+ !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) {
+ return false;
+ }
+ } catch ( e ) {
+ // IE8,9 Will throw exceptions on certain host objects #9897
+ return false;
+ }
+
+ // Own properties are enumerated firstly, so to speed up,
+ // if last one is own, then all properties are own.
+
+ var key;
+ for ( key in obj ) {}
+
+ return key === undefined || hasOwn.call( obj, key );
+ },
+
+ isEmptyObject: function( obj ) {
+ for ( var name in obj ) {
+ return false;
+ }
+ return true;
+ },
+
+ error: function( msg ) {
+ throw msg;
+ },
+
+ parseJSON: function( data ) {
+ if ( typeof data !== "string" || !data ) {
+ return null;
+ }
+
+ // Make sure leading/trailing whitespace is removed (IE can't handle it)
+ data = jQuery.trim( data );
+
+ // Attempt to parse using the native JSON parser first
+ if ( window.JSON && window.JSON.parse ) {
+ return window.JSON.parse( data );
+ }
+
+ // Make sure the incoming data is actual JSON
+ // Logic borrowed from http://json.org/json2.js
+ if ( rvalidchars.test( data.replace( rvalidescape, "@" )
+ .replace( rvalidtokens, "]" )
+ .replace( rvalidbraces, "")) ) {
+
+ return (new Function( "return " + data ))();
+
+ }
+ jQuery.error( "Invalid JSON: " + data );
+ },
+
+ // Cross-browser xml parsing
+ parseXML: function( data ) {
+ var xml, tmp;
+ try {
+ if ( window.DOMParser ) { // Standard
+ tmp = new DOMParser();
+ xml = tmp.parseFromString( data , "text/xml" );
+ } else { // IE
+ xml = new ActiveXObject( "Microsoft.XMLDOM" );
+ xml.async = "false";
+ xml.loadXML( data );
+ }
+ } catch( e ) {
+ xml = undefined;
+ }
+ if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) {
+ jQuery.error( "Invalid XML: " + data );
+ }
+ return xml;
+ },
+
+ noop: function() {},
+
+ // Evaluates a script in a global context
+ // Workarounds based on findings by Jim Driscoll
+ // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context
+ globalEval: function( data ) {
+ if ( data && rnotwhite.test( data ) ) {
+ // We use execScript on Internet Explorer
+ // We use an anonymous function so that context is window
+ // rather than jQuery in Firefox
+ ( window.execScript || function( data ) {
+ window[ "eval" ].call( window, data );
+ } )( data );
+ }
+ },
+
+ // Convert dashed to camelCase; used by the css and data modules
+ // Microsoft forgot to hump their vendor prefix (#9572)
+ camelCase: function( string ) {
+ return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
+ },
+
+ nodeName: function( elem, name ) {
+ return elem.nodeName && elem.nodeName.toUpperCase() === name.toUpperCase();
+ },
+
+ // args is for internal usage only
+ each: function( object, callback, args ) {
+ var name, i = 0,
+ length = object.length,
+ isObj = length === undefined || jQuery.isFunction( object );
+
+ if ( args ) {
+ if ( isObj ) {
+ for ( name in object ) {
+ if ( callback.apply( object[ name ], args ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( ; i < length; ) {
+ if ( callback.apply( object[ i++ ], args ) === false ) {
+ break;
+ }
+ }
+ }
+
+ // A special, fast, case for the most common use of each
+ } else {
+ if ( isObj ) {
+ for ( name in object ) {
+ if ( callback.call( object[ name ], name, object[ name ] ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( ; i < length; ) {
+ if ( callback.call( object[ i ], i, object[ i++ ] ) === false ) {
+ break;
+ }
+ }
+ }
+ }
+
+ return object;
+ },
+
+ // Use native String.trim function wherever possible
+ trim: trim ?
+ function( text ) {
+ return text == null ?
+ "" :
+ trim.call( text );
+ } :
+
+ // Otherwise use our own trimming functionality
+ function( text ) {
+ return text == null ?
+ "" :
+ text.toString().replace( trimLeft, "" ).replace( trimRight, "" );
+ },
+
+ // results is for internal usage only
+ makeArray: function( array, results ) {
+ var ret = results || [];
+
+ if ( array != null ) {
+ // The window, strings (and functions) also have 'length'
+ // The extra typeof function check is to prevent crashes
+ // in Safari 2 (See: #3039)
+ // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930
+ var type = jQuery.type( array );
+
+ if ( array.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( array ) ) {
+ push.call( ret, array );
+ } else {
+ jQuery.merge( ret, array );
+ }
+ }
+
+ return ret;
+ },
+
+ inArray: function( elem, array ) {
+ if ( !array ) {
+ return -1;
+ }
+
+ if ( indexOf ) {
+ return indexOf.call( array, elem );
+ }
+
+ for ( var i = 0, length = array.length; i < length; i++ ) {
+ if ( array[ i ] === elem ) {
+ return i;
+ }
+ }
+
+ return -1;
+ },
+
+ merge: function( first, second ) {
+ var i = first.length,
+ j = 0;
+
+ if ( typeof second.length === "number" ) {
+ for ( var l = second.length; j < l; j++ ) {
+ first[ i++ ] = second[ j ];
+ }
+
+ } else {
+ while ( second[j] !== undefined ) {
+ first[ i++ ] = second[ j++ ];
+ }
+ }
+
+ first.length = i;
+
+ return first;
+ },
+
+ grep: function( elems, callback, inv ) {
+ var ret = [], retVal;
+ inv = !!inv;
+
+ // Go through the array, only saving the items
+ // that pass the validator function
+ for ( var i = 0, length = elems.length; i < length; i++ ) {
+ retVal = !!callback( elems[ i ], i );
+ if ( inv !== retVal ) {
+ ret.push( elems[ i ] );
+ }
+ }
+
+ return ret;
+ },
+
+ // arg is for internal usage only
+ map: function( elems, callback, arg ) {
+ var value, key, ret = [],
+ i = 0,
+ length = elems.length,
+ // jquery objects are treated as arrays
+ isArray = elems instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ;
+
+ // Go through the array, translating each of the items to their
+ if ( isArray ) {
+ for ( ; i < length; i++ ) {
+ value = callback( elems[ i ], i, arg );
+
+ if ( value != null ) {
+ ret[ ret.length ] = value;
+ }
+ }
+
+ // Go through every key on the object,
+ } else {
+ for ( key in elems ) {
+ value = callback( elems[ key ], key, arg );
+
+ if ( value != null ) {
+ ret[ ret.length ] = value;
+ }
+ }
+ }
+
+ // Flatten any nested arrays
+ return ret.concat.apply( [], ret );
+ },
+
+ // A global GUID counter for objects
+ guid: 1,
+
+ // Bind a function to a context, optionally partially applying any
+ // arguments.
+ proxy: function( fn, context ) {
+ if ( typeof context === "string" ) {
+ var tmp = fn[ context ];
+ context = fn;
+ fn = tmp;
+ }
+
+ // Quick check to determine if target is callable, in the spec
+ // this throws a TypeError, but we will just return undefined.
+ if ( !jQuery.isFunction( fn ) ) {
+ return undefined;
+ }
+
+ // Simulated bind
+ var args = slice.call( arguments, 2 ),
+ proxy = function() {
+ return fn.apply( context, args.concat( slice.call( arguments ) ) );
+ };
+
+ // Set the guid of unique handler to the same of original handler, so it can be removed
+ proxy.guid = fn.guid = fn.guid || proxy.guid || jQuery.guid++;
+
+ return proxy;
+ },
+
+ // Mutifunctional method to get and set values to a collection
+ // The value/s can optionally be executed if it's a function
+ access: function( elems, key, value, exec, fn, pass ) {
+ var length = elems.length;
+
+ // Setting many attributes
+ if ( typeof key === "object" ) {
+ for ( var k in key ) {
+ jQuery.access( elems, k, key[k], exec, fn, value );
+ }
+ return elems;
+ }
+
+ // Setting one attribute
+ if ( value !== undefined ) {
+ // Optionally, function values get executed if exec is true
+ exec = !pass && exec && jQuery.isFunction(value);
+
+ for ( var i = 0; i < length; i++ ) {
+ fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass );
+ }
+
+ return elems;
+ }
+
+ // Getting an attribute
+ return length ? fn( elems[0], key ) : undefined;
+ },
+
+ now: function() {
+ return (new Date()).getTime();
+ },
+
+ // Use of jQuery.browser is frowned upon.
+ // More details: http://docs.jquery.com/Utilities/jQuery.browser
+ uaMatch: function( ua ) {
+ ua = ua.toLowerCase();
+
+ var match = rwebkit.exec( ua ) ||
+ ropera.exec( ua ) ||
+ rmsie.exec( ua ) ||
+ ua.indexOf("compatible") < 0 && rmozilla.exec( ua ) ||
+ [];
+
+ return { browser: match[1] || "", version: match[2] || "0" };
+ },
+
+ sub: function() {
+ function jQuerySub( selector, context ) {
+ return new jQuerySub.fn.init( selector, context );
+ }
+ jQuery.extend( true, jQuerySub, this );
+ jQuerySub.superclass = this;
+ jQuerySub.fn = jQuerySub.prototype = this();
+ jQuerySub.fn.constructor = jQuerySub;
+ jQuerySub.sub = this.sub;
+ jQuerySub.fn.init = function init( selector, context ) {
+ if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) {
+ context = jQuerySub( context );
+ }
+
+ return jQuery.fn.init.call( this, selector, context, rootjQuerySub );
+ };
+ jQuerySub.fn.init.prototype = jQuerySub.fn;
+ var rootjQuerySub = jQuerySub(document);
+ return jQuerySub;
+ },
+
+ browser: {}
+});
+
+// Populate the class2type map
+jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) {
+ class2type[ "[object " + name + "]" ] = name.toLowerCase();
+});
+
+browserMatch = jQuery.uaMatch( userAgent );
+if ( browserMatch.browser ) {
+ jQuery.browser[ browserMatch.browser ] = true;
+ jQuery.browser.version = browserMatch.version;
+}
+
+// Deprecated, use jQuery.browser.webkit instead
+if ( jQuery.browser.webkit ) {
+ jQuery.browser.safari = true;
+}
+
+// IE doesn't match non-breaking spaces with \s
+if ( rnotwhite.test( "\xA0" ) ) {
+ trimLeft = /^[\s\xA0]+/;
+ trimRight = /[\s\xA0]+$/;
+}
+
+// All jQuery objects should point back to these
+rootjQuery = jQuery(document);
+
+// Cleanup functions for the document ready method
+if ( document.addEventListener ) {
+ DOMContentLoaded = function() {
+ document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false );
+ jQuery.ready();
+ };
+
+} else if ( document.attachEvent ) {
+ DOMContentLoaded = function() {
+ // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
+ if ( document.readyState === "complete" ) {
+ document.detachEvent( "onreadystatechange", DOMContentLoaded );
+ jQuery.ready();
+ }
+ };
+}
+
+// The DOM ready check for Internet Explorer
+function doScrollCheck() {
+ if ( jQuery.isReady ) {
+ return;
+ }
+
+ try {
+ // If IE is used, use the trick by Diego Perini
+ // http://javascript.nwbox.com/IEContentLoaded/
+ document.documentElement.doScroll("left");
+ } catch(e) {
+ setTimeout( doScrollCheck, 1 );
+ return;
+ }
+
+ // and execute any waiting functions
+ jQuery.ready();
+}
+
+return jQuery;
+
+})();
+
+
+var // Promise methods
+ promiseMethods = "done fail isResolved isRejected promise then always pipe".split( " " ),
+ // Static reference to slice
+ sliceDeferred = [].slice;
+
+jQuery.extend({
+ // Create a simple deferred (one callbacks list)
+ _Deferred: function() {
+ var // callbacks list
+ callbacks = [],
+ // stored [ context , args ]
+ fired,
+ // to avoid firing when already doing so
+ firing,
+ // flag to know if the deferred has been cancelled
+ cancelled,
+ // the deferred itself
+ deferred = {
+
+ // done( f1, f2, ...)
+ done: function() {
+ if ( !cancelled ) {
+ var args = arguments,
+ i,
+ length,
+ elem,
+ type,
+ _fired;
+ if ( fired ) {
+ _fired = fired;
+ fired = 0;
+ }
+ for ( i = 0, length = args.length; i < length; i++ ) {
+ elem = args[ i ];
+ type = jQuery.type( elem );
+ if ( type === "array" ) {
+ deferred.done.apply( deferred, elem );
+ } else if ( type === "function" ) {
+ callbacks.push( elem );
+ }
+ }
+ if ( _fired ) {
+ deferred.resolveWith( _fired[ 0 ], _fired[ 1 ] );
+ }
+ }
+ return this;
+ },
+
+ // resolve with given context and args
+ resolveWith: function( context, args ) {
+ if ( !cancelled && !fired && !firing ) {
+ // make sure args are available (#8421)
+ args = args || [];
+ firing = 1;
+ try {
+ while( callbacks[ 0 ] ) {
+ callbacks.shift().apply( context, args );
+ }
+ }
+ finally {
+ fired = [ context, args ];
+ firing = 0;
+ }
+ }
+ return this;
+ },
+
+ // resolve with this as context and given arguments
+ resolve: function() {
+ deferred.resolveWith( this, arguments );
+ return this;
+ },
+
+ // Has this deferred been resolved?
+ isResolved: function() {
+ return !!( firing || fired );
+ },
+
+ // Cancel
+ cancel: function() {
+ cancelled = 1;
+ callbacks = [];
+ return this;
+ }
+ };
+
+ return deferred;
+ },
+
    // Full fledged deferred: combines two single-callback-list deferreds,
    // one for success (done) and one for failure (fail).
    Deferred: function( func ) {
        var deferred = jQuery._Deferred(),
            failDeferred = jQuery._Deferred(),
            promise;
        // Add errorDeferred methods, then and promise
        jQuery.extend( deferred, {
            // Attach done and fail callbacks in a single call
            then: function( doneCallbacks, failCallbacks ) {
                deferred.done( doneCallbacks ).fail( failCallbacks );
                return this;
            },
            // Attach the same callbacks to both the done and fail lists
            always: function() {
                return deferred.done.apply( deferred, arguments ).fail.apply( this, arguments );
            },
            // The failure side simply delegates to the internal fail deferred
            fail: failDeferred.done,
            rejectWith: failDeferred.resolveWith,
            reject: failDeferred.resolve,
            isRejected: failDeferred.isResolved,
            // Chain into a new deferred, optionally filtering values through
            // fnDone/fnFail. If a filter returns an observable (has .promise),
            // its outcome is forwarded; any other return value resolves or
            // rejects the new deferred directly.
            pipe: function( fnDone, fnFail ) {
                return jQuery.Deferred(function( newDefer ) {
                    jQuery.each( {
                        done: [ fnDone, "resolve" ],
                        fail: [ fnFail, "reject" ]
                    }, function( handler, data ) {
                        var fn = data[ 0 ],
                            action = data[ 1 ],
                            returned;
                        if ( jQuery.isFunction( fn ) ) {
                            deferred[ handler ](function() {
                                returned = fn.apply( this, arguments );
                                if ( returned && jQuery.isFunction( returned.promise ) ) {
                                    returned.promise().then( newDefer.resolve, newDefer.reject );
                                } else {
                                    newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] );
                                }
                            });
                        } else {
                            // No filter: wire the outcome straight through
                            deferred[ handler ]( newDefer[ action ] );
                        }
                    });
                }).promise();
            },
            // Get a promise for this deferred
            // If obj is provided, the promise aspect is added to the object;
            // otherwise a plain promise object is created once and cached.
            promise: function( obj ) {
                if ( obj == null ) {
                    if ( promise ) {
                        return promise;
                    }
                    promise = obj = {};
                }
                // promiseMethods is defined earlier in this file (not in view)
                var i = promiseMethods.length;
                while( i-- ) {
                    obj[ promiseMethods[i] ] = deferred[ promiseMethods[i] ];
                }
                return obj;
            }
        });
        // Make sure only one callback list will be used:
        // resolving cancels the fail list, rejecting cancels the done list
        deferred.done( failDeferred.cancel ).fail( deferred.cancel );
        // Unexpose cancel
        delete deferred.cancel;
        // Call given func if any, with the deferred as both context and argument
        if ( func ) {
            func.call( deferred, deferred );
        }
        return deferred;
    },
+
    // Deferred helper: returns a promise resolved once every argument is
    // resolved (rejecting as soon as any one of them rejects). Plain values
    // count as already resolved.
    when: function( firstParam ) {
        var args = arguments,
            i = 0,
            length = args.length,
            count = length,
            // If a single observable (has .promise) is passed, reuse it as
            // the master deferred; otherwise create a fresh one
            deferred = length <= 1 && firstParam && jQuery.isFunction( firstParam.promise ) ?
                firstParam :
                jQuery.Deferred();
        function resolveFunc( i ) {
            return function( value ) {
                // Multi-argument resolutions are stored as arrays
                args[ i ] = arguments.length > 1 ? sliceDeferred.call( arguments, 0 ) : value;
                if ( !( --count ) ) {
                    // Strange bug in FF4:
                    // Values changed onto the arguments object sometimes end up as undefined values
                    // outside the $.when method. Cloning the object into a fresh array solves the issue
                    deferred.resolveWith( deferred, sliceDeferred.call( args, 0 ) );
                }
            };
        }
        if ( length > 1 ) {
            for( ; i < length; i++ ) {
                if ( args[ i ] && jQuery.isFunction( args[ i ].promise ) ) {
                    args[ i ].promise().then( resolveFunc(i), deferred.reject );
                } else {
                    // Non-observable argument: already "resolved"
                    --count;
                }
            }
            if ( !count ) {
                deferred.resolveWith( deferred, args );
            }
        } else if ( deferred !== firstParam ) {
            // Zero or one plain argument: resolve immediately
            deferred.resolveWith( deferred, length ? [ firstParam ] : [] );
        }
        return deferred.promise();
    }
+});
+
+
+
// Browser feature detection, run once at load time. Builds the
// jQuery.support map of booleans by creating throwaway DOM elements and
// probing how this browser handles them (IE/WebKit/Firefox quirks).
// NOTE(review): the order of operations below is load-bearing — several
// later probes reuse `div`/`input` state set up by earlier ones.
jQuery.support = (function() {

    var div = document.createElement( "div" ),
        documentElement = document.documentElement,
        all,
        a,
        select,
        opt,
        input,
        marginDiv,
        support,
        fragment,
        body,
        testElementParent,
        testElement,
        testElementStyle,
        tds,
        events,  // NOTE(review): declared but never used in this function
        eventName,
        i,
        isSupported;

    // Preliminary tests
    div.setAttribute("className", "t");
    div.innerHTML = " <link/><table></table><a href='/a' style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>";


    all = div.getElementsByTagName( "*" );
    a = div.getElementsByTagName( "a" )[ 0 ];

    // Can't get basic test support
    if ( !all || !all.length || !a ) {
        return {};
    }

    // First batch of supports tests
    select = document.createElement( "select" );
    opt = select.appendChild( document.createElement("option") );
    input = div.getElementsByTagName( "input" )[ 0 ];

    support = {
        // IE strips leading whitespace when .innerHTML is used
        leadingWhitespace: ( div.firstChild.nodeType === 3 ),

        // Make sure that tbody elements aren't automatically inserted
        // IE will insert them into empty tables
        tbody: !div.getElementsByTagName( "tbody" ).length,

        // Make sure that link elements get serialized correctly by innerHTML
        // This requires a wrapper element in IE
        htmlSerialize: !!div.getElementsByTagName( "link" ).length,

        // Get the style information from getAttribute
        // (IE uses .cssText instead)
        style: /top/.test( a.getAttribute("style") ),

        // Make sure that URLs aren't manipulated
        // (IE normalizes it by default)
        hrefNormalized: ( a.getAttribute( "href" ) === "/a" ),

        // Make sure that element opacity exists
        // (IE uses filter instead)
        // Use a regex to work around a WebKit issue. See #5145
        opacity: /^0.55$/.test( a.style.opacity ),

        // Verify style float existence
        // (IE uses styleFloat instead of cssFloat)
        cssFloat: !!a.style.cssFloat,

        // Make sure that if no value is specified for a checkbox
        // that it defaults to "on".
        // (WebKit defaults to "" instead)
        checkOn: ( input.value === "on" ),

        // Make sure that a selected-by-default option has a working selected property.
        // (WebKit defaults to false instead of true, IE too, if it's in an optgroup)
        optSelected: opt.selected,

        // Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7)
        getSetAttribute: div.className !== "t",

        // Will be defined later
        submitBubbles: true,
        changeBubbles: true,
        focusinBubbles: false,
        deleteExpando: true,
        noCloneEvent: true,
        inlineBlockNeedsLayout: false,
        shrinkWrapBlocks: false,
        reliableMarginRight: true
    };

    // Make sure checked status is properly cloned
    input.checked = true;
    support.noCloneChecked = input.cloneNode( true ).checked;

    // Make sure that the options inside disabled selects aren't marked as disabled
    // (WebKit marks them as disabled)
    select.disabled = true;
    support.optDisabled = !opt.disabled;

    // Test to see if it's possible to delete an expando from an element
    // Fails in Internet Explorer
    try {
        delete div.test;
    } catch( e ) {
        support.deleteExpando = false;
    }

    // Probe only legacy (attachEvent) event systems for clone behavior
    if ( !div.addEventListener && div.attachEvent && div.fireEvent ) {
        div.attachEvent( "onclick", function() {
            // Cloning a node shouldn't copy over any
            // bound event handlers (IE does this)
            support.noCloneEvent = false;
        });
        div.cloneNode( true ).fireEvent( "onclick" );
    }

    // Check if a radio maintains it's value
    // after being appended to the DOM
    input = document.createElement("input");
    input.value = "t";
    input.setAttribute("type", "radio");
    support.radioValue = input.value === "t";

    input.setAttribute("checked", "checked");
    div.appendChild( input );
    fragment = document.createDocumentFragment();
    fragment.appendChild( div.firstChild );

    // WebKit doesn't clone checked state correctly in fragments
    support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked;

    div.innerHTML = "";

    // Figure out if the W3C box model works as expected
    div.style.width = div.style.paddingLeft = "1px";

    body = document.getElementsByTagName( "body" )[ 0 ];
    // We use our own, invisible, body unless the body is already present
    // in which case we use a div (#9239)
    testElement = document.createElement( body ? "div" : "body" );
    testElementStyle = {
        visibility: "hidden",
        width: 0,
        height: 0,
        border: 0,
        margin: 0,
        background: "none"
    };
    if ( body ) {
        jQuery.extend( testElementStyle, {
            position: "absolute",
            left: "-1000px",
            top: "-1000px"
        });
    }
    for ( i in testElementStyle ) {
        testElement.style[ i ] = testElementStyle[ i ];
    }
    testElement.appendChild( div );
    testElementParent = body || documentElement;
    testElementParent.insertBefore( testElement, testElementParent.firstChild );

    // Check if a disconnected checkbox will retain its checked
    // value of true after appended to the DOM (IE6/7)
    support.appendChecked = input.checked;

    // Content-box model: 1px width + 1px padding-left => offsetWidth of 2
    support.boxModel = div.offsetWidth === 2;

    if ( "zoom" in div.style ) {
        // Check if natively block-level elements act like inline-block
        // elements when setting their display to 'inline' and giving
        // them layout
        // (IE < 8 does this)
        div.style.display = "inline";
        div.style.zoom = 1;
        support.inlineBlockNeedsLayout = ( div.offsetWidth === 2 );

        // Check if elements with layout shrink-wrap their children
        // (IE 6 does this)
        div.style.display = "";
        div.innerHTML = "<div style='width:4px;'></div>";
        support.shrinkWrapBlocks = ( div.offsetWidth !== 2 );
    }

    div.innerHTML = "<table><tr><td style='padding:0;border:0;display:none'></td><td>t</td></tr></table>";
    tds = div.getElementsByTagName( "td" );

    // Check if table cells still have offsetWidth/Height when they are set
    // to display:none and there are still other visible table cells in a
    // table row; if so, offsetWidth/Height are not reliable for use when
    // determining if an element has been hidden directly using
    // display:none (it is still safe to use offsets if a parent element is
    // hidden; don safety goggles and see bug #4512 for more information).
    // (only IE 8 fails this test)
    isSupported = ( tds[ 0 ].offsetHeight === 0 );

    tds[ 0 ].style.display = "";
    tds[ 1 ].style.display = "none";

    // Check if empty table cells still have offsetWidth/Height
    // (IE < 8 fail this test)
    support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 );
    div.innerHTML = "";

    // Check if div with explicit width and no margin-right incorrectly
    // gets computed margin-right based on width of container. For more
    // info see bug #3333
    // Fails in WebKit before Feb 2011 nightlies
    // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
    if ( document.defaultView && document.defaultView.getComputedStyle ) {
        marginDiv = document.createElement( "div" );
        marginDiv.style.width = "0";
        marginDiv.style.marginRight = "0";
        div.appendChild( marginDiv );
        support.reliableMarginRight =
            ( parseInt( ( document.defaultView.getComputedStyle( marginDiv, null ) || { marginRight: 0 } ).marginRight, 10 ) || 0 ) === 0;
    }

    // Remove the body element we added
    testElement.innerHTML = "";
    testElementParent.removeChild( testElement );

    // Technique from Juriy Zaytsev
    // http://thinkweb2.com/projects/prototype/detecting-event-support-without-browser-sniffing/
    // We only care about the case where non-standard event systems
    // are used, namely in IE. Short-circuiting here helps us to
    // avoid an eval call (in setAttribute) which can cause CSP
    // to go haywire. See: https://developer.mozilla.org/en/Security/CSP
    if ( div.attachEvent ) {
        for( i in {
            submit: 1,
            change: 1,
            focusin: 1
        } ) {
            eventName = "on" + i;
            isSupported = ( eventName in div );
            if ( !isSupported ) {
                div.setAttribute( eventName, "return;" );
                isSupported = ( typeof div[ eventName ] === "function" );
            }
            support[ i + "Bubbles" ] = isSupported;
        }
    }

    // Null connected elements to avoid leaks in IE
    testElement = fragment = select = opt = body = marginDiv = div = input = null;

    return support;
})();
+
// Keep track of boxModel (true when the W3C content-box model is in effect)
jQuery.boxModel = jQuery.support.boxModel;
+
+
+
+
// rbrace: detects JSON-ish strings ("{...}" or "[...]") so data-* attribute
//         values can be routed through jQuery.parseJSON (see dataAttr)
// rmultiDash: matches uppercase letters, used to turn camelCase keys into
//             hyphenated data-* attribute names
var rbrace = /^(?:\{.*\}|\[.*\])$/,
    rmultiDash = /([A-Z])/g;
+
jQuery.extend({
    // Global store for DOM-node data, keyed by each node's numeric id
    // (written onto the node under the jQuery.expando property)
    cache: {},

    // Please use with caution
    uuid: 0,

    // Unique for each copy of jQuery on the page
    // Non-digits removed to match rinlinejQuery
    expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ),

    // The following elements throw uncatchable exceptions if you
    // attempt to add expando properties to them.
    noData: {
        "embed": true,
        // Ban all objects except for Flash (which handle expandos)
        "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",
        "applet": true
    },

    // Does elem (DOM node or plain JS object) carry any real (non-internal)
    // data? DOM nodes are looked up through the global cache; plain objects
    // hold their data directly.
    hasData: function( elem ) {
        elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ];

        return !!elem && !isEmptyDataObject( elem );
    },

    // Core data getter/setter for both DOM nodes and plain objects.
    // - name undefined: returns the whole data object
    // - name a string, data undefined: getter for one key
    // - name a string, data given: setter
    // - name an object/function: shallow-copied onto the cache
    // pvt=true targets the internal (jQuery-private) side cache.
    data: function( elem, name, data, pvt /* Internal Use Only */ ) {
        if ( !jQuery.acceptData( elem ) ) {
            return;
        }

        var thisCache, ret,
            internalKey = jQuery.expando,
            getByName = typeof name === "string",

            // We have to handle DOM nodes and JS objects differently because IE6-7
            // can't GC object references properly across the DOM-JS boundary
            isNode = elem.nodeType,

            // Only DOM nodes need the global jQuery cache; JS object data is
            // attached directly to the object so GC can occur automatically
            cache = isNode ? jQuery.cache : elem,

            // Only defining an ID for JS objects if its cache already exists allows
            // the code to shortcut on the same path as a DOM node with no cache
            id = isNode ? elem[ jQuery.expando ] : elem[ jQuery.expando ] && jQuery.expando;

        // Avoid doing any more work than we need to when trying to get data on an
        // object that has no data at all
        if ( (!id || (pvt && id && (cache[ id ] && !cache[ id ][ internalKey ]))) && getByName && data === undefined ) {
            return;
        }

        if ( !id ) {
            // Only DOM nodes need a new unique ID for each element since their data
            // ends up in the global cache
            if ( isNode ) {
                elem[ jQuery.expando ] = id = ++jQuery.uuid;
            } else {
                id = jQuery.expando;
            }
        }

        if ( !cache[ id ] ) {
            cache[ id ] = {};

            // TODO: This is a hack for 1.5 ONLY. Avoids exposing jQuery
            // metadata on plain JS objects when the object is serialized using
            // JSON.stringify
            if ( !isNode ) {
                cache[ id ].toJSON = jQuery.noop;
            }
        }

        // An object can be passed to jQuery.data instead of a key/value pair; this gets
        // shallow copied over onto the existing cache
        if ( typeof name === "object" || typeof name === "function" ) {
            if ( pvt ) {
                cache[ id ][ internalKey ] = jQuery.extend(cache[ id ][ internalKey ], name);
            } else {
                cache[ id ] = jQuery.extend(cache[ id ], name);
            }
        }

        thisCache = cache[ id ];

        // Internal jQuery data is stored in a separate object inside the object's data
        // cache in order to avoid key collisions between internal data and user-defined
        // data
        if ( pvt ) {
            if ( !thisCache[ internalKey ] ) {
                thisCache[ internalKey ] = {};
            }

            thisCache = thisCache[ internalKey ];
        }

        if ( data !== undefined ) {
            // Keys are stored camelCased
            thisCache[ jQuery.camelCase( name ) ] = data;
        }

        // TODO: This is a hack for 1.5 ONLY. It will be removed in 1.6. Users should
        // not attempt to inspect the internal events object using jQuery.data, as this
        // internal data object is undocumented and subject to change.
        if ( name === "events" && !thisCache[name] ) {
            return thisCache[ internalKey ] && thisCache[ internalKey ].events;
        }

        // Check for both converted-to-camel and non-converted data property names
        // If a data property was specified
        if ( getByName ) {

            // First Try to find as-is property data
            ret = thisCache[ name ];

            // Test for null|undefined property data
            if ( ret == null ) {

                // Try to find the camelCased property
                ret = thisCache[ jQuery.camelCase( name ) ];
            }
        } else {
            ret = thisCache;
        }

        return ret;
    },

    // Remove one data entry (name given) or the whole cache entry for elem.
    // pvt=true targets the internal side cache; when the user cache becomes
    // empty the whole entry (and the node's expando) is torn down.
    removeData: function( elem, name, pvt /* Internal Use Only */ ) {
        if ( !jQuery.acceptData( elem ) ) {
            return;
        }

        var thisCache,

            // Reference to internal data cache key
            internalKey = jQuery.expando,

            isNode = elem.nodeType,

            // See jQuery.data for more information
            cache = isNode ? jQuery.cache : elem,

            // See jQuery.data for more information
            id = isNode ? elem[ jQuery.expando ] : jQuery.expando;

        // If there is already no cache entry for this object, there is no
        // purpose in continuing
        if ( !cache[ id ] ) {
            return;
        }

        if ( name ) {

            thisCache = pvt ? cache[ id ][ internalKey ] : cache[ id ];

            if ( thisCache ) {

                // Support interoperable removal of hyphenated or camelcased keys
                if ( !thisCache[ name ] ) {
                    name = jQuery.camelCase( name );
                }

                delete thisCache[ name ];

                // If there is no data left in the cache, we want to continue
                // and let the cache object itself get destroyed
                if ( !isEmptyDataObject(thisCache) ) {
                    return;
                }
            }
        }

        // See jQuery.data for more information
        if ( pvt ) {
            delete cache[ id ][ internalKey ];

            // Don't destroy the parent cache unless the internal data object
            // had been the only thing left in it
            if ( !isEmptyDataObject(cache[ id ]) ) {
                return;
            }
        }

        var internalCache = cache[ id ][ internalKey ];

        // Browsers that fail expando deletion also refuse to delete expandos on
        // the window, but it will allow it on all other JS objects; other browsers
        // don't care
        // Ensure that `cache` is not a window object #10080
        if ( jQuery.support.deleteExpando || !cache.setInterval ) {
            delete cache[ id ];
        } else {
            cache[ id ] = null;
        }

        // We destroyed the entire user cache at once because it's faster than
        // iterating through each key, but we need to continue to persist internal
        // data if it existed
        if ( internalCache ) {
            cache[ id ] = {};
            // TODO: This is a hack for 1.5 ONLY. Avoids exposing jQuery
            // metadata on plain JS objects when the object is serialized using
            // JSON.stringify
            if ( !isNode ) {
                cache[ id ].toJSON = jQuery.noop;
            }

            cache[ id ][ internalKey ] = internalCache;

        // Otherwise, we need to eliminate the expando on the node to avoid
        // false lookups in the cache for entries that no longer exist
        } else if ( isNode ) {
            // IE does not allow us to delete expando properties from nodes,
            // nor does it have a removeAttribute function on Document nodes;
            // we must handle all of these cases
            if ( jQuery.support.deleteExpando ) {
                delete elem[ jQuery.expando ];
            } else if ( elem.removeAttribute ) {
                elem.removeAttribute( jQuery.expando );
            } else {
                elem[ jQuery.expando ] = null;
            }
        }
    },

    // For internal use only: same as jQuery.data but on the private cache.
    _data: function( elem, name, data ) {
        return jQuery.data( elem, name, data, true );
    },

    // A method for determining if a DOM node can handle the data expando
    // (embed/applet never can; object only when it is the Flash clsid)
    acceptData: function( elem ) {
        if ( elem.nodeName ) {
            var match = jQuery.noData[ elem.nodeName.toLowerCase() ];

            if ( match ) {
                return !(match === true || elem.getAttribute("classid") !== match);
            }
        }

        return true;
    }
});
+
jQuery.fn.extend({
    // .data(): read/write data on the matched elements.
    // - no args: return the full data object of the first element, pulling in
    //   any HTML5 data-* attributes along the way
    // - object arg: copy it onto every element's data
    // - key[, value]: get/set one entry, firing getData/setData/changeData
    //   helper events (with optional ".namespace" suffix) around the operation
    data: function( key, value ) {
        var data = null;

        if ( typeof key === "undefined" ) {
            if ( this.length ) {
                data = jQuery.data( this[0] );

                if ( this[0].nodeType === 1 ) {
                    // Fold every data-* attribute into the returned object
                    var attr = this[0].attributes, name;
                    for ( var i = 0, l = attr.length; i < l; i++ ) {
                        name = attr[i].name;

                        if ( name.indexOf( "data-" ) === 0 ) {
                            name = jQuery.camelCase( name.substring(5) );

                            dataAttr( this[0], name, data[ name ] );
                        }
                    }
                }
            }

            return data;

        } else if ( typeof key === "object" ) {
            // Bulk set on every matched element
            return this.each(function() {
                jQuery.data( this, key );
            });
        }

        // Split off an optional namespace: "foo.bar" -> key "foo", ns ".bar"
        var parts = key.split(".");
        parts[1] = parts[1] ? "." + parts[1] : "";

        if ( value === undefined ) {
            // Let handlers bound to "getData<ns>!" supply a value first
            data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]);

            // Try to fetch any internally stored data first
            if ( data === undefined && this.length ) {
                data = jQuery.data( this[0], key );
                data = dataAttr( this[0], key, data );
            }

            // Namespaced miss falls back to the un-namespaced key
            return data === undefined && parts[1] ?
                this.data( parts[0] ) :
                data;

        } else {
            return this.each(function() {
                var $this = jQuery( this ),
                    args = [ parts[0], value ];

                $this.triggerHandler( "setData" + parts[1] + "!", args );
                jQuery.data( this, key, value );
                $this.triggerHandler( "changeData" + parts[1] + "!", args );
            });
        }
    },

    // Remove a data entry from every matched element
    removeData: function( key ) {
        return this.each(function() {
            jQuery.removeData( this, key );
        });
    }
});
+
// Fallback for .data() reads: when nothing was found internally, pull the
// value from the matching HTML5 data-* attribute, coerce it ("true"/"false"/
// "null"/numbers/JSON) and cache it so later reads skip the attribute.
function dataAttr( elem, key, data ) {
    if ( data === undefined && elem.nodeType === 1 ) {

        // camelCase key -> hyphenated attribute name
        var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(),
            raw = elem.getAttribute( name );

        if ( typeof raw === "string" ) {
            try {
                raw = raw === "true" ? true :
                    raw === "false" ? false :
                    raw === "null" ? null :
                    !jQuery.isNaN( raw ) ? parseFloat( raw ) :
                    rbrace.test( raw ) ? jQuery.parseJSON( raw ) :
                    raw;
            } catch( e ) {}

            // Make sure we set the data so it isn't changed later
            jQuery.data( elem, key, raw );
            data = raw;
        }
        // A missing attribute leaves data as undefined
    }

    return data;
}
+
// TODO: This is a hack for 1.5 ONLY to allow objects with a single toJSON
// property to be considered empty objects; this property always exists in
// order to make sure JSON.stringify does not expose internal metadata
function isEmptyDataObject( obj ) {
    var name;
    for ( name in obj ) {
        // The ever-present toJSON stub does not count as real data
        if ( name !== "toJSON" ) {
            return false;
        }
    }

    return true;
}
+
+
+
+
// Resolve (and clean up) the "<type>defer" deferred stored on elem once both
// the queue and the mark counters of that type are empty. `src` names which
// of the two ("queue" or "mark") triggered this check, so its own emptiness
// is assumed rather than re-read.
function handleQueueMarkDefer( elem, type, src ) {
    var deferDataKey = type + "defer",
        queueDataKey = type + "queue",
        markDataKey = type + "mark",
        defer = jQuery.data( elem, deferDataKey, undefined, true );
    if ( defer &&
        ( src === "queue" || !jQuery.data( elem, queueDataKey, undefined, true ) ) &&
        ( src === "mark" || !jQuery.data( elem, markDataKey, undefined, true ) ) ) {
        // Give room for hard-coded callbacks to fire first
        // and eventually mark/queue something else on the element
        setTimeout( function() {
            // Re-check: only resolve if still nothing is queued or marked
            if ( !jQuery.data( elem, queueDataKey, undefined, true ) &&
                !jQuery.data( elem, markDataKey, undefined, true ) ) {
                jQuery.removeData( elem, deferDataKey, true );
                defer.resolve();
            }
        }, 0 );
    }
}
+
+jQuery.extend({
+
+ _mark: function( elem, type ) {
+ if ( elem ) {
+ type = (type || "fx") + "mark";
+ jQuery.data( elem, type, (jQuery.data(elem,type,undefined,true) || 0) + 1, true );
+ }
+ },
+
+ _unmark: function( force, elem, type ) {
+ if ( force !== true ) {
+ type = elem;
+ elem = force;
+ force = false;
+ }
+ if ( elem ) {
+ type = type || "fx";
+ var key = type + "mark",
+ count = force ? 0 : ( (jQuery.data( elem, key, undefined, true) || 1 ) - 1 );
+ if ( count ) {
+ jQuery.data( elem, key, count, true );
+ } else {
+ jQuery.removeData( elem, key, true );
+ handleQueueMarkDefer( elem, type, "mark" );
+ }
+ }
+ },
+
+ queue: function( elem, type, data ) {
+ if ( elem ) {
+ type = (type || "fx") + "queue";
+ var q = jQuery.data( elem, type, undefined, true );
+ // Speed up dequeue by getting out quickly if this is just a lookup
+ if ( data ) {
+ if ( !q || jQuery.isArray(data) ) {
+ q = jQuery.data( elem, type, jQuery.makeArray(data), true );
+ } else {
+ q.push( data );
+ }
+ }
+ return q || [];
+ }
+ },
+
    // Run the next function on elem's queue of the given type, maintaining
    // the "inprogress" sentinel that keeps the fx queue from double-running.
    dequeue: function( elem, type ) {
        type = type || "fx";

        var queue = jQuery.queue( elem, type ),
            fn = queue.shift(),
            defer;  // NOTE(review): declared but unused here

        // If the fx queue is dequeued, always remove the progress sentinel
        if ( fn === "inprogress" ) {
            fn = queue.shift();
        }

        if ( fn ) {
            // Add a progress sentinel to prevent the fx queue from being
            // automatically dequeued
            if ( type === "fx" ) {
                queue.unshift("inprogress");
            }

            // The queued function receives a "next" callback that dequeues again
            fn.call(elem, function() {
                jQuery.dequeue(elem, type);
            });
        }

        if ( !queue.length ) {
            // Queue drained: drop the cache entry and maybe resolve .promise()
            jQuery.removeData( elem, type + "queue", true );
            handleQueueMarkDefer( elem, type, "queue" );
        }
    }
+});
+
+jQuery.fn.extend({
+ queue: function( type, data ) {
+ if ( typeof type !== "string" ) {
+ data = type;
+ type = "fx";
+ }
+
+ if ( data === undefined ) {
+ return jQuery.queue( this[0], type );
+ }
+ return this.each(function() {
+ var queue = jQuery.queue( this, type, data );
+
+ if ( type === "fx" && queue[0] !== "inprogress" ) {
+ jQuery.dequeue( this, type );
+ }
+ });
+ },
    // Execute the next queued function (of the given type) on every
    // matched element.
    dequeue: function( type ) {
        return this.each(function() {
            jQuery.dequeue( this, type );
        });
    },
+ // Based off of the plugin by Clint Helfers, with permission.
+ // http://blindsignals.com/index.php/2009/07/jquery-delay/
+ delay: function( time, type ) {
+ time = jQuery.fx ? jQuery.fx.speeds[time] || time : time;
+ type = type || "fx";
+
+ return this.queue( type, function() {
+ var elem = this;
+ setTimeout(function() {
+ jQuery.dequeue( elem, type );
+ }, time );
+ });
+ },
    // Drop all pending functions from the queue (fx by default) by
    // replacing it with an empty array.
    clearQueue: function( type ) {
        return this.queue( type || "fx", [] );
    },
    // Get a promise resolved when queues of a certain type
    // are emptied (fx is the type by default)
    promise: function( type, object ) {
        if ( typeof type !== "string" ) {
            object = type;
            type = undefined;
        }
        type = type || "fx";
        var defer = jQuery.Deferred(),
            elements = this,
            i = elements.length,
            // Start at 1 so the deferred can't fire before the scan below ends
            count = 1,
            deferDataKey = type + "defer",
            queueDataKey = type + "queue",
            markDataKey = type + "mark",
            tmp;
        function resolve() {
            if ( !( --count ) ) {
                defer.resolveWith( elements, [ elements ] );
            }
        }
        while( i-- ) {
            // For each element with a pending queue or mark, fetch — or
            // lazily create — its "<type>defer" deferred and wait on it.
            // Note the assignment inside the condition: tmp ends up holding
            // either the existing defer or the freshly created one.
            if (( tmp = jQuery.data( elements[ i ], deferDataKey, undefined, true ) ||
                    ( jQuery.data( elements[ i ], queueDataKey, undefined, true ) ||
                        jQuery.data( elements[ i ], markDataKey, undefined, true ) ) &&
                    jQuery.data( elements[ i ], deferDataKey, jQuery._Deferred(), true ) )) {
                count++;
                tmp.done( resolve );
            }
        }
        // Drop the initial count; resolves immediately if nothing was pending
        resolve();
        return defer.promise();
    }
+});
+
+
+
+
// Class/attribute helpers:
// rclass — whitespace chars normalized to spaces inside className strings
// rspace — splitter for space-separated class lists
// rreturn — carriage returns stripped from form values
// rtype — button/input, used by the attrHooks "type" setter below
// rfocusable/rclickable — element-name tests (presumably used by prop/tabIndex
//   handling elsewhere in this file — not visible here)
// rboolean — the boolean content attributes (checked, disabled, ...)
var rclass = /[\n\t\r]/g,
    rspace = /\s+/,
    rreturn = /\r/g,
    rtype = /^(?:button|input)$/i,
    rfocusable = /^(?:button|input|object|select|textarea)$/i,
    rclickable = /^a(?:rea)?$/i,
    rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,
    nodeHook, boolHook;
+
+jQuery.fn.extend({
    // Get/set attributes on the matched set; delegates the get-first/set-all
    // bookkeeping to jQuery.access (defined elsewhere in this file).
    attr: function( name, value ) {
        return jQuery.access( this, name, value, true, jQuery.attr );
    },
+
    // Remove the named attribute from every matched element.
    removeAttr: function( name ) {
        return this.each(function() {
            jQuery.removeAttr( this, name );
        });
    },
+
    // Get/set DOM properties (as opposed to attributes) on the matched set.
    prop: function( name, value ) {
        return jQuery.access( this, name, value, true, jQuery.prop );
    },
+
+ removeProp: function( name ) {
+ name = jQuery.propFix[ name ] || name;
+ return this.each(function() {
+ // try/catch handles cases where IE balks (such as removing a property on window)
+ try {
+ this[ name ] = undefined;
+ delete this[ name ];
+ } catch( e ) {}
+ });
+ },
+
+ addClass: function( value ) {
+ var classNames, i, l, elem,
+ setClass, c, cl;
+
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function( j ) {
+ jQuery( this ).addClass( value.call(this, j, this.className) );
+ });
+ }
+
+ if ( value && typeof value === "string" ) {
+ classNames = value.split( rspace );
+
+ for ( i = 0, l = this.length; i < l; i++ ) {
+ elem = this[ i ];
+
+ if ( elem.nodeType === 1 ) {
+ if ( !elem.className && classNames.length === 1 ) {
+ elem.className = value;
+
+ } else {
+ setClass = " " + elem.className + " ";
+
+ for ( c = 0, cl = classNames.length; c < cl; c++ ) {
+ if ( !~setClass.indexOf( " " + classNames[ c ] + " " ) ) {
+ setClass += classNames[ c ] + " ";
+ }
+ }
+ elem.className = jQuery.trim( setClass );
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
+ removeClass: function( value ) {
+ var classNames, i, l, elem, className, c, cl;
+
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function( j ) {
+ jQuery( this ).removeClass( value.call(this, j, this.className) );
+ });
+ }
+
+ if ( (value && typeof value === "string") || value === undefined ) {
+ classNames = (value || "").split( rspace );
+
+ for ( i = 0, l = this.length; i < l; i++ ) {
+ elem = this[ i ];
+
+ if ( elem.nodeType === 1 && elem.className ) {
+ if ( value ) {
+ className = (" " + elem.className + " ").replace( rclass, " " );
+ for ( c = 0, cl = classNames.length; c < cl; c++ ) {
+ className = className.replace(" " + classNames[ c ] + " ", " ");
+ }
+ elem.className = jQuery.trim( className );
+
+ } else {
+ elem.className = "";
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
    // Toggle class names on every matched element. With a string, each
    // space-separated name is toggled individually (stateVal forces on/off);
    // with no argument or a boolean, the entire className is toggled,
    // stashing/restoring it via the private "__className__" data entry.
    toggleClass: function( value, stateVal ) {
        var type = typeof value,
            isBool = typeof stateVal === "boolean";

        if ( jQuery.isFunction( value ) ) {
            return this.each(function( i ) {
                jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal );
            });
        }

        return this.each(function() {
            if ( type === "string" ) {
                // toggle individual class names
                var className,
                    i = 0,
                    self = jQuery( this ),
                    state = stateVal,
                    classNames = value.split( rspace );

                while ( (className = classNames[ i++ ]) ) {
                    // check each className given, space separated list
                    state = isBool ? state : !self.hasClass( className );
                    self[ state ? "addClass" : "removeClass" ]( className );
                }

            } else if ( type === "undefined" || type === "boolean" ) {
                if ( this.className ) {
                    // store className if set
                    jQuery._data( this, "__className__", this.className );
                }

                // toggle whole className
                // NOTE(review): this parses as
                //   (this.className || value === false) ? "" : (stored || "")
                // i.e. clear when a className is present or when value is
                // false, otherwise restore the stored one. The precedence is
                // easy to misread but appears intentional here.
                this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || "";
            }
        });
    },
+
+ hasClass: function( selector ) {
+ var className = " " + selector + " ";
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) > -1 ) {
+ return true;
+ }
+ }
+
+ return false;
+ },
+
    // .val(): with no argument, get the value of the first element; with an
    // argument, set the value of every element. Behavior is customizable per
    // element type via jQuery.valHooks (get may return undefined / set may
    // return undefined to fall through to the default handling).
    val: function( value ) {
        var hooks, ret,
            elem = this[0];

        if ( !arguments.length ) {
            if ( elem ) {
                hooks = jQuery.valHooks[ elem.nodeName.toLowerCase() ] || jQuery.valHooks[ elem.type ];

                if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) {
                    return ret;
                }

                ret = elem.value;

                return typeof ret === "string" ?
                    // handle most common string cases
                    ret.replace(rreturn, "") :
                    // handle cases where value is null/undef or number
                    ret == null ? "" : ret;
            }

            // Empty set: the getter returns undefined
            return undefined;
        }

        var isFunction = jQuery.isFunction( value );

        return this.each(function( i ) {
            var self = jQuery(this), val;

            if ( this.nodeType !== 1 ) {
                return;
            }

            // Functional value receives the index and the current value
            if ( isFunction ) {
                val = value.call( this, i, self.val() );
            } else {
                val = value;
            }

            // Treat null/undefined as ""; convert numbers to string
            if ( val == null ) {
                val = "";
            } else if ( typeof val === "number" ) {
                val += "";
            } else if ( jQuery.isArray( val ) ) {
                val = jQuery.map(val, function ( value ) {
                    return value == null ? "" : value + "";
                });
            }

            hooks = jQuery.valHooks[ this.nodeName.toLowerCase() ] || jQuery.valHooks[ this.type ];

            // If set returns undefined, fall back to normal setting
            if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) {
                this.value = val;
            }
        });
    }
+});
+
+jQuery.extend({
    // Per-element-type value getters/setters used by .val() above.
    valHooks: {
        option: {
            get: function( elem ) {
                // attributes.value is undefined in Blackberry 4.7 but
                // uses .value. See #6932
                var val = elem.attributes.value;
                // No value attribute: fall back to the option's text
                return !val || val.specified ? elem.value : elem.text;
            }
        },
        select: {
            // Single selects return the selected option's value (or null when
            // nothing is selected); multi-selects return an array of values.
            get: function( elem ) {
                var value,
                    index = elem.selectedIndex,
                    values = [],
                    options = elem.options,
                    one = elem.type === "select-one";

                // Nothing was selected
                if ( index < 0 ) {
                    return null;
                }

                // Loop through all the selected options
                // (for single selects, only the selected index is visited)
                for ( var i = one ? index : 0, max = one ? index + 1 : options.length; i < max; i++ ) {
                    var option = options[ i ];

                    // Don't return options that are disabled or in a disabled optgroup
                    if ( option.selected && (jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null) &&
                            (!option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" )) ) {

                        // Get the specific value for the option
                        value = jQuery( option ).val();

                        // We don't need an array for one selects
                        if ( one ) {
                            return value;
                        }

                        // Multi-Selects return an array
                        values.push( value );
                    }
                }

                // Fixes Bug #2551 -- select.val() broken in IE after form.reset()
                if ( one && !values.length && options.length ) {
                    return jQuery( options[ index ] ).val();
                }

                return values;
            },

            // Select every option whose value appears in `value`; deselect
            // the rest (selectedIndex -1 when nothing matches).
            set: function( elem, value ) {
                var values = jQuery.makeArray( value );

                jQuery(elem).find("option").each(function() {
                    this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0;
                });

                if ( !values.length ) {
                    elem.selectedIndex = -1;
                }
                return values;
            }
        }
    },
+
    // Names that, when passed to jQuery.attr with `pass` set, are routed to
    // the jQuery method of the same name (e.g. attr("val", x) -> .val(x))
    // instead of being written as attributes.
    attrFn: {
        val: true,
        css: true,
        html: true,
        text: true,
        data: true,
        width: true,
        height: true,
        offset: true
    },
+
    // Attribute-name normalizations applied before hook lookup.
    attrFix: {
        // Always normalize to ensure hook usage
        tabindex: "tabIndex"
    },
+
+ attr: function( elem, name, value, pass ) {
+ var nType = elem.nodeType;
+
+ // don't get/set attributes on text, comment and attribute nodes
+ if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
+ return undefined;
+ }
+
+ if ( pass && name in jQuery.attrFn ) {
+ return jQuery( elem )[ name ]( value );
+ }
+
+ // Fallback to prop when attributes are not supported
+ if ( !("getAttribute" in elem) ) {
+ return jQuery.prop( elem, name, value );
+ }
+
+ var ret, hooks,
+ notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
+
+ // Normalize the name if needed
+ if ( notxml ) {
+ name = jQuery.attrFix[ name ] || name;
+
+ hooks = jQuery.attrHooks[ name ];
+
+ if ( !hooks ) {
+ // Use boolHook for boolean attributes
+ if ( rboolean.test( name ) ) {
+ hooks = boolHook;
+
+ // Use nodeHook if available( IE6/7 )
+ } else if ( nodeHook ) {
+ hooks = nodeHook;
+ }
+ }
+ }
+
+ if ( value !== undefined ) {
+
+ if ( value === null ) {
+ jQuery.removeAttr( elem, name );
+ return undefined;
+
+ } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) {
+ return ret;
+
+ } else {
+ elem.setAttribute( name, "" + value );
+ return value;
+ }
+
+ } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) {
+ return ret;
+
+ } else {
+
+ ret = elem.getAttribute( name );
+
+ // Non-existent attributes return null, we normalize to undefined
+ return ret === null ?
+ undefined :
+ ret;
+ }
+ },
+
+ removeAttr: function( elem, name ) {
+ var propName;
+ if ( elem.nodeType === 1 ) {
+ name = jQuery.attrFix[ name ] || name;
+
+ jQuery.attr( elem, name, "" );
+ elem.removeAttribute( name );
+
+ // Set corresponding property to false for boolean attributes
+ if ( rboolean.test( name ) && (propName = jQuery.propFix[ name ] || name) in elem ) {
+ elem[ propName ] = false;
+ }
+ }
+ },
+
+ attrHooks: {
+ type: {
+ set: function( elem, value ) {
+ // We can't allow the type property to be changed (since it causes problems in IE)
+ if ( rtype.test( elem.nodeName ) && elem.parentNode ) {
+ jQuery.error( "type property can't be changed" );
+ } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) {
+ // Setting the type on a radio button after the value resets the value in IE6-9
+ // Reset value to it's default in case type is set after value
+ // This is for element creation
+ var val = elem.value;
+ elem.setAttribute( "type", value );
+ if ( val ) {
+ elem.value = val;
+ }
+ return value;
+ }
+ }
+ },
+ // Use the value property for back compat
+ // Use the nodeHook for button elements in IE6/7 (#1954)
+ value: {
+ get: function( elem, name ) {
+ if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
+ return nodeHook.get( elem, name );
+ }
+ return name in elem ?
+ elem.value :
+ null;
+ },
+ set: function( elem, value, name ) {
+ if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
+ return nodeHook.set( elem, value, name );
+ }
+ // Does not return so that setAttribute is also used
+ elem.value = value;
+ }
+ }
+ },
+
+ propFix: {
+ tabindex: "tabIndex",
+ readonly: "readOnly",
+ "for": "htmlFor",
+ "class": "className",
+ maxlength: "maxLength",
+ cellspacing: "cellSpacing",
+ cellpadding: "cellPadding",
+ rowspan: "rowSpan",
+ colspan: "colSpan",
+ usemap: "useMap",
+ frameborder: "frameBorder",
+ contenteditable: "contentEditable"
+ },
+
+ prop: function( elem, name, value ) {
+ var nType = elem.nodeType;
+
+ // don't get/set properties on text, comment and attribute nodes
+ if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
+ return undefined;
+ }
+
+ var ret, hooks,
+ notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
+
+ if ( notxml ) {
+ // Fix name and attach hooks
+ name = jQuery.propFix[ name ] || name;
+ hooks = jQuery.propHooks[ name ];
+ }
+
+ if ( value !== undefined ) {
+ if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) {
+ return ret;
+
+ } else {
+ return (elem[ name ] = value);
+ }
+
+ } else {
+ if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) {
+ return ret;
+
+ } else {
+ return elem[ name ];
+ }
+ }
+ },
+
+ propHooks: {
+ tabIndex: {
+ get: function( elem ) {
+ // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set
+ // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
+ var attributeNode = elem.getAttributeNode("tabindex");
+
+ return attributeNode && attributeNode.specified ?
+ parseInt( attributeNode.value, 10 ) :
+ rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ?
+ 0 :
+ undefined;
+ }
+ }
+ }
+});
+
+// Add the tabindex propHook to attrHooks for back-compat
+jQuery.attrHooks.tabIndex = jQuery.propHooks.tabIndex;
+
+// Hook for boolean attributes
+boolHook = {
+ get: function( elem, name ) {
+ // Align boolean attributes with corresponding properties
+ // Fall back to attribute presence where some booleans are not supported
+ var attrNode;
+ return jQuery.prop( elem, name ) === true || ( attrNode = elem.getAttributeNode( name ) ) && attrNode.nodeValue !== false ?
+ name.toLowerCase() :
+ undefined;
+ },
+ set: function( elem, value, name ) {
+ var propName;
+ if ( value === false ) {
+ // Remove boolean attributes when set to false
+ jQuery.removeAttr( elem, name );
+ } else {
+ // value is true since we know at this point it's type boolean and not false
+ // Set boolean attributes to the same name and set the DOM property
+ propName = jQuery.propFix[ name ] || name;
+ if ( propName in elem ) {
+ // Only set the IDL specifically if it already exists on the element
+ elem[ propName ] = true;
+ }
+
+ elem.setAttribute( name, name.toLowerCase() );
+ }
+ return name;
+ }
+};
+
+// IE6/7 do not support getting/setting some attributes with get/setAttribute
+if ( !jQuery.support.getSetAttribute ) {
+
+ // Use this for any attribute in IE6/7
+ // This fixes almost every IE6/7 issue
+ nodeHook = jQuery.valHooks.button = {
+ get: function( elem, name ) {
+ var ret;
+ ret = elem.getAttributeNode( name );
+ // Return undefined if nodeValue is empty string
+ return ret && ret.nodeValue !== "" ?
+ ret.nodeValue :
+ undefined;
+ },
+ set: function( elem, value, name ) {
+ // Set the existing or create a new attribute node
+ var ret = elem.getAttributeNode( name );
+ if ( !ret ) {
+ ret = document.createAttribute( name );
+ elem.setAttributeNode( ret );
+ }
+ return (ret.nodeValue = value + "");
+ }
+ };
+
+ // Set width and height to auto instead of 0 on empty string( Bug #8150 )
+ // This is for removals
+ jQuery.each([ "width", "height" ], function( i, name ) {
+ jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
+ set: function( elem, value ) {
+ if ( value === "" ) {
+ elem.setAttribute( name, "auto" );
+ return value;
+ }
+ }
+ });
+ });
+}
+
+
+// Some attributes require a special call on IE
+if ( !jQuery.support.hrefNormalized ) {
+ jQuery.each([ "href", "src", "width", "height" ], function( i, name ) {
+ jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
+ get: function( elem ) {
+ var ret = elem.getAttribute( name, 2 );
+ return ret === null ? undefined : ret;
+ }
+ });
+ });
+}
+
+if ( !jQuery.support.style ) {
+ jQuery.attrHooks.style = {
+ get: function( elem ) {
+ // Return undefined in the case of empty string
+ // Normalize to lowercase since IE uppercases css property names
+ return elem.style.cssText.toLowerCase() || undefined;
+ },
+ set: function( elem, value ) {
+ return (elem.style.cssText = "" + value);
+ }
+ };
+}
+
+// Safari mis-reports the default selected property of an option
+// Accessing the parent's selectedIndex property fixes it
+if ( !jQuery.support.optSelected ) {
+ jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, {
+ get: function( elem ) {
+ var parent = elem.parentNode;
+
+ if ( parent ) {
+ parent.selectedIndex;
+
+ // Make sure that it also works with optgroups, see #5701
+ if ( parent.parentNode ) {
+ parent.parentNode.selectedIndex;
+ }
+ }
+ return null;
+ }
+ });
+}
+
+// Radios and checkboxes getter/setter
+if ( !jQuery.support.checkOn ) {
+ jQuery.each([ "radio", "checkbox" ], function() {
+ jQuery.valHooks[ this ] = {
+ get: function( elem ) {
+ // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified
+ return elem.getAttribute("value") === null ? "on" : elem.value;
+ }
+ };
+ });
+}
+jQuery.each([ "radio", "checkbox" ], function() {
+ jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], {
+ set: function( elem, value ) {
+ if ( jQuery.isArray( value ) ) {
+ return (elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0);
+ }
+ }
+ });
+});
+
+
+
+
+var rnamespaces = /\.(.*)$/,
+ rformElems = /^(?:textarea|input|select)$/i,
+ rperiod = /\./g,
+ rspaces = / /g,
+ rescape = /[^\w\s.|`]/g,
+ fcleanup = function( nm ) {
+ return nm.replace(rescape, "\\$&");
+ };
+
+/*
+ * A number of helper functions used for managing events.
+ * Many of the ideas behind this code originated from
+ * Dean Edwards' addEvent library.
+ */
+jQuery.event = {
+
+ // Bind an event to an element
+ // Original by Dean Edwards
+ add: function( elem, types, handler, data ) {
+ if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return;
+ }
+
+ if ( handler === false ) {
+ handler = returnFalse;
+ } else if ( !handler ) {
+ // Fixes bug #7229. Fix recommended by jdalton
+ return;
+ }
+
+ var handleObjIn, handleObj;
+
+ if ( handler.handler ) {
+ handleObjIn = handler;
+ handler = handleObjIn.handler;
+ }
+
+ // Make sure that the function being executed has a unique ID
+ if ( !handler.guid ) {
+ handler.guid = jQuery.guid++;
+ }
+
+ // Init the element's event structure
+ var elemData = jQuery._data( elem );
+
+ // If no elemData is found then we must be trying to bind to one of the
+ // banned noData elements
+ if ( !elemData ) {
+ return;
+ }
+
+ var events = elemData.events,
+ eventHandle = elemData.handle;
+
+ if ( !events ) {
+ elemData.events = events = {};
+ }
+
+ if ( !eventHandle ) {
+ elemData.handle = eventHandle = function( e ) {
+ // Discard the second event of a jQuery.event.trigger() and
+ // when an event is called after a page has unloaded
+ return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ?
+ jQuery.event.handle.apply( eventHandle.elem, arguments ) :
+ undefined;
+ };
+ }
+
+ // Add elem as a property of the handle function
+ // This is to prevent a memory leak with non-native events in IE.
+ eventHandle.elem = elem;
+
+ // Handle multiple events separated by a space
+ // jQuery(...).bind("mouseover mouseout", fn);
+ types = types.split(" ");
+
+ var type, i = 0, namespaces;
+
+ while ( (type = types[ i++ ]) ) {
+ handleObj = handleObjIn ?
+ jQuery.extend({}, handleObjIn) :
+ { handler: handler, data: data };
+
+ // Namespaced event handlers
+ if ( type.indexOf(".") > -1 ) {
+ namespaces = type.split(".");
+ type = namespaces.shift();
+ handleObj.namespace = namespaces.slice(0).sort().join(".");
+
+ } else {
+ namespaces = [];
+ handleObj.namespace = "";
+ }
+
+ handleObj.type = type;
+ if ( !handleObj.guid ) {
+ handleObj.guid = handler.guid;
+ }
+
+ // Get the current list of functions bound to this event
+ var handlers = events[ type ],
+ special = jQuery.event.special[ type ] || {};
+
+ // Init the event handler queue
+ if ( !handlers ) {
+ handlers = events[ type ] = [];
+
+ // Check for a special event handler
+ // Only use addEventListener/attachEvent if the special
+ // events handler returns false
+ if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
+ // Bind the global event handler to the element
+ if ( elem.addEventListener ) {
+ elem.addEventListener( type, eventHandle, false );
+
+ } else if ( elem.attachEvent ) {
+ elem.attachEvent( "on" + type, eventHandle );
+ }
+ }
+ }
+
+ if ( special.add ) {
+ special.add.call( elem, handleObj );
+
+ if ( !handleObj.handler.guid ) {
+ handleObj.handler.guid = handler.guid;
+ }
+ }
+
+ // Add the function to the element's handler list
+ handlers.push( handleObj );
+
+ // Keep track of which events have been used, for event optimization
+ jQuery.event.global[ type ] = true;
+ }
+
+ // Nullify elem to prevent memory leaks in IE
+ elem = null;
+ },
+
+ global: {},
+
+ // Detach an event or set of events from an element
+ remove: function( elem, types, handler, pos ) {
+ // don't do events on text and comment nodes
+ if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return;
+ }
+
+ if ( handler === false ) {
+ handler = returnFalse;
+ }
+
+ var ret, type, fn, j, i = 0, all, namespaces, namespace, special, eventType, handleObj, origType,
+ elemData = jQuery.hasData( elem ) && jQuery._data( elem ),
+ events = elemData && elemData.events;
+
+ if ( !elemData || !events ) {
+ return;
+ }
+
+ // types is actually an event object here
+ if ( types && types.type ) {
+ handler = types.handler;
+ types = types.type;
+ }
+
+ // Unbind all events for the element
+ if ( !types || typeof types === "string" && types.charAt(0) === "." ) {
+ types = types || "";
+
+ for ( type in events ) {
+ jQuery.event.remove( elem, type + types );
+ }
+
+ return;
+ }
+
+ // Handle multiple events separated by a space
+ // jQuery(...).unbind("mouseover mouseout", fn);
+ types = types.split(" ");
+
+ while ( (type = types[ i++ ]) ) {
+ origType = type;
+ handleObj = null;
+ all = type.indexOf(".") < 0;
+ namespaces = [];
+
+ if ( !all ) {
+ // Namespaced event handlers
+ namespaces = type.split(".");
+ type = namespaces.shift();
+
+ namespace = new RegExp("(^|\\.)" +
+ jQuery.map( namespaces.slice(0).sort(), fcleanup ).join("\\.(?:.*\\.)?") + "(\\.|$)");
+ }
+
+ eventType = events[ type ];
+
+ if ( !eventType ) {
+ continue;
+ }
+
+ if ( !handler ) {
+ for ( j = 0; j < eventType.length; j++ ) {
+ handleObj = eventType[ j ];
+
+ if ( all || namespace.test( handleObj.namespace ) ) {
+ jQuery.event.remove( elem, origType, handleObj.handler, j );
+ eventType.splice( j--, 1 );
+ }
+ }
+
+ continue;
+ }
+
+ special = jQuery.event.special[ type ] || {};
+
+ for ( j = pos || 0; j < eventType.length; j++ ) {
+ handleObj = eventType[ j ];
+
+ if ( handler.guid === handleObj.guid ) {
+ // remove the given handler for the given type
+ if ( all || namespace.test( handleObj.namespace ) ) {
+ if ( pos == null ) {
+ eventType.splice( j--, 1 );
+ }
+
+ if ( special.remove ) {
+ special.remove.call( elem, handleObj );
+ }
+ }
+
+ if ( pos != null ) {
+ break;
+ }
+ }
+ }
+
+ // remove generic event handler if no more handlers exist
+ if ( eventType.length === 0 || pos != null && eventType.length === 1 ) {
+ if ( !special.teardown || special.teardown.call( elem, namespaces ) === false ) {
+ jQuery.removeEvent( elem, type, elemData.handle );
+ }
+
+ ret = null;
+ delete events[ type ];
+ }
+ }
+
+ // Remove the expando if it's no longer used
+ if ( jQuery.isEmptyObject( events ) ) {
+ var handle = elemData.handle;
+ if ( handle ) {
+ handle.elem = null;
+ }
+
+ delete elemData.events;
+ delete elemData.handle;
+
+ if ( jQuery.isEmptyObject( elemData ) ) {
+ jQuery.removeData( elem, undefined, true );
+ }
+ }
+ },
+
+ // Events that are safe to short-circuit if no handlers are attached.
+ // Native DOM events should not be added, they may have inline handlers.
+ customEvent: {
+ "getData": true,
+ "setData": true,
+ "changeData": true
+ },
+
+ trigger: function( event, data, elem, onlyHandlers ) {
+ // Event object or event type
+ var type = event.type || event,
+ namespaces = [],
+ exclusive;
+
+ if ( type.indexOf("!") >= 0 ) {
+ // Exclusive events trigger only for the exact event (no namespaces)
+ type = type.slice(0, -1);
+ exclusive = true;
+ }
+
+ if ( type.indexOf(".") >= 0 ) {
+ // Namespaced trigger; create a regexp to match event type in handle()
+ namespaces = type.split(".");
+ type = namespaces.shift();
+ namespaces.sort();
+ }
+
+ if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) {
+ // No jQuery handlers for this event type, and it can't have inline handlers
+ return;
+ }
+
+ // Caller can pass in an Event, Object, or just an event type string
+ event = typeof event === "object" ?
+ // jQuery.Event object
+ event[ jQuery.expando ] ? event :
+ // Object literal
+ new jQuery.Event( type, event ) :
+ // Just the event type (string)
+ new jQuery.Event( type );
+
+ event.type = type;
+ event.exclusive = exclusive;
+ event.namespace = namespaces.join(".");
+ event.namespace_re = new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.)?") + "(\\.|$)");
+
+ // triggerHandler() and global events don't bubble or run the default action
+ if ( onlyHandlers || !elem ) {
+ event.preventDefault();
+ event.stopPropagation();
+ }
+
+ // Handle a global trigger
+ if ( !elem ) {
+ // TODO: Stop taunting the data cache; remove global events and always attach to document
+ jQuery.each( jQuery.cache, function() {
+ // internalKey variable is just used to make it easier to find
+ // and potentially change this stuff later; currently it just
+ // points to jQuery.expando
+ var internalKey = jQuery.expando,
+ internalCache = this[ internalKey ];
+ if ( internalCache && internalCache.events && internalCache.events[ type ] ) {
+ jQuery.event.trigger( event, data, internalCache.handle.elem );
+ }
+ });
+ return;
+ }
+
+ // Don't do events on text and comment nodes
+ if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return;
+ }
+
+ // Clean up the event in case it is being reused
+ event.result = undefined;
+ event.target = elem;
+
+ // Clone any incoming data and prepend the event, creating the handler arg list
+ data = data != null ? jQuery.makeArray( data ) : [];
+ data.unshift( event );
+
+ var cur = elem,
+ // IE doesn't like method names with a colon (#3533, #8272)
+ ontype = type.indexOf(":") < 0 ? "on" + type : "";
+
+ // Fire event on the current element, then bubble up the DOM tree
+ do {
+ var handle = jQuery._data( cur, "handle" );
+
+ event.currentTarget = cur;
+ if ( handle ) {
+ handle.apply( cur, data );
+ }
+
+ // Trigger an inline bound script
+ if ( ontype && jQuery.acceptData( cur ) && cur[ ontype ] && cur[ ontype ].apply( cur, data ) === false ) {
+ event.result = false;
+ event.preventDefault();
+ }
+
+ // Bubble up to document, then to window
+ cur = cur.parentNode || cur.ownerDocument || cur === event.target.ownerDocument && window;
+ } while ( cur && !event.isPropagationStopped() );
+
+ // If nobody prevented the default action, do it now
+ if ( !event.isDefaultPrevented() ) {
+ var old,
+ special = jQuery.event.special[ type ] || {};
+
+ if ( (!special._default || special._default.call( elem.ownerDocument, event ) === false) &&
+ !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) {
+
+ // Call a native DOM method on the target with the same name name as the event.
+ // Can't use an .isFunction)() check here because IE6/7 fails that test.
+ // IE<9 dies on focus to hidden element (#1486), may want to revisit a try/catch.
+ try {
+ if ( ontype && elem[ type ] ) {
+ // Don't re-trigger an onFOO event when we call its FOO() method
+ old = elem[ ontype ];
+
+ if ( old ) {
+ elem[ ontype ] = null;
+ }
+
+ jQuery.event.triggered = type;
+ elem[ type ]();
+ }
+ } catch ( ieError ) {}
+
+ if ( old ) {
+ elem[ ontype ] = old;
+ }
+
+ jQuery.event.triggered = undefined;
+ }
+ }
+
+ return event.result;
+ },
+
+ handle: function( event ) {
+ event = jQuery.event.fix( event || window.event );
+ // Snapshot the handlers list since a called handler may add/remove events.
+ var handlers = ((jQuery._data( this, "events" ) || {})[ event.type ] || []).slice(0),
+ run_all = !event.exclusive && !event.namespace,
+ args = Array.prototype.slice.call( arguments, 0 );
+
+ // Use the fix-ed Event rather than the (read-only) native event
+ args[0] = event;
+ event.currentTarget = this;
+
+ for ( var j = 0, l = handlers.length; j < l; j++ ) {
+ var handleObj = handlers[ j ];
+
+ // Triggered event must 1) be non-exclusive and have no namespace, or
+ // 2) have namespace(s) a subset or equal to those in the bound event.
+ if ( run_all || event.namespace_re.test( handleObj.namespace ) ) {
+ // Pass in a reference to the handler function itself
+ // So that we can later remove it
+ event.handler = handleObj.handler;
+ event.data = handleObj.data;
+ event.handleObj = handleObj;
+
+ var ret = handleObj.handler.apply( this, args );
+
+ if ( ret !== undefined ) {
+ event.result = ret;
+ if ( ret === false ) {
+ event.preventDefault();
+ event.stopPropagation();
+ }
+ }
+
+ if ( event.isImmediatePropagationStopped() ) {
+ break;
+ }
+ }
+ }
+ return event.result;
+ },
+
+ props: "altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
+
+ fix: function( event ) {
+ if ( event[ jQuery.expando ] ) {
+ return event;
+ }
+
+ // store a copy of the original event object
+ // and "clone" to set read-only properties
+ var originalEvent = event;
+ event = jQuery.Event( originalEvent );
+
+ for ( var i = this.props.length, prop; i; ) {
+ prop = this.props[ --i ];
+ event[ prop ] = originalEvent[ prop ];
+ }
+
+ // Fix target property, if necessary
+ if ( !event.target ) {
+ // Fixes #1925 where srcElement might not be defined either
+ event.target = event.srcElement || document;
+ }
+
+ // check if target is a textnode (safari)
+ if ( event.target.nodeType === 3 ) {
+ event.target = event.target.parentNode;
+ }
+
+ // Add relatedTarget, if necessary
+ if ( !event.relatedTarget && event.fromElement ) {
+ event.relatedTarget = event.fromElement === event.target ? event.toElement : event.fromElement;
+ }
+
+ // Calculate pageX/Y if missing and clientX/Y available
+ if ( event.pageX == null && event.clientX != null ) {
+ var eventDocument = event.target.ownerDocument || document,
+ doc = eventDocument.documentElement,
+ body = eventDocument.body;
+
+ event.pageX = event.clientX + (doc && doc.scrollLeft || body && body.scrollLeft || 0) - (doc && doc.clientLeft || body && body.clientLeft || 0);
+ event.pageY = event.clientY + (doc && doc.scrollTop || body && body.scrollTop || 0) - (doc && doc.clientTop || body && body.clientTop || 0);
+ }
+
+ // Add which for key events
+ if ( event.which == null && (event.charCode != null || event.keyCode != null) ) {
+ event.which = event.charCode != null ? event.charCode : event.keyCode;
+ }
+
+ // Add metaKey to non-Mac browsers (use ctrl for PC's and Meta for Macs)
+ if ( !event.metaKey && event.ctrlKey ) {
+ event.metaKey = event.ctrlKey;
+ }
+
+ // Add which for click: 1 === left; 2 === middle; 3 === right
+ // Note: button is not normalized, so don't use it
+ if ( !event.which && event.button !== undefined ) {
+ event.which = (event.button & 1 ? 1 : ( event.button & 2 ? 3 : ( event.button & 4 ? 2 : 0 ) ));
+ }
+
+ return event;
+ },
+
+ // Deprecated, use jQuery.guid instead
+ guid: 1E8,
+
+ // Deprecated, use jQuery.proxy instead
+ proxy: jQuery.proxy,
+
+ special: {
+ ready: {
+ // Make sure the ready event is setup
+ setup: jQuery.bindReady,
+ teardown: jQuery.noop
+ },
+
+ live: {
+ add: function( handleObj ) {
+ jQuery.event.add( this,
+ liveConvert( handleObj.origType, handleObj.selector ),
+ jQuery.extend({}, handleObj, {handler: liveHandler, guid: handleObj.handler.guid}) );
+ },
+
+ remove: function( handleObj ) {
+ jQuery.event.remove( this, liveConvert( handleObj.origType, handleObj.selector ), handleObj );
+ }
+ },
+
+ beforeunload: {
+ setup: function( data, namespaces, eventHandle ) {
+ // We only want to do this special case on windows
+ if ( jQuery.isWindow( this ) ) {
+ this.onbeforeunload = eventHandle;
+ }
+ },
+
+ teardown: function( namespaces, eventHandle ) {
+ if ( this.onbeforeunload === eventHandle ) {
+ this.onbeforeunload = null;
+ }
+ }
+ }
+ }
+};
+
+jQuery.removeEvent = document.removeEventListener ?
+ function( elem, type, handle ) {
+ if ( elem.removeEventListener ) {
+ elem.removeEventListener( type, handle, false );
+ }
+ } :
+ function( elem, type, handle ) {
+ if ( elem.detachEvent ) {
+ elem.detachEvent( "on" + type, handle );
+ }
+ };
+
+jQuery.Event = function( src, props ) {
+ // Allow instantiation without the 'new' keyword
+ if ( !this.preventDefault ) {
+ return new jQuery.Event( src, props );
+ }
+
+ // Event object
+ if ( src && src.type ) {
+ this.originalEvent = src;
+ this.type = src.type;
+
+ // Events bubbling up the document may have been marked as prevented
+ // by a handler lower down the tree; reflect the correct value.
+ this.isDefaultPrevented = (src.defaultPrevented || src.returnValue === false ||
+ src.getPreventDefault && src.getPreventDefault()) ? returnTrue : returnFalse;
+
+ // Event type
+ } else {
+ this.type = src;
+ }
+
+ // Put explicitly provided properties onto the event object
+ if ( props ) {
+ jQuery.extend( this, props );
+ }
+
+ // timeStamp is buggy for some events on Firefox(#3843)
+ // So we won't rely on the native value
+ this.timeStamp = jQuery.now();
+
+ // Mark it as fixed
+ this[ jQuery.expando ] = true;
+};
+
+function returnFalse() {
+ return false;
+}
+function returnTrue() {
+ return true;
+}
+
+// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
+// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
+jQuery.Event.prototype = {
+ preventDefault: function() {
+ this.isDefaultPrevented = returnTrue;
+
+ var e = this.originalEvent;
+ if ( !e ) {
+ return;
+ }
+
+ // if preventDefault exists run it on the original event
+ if ( e.preventDefault ) {
+ e.preventDefault();
+
+ // otherwise set the returnValue property of the original event to false (IE)
+ } else {
+ e.returnValue = false;
+ }
+ },
+ stopPropagation: function() {
+ this.isPropagationStopped = returnTrue;
+
+ var e = this.originalEvent;
+ if ( !e ) {
+ return;
+ }
+ // if stopPropagation exists run it on the original event
+ if ( e.stopPropagation ) {
+ e.stopPropagation();
+ }
+ // otherwise set the cancelBubble property of the original event to true (IE)
+ e.cancelBubble = true;
+ },
+ stopImmediatePropagation: function() {
+ this.isImmediatePropagationStopped = returnTrue;
+ this.stopPropagation();
+ },
+ isDefaultPrevented: returnFalse,
+ isPropagationStopped: returnFalse,
+ isImmediatePropagationStopped: returnFalse
+};
+
+// Checks if an event happened on an element within another element
+// Used in jQuery.event.special.mouseenter and mouseleave handlers
+var withinElement = function( event ) {
+
+ // Check if mouse(over|out) are still within the same parent element
+ var related = event.relatedTarget,
+ inside = false,
+ eventType = event.type;
+
+ event.type = event.data;
+
+ if ( related !== this ) {
+
+ if ( related ) {
+ inside = jQuery.contains( this, related );
+ }
+
+ if ( !inside ) {
+
+ jQuery.event.handle.apply( this, arguments );
+
+ event.type = eventType;
+ }
+ }
+},
+
+// In case of event delegation, we only need to rename the event.type,
+// liveHandler will take care of the rest.
+delegate = function( event ) {
+ event.type = event.data;
+ jQuery.event.handle.apply( this, arguments );
+};
+
+// Create mouseenter and mouseleave events
+jQuery.each({
+ mouseenter: "mouseover",
+ mouseleave: "mouseout"
+}, function( orig, fix ) {
+ jQuery.event.special[ orig ] = {
+ setup: function( data ) {
+ jQuery.event.add( this, fix, data && data.selector ? delegate : withinElement, orig );
+ },
+ teardown: function( data ) {
+ jQuery.event.remove( this, fix, data && data.selector ? delegate : withinElement );
+ }
+ };
+});
+
+// submit delegation
+if ( !jQuery.support.submitBubbles ) {
+
+ jQuery.event.special.submit = {
+ setup: function( data, namespaces ) {
+ if ( !jQuery.nodeName( this, "form" ) ) {
+ jQuery.event.add(this, "click.specialSubmit", function( e ) {
+ // Avoid triggering error on non-existent type attribute in IE VML (#7071)
+ var elem = e.target,
+ type = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.type : "";
+
+ if ( (type === "submit" || type === "image") && jQuery( elem ).closest("form").length ) {
+ trigger( "submit", this, arguments );
+ }
+ });
+
+ jQuery.event.add(this, "keypress.specialSubmit", function( e ) {
+ var elem = e.target,
+ type = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.type : "";
+
+ if ( (type === "text" || type === "password") && jQuery( elem ).closest("form").length && e.keyCode === 13 ) {
+ trigger( "submit", this, arguments );
+ }
+ });
+
+ } else {
+ return false;
+ }
+ },
+
+ teardown: function( namespaces ) {
+ jQuery.event.remove( this, ".specialSubmit" );
+ }
+ };
+
+}
+
+// change delegation, happens here so we have bind.
+if ( !jQuery.support.changeBubbles ) {
+
+ var changeFilters,
+
+ getVal = function( elem ) {
+ var type = jQuery.nodeName( elem, "input" ) ? elem.type : "",
+ val = elem.value;
+
+ if ( type === "radio" || type === "checkbox" ) {
+ val = elem.checked;
+
+ } else if ( type === "select-multiple" ) {
+ val = elem.selectedIndex > -1 ?
+ jQuery.map( elem.options, function( elem ) {
+ return elem.selected;
+ }).join("-") :
+ "";
+
+ } else if ( jQuery.nodeName( elem, "select" ) ) {
+ val = elem.selectedIndex;
+ }
+
+ return val;
+ },
+
+ testChange = function testChange( e ) {
+ var elem = e.target, data, val;
+
+ if ( !rformElems.test( elem.nodeName ) || elem.readOnly ) {
+ return;
+ }
+
+ data = jQuery._data( elem, "_change_data" );
+ val = getVal(elem);
+
+ // the current data will be also retrieved by beforeactivate
+ if ( e.type !== "focusout" || elem.type !== "radio" ) {
+ jQuery._data( elem, "_change_data", val );
+ }
+
+ if ( data === undefined || val === data ) {
+ return;
+ }
+
+ if ( data != null || val ) {
+ e.type = "change";
+ e.liveFired = undefined;
+ jQuery.event.trigger( e, arguments[1], elem );
+ }
+ };
+
+ jQuery.event.special.change = {
+ filters: {
+ focusout: testChange,
+
+ beforedeactivate: testChange,
+
+ click: function( e ) {
+ var elem = e.target, type = jQuery.nodeName( elem, "input" ) ? elem.type : "";
+
+ if ( type === "radio" || type === "checkbox" || jQuery.nodeName( elem, "select" ) ) {
+ testChange.call( this, e );
+ }
+ },
+
+ // Change has to be called before submit
+ // Keydown will be called before keypress, which is used in submit-event delegation
+ keydown: function( e ) {
+ var elem = e.target, type = jQuery.nodeName( elem, "input" ) ? elem.type : "";
+
+ if ( (e.keyCode === 13 && !jQuery.nodeName( elem, "textarea" ) ) ||
+ (e.keyCode === 32 && (type === "checkbox" || type === "radio")) ||
+ type === "select-multiple" ) {
+ testChange.call( this, e );
+ }
+ },
+
+ // Beforeactivate happens also before the previous element is blurred
+ // with this event you can't trigger a change event, but you can store
+ // information
+ beforeactivate: function( e ) {
+ var elem = e.target;
+ jQuery._data( elem, "_change_data", getVal(elem) );
+ }
+ },
+
+ setup: function( data, namespaces ) {
+ if ( this.type === "file" ) {
+ return false;
+ }
+
+ for ( var type in changeFilters ) {
+ jQuery.event.add( this, type + ".specialChange", changeFilters[type] );
+ }
+
+ return rformElems.test( this.nodeName );
+ },
+
+ teardown: function( namespaces ) {
+ jQuery.event.remove( this, ".specialChange" );
+
+ return rformElems.test( this.nodeName );
+ }
+ };
+
+ changeFilters = jQuery.event.special.change.filters;
+
+ // Handle when the input is .focus()'d
+ changeFilters.focus = changeFilters.beforeactivate;
+}
+
+function trigger( type, elem, args ) {
+ // Piggyback on a donor event to simulate a different one.
+ // Fake originalEvent to avoid donor's stopPropagation, but if the
+ // simulated event prevents default then we do the same on the donor.
+ // Don't pass args or remember liveFired; they apply to the donor event.
+ var event = jQuery.extend( {}, args[ 0 ] );
+ event.type = type;
+ event.originalEvent = {};
+ event.liveFired = undefined;
+ jQuery.event.handle.call( elem, event );
+ if ( event.isDefaultPrevented() ) {
+ args[ 0 ].preventDefault();
+ }
+}
+
+// Create "bubbling" focus and blur events
+if ( !jQuery.support.focusinBubbles ) {
+ jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) {
+
+ // Attach a single capturing handler while someone wants focusin/focusout
+ var attaches = 0;
+
+ jQuery.event.special[ fix ] = {
+ setup: function() {
+ if ( attaches++ === 0 ) {
+ document.addEventListener( orig, handler, true );
+ }
+ },
+ teardown: function() {
+ if ( --attaches === 0 ) {
+ document.removeEventListener( orig, handler, true );
+ }
+ }
+ };
+
+ function handler( donor ) {
+ // Donor event is always a native one; fix it and switch its type.
+ // Let focusin/out handler cancel the donor focus/blur event.
+ var e = jQuery.event.fix( donor );
+ e.type = fix;
+ e.originalEvent = {};
+ jQuery.event.trigger( e, null, e.target );
+ if ( e.isDefaultPrevented() ) {
+ donor.preventDefault();
+ }
+ }
+ });
+}
+
+jQuery.each(["bind", "one"], function( i, name ) {
+ jQuery.fn[ name ] = function( type, data, fn ) {
+ var handler;
+
+ // Handle object literals
+ if ( typeof type === "object" ) {
+ for ( var key in type ) {
+ this[ name ](key, data, type[key], fn);
+ }
+ return this;
+ }
+
+ if ( arguments.length === 2 || data === false ) {
+ fn = data;
+ data = undefined;
+ }
+
+ if ( name === "one" ) {
+ handler = function( event ) {
+ jQuery( this ).unbind( event, handler );
+ return fn.apply( this, arguments );
+ };
+ handler.guid = fn.guid || jQuery.guid++;
+ } else {
+ handler = fn;
+ }
+
+ if ( type === "unload" && name !== "one" ) {
+ this.one( type, data, fn );
+
+ } else {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ jQuery.event.add( this[i], type, handler, data );
+ }
+ }
+
+ return this;
+ };
+});
+
+jQuery.fn.extend({
+ unbind: function( type, fn ) {
+ // Handle object literals
+ if ( typeof type === "object" && !type.preventDefault ) {
+ for ( var key in type ) {
+ this.unbind(key, type[key]);
+ }
+
+ } else {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ jQuery.event.remove( this[i], type, fn );
+ }
+ }
+
+ return this;
+ },
+
+ delegate: function( selector, types, data, fn ) {
+ return this.live( types, data, fn, selector );
+ },
+
+ undelegate: function( selector, types, fn ) {
+ if ( arguments.length === 0 ) {
+ return this.unbind( "live" );
+
+ } else {
+ return this.die( types, null, fn, selector );
+ }
+ },
+
+ trigger: function( type, data ) {
+ return this.each(function() {
+ jQuery.event.trigger( type, data, this );
+ });
+ },
+
+ triggerHandler: function( type, data ) {
+ if ( this[0] ) {
+ return jQuery.event.trigger( type, data, this[0], true );
+ }
+ },
+
+ toggle: function( fn ) {
+ // Save reference to arguments for access in closure
+ var args = arguments,
+ guid = fn.guid || jQuery.guid++,
+ i = 0,
+ toggler = function( event ) {
+ // Figure out which function to execute
+ var lastToggle = ( jQuery.data( this, "lastToggle" + fn.guid ) || 0 ) % i;
+ jQuery.data( this, "lastToggle" + fn.guid, lastToggle + 1 );
+
+ // Make sure that clicks stop
+ event.preventDefault();
+
+ // and execute the function
+ return args[ lastToggle ].apply( this, arguments ) || false;
+ };
+
+ // link all the functions, so any of them can unbind this click handler
+ toggler.guid = guid;
+ while ( i < args.length ) {
+ args[ i++ ].guid = guid;
+ }
+
+ return this.click( toggler );
+ },
+
+ hover: function( fnOver, fnOut ) {
+ return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
+ }
+});
+
+var liveMap = {
+ focus: "focusin",
+ blur: "focusout",
+ mouseenter: "mouseover",
+ mouseleave: "mouseout"
+};
+
+jQuery.each(["live", "die"], function( i, name ) {
+ jQuery.fn[ name ] = function( types, data, fn, origSelector /* Internal Use Only */ ) {
+ var type, i = 0, match, namespaces, preType,
+ selector = origSelector || this.selector,
+ context = origSelector ? this : jQuery( this.context );
+
+ if ( typeof types === "object" && !types.preventDefault ) {
+ for ( var key in types ) {
+ context[ name ]( key, data, types[key], selector );
+ }
+
+ return this;
+ }
+
+ if ( name === "die" && !types &&
+ origSelector && origSelector.charAt(0) === "." ) {
+
+ context.unbind( origSelector );
+
+ return this;
+ }
+
+ if ( data === false || jQuery.isFunction( data ) ) {
+ fn = data || returnFalse;
+ data = undefined;
+ }
+
+ types = (types || "").split(" ");
+
+ while ( (type = types[ i++ ]) != null ) {
+ match = rnamespaces.exec( type );
+ namespaces = "";
+
+ if ( match ) {
+ namespaces = match[0];
+ type = type.replace( rnamespaces, "" );
+ }
+
+ if ( type === "hover" ) {
+ types.push( "mouseenter" + namespaces, "mouseleave" + namespaces );
+ continue;
+ }
+
+ preType = type;
+
+ if ( liveMap[ type ] ) {
+ types.push( liveMap[ type ] + namespaces );
+ type = type + namespaces;
+
+ } else {
+ type = (liveMap[ type ] || type) + namespaces;
+ }
+
+ if ( name === "live" ) {
+ // bind live handler
+ for ( var j = 0, l = context.length; j < l; j++ ) {
+ jQuery.event.add( context[j], "live." + liveConvert( type, selector ),
+ { data: data, selector: selector, handler: fn, origType: type, origHandler: fn, preType: preType } );
+ }
+
+ } else {
+ // unbind live handler
+ context.unbind( "live." + liveConvert( type, selector ), fn );
+ }
+ }
+
+ return this;
+ };
+});
+
+function liveHandler( event ) {
+ var stop, maxLevel, related, match, handleObj, elem, j, i, l, data, close, namespace, ret,
+ elems = [],
+ selectors = [],
+ events = jQuery._data( this, "events" );
+
+ // Make sure we avoid non-left-click bubbling in Firefox (#3861) and disabled elements in IE (#6911)
+ if ( event.liveFired === this || !events || !events.live || event.target.disabled || event.button && event.type === "click" ) {
+ return;
+ }
+
+ if ( event.namespace ) {
+ namespace = new RegExp("(^|\\.)" + event.namespace.split(".").join("\\.(?:.*\\.)?") + "(\\.|$)");
+ }
+
+ event.liveFired = this;
+
+ var live = events.live.slice(0);
+
+ for ( j = 0; j < live.length; j++ ) {
+ handleObj = live[j];
+
+ if ( handleObj.origType.replace( rnamespaces, "" ) === event.type ) {
+ selectors.push( handleObj.selector );
+
+ } else {
+ live.splice( j--, 1 );
+ }
+ }
+
+ match = jQuery( event.target ).closest( selectors, event.currentTarget );
+
+ for ( i = 0, l = match.length; i < l; i++ ) {
+ close = match[i];
+
+ for ( j = 0; j < live.length; j++ ) {
+ handleObj = live[j];
+
+ if ( close.selector === handleObj.selector && (!namespace || namespace.test( handleObj.namespace )) && !close.elem.disabled ) {
+ elem = close.elem;
+ related = null;
+
+ // Those two events require additional checking
+ if ( handleObj.preType === "mouseenter" || handleObj.preType === "mouseleave" ) {
+ event.type = handleObj.preType;
+ related = jQuery( event.relatedTarget ).closest( handleObj.selector )[0];
+
+ // Make sure not to accidentally match a child element with the same selector
+ if ( related && jQuery.contains( elem, related ) ) {
+ related = elem;
+ }
+ }
+
+ if ( !related || related !== elem ) {
+ elems.push({ elem: elem, handleObj: handleObj, level: close.level });
+ }
+ }
+ }
+ }
+
+ for ( i = 0, l = elems.length; i < l; i++ ) {
+ match = elems[i];
+
+ if ( maxLevel && match.level > maxLevel ) {
+ break;
+ }
+
+ event.currentTarget = match.elem;
+ event.data = match.handleObj.data;
+ event.handleObj = match.handleObj;
+
+ ret = match.handleObj.origHandler.apply( match.elem, arguments );
+
+ if ( ret === false || event.isPropagationStopped() ) {
+ maxLevel = match.level;
+
+ if ( ret === false ) {
+ stop = false;
+ }
+ if ( event.isImmediatePropagationStopped() ) {
+ break;
+ }
+ }
+ }
+
+ return stop;
+}
+
+// Build the key under which a .live() handler is stored: "<type>.<selector>",
+// with "." in the selector escaped to "`" and spaces to "&" so selector text
+// cannot be confused with event-namespace separators.
+function liveConvert( type, selector ) {
+ return (type && type !== "*" ? type + "." : "") + selector.replace(rperiod, "`").replace(rspaces, "&");
+}
+
+jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " +
+ "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
+ "change select submit keydown keypress keyup error").split(" "), function( i, name ) {
+
+ // Handle event binding
+ jQuery.fn[ name ] = function( data, fn ) {
+ if ( fn == null ) {
+ fn = data;
+ data = null;
+ }
+
+ return arguments.length > 0 ?
+ this.bind( name, data, fn ) :
+ this.trigger( name );
+ };
+
+ if ( jQuery.attrFn ) {
+ jQuery.attrFn[ name ] = true;
+ }
+});
+
+
+
+/*!
+ * Sizzle CSS Selector Engine
+ * Copyright 2011, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ * More information: http://sizzlejs.com/
+ */
+(function(){
+
+var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
+ done = 0,
+ toString = Object.prototype.toString,
+ hasDuplicate = false,
+ baseHasDuplicate = true,
+ rBackslash = /\\/g,
+ rNonWord = /\W/;
+
+// Here we check if the JavaScript engine is using some sort of
+// optimization where it does not always call our comparison
+// function. If that is the case, discard the hasDuplicate value.
+// Thus far that includes Google Chrome.
+[0, 0].sort(function() {
+ baseHasDuplicate = false;
+ return 0;
+});
+
+var Sizzle = function( selector, context, results, seed ) {
+ results = results || [];
+ context = context || document;
+
+ var origContext = context;
+
+ if ( context.nodeType !== 1 && context.nodeType !== 9 ) {
+ return [];
+ }
+
+ if ( !selector || typeof selector !== "string" ) {
+ return results;
+ }
+
+ var m, set, checkSet, extra, ret, cur, pop, i,
+ prune = true,
+ contextXML = Sizzle.isXML( context ),
+ parts = [],
+ soFar = selector;
+
+ // Reset the position of the chunker regexp (start from head)
+ do {
+ chunker.exec( "" );
+ m = chunker.exec( soFar );
+
+ if ( m ) {
+ soFar = m[3];
+
+ parts.push( m[1] );
+
+ if ( m[2] ) {
+ extra = m[3];
+ break;
+ }
+ }
+ } while ( m );
+
+ if ( parts.length > 1 && origPOS.exec( selector ) ) {
+
+ if ( parts.length === 2 && Expr.relative[ parts[0] ] ) {
+ set = posProcess( parts[0] + parts[1], context );
+
+ } else {
+ set = Expr.relative[ parts[0] ] ?
+ [ context ] :
+ Sizzle( parts.shift(), context );
+
+ while ( parts.length ) {
+ selector = parts.shift();
+
+ if ( Expr.relative[ selector ] ) {
+ selector += parts.shift();
+ }
+
+ set = posProcess( selector, set );
+ }
+ }
+
+ } else {
+ // Take a shortcut and set the context if the root selector is an ID
+ // (but not if it'll be faster if the inner selector is an ID)
+ if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML &&
+ Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) {
+
+ ret = Sizzle.find( parts.shift(), context, contextXML );
+ context = ret.expr ?
+ Sizzle.filter( ret.expr, ret.set )[0] :
+ ret.set[0];
+ }
+
+ if ( context ) {
+ ret = seed ?
+ { expr: parts.pop(), set: makeArray(seed) } :
+ Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML );
+
+ set = ret.expr ?
+ Sizzle.filter( ret.expr, ret.set ) :
+ ret.set;
+
+ if ( parts.length > 0 ) {
+ checkSet = makeArray( set );
+
+ } else {
+ prune = false;
+ }
+
+ while ( parts.length ) {
+ cur = parts.pop();
+ pop = cur;
+
+ if ( !Expr.relative[ cur ] ) {
+ cur = "";
+ } else {
+ pop = parts.pop();
+ }
+
+ if ( pop == null ) {
+ pop = context;
+ }
+
+ Expr.relative[ cur ]( checkSet, pop, contextXML );
+ }
+
+ } else {
+ checkSet = parts = [];
+ }
+ }
+
+ if ( !checkSet ) {
+ checkSet = set;
+ }
+
+ if ( !checkSet ) {
+ Sizzle.error( cur || selector );
+ }
+
+ if ( toString.call(checkSet) === "[object Array]" ) {
+ if ( !prune ) {
+ results.push.apply( results, checkSet );
+
+ } else if ( context && context.nodeType === 1 ) {
+ for ( i = 0; checkSet[i] != null; i++ ) {
+ if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && Sizzle.contains(context, checkSet[i])) ) {
+ results.push( set[i] );
+ }
+ }
+
+ } else {
+ for ( i = 0; checkSet[i] != null; i++ ) {
+ if ( checkSet[i] && checkSet[i].nodeType === 1 ) {
+ results.push( set[i] );
+ }
+ }
+ }
+
+ } else {
+ makeArray( checkSet, results );
+ }
+
+ if ( extra ) {
+ Sizzle( extra, origContext, results, seed );
+ Sizzle.uniqueSort( results );
+ }
+
+ return results;
+};
+
+// Sort `results` into document order and remove adjacent duplicates in place.
+// The comparator (sortOrder) raises the module-level hasDuplicate flag when it
+// sees identical nodes; on engines whose sort may skip comparator calls
+// (detected via baseHasDuplicate) the flag starts true so dedup always runs.
+Sizzle.uniqueSort = function( results ) {
+ if ( sortOrder ) {
+ hasDuplicate = baseHasDuplicate;
+ results.sort( sortOrder );
+
+ if ( hasDuplicate ) {
+ // After sorting, duplicates are adjacent; splice them out in place.
+ for ( var i = 1; i < results.length; i++ ) {
+ if ( results[i] === results[ i - 1 ] ) {
+ results.splice( i--, 1 );
+ }
+ }
+ }
+ }
+
+ return results;
+};
+
+// Filter the element array `set` down to those matching selector `expr`
+// (delegates to Sizzle with `set` as the seed).
+Sizzle.matches = function( expr, set ) {
+ return Sizzle( expr, null, null, set );
+};
+
+// Boolean test: does the single element `node` match selector `expr`?
+Sizzle.matchesSelector = function( node, expr ) {
+ return Sizzle( expr, null, null, [node] ).length > 0;
+};
+
+// Run the fastest applicable finder (ID, NAME, TAG — tried in Expr.order) on
+// the leading token of `expr` against `context`. Returns { set, expr }: the
+// candidate elements plus the remainder of the selector still to be filtered.
+// Falls back to all descendants ("*") when no finder produced a set.
+Sizzle.find = function( expr, context, isXML ) {
+ var set;
+
+ if ( !expr ) {
+ return [];
+ }
+
+ for ( var i = 0, l = Expr.order.length; i < l; i++ ) {
+ var match,
+ type = Expr.order[i];
+
+ if ( (match = Expr.leftMatch[ type ].exec( expr )) ) {
+ var left = match[1];
+ match.splice( 1, 1 );
+
+ // A trailing backslash means the token was escaped — skip this finder.
+ if ( left.substr( left.length - 1 ) !== "\\" ) {
+ match[1] = (match[1] || "").replace( rBackslash, "" );
+ set = Expr.find[ type ]( match, context, isXML );
+
+ if ( set != null ) {
+ // Consume the matched token from the selector text.
+ expr = expr.replace( Expr.match[ type ], "" );
+ break;
+ }
+ }
+ }
+ }
+
+ if ( !set ) {
+ set = typeof context.getElementsByTagName !== "undefined" ?
+ context.getElementsByTagName( "*" ) :
+ [];
+ }
+
+ return { set: set, expr: expr };
+};
+
+Sizzle.filter = function( expr, set, inplace, not ) {
+ var match, anyFound,
+ old = expr,
+ result = [],
+ curLoop = set,
+ isXMLFilter = set && set[0] && Sizzle.isXML( set[0] );
+
+ while ( expr && set.length ) {
+ for ( var type in Expr.filter ) {
+ if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) {
+ var found, item,
+ filter = Expr.filter[ type ],
+ left = match[1];
+
+ anyFound = false;
+
+ match.splice(1,1);
+
+ if ( left.substr( left.length - 1 ) === "\\" ) {
+ continue;
+ }
+
+ if ( curLoop === result ) {
+ result = [];
+ }
+
+ if ( Expr.preFilter[ type ] ) {
+ match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter );
+
+ if ( !match ) {
+ anyFound = found = true;
+
+ } else if ( match === true ) {
+ continue;
+ }
+ }
+
+ if ( match ) {
+ for ( var i = 0; (item = curLoop[i]) != null; i++ ) {
+ if ( item ) {
+ found = filter( item, match, i, curLoop );
+ var pass = not ^ !!found;
+
+ if ( inplace && found != null ) {
+ if ( pass ) {
+ anyFound = true;
+
+ } else {
+ curLoop[i] = false;
+ }
+
+ } else if ( pass ) {
+ result.push( item );
+ anyFound = true;
+ }
+ }
+ }
+ }
+
+ if ( found !== undefined ) {
+ if ( !inplace ) {
+ curLoop = result;
+ }
+
+ expr = expr.replace( Expr.match[ type ], "" );
+
+ if ( !anyFound ) {
+ return [];
+ }
+
+ break;
+ }
+ }
+ }
+
+ // Improper expression
+ if ( expr === old ) {
+ if ( anyFound == null ) {
+ Sizzle.error( expr );
+
+ } else {
+ break;
+ }
+ }
+
+ old = expr;
+ }
+
+ return curLoop;
+};
+
+// Abort selector processing: throws a string (not an Error object — this is
+// the engine's historical contract) describing the unparseable expression.
+Sizzle.error = function( msg ) {
+ throw "Syntax error, unrecognized expression: " + msg;
+};
+
+var Expr = Sizzle.selectors = {
+ order: [ "ID", "NAME", "TAG" ],
+
+ match: {
+ ID: /#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
+ CLASS: /\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
+ NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,
+ ATTR: /\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,
+ TAG: /^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,
+ CHILD: /:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,
+ POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,
+ PSEUDO: /:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/
+ },
+
+ leftMatch: {},
+
+ attrMap: {
+ "class": "className",
+ "for": "htmlFor"
+ },
+
+ attrHandle: {
+ href: function( elem ) {
+ return elem.getAttribute( "href" );
+ },
+ type: function( elem ) {
+ return elem.getAttribute( "type" );
+ }
+ },
+
+ relative: {
+ "+": function(checkSet, part){
+ var isPartStr = typeof part === "string",
+ isTag = isPartStr && !rNonWord.test( part ),
+ isPartStrNotTag = isPartStr && !isTag;
+
+ if ( isTag ) {
+ part = part.toLowerCase();
+ }
+
+ for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) {
+ if ( (elem = checkSet[i]) ) {
+ while ( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {}
+
+ checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ?
+ elem || false :
+ elem === part;
+ }
+ }
+
+ if ( isPartStrNotTag ) {
+ Sizzle.filter( part, checkSet, true );
+ }
+ },
+
+ ">": function( checkSet, part ) {
+ var elem,
+ isPartStr = typeof part === "string",
+ i = 0,
+ l = checkSet.length;
+
+ if ( isPartStr && !rNonWord.test( part ) ) {
+ part = part.toLowerCase();
+
+ for ( ; i < l; i++ ) {
+ elem = checkSet[i];
+
+ if ( elem ) {
+ var parent = elem.parentNode;
+ checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false;
+ }
+ }
+
+ } else {
+ for ( ; i < l; i++ ) {
+ elem = checkSet[i];
+
+ if ( elem ) {
+ checkSet[i] = isPartStr ?
+ elem.parentNode :
+ elem.parentNode === part;
+ }
+ }
+
+ if ( isPartStr ) {
+ Sizzle.filter( part, checkSet, true );
+ }
+ }
+ },
+
+ "": function(checkSet, part, isXML){
+ var nodeCheck,
+ doneName = done++,
+ checkFn = dirCheck;
+
+ if ( typeof part === "string" && !rNonWord.test( part ) ) {
+ part = part.toLowerCase();
+ nodeCheck = part;
+ checkFn = dirNodeCheck;
+ }
+
+ checkFn( "parentNode", part, doneName, checkSet, nodeCheck, isXML );
+ },
+
+ "~": function( checkSet, part, isXML ) {
+ var nodeCheck,
+ doneName = done++,
+ checkFn = dirCheck;
+
+ if ( typeof part === "string" && !rNonWord.test( part ) ) {
+ part = part.toLowerCase();
+ nodeCheck = part;
+ checkFn = dirNodeCheck;
+ }
+
+ checkFn( "previousSibling", part, doneName, checkSet, nodeCheck, isXML );
+ }
+ },
+
+ find: {
+ ID: function( match, context, isXML ) {
+ if ( typeof context.getElementById !== "undefined" && !isXML ) {
+ var m = context.getElementById(match[1]);
+ // Check parentNode to catch when Blackberry 4.6 returns
+ // nodes that are no longer in the document #6963
+ return m && m.parentNode ? [m] : [];
+ }
+ },
+
+ NAME: function( match, context ) {
+ if ( typeof context.getElementsByName !== "undefined" ) {
+ var ret = [],
+ results = context.getElementsByName( match[1] );
+
+ for ( var i = 0, l = results.length; i < l; i++ ) {
+ if ( results[i].getAttribute("name") === match[1] ) {
+ ret.push( results[i] );
+ }
+ }
+
+ return ret.length === 0 ? null : ret;
+ }
+ },
+
+ TAG: function( match, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( match[1] );
+ }
+ }
+ },
+ preFilter: {
+ CLASS: function( match, curLoop, inplace, result, not, isXML ) {
+ match = " " + match[1].replace( rBackslash, "" ) + " ";
+
+ if ( isXML ) {
+ return match;
+ }
+
+ for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) {
+ if ( elem ) {
+ if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n\r]/g, " ").indexOf(match) >= 0) ) {
+ if ( !inplace ) {
+ result.push( elem );
+ }
+
+ } else if ( inplace ) {
+ curLoop[i] = false;
+ }
+ }
+ }
+
+ return false;
+ },
+
+ ID: function( match ) {
+ return match[1].replace( rBackslash, "" );
+ },
+
+ TAG: function( match, curLoop ) {
+ return match[1].replace( rBackslash, "" ).toLowerCase();
+ },
+
+ CHILD: function( match ) {
+ if ( match[1] === "nth" ) {
+ if ( !match[2] ) {
+ Sizzle.error( match[0] );
+ }
+
+ match[2] = match[2].replace(/^\+|\s*/g, '');
+
+ // parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6'
+ var test = /(-?)(\d*)(?:n([+\-]?\d*))?/.exec(
+ match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" ||
+ !/\D/.test( match[2] ) && "0n+" + match[2] || match[2]);
+
+ // calculate the numbers (first)n+(last) including if they are negative
+ match[2] = (test[1] + (test[2] || 1)) - 0;
+ match[3] = test[3] - 0;
+ }
+ else if ( match[2] ) {
+ Sizzle.error( match[0] );
+ }
+
+ // TODO: Move to normal caching system
+ match[0] = done++;
+
+ return match;
+ },
+
+ ATTR: function( match, curLoop, inplace, result, not, isXML ) {
+ var name = match[1] = match[1].replace( rBackslash, "" );
+
+ if ( !isXML && Expr.attrMap[name] ) {
+ match[1] = Expr.attrMap[name];
+ }
+
+ // Handle if an un-quoted value was used
+ match[4] = ( match[4] || match[5] || "" ).replace( rBackslash, "" );
+
+ if ( match[2] === "~=" ) {
+ match[4] = " " + match[4] + " ";
+ }
+
+ return match;
+ },
+
+ PSEUDO: function( match, curLoop, inplace, result, not ) {
+ if ( match[1] === "not" ) {
+ // If we're dealing with a complex expression, or a simple one
+ if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) {
+ match[3] = Sizzle(match[3], null, null, curLoop);
+
+ } else {
+ var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not);
+
+ if ( !inplace ) {
+ result.push.apply( result, ret );
+ }
+
+ return false;
+ }
+
+ } else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) {
+ return true;
+ }
+
+ return match;
+ },
+
+ POS: function( match ) {
+ match.unshift( true );
+
+ return match;
+ }
+ },
+
+ filters: {
+ enabled: function( elem ) {
+ return elem.disabled === false && elem.type !== "hidden";
+ },
+
+ disabled: function( elem ) {
+ return elem.disabled === true;
+ },
+
+ checked: function( elem ) {
+ return elem.checked === true;
+ },
+
+ selected: function( elem ) {
+ // Accessing this property makes selected-by-default
+ // options in Safari work properly
+ if ( elem.parentNode ) {
+ elem.parentNode.selectedIndex;
+ }
+
+ return elem.selected === true;
+ },
+
+ parent: function( elem ) {
+ return !!elem.firstChild;
+ },
+
+ empty: function( elem ) {
+ return !elem.firstChild;
+ },
+
+ has: function( elem, i, match ) {
+ return !!Sizzle( match[3], elem ).length;
+ },
+
+ header: function( elem ) {
+ return (/h\d/i).test( elem.nodeName );
+ },
+
+ text: function( elem ) {
+ var attr = elem.getAttribute( "type" ), type = elem.type;
+ // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc)
+ // use getAttribute instead to test this case
+ return elem.nodeName.toLowerCase() === "input" && "text" === type && ( attr === type || attr === null );
+ },
+
+ radio: function( elem ) {
+ return elem.nodeName.toLowerCase() === "input" && "radio" === elem.type;
+ },
+
+ checkbox: function( elem ) {
+ return elem.nodeName.toLowerCase() === "input" && "checkbox" === elem.type;
+ },
+
+ file: function( elem ) {
+ return elem.nodeName.toLowerCase() === "input" && "file" === elem.type;
+ },
+
+ password: function( elem ) {
+ return elem.nodeName.toLowerCase() === "input" && "password" === elem.type;
+ },
+
+ submit: function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return (name === "input" || name === "button") && "submit" === elem.type;
+ },
+
+ image: function( elem ) {
+ return elem.nodeName.toLowerCase() === "input" && "image" === elem.type;
+ },
+
+ reset: function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return (name === "input" || name === "button") && "reset" === elem.type;
+ },
+
+ button: function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return name === "input" && "button" === elem.type || name === "button";
+ },
+
+ input: function( elem ) {
+ return (/input|select|textarea|button/i).test( elem.nodeName );
+ },
+
+ focus: function( elem ) {
+ return elem === elem.ownerDocument.activeElement;
+ }
+ },
+ setFilters: {
+ first: function( elem, i ) {
+ return i === 0;
+ },
+
+ last: function( elem, i, match, array ) {
+ return i === array.length - 1;
+ },
+
+ even: function( elem, i ) {
+ return i % 2 === 0;
+ },
+
+ odd: function( elem, i ) {
+ return i % 2 === 1;
+ },
+
+ lt: function( elem, i, match ) {
+ return i < match[3] - 0;
+ },
+
+ gt: function( elem, i, match ) {
+ return i > match[3] - 0;
+ },
+
+ nth: function( elem, i, match ) {
+ return match[3] - 0 === i;
+ },
+
+ eq: function( elem, i, match ) {
+ return match[3] - 0 === i;
+ }
+ },
+ filter: {
+ PSEUDO: function( elem, match, i, array ) {
+ var name = match[1],
+ filter = Expr.filters[ name ];
+
+ if ( filter ) {
+ return filter( elem, i, match, array );
+
+ } else if ( name === "contains" ) {
+ return (elem.textContent || elem.innerText || Sizzle.getText([ elem ]) || "").indexOf(match[3]) >= 0;
+
+ } else if ( name === "not" ) {
+ var not = match[3];
+
+ for ( var j = 0, l = not.length; j < l; j++ ) {
+ if ( not[j] === elem ) {
+ return false;
+ }
+ }
+
+ return true;
+
+ } else {
+ Sizzle.error( name );
+ }
+ },
+
+ CHILD: function( elem, match ) {
+ var type = match[1],
+ node = elem;
+
+ switch ( type ) {
+ case "only":
+ case "first":
+ while ( (node = node.previousSibling) ) {
+ if ( node.nodeType === 1 ) {
+ return false;
+ }
+ }
+
+ if ( type === "first" ) {
+ return true;
+ }
+
+ node = elem;
+
+ case "last":
+ while ( (node = node.nextSibling) ) {
+ if ( node.nodeType === 1 ) {
+ return false;
+ }
+ }
+
+ return true;
+
+ case "nth":
+ var first = match[2],
+ last = match[3];
+
+ if ( first === 1 && last === 0 ) {
+ return true;
+ }
+
+ var doneName = match[0],
+ parent = elem.parentNode;
+
+ if ( parent && (parent.sizcache !== doneName || !elem.nodeIndex) ) {
+ var count = 0;
+
+ for ( node = parent.firstChild; node; node = node.nextSibling ) {
+ if ( node.nodeType === 1 ) {
+ node.nodeIndex = ++count;
+ }
+ }
+
+ parent.sizcache = doneName;
+ }
+
+ var diff = elem.nodeIndex - last;
+
+ if ( first === 0 ) {
+ return diff === 0;
+
+ } else {
+ return ( diff % first === 0 && diff / first >= 0 );
+ }
+ }
+ },
+
+ ID: function( elem, match ) {
+ return elem.nodeType === 1 && elem.getAttribute("id") === match;
+ },
+
+ TAG: function( elem, match ) {
+ return (match === "*" && elem.nodeType === 1) || elem.nodeName.toLowerCase() === match;
+ },
+
+ CLASS: function( elem, match ) {
+ return (" " + (elem.className || elem.getAttribute("class")) + " ")
+ .indexOf( match ) > -1;
+ },
+
+ ATTR: function( elem, match ) {
+ var name = match[1],
+ result = Expr.attrHandle[ name ] ?
+ Expr.attrHandle[ name ]( elem ) :
+ elem[ name ] != null ?
+ elem[ name ] :
+ elem.getAttribute( name ),
+ value = result + "",
+ type = match[2],
+ check = match[4];
+
+ return result == null ?
+ type === "!=" :
+ type === "=" ?
+ value === check :
+ type === "*=" ?
+ value.indexOf(check) >= 0 :
+ type === "~=" ?
+ (" " + value + " ").indexOf(check) >= 0 :
+ !check ?
+ value && result !== false :
+ type === "!=" ?
+ value !== check :
+ type === "^=" ?
+ value.indexOf(check) === 0 :
+ type === "$=" ?
+ value.substr(value.length - check.length) === check :
+ type === "|=" ?
+ value === check || value.substr(0, check.length + 1) === check + "-" :
+ false;
+ },
+
+ POS: function( elem, match, i, array ) {
+ var name = match[2],
+ filter = Expr.setFilters[ name ];
+
+ if ( filter ) {
+ return filter( elem, i, match, array );
+ }
+ }
+ }
+};
+
+var origPOS = Expr.match.POS,
+ fescape = function(all, num){
+ return "\\" + (num - 0 + 1);
+ };
+
+for ( var type in Expr.match ) {
+ Expr.match[ type ] = new RegExp( Expr.match[ type ].source + (/(?![^\[]*\])(?![^\(]*\))/.source) );
+ Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, fescape) );
+}
+
+// Convert an array-like (NodeList, arguments, ...) to a true Array. When
+// `results` is given, append into it and return it instead of a new array.
+// NOTE: reassigned below by a try/catch fallback on browsers where
+// Array.prototype.slice cannot consume a NodeList.
+var makeArray = function( array, results ) {
+ array = Array.prototype.slice.call( array, 0 );
+
+ if ( results ) {
+ results.push.apply( results, array );
+ return results;
+ }
+
+ return array;
+};
+
+// Perform a simple check to determine if the browser is capable of
+// converting a NodeList to an array using builtin methods.
+// Also verifies that the returned array holds DOM nodes
+// (which is not the case in the Blackberry browser)
+try {
+ Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType;
+
+// Provide a fallback method if it does not work
+} catch( e ) {
+ makeArray = function( array, results ) {
+ var i = 0,
+ ret = results || [];
+
+ if ( toString.call(array) === "[object Array]" ) {
+ Array.prototype.push.apply( ret, array );
+
+ } else {
+ if ( typeof array.length === "number" ) {
+ for ( var l = array.length; i < l; i++ ) {
+ ret.push( array[i] );
+ }
+
+ } else {
+ for ( ; array[i]; i++ ) {
+ ret.push( array[i] );
+ }
+ }
+ }
+
+ return ret;
+ };
+}
+
+var sortOrder, siblingCheck;
+
+if ( document.documentElement.compareDocumentPosition ) {
+ sortOrder = function( a, b ) {
+ if ( a === b ) {
+ hasDuplicate = true;
+ return 0;
+ }
+
+ if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) {
+ return a.compareDocumentPosition ? -1 : 1;
+ }
+
+ return a.compareDocumentPosition(b) & 4 ? -1 : 1;
+ };
+
+} else {
+ sortOrder = function( a, b ) {
+ // The nodes are identical, we can exit early
+ if ( a === b ) {
+ hasDuplicate = true;
+ return 0;
+
+ // Fallback to using sourceIndex (in IE) if it's available on both nodes
+ } else if ( a.sourceIndex && b.sourceIndex ) {
+ return a.sourceIndex - b.sourceIndex;
+ }
+
+ var al, bl,
+ ap = [],
+ bp = [],
+ aup = a.parentNode,
+ bup = b.parentNode,
+ cur = aup;
+
+ // If the nodes are siblings (or identical) we can do a quick check
+ if ( aup === bup ) {
+ return siblingCheck( a, b );
+
+ // If no parents were found then the nodes are disconnected
+ } else if ( !aup ) {
+ return -1;
+
+ } else if ( !bup ) {
+ return 1;
+ }
+
+ // Otherwise they're somewhere else in the tree so we need
+ // to build up a full list of the parentNodes for comparison
+ while ( cur ) {
+ ap.unshift( cur );
+ cur = cur.parentNode;
+ }
+
+ cur = bup;
+
+ while ( cur ) {
+ bp.unshift( cur );
+ cur = cur.parentNode;
+ }
+
+ al = ap.length;
+ bl = bp.length;
+
+ // Start walking down the tree looking for a discrepancy
+ for ( var i = 0; i < al && i < bl; i++ ) {
+ if ( ap[i] !== bp[i] ) {
+ return siblingCheck( ap[i], bp[i] );
+ }
+ }
+
+ // We ended someplace up the tree so do a sibling check
+ return i === al ?
+ siblingCheck( a, bp[i], -1 ) :
+ siblingCheck( ap[i], b, 1 );
+ };
+
+ siblingCheck = function( a, b, ret ) {
+ if ( a === b ) {
+ return ret;
+ }
+
+ var cur = a.nextSibling;
+
+ while ( cur ) {
+ if ( cur === b ) {
+ return -1;
+ }
+
+ cur = cur.nextSibling;
+ }
+
+ return 1;
+ };
+}
+
+// Utility function for retrieving the text value of an array of DOM nodes
+// Concatenate the text content of every node in `elems`: text (nodeType 3)
+// and CDATA (nodeType 4) nodes contribute their nodeValue; element nodes are
+// recursed into; comment nodes (nodeType 8) are skipped.
+Sizzle.getText = function( elems ) {
+ var ret = "", elem;
+
+ for ( var i = 0; elems[i]; i++ ) {
+ elem = elems[i];
+
+ // Get the text from text nodes and CDATA nodes
+ if ( elem.nodeType === 3 || elem.nodeType === 4 ) {
+ ret += elem.nodeValue;
+
+ // Traverse everything else, except comment nodes
+ } else if ( elem.nodeType !== 8 ) {
+ ret += Sizzle.getText( elem.childNodes );
+ }
+ }
+
+ return ret;
+};
+
+// Check to see if the browser returns elements by name when
+// querying by getElementById (and provide a workaround)
+(function(){
+ // We're going to inject a fake input element with a specified name
+ var form = document.createElement("div"),
+ id = "script" + (new Date()).getTime(),
+ root = document.documentElement;
+
+ form.innerHTML = "<a name='" + id + "'/>";
+
+ // Inject it into the root element, check its status, and remove it quickly
+ root.insertBefore( form, root.firstChild );
+
+ // The workaround has to do additional checks after a getElementById
+ // Which slows things down for other browsers (hence the branching)
+ if ( document.getElementById( id ) ) {
+ Expr.find.ID = function( match, context, isXML ) {
+ if ( typeof context.getElementById !== "undefined" && !isXML ) {
+ var m = context.getElementById(match[1]);
+
+ return m ?
+ m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ?
+ [m] :
+ undefined :
+ [];
+ }
+ };
+
+ Expr.filter.ID = function( elem, match ) {
+ var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id");
+
+ return elem.nodeType === 1 && node && node.nodeValue === match;
+ };
+ }
+
+ root.removeChild( form );
+
+ // release memory in IE
+ root = form = null;
+})();
+
(function(){
	// Feature-detect two legacy-browser quirks and patch Sizzle's Expr
	// table accordingly: IE includes comment nodes in
	// getElementsByTagName("*"), and some browsers return a normalized
	// (absolute) URL from getAttribute("href").

	// Check to see if the browser returns only elements
	// when doing getElementsByTagName("*")

	// Create a fake element
	var div = document.createElement("div");
	div.appendChild( document.createComment("") );

	// Make sure no comments are found
	if ( div.getElementsByTagName("*").length > 0 ) {
		Expr.find.TAG = function( match, context ) {
			var results = context.getElementsByTagName( match[1] );

			// Filter out possible comments
			if ( match[1] === "*" ) {
				var tmp = [];

				for ( var i = 0; results[i]; i++ ) {
					if ( results[i].nodeType === 1 ) {
						tmp.push( results[i] );
					}
				}

				results = tmp;
			}

			return results;
		};
	}

	// Check to see if an attribute returns normalized href attributes
	div.innerHTML = "<a href='#'></a>";

	if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" &&
			div.firstChild.getAttribute("href") !== "#" ) {

		// The second argument (2) asks IE for the unmodified attribute value
		Expr.attrHandle.href = function( elem ) {
			return elem.getAttribute( "href", 2 );
		};
	}

	// release memory in IE
	div = null;
})();
+
// When querySelectorAll exists, wrap Sizzle so simple queries are
// delegated to the native engine, falling back to the original
// implementation (oldSizzle) whenever qSA cannot be used safely.
if ( document.querySelectorAll ) {
	(function(){
		var oldSizzle = Sizzle,
			div = document.createElement("div"),
			id = "__sizzle__";

		div.innerHTML = "<p class='TEST'></p>";

		// Safari can't handle uppercase or unicode characters when
		// in quirks mode.
		if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) {
			return;
		}

		Sizzle = function( query, context, extra, seed ) {
			context = context || document;

			// Only use querySelectorAll on non-XML documents
			// (ID selectors don't work in non-HTML documents)
			if ( !seed && !Sizzle.isXML(context) ) {
				// See if we find a selector to speed up
				var match = /^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec( query );

				if ( match && (context.nodeType === 1 || context.nodeType === 9) ) {
					// Speed-up: Sizzle("TAG")
					if ( match[1] ) {
						return makeArray( context.getElementsByTagName( query ), extra );

					// Speed-up: Sizzle(".CLASS")
					} else if ( match[2] && Expr.find.CLASS && context.getElementsByClassName ) {
						return makeArray( context.getElementsByClassName( match[2] ), extra );
					}
				}

				if ( context.nodeType === 9 ) {
					// Speed-up: Sizzle("body")
					// The body element only exists once, optimize finding it
					if ( query === "body" && context.body ) {
						return makeArray( [ context.body ], extra );

					// Speed-up: Sizzle("#ID")
					} else if ( match && match[3] ) {
						var elem = context.getElementById( match[3] );

						// Check parentNode to catch when Blackberry 4.6 returns
						// nodes that are no longer in the document #6963
						if ( elem && elem.parentNode ) {
							// Handle the case where IE and Opera return items
							// by name instead of ID
							if ( elem.id === match[3] ) {
								return makeArray( [ elem ], extra );
							}

						} else {
							return makeArray( [], extra );
						}
					}

					try {
						return makeArray( context.querySelectorAll(query), extra );
					} catch(qsaError) {}

				// qSA works strangely on Element-rooted queries
				// We can work around this by specifying an extra ID on the root
				// and working up from there (Thanks to Andrew Dupont for the technique)
				// IE 8 doesn't work on object elements
				} else if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) {
					var oldContext = context,
						old = context.getAttribute( "id" ),
						nid = old || id,
						hasParent = context.parentNode,
						relativeHierarchySelector = /^\s*[+~]/.test( query );

					if ( !old ) {
						context.setAttribute( "id", nid );
					} else {
						// Escape quotes so the id can be embedded in [id='...']
						nid = nid.replace( /'/g, "\\$&" );
					}
					if ( relativeHierarchySelector && hasParent ) {
						context = context.parentNode;
					}

					try {
						if ( !relativeHierarchySelector || hasParent ) {
							return makeArray( context.querySelectorAll( "[id='" + nid + "'] " + query ), extra );
						}

					} catch(pseudoError) {
					} finally {
						// Always restore the element's original (absent) id
						if ( !old ) {
							oldContext.removeAttribute( "id" );
						}
					}
				}
			}

			return oldSizzle(query, context, extra, seed);
		};

		// Copy static members (filter, matches, selectors, ...) onto the wrapper
		for ( var prop in oldSizzle ) {
			Sizzle[ prop ] = oldSizzle[ prop ];
		}

		// release memory in IE
		div = null;
	})();
}
+
(function(){
	// When a native matchesSelector variant exists, expose
	// Sizzle.matchesSelector backed by it, with workarounds for engines
	// that mishandle disconnected nodes or unknown pseudo-selectors.
	var html = document.documentElement,
		matches = html.matchesSelector || html.mozMatchesSelector || html.webkitMatchesSelector || html.msMatchesSelector;

	if ( matches ) {
		// Check to see if it's possible to do matchesSelector
		// on a disconnected node (IE 9 fails this)
		var disconnectedMatch = !matches.call( document.createElement( "div" ), "div" ),
			pseudoWorks = false;

		try {
			// This should fail with an exception
			// Gecko does not error, returns false instead
			matches.call( document.documentElement, "[test!='']:sizzle" );

		} catch( pseudoError ) {
			pseudoWorks = true;
		}

		Sizzle.matchesSelector = function( node, expr ) {
			// Make sure that attribute selectors are quoted
			expr = expr.replace(/\=\s*([^'"\]]*)\s*\]/g, "='$1']");

			if ( !Sizzle.isXML( node ) ) {
				try {
					// Only trust the native engine when it rejects unknown
					// pseudos correctly, or the expression contains none
					if ( pseudoWorks || !Expr.match.PSEUDO.test( expr ) && !/!=/.test( expr ) ) {
						var ret = matches.call( node, expr );

						// IE 9's matchesSelector returns false on disconnected nodes
						if ( ret || !disconnectedMatch ||
							// As well, disconnected nodes are said to be in a document
							// fragment in IE 9, so check for that
							node.document && node.document.nodeType !== 11 ) {
							return ret;
						}
					}
				} catch(e) {}
			}

			// Fall back to a full Sizzle run seeded with just this node
			return Sizzle(expr, null, null, [node]).length > 0;
		};
	}
})();
+
(function(){
	// Feature-test getElementsByClassName; when it is present and
	// trustworthy, register a fast CLASS finder for Sizzle.
	var div = document.createElement("div");

	div.innerHTML = "<div class='test e'></div><div class='test'></div>";

	// Opera can't find a second classname (in 9.6)
	// Also, make sure that getElementsByClassName actually exists
	if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) {
		return;
	}

	// Safari caches class attributes, doesn't catch changes (in 3.2)
	div.lastChild.className = "e";

	if ( div.getElementsByClassName("e").length === 1 ) {
		return;
	}

	// Insert CLASS into the strategy order right after ID
	Expr.order.splice(1, 0, "CLASS");
	Expr.find.CLASS = function( match, context, isXML ) {
		if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) {
			return context.getElementsByClassName(match[1]);
		}
	};

	// release memory in IE
	div = null;
})();
+
// Walk the `dir` chain (e.g. "parentNode") of every entry in checkSet,
// replacing each entry with the first node whose lowercase nodeName
// equals `cur`, or false when the chain is exhausted. Positions are
// memoized on the nodes (sizcache/sizset) so overlapping chains
// short-circuit on later passes. `nodeCheck` is unused here.
function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
	var i, node, found;

	for ( i = 0; i < checkSet.length; i++ ) {
		if ( !checkSet[i] ) {
			continue;
		}

		found = false;
		node = checkSet[i][dir];

		while ( node ) {
			// Result memoized during an earlier walk over this chain
			if ( node.sizcache === doneName ) {
				found = checkSet[node.sizset];
				break;
			}

			// Record our position for later walks (never expando XML nodes)
			if ( node.nodeType === 1 && !isXML ) {
				node.sizcache = doneName;
				node.sizset = i;
			}

			if ( node.nodeName.toLowerCase() === cur ) {
				found = node;
				break;
			}

			node = node[dir];
		}

		checkSet[i] = found;
	}
}
+
// Like dirNodeCheck, but `cur` is either a selector string (evaluated
// through Sizzle.filter, match = the element) or a candidate element to
// compare by identity (match = boolean true). Each checkSet entry is
// replaced with its match result, or false if the `dir` chain runs out.
function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
	var i, node, found;

	for ( i = 0; i < checkSet.length; i++ ) {
		if ( !checkSet[i] ) {
			continue;
		}

		found = false;
		node = checkSet[i][dir];

		while ( node ) {
			// Result memoized during an earlier walk over this chain
			if ( node.sizcache === doneName ) {
				found = checkSet[node.sizset];
				break;
			}

			if ( node.nodeType === 1 ) {
				// Record our position for later walks (never for XML docs)
				if ( !isXML ) {
					node.sizcache = doneName;
					node.sizset = i;
				}

				if ( typeof cur !== "string" ) {
					// Identity comparison against a candidate element
					if ( node === cur ) {
						found = true;
						break;
					}

				} else if ( Sizzle.filter( cur, [node] ).length > 0 ) {
					// Selector match: keep the matching element itself
					found = node;
					break;
				}
			}

			node = node[dir];
		}

		checkSet[i] = found;
	}
}
+
// Element containment check, chosen once at load time from the fastest
// available native support; the final branch is a stub for environments
// with neither API.
if ( document.documentElement.contains ) {
	Sizzle.contains = function( a, b ) {
		// a.contains(a) would be true, so rule out identity first
		return a !== b && (a.contains ? a.contains(b) : true);
	};

} else if ( document.documentElement.compareDocumentPosition ) {
	Sizzle.contains = function( a, b ) {
		// Bit 16 set means b is contained by a
		return !!(a.compareDocumentPosition(b) & 16);
	};

} else {
	Sizzle.contains = function() {
		return false;
	};
}
+
// True when `elem` (a node or a document) belongs to an XML document,
// detected by the root element not being named "HTML".
Sizzle.isXML = function( elem ) {
	// documentElement is verified for cases where it doesn't yet exist
	// (such as loading iframes in IE - #4833)
	var doc = elem ? elem.ownerDocument || elem : 0,
		docElem = doc.documentElement;

	return docElem ? docElem.nodeName !== "HTML" : false;
};
+
// Evaluate a selector containing positional pseudos: strip every PSEUDO
// token out of the selector, run the remainder against each root, then
// apply the collected pseudos to the combined result set.
var posProcess = function( selector, context ) {
	var match,
		tmpSet = [],
		later = "",
		root = context.nodeType ? [context] : context;

	// Position selectors must be done after the filter
	// And so must :not(positional) so we move all PSEUDOs to the end
	while ( (match = Expr.match.PSEUDO.exec( selector )) ) {
		later += match[0];
		selector = selector.replace( Expr.match.PSEUDO, "" );
	}

	// A bare combinator remainder (e.g. "+") needs a universal selector
	selector = Expr.relative[selector] ? selector + "*" : selector;

	for ( var i = 0, l = root.length; i < l; i++ ) {
		Sizzle( selector, root[i], tmpSet );
	}

	return Sizzle.filter( later, tmpSet );
};
+
// EXPOSE
// Wire Sizzle into jQuery's public API surface
jQuery.find = Sizzle;
jQuery.expr = Sizzle.selectors;
jQuery.expr[":"] = jQuery.expr.filters;
jQuery.unique = Sizzle.uniqueSort;
jQuery.text = Sizzle.getText;
jQuery.isXMLDoc = Sizzle.isXML;
jQuery.contains = Sizzle.contains;
+
+
+})();
+
+
// Regexps and shared helpers used by the traversal methods below
var runtil = /Until$/,
	rparentsprev = /^(?:parents|prevUntil|prevAll)/,
	// Note: This RegExp should be improved, or likely pulled from Sizzle
	rmultiselector = /,/,
	// a "simple" selector: after its first char, no pseudo/id/attr/class/comma
	isSimple = /^.[^:#\[\.,]*$/,
	slice = Array.prototype.slice,
	POS = jQuery.expr.match.POS,
	// methods guaranteed to produce a unique set when starting from a unique set
	guaranteedUnique = {
		children: true,
		contents: true,
		next: true,
		prev: true
	};
+
+jQuery.fn.extend({
	// Find all descendants of each element in the set matching `selector`.
	// Non-string selectors are resolved to a jQuery set reduced to nodes
	// contained by this set.
	find: function( selector ) {
		var self = this,
			i, l;

		if ( typeof selector !== "string" ) {
			return jQuery( selector ).filter(function() {
				for ( i = 0, l = self.length; i < l; i++ ) {
					if ( jQuery.contains( self[ i ], this ) ) {
						return true;
					}
				}
			});
		}

		var ret = this.pushStack( "", "find", selector ),
			length, n, r;

		for ( i = 0, l = this.length; i < l; i++ ) {
			length = ret.length;
			// Sizzle appends matches for this[i] directly onto ret
			jQuery.find( selector, this[i], ret );

			if ( i > 0 ) {
				// Make sure that the results are unique
				for ( n = length; n < ret.length; n++ ) {
					for ( r = 0; r < length; r++ ) {
						if ( ret[r] === ret[n] ) {
							ret.splice(n--, 1);
							break;
						}
					}
				}
			}
		}

		return ret;
	},
+
	// Reduce the set to elements that contain at least one node matched
	// by `target` (a selector, element, or jQuery object)
	has: function( target ) {
		var targets = jQuery( target );
		return this.filter(function() {
			for ( var i = 0, l = targets.length; i < l; i++ ) {
				if ( jQuery.contains( this, targets[i] ) ) {
					return true;
				}
			}
		});
	},
+
	// Remove elements matching `selector` from the set
	not: function( selector ) {
		return this.pushStack( winnow(this, selector, false), "not", selector);
	},

	// Keep only elements matching `selector`
	filter: function( selector ) {
		return this.pushStack( winnow(this, selector, true), "filter", selector );
	},

	// True when at least one element in the set matches `selector`;
	// a falsy selector is always false
	is: function( selector ) {
		return !!selector && ( typeof selector === "string" ?
			jQuery.filter( selector, this ).length > 0 :
			this.filter( selector ).length > 0 );
	},
+
	// Walk up from each element collecting the first ancestor-or-self that
	// matches. The array form (used internally for delegation) returns
	// { selector, elem, level } records for every matching ancestor.
	closest: function( selectors, context ) {
		var ret = [], i, l, cur = this[0];

		// Array
		if ( jQuery.isArray( selectors ) ) {
			var match, selector,
				matches = {},
				level = 1;

			if ( cur && selectors.length ) {
				// Pre-resolve positional selectors into jQuery sets; plain
				// selectors are matched per-ancestor in the loop below
				for ( i = 0, l = selectors.length; i < l; i++ ) {
					selector = selectors[i];

					if ( !matches[ selector ] ) {
						matches[ selector ] = POS.test( selector ) ?
							jQuery( selector, context || this.context ) :
							selector;
					}
				}

				while ( cur && cur.ownerDocument && cur !== context ) {
					for ( selector in matches ) {
						match = matches[ selector ];

						if ( match.jquery ? match.index( cur ) > -1 : jQuery( cur ).is( match ) ) {
							ret.push({ selector: selector, elem: cur, level: level });
						}
					}

					cur = cur.parentNode;
					level++;
				}
			}

			return ret;
		}

		// String
		// Positional selectors are resolved once against the context;
		// anything else is tested per-ancestor via matchesSelector
		var pos = POS.test( selectors ) || typeof selectors !== "string" ?
			jQuery( selectors, context || this.context ) :
			0;

		for ( i = 0, l = this.length; i < l; i++ ) {
			cur = this[i];

			while ( cur ) {
				if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) {
					ret.push( cur );
					break;

				} else {
					cur = cur.parentNode;
					if ( !cur || !cur.ownerDocument || cur === context || cur.nodeType === 11 ) {
						break;
					}
				}
			}
		}

		ret = ret.length > 1 ? jQuery.unique( ret ) : ret;

		return this.pushStack( ret, "closest", selectors );
	},
+
	// Determine the position of an element within
	// the matched set of elements
	index: function( elem ) {

		// No argument, return index in parent
		if ( !elem ) {
			return ( this[0] && this[0].parentNode ) ? this.prevAll().length : -1;
		}

		// index in selector
		if ( typeof elem === "string" ) {
			return jQuery.inArray( this[0], jQuery( elem ) );
		}

		// Locate the position of the desired element
		return jQuery.inArray(
			// If it receives a jQuery object, the first element is used
			elem.jquery ? elem[0] : elem, this );
	},

	// Merge `selector` (selector string, node(s), or jQuery set) into the
	// current set; de-duplicated unless disconnected nodes are involved
	add: function( selector, context ) {
		var set = typeof selector === "string" ?
			jQuery( selector, context ) :
			jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ),
			all = jQuery.merge( this.get(), set );

		return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ?
			all :
			jQuery.unique( all ) );
	},

	// Add the previous set on the stack back into the current one
	andSelf: function() {
		return this.add( this.prevObject );
	}
+});
+
// A painfully simple check to see if an element is disconnected
// from a document (should be improved, where feasible).
// A node counts as disconnected when it is falsy, has no parent, or its
// parent is a document fragment (nodeType 11).
function isDisconnected( node ) {
	if ( !node || !node.parentNode ) {
		return true;
	}
	return node.parentNode.nodeType === 11;
}
+
// Generate the relationship traversal methods (parent, parents, next,
// siblings, ...) on jQuery.fn from these per-element functions.
jQuery.each({
	parent: function( elem ) {
		var parent = elem.parentNode;
		// Document fragments are not reported as parents
		return parent && parent.nodeType !== 11 ? parent : null;
	},
	parents: function( elem ) {
		return jQuery.dir( elem, "parentNode" );
	},
	parentsUntil: function( elem, i, until ) {
		return jQuery.dir( elem, "parentNode", until );
	},
	next: function( elem ) {
		return jQuery.nth( elem, 2, "nextSibling" );
	},
	prev: function( elem ) {
		return jQuery.nth( elem, 2, "previousSibling" );
	},
	nextAll: function( elem ) {
		return jQuery.dir( elem, "nextSibling" );
	},
	prevAll: function( elem ) {
		return jQuery.dir( elem, "previousSibling" );
	},
	nextUntil: function( elem, i, until ) {
		return jQuery.dir( elem, "nextSibling", until );
	},
	prevUntil: function( elem, i, until ) {
		return jQuery.dir( elem, "previousSibling", until );
	},
	siblings: function( elem ) {
		return jQuery.sibling( elem.parentNode.firstChild, elem );
	},
	children: function( elem ) {
		return jQuery.sibling( elem.firstChild );
	},
	contents: function( elem ) {
		return jQuery.nodeName( elem, "iframe" ) ?
			elem.contentDocument || elem.contentWindow.document :
			jQuery.makeArray( elem.childNodes );
	}
}, function( name, fn ) {
	jQuery.fn[ name ] = function( until, selector ) {
		var ret = jQuery.map( this, fn, until ),
			// The variable 'args' was introduced in
			// https://github.com/jquery/jquery/commit/52a0238
			// to work around a bug in Chrome 10 (Dev) and should be removed when the bug is fixed.
			// http://code.google.com/p/v8/issues/detail?id=1050
			args = slice.call(arguments);

		// Only the *Until methods take (until, selector); otherwise the
		// first argument is the selector itself
		if ( !runtil.test( name ) ) {
			selector = until;
		}

		if ( selector && typeof selector === "string" ) {
			ret = jQuery.filter( selector, ret );
		}

		ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret;

		// parents/prev* collect in reverse document order; flip them back
		if ( (this.length > 1 || rmultiselector.test( selector )) && rparentsprev.test( name ) ) {
			ret = ret.reverse();
		}

		return this.pushStack( ret, name, args.join(",") );
	};
});
+
jQuery.extend({
	// Filter an element array by a selector expression, optionally negated
	filter: function( expr, elems, not ) {
		if ( not ) {
			expr = ":not(" + expr + ")";
		}

		// Single-element sets take the cheaper matchesSelector path
		return elems.length === 1 ?
			jQuery.find.matchesSelector(elems[0], expr) ? [ elems[0] ] : [] :
			jQuery.find.matches(expr, elems);
	},

	// Collect element nodes along `dir` (e.g. "parentNode") starting after
	// `elem`, stopping at the document or an element matching `until`
	dir: function( elem, dir, until ) {
		var matched = [],
			cur = elem[ dir ];

		while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) {
			if ( cur.nodeType === 1 ) {
				matched.push( cur );
			}
			cur = cur[dir];
		}
		return matched;
	},

	// Return the `result`-th element node (1-based, default 1) reachable
	// from `cur` by repeatedly following `dir`
	nth: function( cur, result, dir, elem ) {
		result = result || 1;
		var num = 0;

		for ( ; cur; cur = cur[dir] ) {
			if ( cur.nodeType === 1 && ++num === result ) {
				break;
			}
		}

		return cur;
	},

	// All element siblings from `n` onward, excluding `elem` itself
	sibling: function( n, elem ) {
		var r = [];

		for ( ; n; n = n.nextSibling ) {
			if ( n.nodeType === 1 && n !== elem ) {
				r.push( n );
			}
		}

		return r;
	}
});
+
// Implement the identical functionality for filter and not
// `keep` selects the polarity: true keeps matches (filter), false drops
// them (not). `qualifier` may be a function, a node, a selector string,
// or an array-like of nodes.
function winnow( elements, qualifier, keep ) {

	// Can't pass null or undefined to indexOf in Firefox 4
	// Set to 0 to skip string check
	qualifier = qualifier || 0;

	if ( jQuery.isFunction( qualifier ) ) {
		return jQuery.grep(elements, function( elem, i ) {
			var retVal = !!qualifier.call( elem, i, elem );
			return retVal === keep;
		});

	} else if ( qualifier.nodeType ) {
		return jQuery.grep(elements, function( elem, i ) {
			return (elem === qualifier) === keep;
		});

	} else if ( typeof qualifier === "string" ) {
		// Only element nodes can match a selector
		var filtered = jQuery.grep(elements, function( elem ) {
			return elem.nodeType === 1;
		});

		if ( isSimple.test( qualifier ) ) {
			return jQuery.filter(qualifier, filtered, !keep);
		} else {
			// Complex selector: resolve the matches, then fall through to
			// the inArray comparison below
			qualifier = jQuery.filter( qualifier, filtered );
		}
	}

	return jQuery.grep(elements, function( elem, i ) {
		return (jQuery.inArray( elem, qualifier ) >= 0) === keep;
	});
}
+
+
+
+
// Regexps and wrapper tables used by the DOM manipulation methods
var rinlinejQuery = / jQuery\d+="(?:\d+|null)"/g,
	rleadingWhitespace = /^\s+/,
	// self-closing "XHTML"-style tags that must be expanded for innerHTML
	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,
	rtagName = /<([\w:]+)/,
	rtbody = /<tbody/i,
	// a tag start or an HTML entity — i.e. the string needs real parsing
	rhtml = /<|&#?\w+;/,
	// content that must never be cached as a reusable fragment
	rnocache = /<(?:script|object|embed|option|style)/i,
	// checked="checked" or checked
	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
	rscriptType = /\/(java|ecma)script/i,
	rcleanScript = /^\s*<!(?:\[CDATA\[|\-\-)/,
	// tag -> [unwrapDepth, openingHTML, closingHTML]: some elements can
	// only be created via innerHTML inside a specific parent structure
	wrapMap = {
		option: [ 1, "<select multiple='multiple'>", "</select>" ],
		legend: [ 1, "<fieldset>", "</fieldset>" ],
		thead: [ 1, "<table>", "</table>" ],
		tr: [ 2, "<table><tbody>", "</tbody></table>" ],
		td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
		col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
		area: [ 1, "<map>", "</map>" ],
		_default: [ 0, "", "" ]
	};
+
// <optgroup> wraps like <option>; table sections share <thead>'s
// wrapping; <th> wraps like <td>
wrapMap.optgroup = wrapMap.option;
wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
wrapMap.th = wrapMap.td;

// IE can't serialize <link> and <script> tags normally
if ( !jQuery.support.htmlSerialize ) {
	wrapMap._default = [ 1, "div<div>", "</div>" ];
}
+
+jQuery.fn.extend({
	// Get the combined text of the set, or set the text content of every
	// element; function values are invoked per-element with the old text
	text: function( text ) {
		if ( jQuery.isFunction(text) ) {
			return this.each(function(i) {
				var self = jQuery( this );

				self.text( text.call(this, i, self.text()) );
			});
		}

		if ( typeof text !== "object" && text !== undefined ) {
			// Inserting a text node keeps markup in `text` from being parsed
			return this.empty().append( (this[0] && this[0].ownerDocument || document).createTextNode( text ) );
		}

		return jQuery.text( this );
	},
+
	// Wrap a single structure around ALL elements in the set
	wrapAll: function( html ) {
		if ( jQuery.isFunction( html ) ) {
			return this.each(function(i) {
				jQuery(this).wrapAll( html.call(this, i) );
			});
		}

		if ( this[0] ) {
			// The elements to wrap the target around
			var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true);

			if ( this[0].parentNode ) {
				wrap.insertBefore( this[0] );
			}

			// Descend to the innermost element of the wrapper before
			// moving the set inside it
			wrap.map(function() {
				var elem = this;

				while ( elem.firstChild && elem.firstChild.nodeType === 1 ) {
					elem = elem.firstChild;
				}

				return elem;
			}).append( this );
		}

		return this;
	},
+
	// Wrap a structure around the CONTENTS of each element
	wrapInner: function( html ) {
		if ( jQuery.isFunction( html ) ) {
			return this.each(function(i) {
				jQuery(this).wrapInner( html.call(this, i) );
			});
		}

		return this.each(function() {
			var self = jQuery( this ),
				contents = self.contents();

			if ( contents.length ) {
				contents.wrapAll( html );

			} else {
				// Empty element: the wrapper simply becomes its content
				self.append( html );
			}
		});
	},

	// Wrap a structure around each element individually
	wrap: function( html ) {
		return this.each(function() {
			jQuery( this ).wrapAll( html );
		});
	},

	// Remove each element's parent (never <body>), keeping the children
	unwrap: function() {
		return this.parent().each(function() {
			if ( !jQuery.nodeName( this, "body" ) ) {
				jQuery( this ).replaceWith( this.childNodes );
			}
		}).end();
	},
+
	// Insert content at the end of each element in the set
	append: function() {
		return this.domManip(arguments, true, function( elem ) {
			if ( this.nodeType === 1 ) {
				this.appendChild( elem );
			}
		});
	},

	// Insert content at the beginning of each element in the set
	prepend: function() {
		return this.domManip(arguments, true, function( elem ) {
			if ( this.nodeType === 1 ) {
				this.insertBefore( elem, this.firstChild );
			}
		});
	},
+
	// Insert content before each element; for a detached set, the new
	// content is prepended to the set itself instead
	before: function() {
		if ( this[0] && this[0].parentNode ) {
			return this.domManip(arguments, false, function( elem ) {
				this.parentNode.insertBefore( elem, this );
			});
		} else if ( arguments.length ) {
			var set = jQuery(arguments[0]);
			set.push.apply( set, this.toArray() );
			return this.pushStack( set, "before", arguments );
		}
	},

	// Insert content after each element; for a detached set, the new
	// content is appended to the set itself instead
	after: function() {
		if ( this[0] && this[0].parentNode ) {
			return this.domManip(arguments, false, function( elem ) {
				this.parentNode.insertBefore( elem, this.nextSibling );
			});
		} else if ( arguments.length ) {
			var set = this.pushStack( this, "after", arguments );
			set.push.apply( set, jQuery(arguments[0]).toArray() );
			return set;
		}
	},
+
	// keepData is for internal use only--do not document
	// Remove matched elements (optionally filtered by `selector`) from the
	// DOM, clearing their data/events first unless keepData is set
	remove: function( selector, keepData ) {
		for ( var i = 0, elem; (elem = this[i]) != null; i++ ) {
			if ( !selector || jQuery.filter( selector, [ elem ] ).length ) {
				if ( !keepData && elem.nodeType === 1 ) {
					// Purge data/events for the element and its whole subtree
					jQuery.cleanData( elem.getElementsByTagName("*") );
					jQuery.cleanData( [ elem ] );
				}

				if ( elem.parentNode ) {
					elem.parentNode.removeChild( elem );
				}
			}
		}

		return this;
	},

	// Remove all child nodes of each element in the set
	empty: function() {
		for ( var i = 0, elem; (elem = this[i]) != null; i++ ) {
			// Remove element nodes and prevent memory leaks
			if ( elem.nodeType === 1 ) {
				jQuery.cleanData( elem.getElementsByTagName("*") );
			}

			// Remove any remaining nodes
			while ( elem.firstChild ) {
				elem.removeChild( elem.firstChild );
			}
		}

		return this;
	},
+
	// Clone each element in the set. dataAndEvents (default false) copies
	// data/events to the clones; deepDataAndEvents defaults to the same
	// value and controls copying for all descendants.
	clone: function( dataAndEvents, deepDataAndEvents ) {
		dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
		deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;

		return this.map( function () {
			return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
		});
	},
+
	// Get the first element's innerHTML (with jQuery expando attributes
	// stripped), or set the HTML of every element in the set
	html: function( value ) {
		if ( value === undefined ) {
			return this[0] && this[0].nodeType === 1 ?
				this[0].innerHTML.replace(rinlinejQuery, "") :
				null;

		// See if we can take a shortcut and just use innerHTML
		} else if ( typeof value === "string" && !rnocache.test( value ) &&
			(jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value )) &&
			!wrapMap[ (rtagName.exec( value ) || ["", ""])[1].toLowerCase() ] ) {

			// Expand self-closing "XHTML"-style tags before parsing
			value = value.replace(rxhtmlTag, "<$1></$2>");

			try {
				for ( var i = 0, l = this.length; i < l; i++ ) {
					// Remove element nodes and prevent memory leaks
					if ( this[i].nodeType === 1 ) {
						jQuery.cleanData( this[i].getElementsByTagName("*") );
						this[i].innerHTML = value;
					}
				}

			// If using innerHTML throws an exception, use the fallback method
			} catch(e) {
				this.empty().append( value );
			}

		} else if ( jQuery.isFunction( value ) ) {
			this.each(function(i){
				var self = jQuery( this );

				self.html( value.call(this, i, self.html()) );
			});

		} else {
			this.empty().append( value );
		}

		return this;
	},
+
	// Replace each element in the set with the given content
	replaceWith: function( value ) {
		if ( this[0] && this[0].parentNode ) {
			// Make sure that the elements are removed from the DOM before they are inserted
			// this can help fix replacing a parent with child elements
			if ( jQuery.isFunction( value ) ) {
				return this.each(function(i) {
					var self = jQuery(this), old = self.html();
					self.replaceWith( value.call( this, i, old ) );
				});
			}

			if ( typeof value !== "string" ) {
				value = jQuery( value ).detach();
			}

			return this.each(function() {
				// Remember our position, remove ourselves, then re-insert
				// the replacement at that position
				var next = this.nextSibling,
					parent = this.parentNode;

				jQuery( this ).remove();

				if ( next ) {
					jQuery(next).before( value );
				} else {
					jQuery(parent).append( value );
				}
			});
		} else {
			return this.length ?
				this.pushStack( jQuery(jQuery.isFunction(value) ? value() : value), "replaceWith", value ) :
				this;
		}
	},

	// Like remove(), but keeps data/events so the nodes can be re-inserted
	detach: function( selector ) {
		return this.remove( selector, true );
	},
+
	// Core engine behind append/prepend/before/after: builds a document
	// fragment from `args` and hands it (cloned when the target set has
	// more than one element) to `callback` once per element. `table`
	// enables <tr>-into-<tbody> redirection via root().
	domManip: function( args, table, callback ) {
		var results, first, fragment, parent,
			value = args[0],
			scripts = [];

		// We can't cloneNode fragments that contain checked, in WebKit
		if ( !jQuery.support.checkClone && arguments.length === 3 && typeof value === "string" && rchecked.test( value ) ) {
			return this.each(function() {
				jQuery(this).domManip( args, table, callback, true );
			});
		}

		if ( jQuery.isFunction(value) ) {
			return this.each(function(i) {
				var self = jQuery(this);
				args[0] = value.call(this, i, table ? self.html() : undefined);
				self.domManip( args, table, callback );
			});
		}

		if ( this[0] ) {
			parent = value && value.parentNode;

			// If we're in a fragment, just use that instead of building a new one
			if ( jQuery.support.parentNode && parent && parent.nodeType === 11 && parent.childNodes.length === this.length ) {
				results = { fragment: parent };

			} else {
				results = jQuery.buildFragment( args, this, scripts );
			}

			fragment = results.fragment;

			if ( fragment.childNodes.length === 1 ) {
				first = fragment = fragment.firstChild;
			} else {
				first = fragment.firstChild;
			}

			if ( first ) {
				table = table && jQuery.nodeName( first, "tr" );

				for ( var i = 0, l = this.length, lastIndex = l - 1; i < l; i++ ) {
					callback.call(
						table ?
							root(this[i], first) :
							this[i],
						// Make sure that we do not leak memory by inadvertently discarding
						// the original fragment (which might have attached data) instead of
						// using it; in addition, use the original fragment object for the last
						// item instead of first because it can end up being emptied incorrectly
						// in certain situations (Bug #8070).
						// Fragments from the fragment cache must always be cloned and never used
						// in place.
						results.cacheable || (l > 1 && i < lastIndex) ?
							jQuery.clone( fragment, true, true ) :
							fragment
					);
				}
			}

			if ( scripts.length ) {
				// Evaluate any <script> elements collected while cleaning
				jQuery.each( scripts, evalScript );
			}
		}

		return this;
	}
+});
+
// For a <table> target, redirect insertions into its <tbody>, creating
// one if necessary; any other element is returned unchanged.
// Note: the `cur` parameter is not used by this implementation.
function root( elem, cur ) {
	return jQuery.nodeName(elem, "table") ?
		(elem.getElementsByTagName("tbody")[0] ||
		elem.appendChild(elem.ownerDocument.createElement("tbody"))) :
		elem;
}
+
// Copy jQuery data and bound event handlers from `src` onto its clone
// `dest`; no-op when src carries no jQuery data or dest is not an element.
function cloneCopyEvent( src, dest ) {

	if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) {
		return;
	}

	var internalKey = jQuery.expando,
		oldData = jQuery.data( src ),
		curData = jQuery.data( dest, oldData );

	// Switch to use the internal data object, if it exists, for the next
	// stage of data copying
	if ( (oldData = oldData[ internalKey ]) ) {
		var events = oldData.events;
		curData = curData[ internalKey ] = jQuery.extend({}, oldData);

		if ( events ) {
			// The clone must not share the source's dispatch handle or
			// registry, so re-bind every handler (keeping namespace + data)
			delete curData.handle;
			curData.events = {};

			for ( var type in events ) {
				for ( var i = 0, l = events[ type ].length; i < l; i++ ) {
					jQuery.event.add( dest, type + ( events[ type ][ i ].namespace ? "." : "" ) + events[ type ][ i ].namespace, events[ type ][ i ], events[ type ][ i ].data );
				}
			}
		}
	}
}
+
// Repair IE6-8 cloneNode defects on a cloned element: stale attachEvent
// handlers, lost checked/selected/defaultValue state, classid <object>s,
// and the copied jQuery expando attribute.
function cloneFixAttributes( src, dest ) {
	var nodeName;

	// We do not need to do anything for non-Elements
	if ( dest.nodeType !== 1 ) {
		return;
	}

	// clearAttributes removes the attributes, which we don't want,
	// but also removes the attachEvent events, which we *do* want
	if ( dest.clearAttributes ) {
		dest.clearAttributes();
	}

	// mergeAttributes, in contrast, only merges back on the
	// original attributes, not the events
	if ( dest.mergeAttributes ) {
		dest.mergeAttributes( src );
	}

	nodeName = dest.nodeName.toLowerCase();

	// IE6-8 fail to clone children inside object elements that use
	// the proprietary classid attribute value (rather than the type
	// attribute) to identify the type of content to display
	if ( nodeName === "object" ) {
		dest.outerHTML = src.outerHTML;

	} else if ( nodeName === "input" && (src.type === "checkbox" || src.type === "radio") ) {
		// IE6-8 fails to persist the checked state of a cloned checkbox
		// or radio button. Worse, IE6-7 fail to give the cloned element
		// a checked appearance if the defaultChecked value isn't also set
		if ( src.checked ) {
			dest.defaultChecked = dest.checked = src.checked;
		}

		// IE6-7 get confused and end up setting the value of a cloned
		// checkbox/radio button to an empty string instead of "on"
		if ( dest.value !== src.value ) {
			dest.value = src.value;
		}

	// IE6-8 fails to return the selected option to the default selected
	// state when cloning options
	} else if ( nodeName === "option" ) {
		dest.selected = src.defaultSelected;

	// IE6-8 fails to set the defaultValue to the correct value when
	// cloning other types of input fields
	} else if ( nodeName === "input" || nodeName === "textarea" ) {
		dest.defaultValue = src.defaultValue;
	}

	// Event data gets referenced instead of copied if the expando
	// gets copied too
	dest.removeAttribute( jQuery.expando );
}
+
// Build (or fetch from cache) a document fragment containing the parsed
// content of `args`. `nodes` supplies the context document; `scripts`
// collects <script> elements for later evaluation. Returns
// { fragment, cacheable }; cacheable fragments must be cloned by callers.
jQuery.buildFragment = function( args, nodes, scripts ) {
	var fragment, cacheable, cacheresults, doc;

	// nodes may contain either an explicit document object,
	// a jQuery collection or context object.
	// If nodes[0] contains a valid object to assign to doc
	if ( nodes && nodes[0] ) {
		doc = nodes[0].ownerDocument || nodes[0];
	}

	// Ensure that an attr object doesn't incorrectly stand in as a document object
	// Chrome and Firefox seem to allow this to occur and will throw exception
	// Fixes #8950
	// Also guard `doc` itself: when no context is supplied at all it is
	// still undefined here and the property access would throw.
	if ( !doc || !doc.createDocumentFragment ) {
		doc = document;
	}

	// Only cache "small" (1/2 KB) HTML strings that are associated with the main document
	// Cloning options loses the selected state, so don't cache them
	// IE 6 doesn't like it when you put <object> or <embed> elements in a fragment
	// Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache
	if ( args.length === 1 && typeof args[0] === "string" && args[0].length < 512 && doc === document &&
		args[0].charAt(0) === "<" && !rnocache.test( args[0] ) && (jQuery.support.checkClone || !rchecked.test( args[0] )) ) {

		cacheable = true;

		// A cached value of 1 means "seen once, fragment not stored yet"
		cacheresults = jQuery.fragments[ args[0] ];
		if ( cacheresults && cacheresults !== 1 ) {
			fragment = cacheresults;
		}
	}

	if ( !fragment ) {
		fragment = doc.createDocumentFragment();
		jQuery.clean( args, doc, fragment, scripts );
	}

	if ( cacheable ) {
		// Store the fragment only on the second sighting of this string
		jQuery.fragments[ args[0] ] = cacheresults ? fragment : 1;
	}

	return { fragment: fragment, cacheable: cacheable };
};

// Cache of string -> fragment (or the sentinel 1) used above
jQuery.fragments = {};
+
// Generate appendTo/prependTo/insertBefore/insertAfter/replaceAll as the
// argument-reversed counterparts of append/prepend/before/after/replaceWith
jQuery.each({
	appendTo: "append",
	prependTo: "prepend",
	insertBefore: "before",
	insertAfter: "after",
	replaceAll: "replaceWith"
}, function( name, original ) {
	jQuery.fn[ name ] = function( selector ) {
		var ret = [],
			insert = jQuery( selector ),
			parent = this.length === 1 && this[0].parentNode;

		// Fast path: one fragment-rooted element into a single target
		if ( parent && parent.nodeType === 11 && parent.childNodes.length === 1 && insert.length === 1 ) {
			insert[ original ]( this[0] );
			return this;

		} else {
			// Every target after the first receives a clone of the set
			for ( var i = 0, l = insert.length; i < l; i++ ) {
				var elems = (i > 0 ? this.clone(true) : this).get();
				jQuery( insert[i] )[ original ]( elems );
				ret = ret.concat( elems );
			}

			return this.pushStack( ret, name, insert.selector );
		}
	};
});
+
// Collect every descendant of `elem` matching "*", preferring
// getElementsByTagName, then querySelectorAll; nodes that support
// neither yield an empty array.
function getAll( elem ) {
	if ( "getElementsByTagName" in elem ) {
		return elem.getElementsByTagName( "*" );
	}

	if ( "querySelectorAll" in elem ) {
		return elem.querySelectorAll( "*" );
	}

	return [];
}
+
// Used in clean, fixes the defaultChecked property
// For checkbox/radio inputs, mirror the current checked state into
// defaultChecked; all other elements are left untouched.
function fixDefaultChecked( elem ) {
	var type = elem.type;

	if ( type === "radio" || type === "checkbox" ) {
		elem.defaultChecked = elem.checked;
	}
}
// Finds all inputs and passes them to fixDefaultChecked
// If `elem` is itself an input it is fixed directly; otherwise its
// <input> descendants are processed.
function findInputs( elem ) {
	if ( jQuery.nodeName( elem, "input" ) ) {
		fixDefaultChecked( elem );
	} else if ( "getElementsByTagName" in elem ) {
		jQuery.grep( elem.getElementsByTagName("input"), fixDefaultChecked );
	}
}
+
+jQuery.extend({
	// Deep-clone a DOM node with cloneNode, repairing IE/WebKit cloning
	// bugs across the clone tree and optionally copying jQuery
	// data/events onto the clone (and, deeply, its descendants)
	clone: function( elem, dataAndEvents, deepDataAndEvents ) {
		var clone = elem.cloneNode(true),
			srcElements,
			destElements,
			i;

		if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) &&
			(elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) {
			// IE copies events bound via attachEvent when using cloneNode.
			// Calling detachEvent on the clone will also remove the events
			// from the original. In order to get around this, we use some
			// proprietary methods to clear the events. Thanks to MooTools
			// guys for this hotness.

			cloneFixAttributes( elem, clone );

			// Using Sizzle here is crazy slow, so we use getElementsByTagName
			// instead
			srcElements = getAll( elem );
			destElements = getAll( clone );

			// Weird iteration because IE will replace the length property
			// with an element if you are cloning the body and one of the
			// elements on the page has a name or id of "length"
			for ( i = 0; srcElements[i]; ++i ) {
				// Ensure that the destination node is not null; Fixes #9587
				if ( destElements[i] ) {
					cloneFixAttributes( srcElements[i], destElements[i] );
				}
			}
		}

		// Copy the events from the original to the clone
		if ( dataAndEvents ) {
			cloneCopyEvent( elem, clone );

			if ( deepDataAndEvents ) {
				srcElements = getAll( elem );
				destElements = getAll( clone );

				for ( i = 0; srcElements[i]; ++i ) {
					cloneCopyEvent( srcElements[i], destElements[i] );
				}
			}
		}

		// release the element references (helps IE garbage collection)
		srcElements = destElements = null;

		// Return the cloned set
		return clone;
	},
+
+ clean: function( elems, context, fragment, scripts ) {
+ var checkScriptType;
+
+ context = context || document;
+
+ // !context.createElement fails in IE with an error but returns typeof 'object'
+ if ( typeof context.createElement === "undefined" ) {
+ context = context.ownerDocument || context[0] && context[0].ownerDocument || document;
+ }
+
+ var ret = [], j;
+
+ for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) {
+ if ( typeof elem === "number" ) {
+ elem += "";
+ }
+
+ if ( !elem ) {
+ continue;
+ }
+
+ // Convert html string into DOM nodes
+ if ( typeof elem === "string" ) {
+ if ( !rhtml.test( elem ) ) {
+ elem = context.createTextNode( elem );
+ } else {
+ // Fix "XHTML"-style tags in all browsers
+ elem = elem.replace(rxhtmlTag, "<$1></$2>");
+
+ // Trim whitespace, otherwise indexOf won't work as expected
+ var tag = (rtagName.exec( elem ) || ["", ""])[1].toLowerCase(),
+ wrap = wrapMap[ tag ] || wrapMap._default,
+ depth = wrap[0],
+ div = context.createElement("div");
+
+ // Go to html and back, then peel off extra wrappers
+ div.innerHTML = wrap[1] + elem + wrap[2];
+
+ // Move to the right depth
+ while ( depth-- ) {
+ div = div.lastChild;
+ }
+
+ // Remove IE's autoinserted <tbody> from table fragments
+ if ( !jQuery.support.tbody ) {
+
+ // String was a <table>, *may* have spurious <tbody>
+ var hasBody = rtbody.test(elem),
+ tbody = tag === "table" && !hasBody ?
+ div.firstChild && div.firstChild.childNodes :
+
+ // String was a bare <thead> or <tfoot>
+ wrap[1] === "<table>" && !hasBody ?
+ div.childNodes :
+ [];
+
+ for ( j = tbody.length - 1; j >= 0 ; --j ) {
+ if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) {
+ tbody[ j ].parentNode.removeChild( tbody[ j ] );
+ }
+ }
+ }
+
+ // IE completely kills leading whitespace when innerHTML is used
+ if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) {
+ div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild );
+ }
+
+ elem = div.childNodes;
+ }
+ }
+
+ // Resets defaultChecked for any radios and checkboxes
+ // about to be appended to the DOM in IE 6/7 (#8060)
+ var len;
+ if ( !jQuery.support.appendChecked ) {
+ if ( elem[0] && typeof (len = elem.length) === "number" ) {
+ for ( j = 0; j < len; j++ ) {
+ findInputs( elem[j] );
+ }
+ } else {
+ findInputs( elem );
+ }
+ }
+
+ if ( elem.nodeType ) {
+ ret.push( elem );
+ } else {
+ ret = jQuery.merge( ret, elem );
+ }
+ }
+
+ if ( fragment ) {
+ checkScriptType = function( elem ) {
+ return !elem.type || rscriptType.test( elem.type );
+ };
+ for ( i = 0; ret[i]; i++ ) {
+ if ( scripts && jQuery.nodeName( ret[i], "script" ) && (!ret[i].type || ret[i].type.toLowerCase() === "text/javascript") ) {
+ scripts.push( ret[i].parentNode ? ret[i].parentNode.removeChild( ret[i] ) : ret[i] );
+
+ } else {
+ if ( ret[i].nodeType === 1 ) {
+ var jsTags = jQuery.grep( ret[i].getElementsByTagName( "script" ), checkScriptType );
+
+ ret.splice.apply( ret, [i + 1, 0].concat( jsTags ) );
+ }
+ fragment.appendChild( ret[i] );
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ cleanData: function( elems ) {
+ var data, id, cache = jQuery.cache, internalKey = jQuery.expando, special = jQuery.event.special,
+ deleteExpando = jQuery.support.deleteExpando;
+
+ for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) {
+ if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
+ continue;
+ }
+
+ id = elem[ jQuery.expando ];
+
+ if ( id ) {
+ data = cache[ id ] && cache[ id ][ internalKey ];
+
+ if ( data && data.events ) {
+ for ( var type in data.events ) {
+ if ( special[ type ] ) {
+ jQuery.event.remove( elem, type );
+
+ // This is a shortcut to avoid jQuery.event.remove's overhead
+ } else {
+ jQuery.removeEvent( elem, type, data.handle );
+ }
+ }
+
+ // Null the DOM reference to avoid IE6/7/8 leak (#7054)
+ if ( data.handle ) {
+ data.handle.elem = null;
+ }
+ }
+
+ if ( deleteExpando ) {
+ delete elem[ jQuery.expando ];
+
+ } else if ( elem.removeAttribute ) {
+ elem.removeAttribute( jQuery.expando );
+ }
+
+ delete cache[ id ];
+ }
+ }
+ }
+});
+
+// Execute a <script> element — external sources are fetched synchronously
+// (to preserve execution order), inline sources are globally eval'd — and
+// then remove the element from the DOM.
+function evalScript( i, elem ) {
+ if ( elem.src ) {
+ jQuery.ajax({
+ url: elem.src,
+ async: false,
+ dataType: "script"
+ });
+ } else {
+ // Neutralize CDATA/comment wrappers (rcleanScript) before evaluating.
+ jQuery.globalEval( ( elem.text || elem.textContent || elem.innerHTML || "" ).replace( rcleanScript, "/*$0*/" ) );
+ }
+
+ if ( elem.parentNode ) {
+ elem.parentNode.removeChild( elem );
+ }
+}
+
+
+
+
+// Shared regexps and state for the css module.
+var ralpha = /alpha\([^)]*\)/i,
+ ropacity = /opacity=([^)]*)/,
+ // fixed for IE9, see #8346
+ rupper = /([A-Z]|^ms)/g,
+ rnumpx = /^-?\d+(?:px)?$/i,
+ rnum = /^-?\d/,
+ rrelNum = /^([\-+])=([\-+.\de]+)/,
+
+ // Styles applied temporarily by swap() to measure hidden elements.
+ cssShow = { position: "absolute", visibility: "hidden", display: "block" },
+ cssWidth = [ "Left", "Right" ],
+ cssHeight = [ "Top", "Bottom" ],
+ curCSS,
+
+ getComputedStyle,
+ currentStyle;
+
+// Get (one argument) or set (two arguments) css properties on the matched
+// set, delegating per-element work to jQuery.style / jQuery.css.
+jQuery.fn.css = function( name, value ) {
+ // Setting 'undefined' is a no-op
+ if ( arguments.length === 2 && value === undefined ) {
+ return this;
+ }
+
+ return jQuery.access( this, name, value, true, function( elem, name, value ) {
+ return value !== undefined ?
+ jQuery.style( elem, name, value ) :
+ jQuery.css( elem, name );
+ });
+};
+
+jQuery.extend({
+ // Add in style property hooks for overriding the default
+ // behavior of getting and setting a style property
+ cssHooks: {
+ opacity: {
+ get: function( elem, computed ) {
+ if ( computed ) {
+ // We should always get a number back from opacity
+ var ret = curCSS( elem, "opacity", "opacity" );
+ return ret === "" ? "1" : ret;
+
+ } else {
+ return elem.style.opacity;
+ }
+ }
+ }
+ },
+
+ // Exclude the following css properties to add px
+ cssNumber: {
+ "fillOpacity": true,
+ "fontWeight": true,
+ "lineHeight": true,
+ "opacity": true,
+ "orphans": true,
+ "widows": true,
+ "zIndex": true,
+ "zoom": true
+ },
+
+ // Add in properties whose names you wish to fix before
+ // setting or getting the value
+ cssProps: {
+ // normalize float css property
+ "float": jQuery.support.cssFloat ? "cssFloat" : "styleFloat"
+ },
+
+ // Get and set the style property on a DOM Node.
+ // `extra` is forwarded to the property's cssHook getter.
+ style: function( elem, name, value, extra ) {
+ // Don't set styles on text and comment nodes
+ if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
+ return;
+ }
+
+ // Make sure that we're working with the right name
+ var ret, type, origName = jQuery.camelCase( name ),
+ style = elem.style, hooks = jQuery.cssHooks[ origName ];
+
+ name = jQuery.cssProps[ origName ] || origName;
+
+ // Check if we're setting a value
+ if ( value !== undefined ) {
+ type = typeof value;
+
+ // convert relative number strings (+= or -=) to relative numbers. #7345
+ if ( type === "string" && (ret = rrelNum.exec( value )) ) {
+ value = ( +( ret[1] + 1) * +ret[2] ) + parseFloat( jQuery.css( elem, name ) );
+ // Fixes bug #9237
+ type = "number";
+ }
+
+ // Make sure that NaN and null values aren't set. See: #7116
+ if ( value == null || type === "number" && isNaN( value ) ) {
+ return;
+ }
+
+ // If a number was passed in, add 'px' to the (except for certain CSS properties)
+ if ( type === "number" && !jQuery.cssNumber[ origName ] ) {
+ value += "px";
+ }
+
+ // If a hook was provided, use that value, otherwise just set the specified value
+ if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value )) !== undefined ) {
+ // Wrapped to prevent IE from throwing errors when 'invalid' values are provided
+ // Fixes bug #5509
+ try {
+ style[ name ] = value;
+ } catch(e) {}
+ }
+
+ } else {
+ // If a hook was provided get the non-computed value from there
+ if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) {
+ return ret;
+ }
+
+ // Otherwise just get the value from the style object
+ return style[ name ];
+ }
+ },
+
+ // Read the computed value of a css property, consulting cssHooks first.
+ css: function( elem, name, extra ) {
+ var ret, hooks;
+
+ // Make sure that we're working with the right name
+ name = jQuery.camelCase( name );
+ hooks = jQuery.cssHooks[ name ];
+ name = jQuery.cssProps[ name ] || name;
+
+ // cssFloat needs a special treatment
+ if ( name === "cssFloat" ) {
+ name = "float";
+ }
+
+ // If a hook was provided get the computed value from there
+ if ( hooks && "get" in hooks && (ret = hooks.get( elem, true, extra )) !== undefined ) {
+ return ret;
+
+ // Otherwise, if a way to get the computed value exists, use that
+ } else if ( curCSS ) {
+ return curCSS( elem, name );
+ }
+ },
+
+ // A method for quickly swapping in/out CSS properties to get correct calculations
+ swap: function( elem, options, callback ) {
+ var old = {};
+
+ // Remember the old values, and insert the new ones
+ for ( var name in options ) {
+ old[ name ] = elem.style[ name ];
+ elem.style[ name ] = options[ name ];
+ }
+
+ callback.call( elem );
+
+ // Revert the old values
+ for ( name in options ) {
+ elem.style[ name ] = old[ name ];
+ }
+ }
+});
+
+// DEPRECATED, Use jQuery.css() instead (kept only for backwards compatibility)
+jQuery.curCSS = jQuery.css;
+
+// cssHooks for width/height: computed reads go through getWH (which can
+// temporarily un-hide the element), writes reject negative pixel values.
+jQuery.each(["height", "width"], function( i, name ) {
+ jQuery.cssHooks[ name ] = {
+ get: function( elem, computed, extra ) {
+ var val;
+
+ if ( computed ) {
+ if ( elem.offsetWidth !== 0 ) {
+ return getWH( elem, name, extra );
+ } else {
+ // Hidden element: measure with cssShow styles swapped in.
+ jQuery.swap( elem, cssShow, function() {
+ val = getWH( elem, name, extra );
+ });
+ }
+
+ return val;
+ }
+ },
+
+ set: function( elem, value ) {
+ if ( rnumpx.test( value ) ) {
+ // ignore negative width and height values #1599
+ value = parseFloat( value );
+
+ if ( value >= 0 ) {
+ return value + "px";
+ }
+
+ } else {
+ return value;
+ }
+ }
+ };
+});
+
+// Older IE has no native opacity: emulate it via the alpha() filter.
+if ( !jQuery.support.opacity ) {
+ jQuery.cssHooks.opacity = {
+ get: function( elem, computed ) {
+ // IE uses filters for opacity
+ return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ?
+ ( parseFloat( RegExp.$1 ) / 100 ) + "" :
+ computed ? "1" : "";
+ },
+
+ set: function( elem, value ) {
+ var style = elem.style,
+ currentStyle = elem.currentStyle,
+ opacity = jQuery.isNaN( value ) ? "" : "alpha(opacity=" + value * 100 + ")",
+ filter = currentStyle && currentStyle.filter || style.filter || "";
+
+ // IE has trouble with opacity if it does not have layout
+ // Force it by setting the zoom level
+ style.zoom = 1;
+
+ // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652
+ if ( value >= 1 && jQuery.trim( filter.replace( ralpha, "" ) ) === "" ) {
+
+ // Setting style.filter to null, "" & " " still leave "filter:" in the cssText
+ // if "filter:" is present at all, clearType is disabled, we want to avoid this
+ // style.removeAttribute is IE Only, but so apparently is this code path...
+ style.removeAttribute( "filter" );
+
+ // if there there is no filter style applied in a css rule, we are done
+ if ( currentStyle && !currentStyle.filter ) {
+ return;
+ }
+ }
+
+ // otherwise, set new filter values
+ style.filter = ralpha.test( filter ) ?
+ filter.replace( ralpha, opacity ) :
+ filter + " " + opacity;
+ }
+ };
+}
+
+jQuery(function() {
+ // This hook cannot be added until DOM ready because the support test
+ // for it is not run until after DOM ready
+ if ( !jQuery.support.reliableMarginRight ) {
+ jQuery.cssHooks.marginRight = {
+ get: function( elem, computed ) {
+ // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
+ // Work around by temporarily setting element display to inline-block
+ var ret;
+ jQuery.swap( elem, { "display": "inline-block" }, function() {
+ if ( computed ) {
+ ret = curCSS( elem, "margin-right", "marginRight" );
+ } else {
+ ret = elem.style.marginRight;
+ }
+ });
+ return ret;
+ }
+ };
+ }
+});
+
+// Standards branch: read computed styles via window.getComputedStyle.
+if ( document.defaultView && document.defaultView.getComputedStyle ) {
+ getComputedStyle = function( elem, name ) {
+ var ret, defaultView, computedStyle;
+
+ // getPropertyValue expects a hyphenated name ("margin-right").
+ name = name.replace( rupper, "-$1" ).toLowerCase();
+
+ if ( !(defaultView = elem.ownerDocument.defaultView) ) {
+ return undefined;
+ }
+
+ if ( (computedStyle = defaultView.getComputedStyle( elem, null )) ) {
+ ret = computedStyle.getPropertyValue( name );
+ // Detached elements report ""; fall back to the inline style value.
+ if ( ret === "" && !jQuery.contains( elem.ownerDocument.documentElement, elem ) ) {
+ ret = jQuery.style( elem, name );
+ }
+ }
+
+ return ret;
+ };
+}
+
+// IE branch: read styles via currentStyle, converting non-pixel units to px.
+if ( document.documentElement.currentStyle ) {
+ currentStyle = function( elem, name ) {
+ var left,
+ ret = elem.currentStyle && elem.currentStyle[ name ],
+ rsLeft = elem.runtimeStyle && elem.runtimeStyle[ name ],
+ style = elem.style;
+
+ // From the awesome hack by Dean Edwards
+ // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291
+
+ // If we're not dealing with a regular pixel number
+ // but a number that has a weird ending, we need to convert it to pixels
+ if ( !rnumpx.test( ret ) && rnum.test( ret ) ) {
+ // Remember the original values
+ left = style.left;
+
+ // Put in the new values to get a computed value out
+ if ( rsLeft ) {
+ elem.runtimeStyle.left = elem.currentStyle.left;
+ }
+ style.left = name === "fontSize" ? "1em" : (ret || 0);
+ ret = style.pixelLeft + "px";
+
+ // Revert the changed values
+ style.left = left;
+ if ( rsLeft ) {
+ elem.runtimeStyle.left = rsLeft;
+ }
+ }
+
+ return ret === "" ? "auto" : ret;
+ };
+}
+
+curCSS = getComputedStyle || currentStyle;
+
+// Compute an element's width/height in "px". `extra` selects the box:
+// undefined = content box (padding and border subtracted from offset size),
+// "padding" = content + padding, "border" = offset size, "margin" = + margins.
+function getWH( elem, name, extra ) {
+
+ // Start with offset property
+ var val = name === "width" ? elem.offsetWidth : elem.offsetHeight,
+ which = name === "width" ? cssWidth : cssHeight;
+
+ if ( val > 0 ) {
+ if ( extra !== "border" ) {
+ jQuery.each( which, function() {
+ if ( !extra ) {
+ val -= parseFloat( jQuery.css( elem, "padding" + this ) ) || 0;
+ }
+ if ( extra === "margin" ) {
+ val += parseFloat( jQuery.css( elem, extra + this ) ) || 0;
+ } else {
+ val -= parseFloat( jQuery.css( elem, "border" + this + "Width" ) ) || 0;
+ }
+ });
+ }
+
+ return val + "px";
+ }
+
+ // Fall back to computed then uncomputed css if necessary
+ val = curCSS( elem, name, name );
+ if ( val < 0 || val == null ) {
+ val = elem.style[ name ] || 0;
+ }
+ // Normalize "", auto, and prepare for extra
+ val = parseFloat( val ) || 0;
+
+ // Add padding, border, margin
+ if ( extra ) {
+ jQuery.each( which, function() {
+ val += parseFloat( jQuery.css( elem, "padding" + this ) ) || 0;
+ if ( extra !== "padding" ) {
+ val += parseFloat( jQuery.css( elem, "border" + this + "Width" ) ) || 0;
+ }
+ if ( extra === "margin" ) {
+ val += parseFloat( jQuery.css( elem, extra + this ) ) || 0;
+ }
+ });
+ }
+
+ return val + "px";
+}
+
+// Selector filters :hidden / :visible, based on the element's offset size.
+if ( jQuery.expr && jQuery.expr.filters ) {
+ jQuery.expr.filters.hidden = function( elem ) {
+ var width = elem.offsetWidth,
+ height = elem.offsetHeight;
+
+ // Zero-size elements are hidden; browsers with unreliable offsets also
+ // need an explicit display:none check.
+ return (width === 0 && height === 0) || (!jQuery.support.reliableHiddenOffsets && (elem.style.display || jQuery.css( elem, "display" )) === "none");
+ };
+
+ jQuery.expr.filters.visible = function( elem ) {
+ return !jQuery.expr.filters.hidden( elem );
+ };
+}
+
+
+
+
+// Shared regexps and state for the ajax module.
+var r20 = /%20/g,
+ rbracket = /\[\]$/,
+ rCRLF = /\r?\n/g,
+ rhash = /#.*$/,
+ rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL
+ rinput = /^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,
+ // #7653, #8125, #8152: local protocol detection
+ rlocalProtocol = /^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,
+ rnoContent = /^(?:GET|HEAD)$/,
+ rprotocol = /^\/\//,
+ rquery = /\?/,
+ rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
+ rselectTextarea = /^(?:select|textarea)/i,
+ rspacesAjax = /\s+/,
+ rts = /([?&])_=[^&]*/,
+ rurl = /^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,
+
+ // Keep a copy of the old load method
+ _load = jQuery.fn.load,
+
+ /* Prefilters
+ * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
+ * 2) These are called:
+ * - BEFORE asking for a transport
+ * - AFTER param serialization (s.data is a string if s.processData is true)
+ * 3) key is the dataType
+ * 4) the catchall symbol "*" can be used
+ * 5) execution will start with transport dataType and THEN continue down to "*" if needed
+ */
+ prefilters = {},
+
+ /* Transports bindings
+ * 1) key is the dataType
+ * 2) the catchall symbol "*" can be used
+ * 3) selection will start with transport dataType and THEN go to "*" if needed
+ */
+ transports = {},
+
+ // Document location
+ ajaxLocation,
+
+ // Document location segments
+ ajaxLocParts,
+
+ // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
+ allTypes = ["*/"] + ["*"];
+
+// #8138, IE may throw an exception when accessing
+// a field from window.location if document.domain has been set
+try {
+ ajaxLocation = location.href;
+} catch( e ) {
+ // Use the href attribute of an A element
+ // since IE will modify it given document.location
+ ajaxLocation = document.createElement( "a" );
+ ajaxLocation.href = "";
+ ajaxLocation = ajaxLocation.href;
+}
+
+// Segment location into [ full, protocol, host, port ] parts via rurl
+ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || [];
+
+// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
+function addToPrefiltersOrTransports( structure ) {
+
+ // dataTypeExpression is optional and defaults to "*"
+ return function( dataTypeExpression, func ) {
+
+ if ( typeof dataTypeExpression !== "string" ) {
+ func = dataTypeExpression;
+ dataTypeExpression = "*";
+ }
+
+ if ( jQuery.isFunction( func ) ) {
+ var dataTypes = dataTypeExpression.toLowerCase().split( rspacesAjax ),
+ i = 0,
+ length = dataTypes.length,
+ dataType,
+ list,
+ placeBefore;
+
+ // For each dataType in the dataTypeExpression
+ for(; i < length; i++ ) {
+ dataType = dataTypes[ i ];
+ // We control if we're asked to add before
+ // any existing element
+ placeBefore = /^\+/.test( dataType );
+ if ( placeBefore ) {
+ dataType = dataType.substr( 1 ) || "*";
+ }
+ list = structure[ dataType ] = structure[ dataType ] || [];
+ // then we add to the structure accordingly
+ list[ placeBefore ? "unshift" : "push" ]( func );
+ }
+ }
+ };
+}
+
+// Base inspection function for prefilters and transports
+function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR,
+ dataType /* internal */, inspected /* internal */ ) {
+
+ dataType = dataType || options.dataTypes[ 0 ];
+ inspected = inspected || {};
+
+ inspected[ dataType ] = true;
+
+ var list = structure[ dataType ],
+ i = 0,
+ length = list ? list.length : 0,
+ executeOnly = ( structure === prefilters ),
+ selection;
+
+ for(; i < length && ( executeOnly || !selection ); i++ ) {
+ selection = list[ i ]( options, originalOptions, jqXHR );
+ // If we got redirected to another dataType
+ // we try there if executing only and not done already
+ if ( typeof selection === "string" ) {
+ if ( !executeOnly || inspected[ selection ] ) {
+ selection = undefined;
+ } else {
+ options.dataTypes.unshift( selection );
+ selection = inspectPrefiltersOrTransports(
+ structure, options, originalOptions, jqXHR, selection, inspected );
+ }
+ }
+ }
+ // If we're only executing or nothing was selected
+ // we try the catchall dataType if not done already
+ if ( ( executeOnly || !selection ) && !inspected[ "*" ] ) {
+ selection = inspectPrefiltersOrTransports(
+ structure, options, originalOptions, jqXHR, "*", inspected );
+ }
+ // unnecessary when only executing (prefilters)
+ // but it'll be ignored by the caller in that case
+ return selection;
+}
+
+// A special extend for ajax options
+// that takes "flat" options (not to be deep extended),
+// as listed in jQuery.ajaxSettings.flatOptions
+// Fixes #9887
+function ajaxExtend( target, src ) {
+ var key, deep,
+ flatOptions = jQuery.ajaxSettings.flatOptions || {};
+ // Flat options are copied directly; everything else is collected into
+ // `deep` and merged with a deep jQuery.extend below.
+ for( key in src ) {
+ if ( src[ key ] !== undefined ) {
+ ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
+ }
+ }
+ if ( deep ) {
+ jQuery.extend( true, target, deep );
+ }
+}
+
+jQuery.fn.extend({
+ // Load HTML from `url` into the matched elements. An optional selector
+ // after a space in `url` filters the response; object params switch the
+ + // request to POST; `callback` runs per element when the request completes.
+ load: function( url, params, callback ) {
+ if ( typeof url !== "string" && _load ) {
+ return _load.apply( this, arguments );
+
+ // Don't do a request if no elements are being requested
+ } else if ( !this.length ) {
+ return this;
+ }
+
+ var off = url.indexOf( " " );
+ if ( off >= 0 ) {
+ var selector = url.slice( off, url.length );
+ url = url.slice( 0, off );
+ }
+
+ // Default to a GET request
+ var type = "GET";
+
+ // If the second parameter was provided
+ if ( params ) {
+ // If it's a function
+ if ( jQuery.isFunction( params ) ) {
+ // We assume that it's the callback
+ callback = params;
+ params = undefined;
+
+ // Otherwise, build a param string
+ } else if ( typeof params === "object" ) {
+ params = jQuery.param( params, jQuery.ajaxSettings.traditional );
+ type = "POST";
+ }
+ }
+
+ var self = this;
+
+ // Request the remote document
+ jQuery.ajax({
+ url: url,
+ type: type,
+ dataType: "html",
+ data: params,
+ // Complete callback (responseText is used internally)
+ complete: function( jqXHR, status, responseText ) {
+ // Store the response as specified by the jqXHR object
+ responseText = jqXHR.responseText;
+ // If successful, inject the HTML into all the matched elements
+ if ( jqXHR.isResolved() ) {
+ // #4825: Get the actual response in case
+ // a dataFilter is present in ajaxSettings
+ jqXHR.done(function( r ) {
+ responseText = r;
+ });
+ // See if a selector was specified
+ self.html( selector ?
+ // Create a dummy div to hold the results
+ jQuery("<div>")
+ // inject the contents of the document in, removing the scripts
+ // to avoid any 'Permission Denied' errors in IE
+ .append(responseText.replace(rscript, ""))
+
+ // Locate the specified elements
+ .find(selector) :
+
+ // If not, just inject the full result
+ responseText );
+ }
+
+ if ( callback ) {
+ self.each( callback, [ responseText, status, jqXHR ] );
+ }
+ }
+ });
+
+ return this;
+ },
+
+ // Serialize form elements into a URL-encoded query string.
+ serialize: function() {
+ return jQuery.param( this.serializeArray() );
+ },
+
+ // Collect successful form controls as [{ name, value }, ...] objects.
+ serializeArray: function() {
+ return this.map(function(){
+ return this.elements ? jQuery.makeArray( this.elements ) : this;
+ })
+ .filter(function(){
+ // Keep only named, enabled controls that would submit a value.
+ return this.name && !this.disabled &&
+ ( this.checked || rselectTextarea.test( this.nodeName ) ||
+ rinput.test( this.type ) );
+ })
+ .map(function( i, elem ){
+ var val = jQuery( this ).val();
+
+ return val == null ?
+ null :
+ jQuery.isArray( val ) ?
+ jQuery.map( val, function( val, i ){
+ return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
+ }) :
+ { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
+ }).get();
+ }
+});
+
+// Attach a bunch of functions for handling common AJAX events
+// (each shortcut simply binds a handler for the matching global ajax event)
+jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split( " " ), function( i, o ){
+ jQuery.fn[ o ] = function( f ){
+ return this.bind( o, f );
+ };
+});
+
+// Define jQuery.get and jQuery.post as thin wrappers over jQuery.ajax.
+jQuery.each( [ "get", "post" ], function( i, method ) {
+ jQuery[ method ] = function( url, data, callback, type ) {
+ // shift arguments if data argument was omitted
+ if ( jQuery.isFunction( data ) ) {
+ type = type || callback;
+ callback = data;
+ data = undefined;
+ }
+
+ return jQuery.ajax({
+ type: method,
+ url: url,
+ data: data,
+ success: callback,
+ dataType: type
+ });
+ };
+});
+
+jQuery.extend({
+
+ getScript: function( url, callback ) {
+ return jQuery.get( url, undefined, callback, "script" );
+ },
+
+ getJSON: function( url, data, callback ) {
+ return jQuery.get( url, data, callback, "json" );
+ },
+
+ // Creates a full fledged settings object into target
+ // with both ajaxSettings and settings fields.
+ // If target is omitted, writes into ajaxSettings.
+ ajaxSetup: function( target, settings ) {
+ if ( settings ) {
+ // Building a settings object
+ ajaxExtend( target, jQuery.ajaxSettings );
+ } else {
+ // Extending ajaxSettings
+ settings = target;
+ target = jQuery.ajaxSettings;
+ }
+ ajaxExtend( target, settings );
+ return target;
+ },
+
+ ajaxSettings: {
+ url: ajaxLocation,
+ isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ),
+ global: true,
+ type: "GET",
+ contentType: "application/x-www-form-urlencoded",
+ processData: true,
+ async: true,
+ /*
+ timeout: 0,
+ data: null,
+ dataType: null,
+ username: null,
+ password: null,
+ cache: null,
+ traditional: false,
+ headers: {},
+ */
+
+ accepts: {
+ xml: "application/xml, text/xml",
+ html: "text/html",
+ text: "text/plain",
+ json: "application/json, text/javascript",
+ "*": allTypes
+ },
+
+ contents: {
+ xml: /xml/,
+ html: /html/,
+ json: /json/
+ },
+
+ responseFields: {
+ xml: "responseXML",
+ text: "responseText"
+ },
+
+ // List of data converters
+ // 1) key format is "source_type destination_type" (a single space in-between)
+ // 2) the catchall symbol "*" can be used for source_type
+ converters: {
+
+ // Convert anything to text
+ "* text": window.String,
+
+ // Text to html (true = no transformation)
+ "text html": true,
+
+ // Evaluate text as a json expression
+ "text json": jQuery.parseJSON,
+
+ // Parse text as xml
+ "text xml": jQuery.parseXML
+ },
+
+ // For options that shouldn't be deep extended:
+ // you can add your own custom options here if
+ // and when you create one that shouldn't be
+ // deep extended (see ajaxExtend)
+ flatOptions: {
+ context: true,
+ url: true
+ }
+ },
+
+ ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
+ ajaxTransport: addToPrefiltersOrTransports( transports ),
+
+ // Main method
+ ajax: function( url, options ) {
+
+ // If url is an object, simulate pre-1.5 signature
+ if ( typeof url === "object" ) {
+ options = url;
+ url = undefined;
+ }
+
+ // Force options to be an object
+ options = options || {};
+
+ var // Create the final options object
+ s = jQuery.ajaxSetup( {}, options ),
+ // Callbacks context
+ callbackContext = s.context || s,
+ // Context for global events
+ // It's the callbackContext if one was provided in the options
+ // and if it's a DOM node or a jQuery collection
+ globalEventContext = callbackContext !== s &&
+ ( callbackContext.nodeType || callbackContext instanceof jQuery ) ?
+ jQuery( callbackContext ) : jQuery.event,
+ // Deferreds
+ deferred = jQuery.Deferred(),
+ completeDeferred = jQuery._Deferred(),
+ // Status-dependent callbacks
+ statusCode = s.statusCode || {},
+ // ifModified key
+ ifModifiedKey,
+ // Headers (they are sent all at once)
+ requestHeaders = {},
+ requestHeadersNames = {},
+ // Response headers
+ responseHeadersString,
+ responseHeaders,
+ // transport
+ transport,
+ // timeout handle
+ timeoutTimer,
+ // Cross-domain detection vars
+ parts,
+ // The jqXHR state
+ state = 0,
+ // To know if global events are to be dispatched
+ fireGlobals,
+ // Loop variable
+ i,
+ // Fake xhr
+ jqXHR = {
+
+ readyState: 0,
+
+ // Caches the header
+ setRequestHeader: function( name, value ) {
+ if ( !state ) {
+ var lname = name.toLowerCase();
+ name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name;
+ requestHeaders[ name ] = value;
+ }
+ return this;
+ },
+
+ // Raw string
+ getAllResponseHeaders: function() {
+ return state === 2 ? responseHeadersString : null;
+ },
+
+ // Builds headers hashtable if needed
+ getResponseHeader: function( key ) {
+ var match;
+ if ( state === 2 ) {
+ if ( !responseHeaders ) {
+ responseHeaders = {};
+ while( ( match = rheaders.exec( responseHeadersString ) ) ) {
+ responseHeaders[ match[1].toLowerCase() ] = match[ 2 ];
+ }
+ }
+ match = responseHeaders[ key.toLowerCase() ];
+ }
+ return match === undefined ? null : match;
+ },
+
+ // Overrides response content-type header
+ overrideMimeType: function( type ) {
+ if ( !state ) {
+ s.mimeType = type;
+ }
+ return this;
+ },
+
+ // Cancel the request
+ abort: function( statusText ) {
+ statusText = statusText || "abort";
+ if ( transport ) {
+ transport.abort( statusText );
+ }
+ done( 0, statusText );
+ return this;
+ }
+ };
+
+ // Callback for when everything is done
+ // It is defined here because jslint complains if it is declared
+ // at the end of the function (which would be more logical and readable)
+ function done( status, nativeStatusText, responses, headers ) {
+
+ // Called once
+ if ( state === 2 ) {
+ return;
+ }
+
+ // State is "done" now
+ state = 2;
+
+ // Clear timeout if it exists
+ if ( timeoutTimer ) {
+ clearTimeout( timeoutTimer );
+ }
+
+ // Dereference transport for early garbage collection
+ // (no matter how long the jqXHR object will be used)
+ transport = undefined;
+
+ // Cache response headers
+ responseHeadersString = headers || "";
+
+ // Set readyState
+ jqXHR.readyState = status > 0 ? 4 : 0;
+
+ var isSuccess,
+ success,
+ error,
+ statusText = nativeStatusText,
+ response = responses ? ajaxHandleResponses( s, jqXHR, responses ) : undefined,
+ lastModified,
+ etag;
+
+ // If successful, handle type chaining
+ if ( status >= 200 && status < 300 || status === 304 ) {
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+
+ if ( ( lastModified = jqXHR.getResponseHeader( "Last-Modified" ) ) ) {
+ jQuery.lastModified[ ifModifiedKey ] = lastModified;
+ }
+ if ( ( etag = jqXHR.getResponseHeader( "Etag" ) ) ) {
+ jQuery.etag[ ifModifiedKey ] = etag;
+ }
+ }
+
+ // If not modified
+ if ( status === 304 ) {
+
+ statusText = "notmodified";
+ isSuccess = true;
+
+ // If we have data
+ } else {
+
+ try {
+ success = ajaxConvert( s, response );
+ statusText = "success";
+ isSuccess = true;
+ } catch(e) {
+ // We have a parsererror
+ statusText = "parsererror";
+ error = e;
+ }
+ }
+ } else {
+ // We extract error from statusText
+ // then normalize statusText and status for non-aborts
+ error = statusText;
+ if( !statusText || status ) {
+ statusText = "error";
+ if ( status < 0 ) {
+ status = 0;
+ }
+ }
+ }
+
+ // Set data for the fake xhr object
+ jqXHR.status = status;
+ jqXHR.statusText = "" + ( nativeStatusText || statusText );
+
+ // Success/Error
+ if ( isSuccess ) {
+ deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
+ } else {
+ deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
+ }
+
+ // Status-dependent callbacks
+ jqXHR.statusCode( statusCode );
+ statusCode = undefined;
+
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajax" + ( isSuccess ? "Success" : "Error" ),
+ [ jqXHR, s, isSuccess ? success : error ] );
+ }
+
+ // Complete
+ completeDeferred.resolveWith( callbackContext, [ jqXHR, statusText ] );
+
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
+ // Handle the global AJAX counter
+ if ( !( --jQuery.active ) ) {
+ jQuery.event.trigger( "ajaxStop" );
+ }
+ }
+ }
+
+ // Attach deferreds
+ deferred.promise( jqXHR );
+ jqXHR.success = jqXHR.done;
+ jqXHR.error = jqXHR.fail;
+ jqXHR.complete = completeDeferred.done;
+
+ // Status-dependent callbacks
+ jqXHR.statusCode = function( map ) {
+ if ( map ) {
+ var tmp;
+ if ( state < 2 ) {
+ for( tmp in map ) {
+ statusCode[ tmp ] = [ statusCode[tmp], map[tmp] ];
+ }
+ } else {
+ tmp = map[ jqXHR.status ];
+ jqXHR.then( tmp, tmp );
+ }
+ }
+ return this;
+ };
+
+ // Remove hash character (#7531: and string promotion)
+ // Add protocol if not provided (#5866: IE7 issue with protocol-less urls)
+ // We also use the url parameter if available
+ s.url = ( ( url || s.url ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" );
+
+ // Extract dataTypes list
+ s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().split( rspacesAjax );
+
+ // Determine if a cross-domain request is in order
+ if ( s.crossDomain == null ) {
+ parts = rurl.exec( s.url.toLowerCase() );
+ s.crossDomain = !!( parts &&
+ ( parts[ 1 ] != ajaxLocParts[ 1 ] || parts[ 2 ] != ajaxLocParts[ 2 ] ||
+ ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) !=
+ ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 80 : 443 ) ) )
+ );
+ }
+
+ // Convert data if not already a string
+ if ( s.data && s.processData && typeof s.data !== "string" ) {
+ s.data = jQuery.param( s.data, s.traditional );
+ }
+
+ // Apply prefilters
+ inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
+
+ // If request was aborted inside a prefiler, stop there
+ if ( state === 2 ) {
+ return false;
+ }
+
+ // We can fire global events as of now if asked to
+ fireGlobals = s.global;
+
+ // Uppercase the type
+ s.type = s.type.toUpperCase();
+
+ // Determine if request has content
+ s.hasContent = !rnoContent.test( s.type );
+
+ // Watch for a new set of requests
+ if ( fireGlobals && jQuery.active++ === 0 ) {
+ jQuery.event.trigger( "ajaxStart" );
+ }
+
+ // More options handling for requests with no content
+ if ( !s.hasContent ) {
+
+ // If data is available, append data to url
+ if ( s.data ) {
+ s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.data;
+ // #9682: remove data so that it's not used in an eventual retry
+ delete s.data;
+ }
+
+ // Get ifModifiedKey before adding the anti-cache parameter
+ ifModifiedKey = s.url;
+
+ // Add anti-cache in url if needed
+ if ( s.cache === false ) {
+
+ var ts = jQuery.now(),
+ // try replacing _= if it is there
+ ret = s.url.replace( rts, "$1_=" + ts );
+
+ // if nothing was replaced, add timestamp to the end
+ s.url = ret + ( (ret === s.url ) ? ( rquery.test( s.url ) ? "&" : "?" ) + "_=" + ts : "" );
+ }
+ }
+
+ // Set the correct header, if data is being sent
+ if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
+ jqXHR.setRequestHeader( "Content-Type", s.contentType );
+ }
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+ ifModifiedKey = ifModifiedKey || s.url;
+ if ( jQuery.lastModified[ ifModifiedKey ] ) {
+ jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ ifModifiedKey ] );
+ }
+ if ( jQuery.etag[ ifModifiedKey ] ) {
+ jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ ifModifiedKey ] );
+ }
+ }
+
+ // Set the Accepts header for the server, depending on the dataType
+ jqXHR.setRequestHeader(
+ "Accept",
+ s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ?
+ s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
+ s.accepts[ "*" ]
+ );
+
+ // Check for headers option
+ for ( i in s.headers ) {
+ jqXHR.setRequestHeader( i, s.headers[ i ] );
+ }
+
+ // Allow custom headers/mimetypes and early abort
+ if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) {
+ // Abort if not done already
+ jqXHR.abort();
+ return false;
+
+ }
+
+ // Install callbacks on deferreds
+ for ( i in { success: 1, error: 1, complete: 1 } ) {
+ jqXHR[ i ]( s[ i ] );
+ }
+
+ // Get transport
+ transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
+
+ // If no transport, we auto-abort
+ if ( !transport ) {
+ done( -1, "No Transport" );
+ } else {
+ jqXHR.readyState = 1;
+ // Send global event
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
+ }
+ // Timeout
+ if ( s.async && s.timeout > 0 ) {
+ timeoutTimer = setTimeout( function(){
+ jqXHR.abort( "timeout" );
+ }, s.timeout );
+ }
+
+ try {
+ state = 1;
+ transport.send( requestHeaders, done );
+ } catch (e) {
+ // Propagate exception as error if not done
+ if ( state < 2 ) {
+ done( -1, e );
+ // Simply rethrow otherwise
+ } else {
+ jQuery.error( e );
+ }
+ }
+ }
+
+ return jqXHR;
+ },
+
+ // Serialize an array of form elements or a set of
+ // key/values into a query string
+ param: function( a, traditional ) {
+ var s = [],
+ add = function( key, value ) {
+ // If value is a function, invoke it and return its value
+ value = jQuery.isFunction( value ) ? value() : value;
+ s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value );
+ };
+
+ // Set traditional to true for jQuery <= 1.3.2 behavior.
+ if ( traditional === undefined ) {
+ traditional = jQuery.ajaxSettings.traditional;
+ }
+
+ // If an array was passed in, assume that it is an array of form elements.
+ if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
+ // Serialize the form elements
+ jQuery.each( a, function() {
+ add( this.name, this.value );
+ });
+
+ } else {
+ // If traditional, encode the "old" way (the way 1.3.2 or older
+ // did it), otherwise encode params recursively.
+ for ( var prefix in a ) {
+ buildParams( prefix, a[ prefix ], traditional, add );
+ }
+ }
+
+ // Return the resulting serialization
+ return s.join( "&" ).replace( r20, "+" );
+ }
+});
+
// Recursively serialize obj under the given key prefix, calling `add`
// once for every scalar leaf that is produced.
function buildParams( prefix, obj, traditional, add ) {
	var name;

	if ( jQuery.isArray( obj ) ) {
		// Array: shallow when traditional or the prefix is already bracketed,
		// otherwise recurse with an index for non-scalar items
		jQuery.each( obj, function( i, v ) {
			if ( traditional || rbracket.test( prefix ) ) {
				// Treat each array item as a scalar
				add( prefix, v );
			} else {
				// Encode the numeric index of non-scalar items (arrays/objects)
				// to resolve deserialization ambiguity. Note: rack (as of 1.0.0)
				// can't deserialize nested arrays, which is why scalar items
				// get an empty bracket instead of an index.
				buildParams(
					prefix + "[" + ( typeof v === "object" || jQuery.isArray(v) ? i : "" ) + "]",
					v,
					traditional,
					add
				);
			}
		});
	} else if ( !traditional && obj != null && typeof obj === "object" ) {
		// Plain object: recurse into each property as prefix[name]
		for ( name in obj ) {
			buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
		}
	} else {
		// Scalar leaf: emit it directly
		add( prefix, obj );
	}
}
+
// This is still on the jQuery object... for now
// Want to move this to jQuery.ajax some day
jQuery.extend({

	// Counter for holding the number of active queries
	// (drives the global ajaxStart/ajaxStop events)
	active: 0,

	// Last-Modified header cache for next request
	// Keyed by ifModifiedKey (the request url before anti-cache params)
	lastModified: {},
	// ETag cache, used to send If-None-Match in ifModified mode
	etag: {}

});
+
/* Handles responses to an ajax request:
 * - sets all responseXXX fields accordingly
 * - finds the right dataType (mediates between content-type and expected dataType)
 * - returns the corresponding response
 * Note: mutates s.dataTypes in place so later conversion sees the
 * resolved chain.
 */
function ajaxHandleResponses( s, jqXHR, responses ) {

	var contents = s.contents,
		dataTypes = s.dataTypes,
		responseFields = s.responseFields,
		ct,
		type,
		finalDataType,
		firstDataType;

	// Fill responseXXX fields
	for( type in responseFields ) {
		if ( type in responses ) {
			jqXHR[ responseFields[type] ] = responses[ type ];
		}
	}

	// Remove auto dataType and get content-type in the process
	// (ct is only read once, even if several "*" entries are shifted off)
	while( dataTypes[ 0 ] === "*" ) {
		dataTypes.shift();
		if ( ct === undefined ) {
			ct = s.mimeType || jqXHR.getResponseHeader( "content-type" );
		}
	}

	// Check if we're dealing with a known content-type
	if ( ct ) {
		for ( type in contents ) {
			if ( contents[ type ] && contents[ type ].test( ct ) ) {
				dataTypes.unshift( type );
				break;
			}
		}
	}

	// Check to see if we have a response for the expected dataType
	if ( dataTypes[ 0 ] in responses ) {
		finalDataType = dataTypes[ 0 ];
	} else {
		// Try convertible dataTypes
		for ( type in responses ) {
			if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) {
				finalDataType = type;
				break;
			}
			if ( !firstDataType ) {
				firstDataType = type;
			}
		}
		// Or just use first one
		finalDataType = finalDataType || firstDataType;
	}

	// If we found a dataType
	// We add the dataType to the list if needed
	// and return the corresponding response
	if ( finalDataType ) {
		if ( finalDataType !== dataTypes[ 0 ] ) {
			dataTypes.unshift( finalDataType );
		}
		return responses[ finalDataType ];
	}
}
+
// Chain conversions given the request and the original response.
// Walks s.dataTypes pairwise and pipes the response through the matching
// converters from s.converters (an entry of `true` means "identity").
function ajaxConvert( s, response ) {

	// Apply the dataFilter if provided
	if ( s.dataFilter ) {
		response = s.dataFilter( response, s.dataType );
	}

	var dataTypes = s.dataTypes,
		converters = {},
		i,
		key,
		length = dataTypes.length,
		tmp,
		// Current and previous dataTypes
		current = dataTypes[ 0 ],
		prev,
		// Conversion expression
		conversion,
		// Conversion function
		conv,
		// Conversion functions (transitive conversion)
		conv1,
		conv2;

	// For each dataType in the chain
	for( i = 1; i < length; i++ ) {

		// Create converters map
		// with lowercased keys
		if ( i === 1 ) {
			for( key in s.converters ) {
				if( typeof key === "string" ) {
					converters[ key.toLowerCase() ] = s.converters[ key ];
				}
			}
		}

		// Get the dataTypes
		prev = current;
		current = dataTypes[ i ];

		// If current is auto dataType, update it to prev
		if( current === "*" ) {
			current = prev;
		// If no auto and dataTypes are actually different
		} else if ( prev !== "*" && prev !== current ) {

			// Get the converter
			conversion = prev + " " + current;
			conv = converters[ conversion ] || converters[ "* " + current ];

			// If there is no direct converter, search transitively:
			// prev -> intermediate (conv1) -> current (conv2),
			// where either leg may be the identity (`true`)
			if ( !conv ) {
				conv2 = undefined;
				for( conv1 in converters ) {
					tmp = conv1.split( " " );
					if ( tmp[ 0 ] === prev || tmp[ 0 ] === "*" ) {
						conv2 = converters[ tmp[1] + " " + current ];
						if ( conv2 ) {
							// NOTE: conv1 is reused here — it changes from
							// the converters map *key* to the converter
							// *function* itself
							conv1 = converters[ conv1 ];
							if ( conv1 === true ) {
								conv = conv2;
							} else if ( conv2 === true ) {
								conv = conv1;
							}
							break;
						}
					}
				}
			}
			// If we found no converter, dispatch an error
			if ( !( conv || conv2 ) ) {
				jQuery.error( "No conversion from " + conversion.replace(" "," to ") );
			}
			// If found converter is not an equivalence
			if ( conv !== true ) {
				// Convert with 1 or 2 converters accordingly
				response = conv ? conv( response ) : conv2( conv1(response) );
			}
		}
	}
	return response;
}
+
+
+
+
// Seed for generating unique jsonp callback names
var jsc = jQuery.now(),
	// Matches an "=?" callback placeholder (capturing its delimiters) or "??"
	jsre = /(\=)\?(&|$)|\?\?/i;

// Default jsonp settings
jQuery.ajaxSetup({
	jsonp: "callback",
	// Default generator for globally-unique callback names
	jsonpCallback: function() {
		return jQuery.expando + "_" + ( jsc++ );
	}
});
+
// Detect, normalize options and install callbacks for jsonp requests
jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {

	// Only inspect the request body for a callback placeholder when it is
	// an urlencoded string
	var inspectData = s.contentType === "application/x-www-form-urlencoded" &&
		( typeof s.data === "string" );

	if ( s.dataTypes[ 0 ] === "jsonp" ||
		s.jsonp !== false && ( jsre.test( s.url ) ||
				inspectData && jsre.test( s.data ) ) ) {

		var responseContainer,
			jsonpCallback = s.jsonpCallback =
				jQuery.isFunction( s.jsonpCallback ) ? s.jsonpCallback() : s.jsonpCallback,
			// Save any pre-existing global of the same name so it can be
			// restored after the request completes
			previous = window[ jsonpCallback ],
			url = s.url,
			data = s.data,
			replace = "$1" + jsonpCallback + "$2";

		// Substitute the callback name into the "=?" placeholder in the url,
		// then the data; if neither contained one, append it to the url
		if ( s.jsonp !== false ) {
			url = url.replace( jsre, replace );
			if ( s.url === url ) {
				if ( inspectData ) {
					data = data.replace( jsre, replace );
				}
				if ( s.data === data ) {
					// Add callback manually
					url += (/\?/.test( url ) ? "&" : "?") + s.jsonp + "=" + jsonpCallback;
				}
			}
		}

		s.url = url;
		s.data = data;

		// Install callback
		window[ jsonpCallback ] = function( response ) {
			responseContainer = [ response ];
		};

		// Clean-up function
		jqXHR.always(function() {
			// Set callback back to previous value
			window[ jsonpCallback ] = previous;
			// Call if it was a function and we have a response
			if ( responseContainer && jQuery.isFunction( previous ) ) {
				window[ jsonpCallback ]( responseContainer[ 0 ] );
			}
		});

		// Use data converter to retrieve json after script execution
		s.converters["script json"] = function() {
			if ( !responseContainer ) {
				jQuery.error( jsonpCallback + " was not called" );
			}
			return responseContainer[ 0 ];
		};

		// force json dataType
		s.dataTypes[ 0 ] = "json";

		// Delegate to script
		return "script";
	}
});
+
+
+
+
// Install script dataType
jQuery.ajaxSetup({
	accepts: {
		script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"
	},
	contents: {
		// Content-types recognized as script responses
		script: /javascript|ecmascript/
	},
	converters: {
		// Evaluate the response text in the global scope, then hand it
		// back unchanged for any further conversion
		"text script": function( text ) {
			jQuery.globalEval( text );
			return text;
		}
	}
});
+
// Handle cache's special case and global
jQuery.ajaxPrefilter( "script", function( settings ) {
	// Cross-domain script requests are served by the script-tag transport,
	// which can only GET and shouldn't fire global ajax events
	if ( settings.crossDomain ) {
		settings.type = "GET";
		settings.global = false;
	}
	// Unless the caller decided otherwise, never cache scripts
	if ( settings.cache === undefined ) {
		settings.cache = false;
	}
});
+
// Bind script tag hack transport
jQuery.ajaxTransport( "script", function(s) {

	// This transport only deals with cross domain requests
	if ( s.crossDomain ) {

		var script,
			head = document.head || document.getElementsByTagName( "head" )[0] || document.documentElement;

		return {

			send: function( _, callback ) {

				script = document.createElement( "script" );

				script.async = "async";

				if ( s.scriptCharset ) {
					script.charset = s.scriptCharset;
				}

				script.src = s.url;

				// Attach handlers for all browsers
				// (onreadystatechange covers old IE; the isAbort flag lets
				// abort() reuse this handler for cleanup without reporting
				// success)
				script.onload = script.onreadystatechange = function( _, isAbort ) {

					if ( isAbort || !script.readyState || /loaded|complete/.test( script.readyState ) ) {

						// Handle memory leak in IE
						script.onload = script.onreadystatechange = null;

						// Remove the script
						if ( head && script.parentNode ) {
							head.removeChild( script );
						}

						// Dereference the script
						script = undefined;

						// Callback if not abort
						if ( !isAbort ) {
							callback( 200, "success" );
						}
					}
				};
				// Use insertBefore instead of appendChild to circumvent an IE6 bug.
				// This arises when a base node is used (#2709 and #4378).
				head.insertBefore( script, head.firstChild );
			},

			abort: function() {
				if ( script ) {
					// Fires the handler above in abort mode (cleanup only)
					script.onload( 0, 1 );
				}
			}
		};
	}
});
+
+
+
+
var // #5280: Internet Explorer will keep connections alive if we don't abort on unload
	// (only needed where ActiveX exists, i.e. IE; false elsewhere)
	xhrOnUnloadAbort = window.ActiveXObject ? function() {
		// Abort all pending requests
		for ( var key in xhrCallbacks ) {
			xhrCallbacks[ key ]( 0, 1 );
		}
	} : false,
	// Monotonically increasing id used as the key into xhrCallbacks
	xhrId = 0,
	// Map of in-flight xhr callbacks; created lazily on the first async request
	xhrCallbacks;
+
+// Functions to create xhrs
+function createStandardXHR() {
+ try {
+ return new window.XMLHttpRequest();
+ } catch( e ) {}
+}
+
+function createActiveXHR() {
+ try {
+ return new window.ActiveXObject( "Microsoft.XMLHTTP" );
+ } catch( e ) {}
+}
+
// Create the request object
// (This is still attached to ajaxSettings for backward compatibility)
if ( window.ActiveXObject ) {
	/* Microsoft failed to properly implement XMLHttpRequest in IE7
	 * (can't request local files), and XMLHttpRequest can be disabled
	 * entirely in IE7/IE8 — so prefer the standard object but keep
	 * ActiveX as a fallback.
	 */
	jQuery.ajaxSettings.xhr = function() {
		return !this.isLocal && createStandardXHR() || createActiveXHR();
	};
} else {
	// For all other browsers, use the standard XMLHttpRequest object
	jQuery.ajaxSettings.xhr = createStandardXHR;
}
+
// Determine support properties
(function( testXHR ) {
	// ajax is supported if an xhr could be created at all
	jQuery.support.ajax = !!testXHR;
	// CORS is supported when the native xhr exposes withCredentials
	jQuery.support.cors = !!testXHR && ( "withCredentials" in testXHR );
})( jQuery.ajaxSettings.xhr() );
+
// Create transport if the browser can provide an xhr
if ( jQuery.support.ajax ) {

	jQuery.ajaxTransport(function( s ) {
		// Cross domain only allowed if supported through XMLHttpRequest
		if ( !s.crossDomain || jQuery.support.cors ) {

			var callback;

			return {
				send: function( headers, complete ) {

					// Get a new xhr
					var xhr = s.xhr(),
						handle,
						i;

					// Open the socket
					// Passing null username, generates a login popup on Opera (#2865)
					if ( s.username ) {
						xhr.open( s.type, s.url, s.async, s.username, s.password );
					} else {
						xhr.open( s.type, s.url, s.async );
					}

					// Apply custom fields if provided
					if ( s.xhrFields ) {
						for ( i in s.xhrFields ) {
							xhr[ i ] = s.xhrFields[ i ];
						}
					}

					// Override mime type if needed
					if ( s.mimeType && xhr.overrideMimeType ) {
						xhr.overrideMimeType( s.mimeType );
					}

					// X-Requested-With header
					// For cross-domain requests, seeing as conditions for a preflight are
					// akin to a jigsaw puzzle, we simply never set it to be sure.
					// (it can always be set on a per-request basis or even using ajaxSetup)
					// For same-domain requests, won't change header if already provided.
					if ( !s.crossDomain && !headers["X-Requested-With"] ) {
						headers[ "X-Requested-With" ] = "XMLHttpRequest";
					}

					// Need an extra try/catch for cross domain requests in Firefox 3
					try {
						for ( i in headers ) {
							xhr.setRequestHeader( i, headers[ i ] );
						}
					} catch( _ ) {}

					// Do send the request
					// This may raise an exception which is actually
					// handled in jQuery.ajax (so no try/catch here)
					xhr.send( ( s.hasContent && s.data ) || null );

					// Listener
					// isAbort distinguishes a manual abort (cleanup only)
					// from a readystatechange completion
					callback = function( _, isAbort ) {

						var status,
							statusText,
							responseHeaders,
							responses,
							xml;

						// Firefox throws exceptions when accessing properties
						// of an xhr when a network error occured
						// http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x80040111_(NS_ERROR_NOT_AVAILABLE)
						try {

							// Was never called and is aborted or complete
							if ( callback && ( isAbort || xhr.readyState === 4 ) ) {

								// Only called once
								callback = undefined;

								// Do not keep as active anymore
								if ( handle ) {
									xhr.onreadystatechange = jQuery.noop;
									if ( xhrOnUnloadAbort ) {
										delete xhrCallbacks[ handle ];
									}
								}

								// If it's an abort
								if ( isAbort ) {
									// Abort it manually if needed
									if ( xhr.readyState !== 4 ) {
										xhr.abort();
									}
								} else {
									status = xhr.status;
									responseHeaders = xhr.getAllResponseHeaders();
									responses = {};
									xml = xhr.responseXML;

									// Construct response list
									if ( xml && xml.documentElement /* #4958 */ ) {
										responses.xml = xml;
									}
									responses.text = xhr.responseText;

									// Firefox throws an exception when accessing
									// statusText for faulty cross-domain requests
									try {
										statusText = xhr.statusText;
									} catch( e ) {
										// We normalize with Webkit giving an empty statusText
										statusText = "";
									}

									// Filter status for non standard behaviors

									// If the request is local and we have data: assume a success
									// (success with no data won't get notified, that's the best we
									// can do given current implementations)
									if ( !status && s.isLocal && !s.crossDomain ) {
										status = responses.text ? 200 : 404;
									// IE - #1450: sometimes returns 1223 when it should be 204
									} else if ( status === 1223 ) {
										status = 204;
									}
								}
							}
						} catch( firefoxAccessException ) {
							if ( !isAbort ) {
								complete( -1, firefoxAccessException );
							}
						}

						// Call complete if needed
						// (responses is only set when the request actually finished,
						// so aborts and not-ready states fall through silently)
						if ( responses ) {
							complete( status, statusText, responses, responseHeaders );
						}
					};

					// if we're in sync mode or it's in cache
					// and has been retrieved directly (IE6 & IE7)
					// we need to manually fire the callback
					if ( !s.async || xhr.readyState === 4 ) {
						callback();
					} else {
						handle = ++xhrId;
						if ( xhrOnUnloadAbort ) {
							// Create the active xhrs callbacks list if needed
							// and attach the unload handler
							if ( !xhrCallbacks ) {
								xhrCallbacks = {};
								jQuery( window ).unload( xhrOnUnloadAbort );
							}
							// Add to list of active xhrs callbacks
							xhrCallbacks[ handle ] = callback;
						}
						xhr.onreadystatechange = callback;
					}
				},

				abort: function() {
					if ( callback ) {
						// (0, 1) mimics the listener's (_, isAbort) signature
						callback(0,1);
					}
				}
			};
		}
	});
}
+
+
+
+
// Shared state for the effects (fx) module
var elemdisplay = {},
	// Lazily-created helpers for defaultDisplay()
	iframe, iframeDoc,
	// The three special animation "values"
	rfxtypes = /^(?:toggle|show|hide)$/,
	// Parses values like "+=10.5px" into [ , operator, number, unit ]
	rfxnum = /^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,
	// Handle of the single shared animation interval timer
	timerId,
	fxAttrs = [
		// height animations
		[ "height", "marginTop", "marginBottom", "paddingTop", "paddingBottom" ],
		// width animations
		[ "width", "marginLeft", "marginRight", "paddingLeft", "paddingRight" ],
		// opacity animations
		[ "opacity" ]
	],
	// Timestamp cached for the duration of one tick (see createFxNow)
	fxNow;
+
+jQuery.fn.extend({
	// Show the matched elements: animated when a speed is given, otherwise
	// instantly by restoring each element's remembered display value
	show: function( speed, easing, callback ) {
		var elem, display;

		if ( speed || speed === 0 ) {
			return this.animate( genFx("show", 3), speed, easing, callback);

		} else {
			for ( var i = 0, j = this.length; i < j; i++ ) {
				elem = this[i];

				if ( elem.style ) {
					display = elem.style.display;

					// Reset the inline display of this element to learn if it is
					// being hidden by cascaded rules or not
					if ( !jQuery._data(elem, "olddisplay") && display === "none" ) {
						display = elem.style.display = "";
					}

					// Set elements which have been overridden with display: none
					// in a stylesheet to whatever the default browser style is
					// for such an element
					if ( display === "" && jQuery.css( elem, "display" ) === "none" ) {
						jQuery._data(elem, "olddisplay", defaultDisplay(elem.nodeName));
					}
				}
			}

			// Set the display of most of the elements in a second loop
			// to avoid the constant reflow
			for ( i = 0; i < j; i++ ) {
				elem = this[i];

				if ( elem.style ) {
					display = elem.style.display;

					if ( display === "" || display === "none" ) {
						elem.style.display = jQuery._data(elem, "olddisplay") || "";
					}
				}
			}

			return this;
		}
	},
+
	// Hide the matched elements: animated when a speed is given, otherwise
	// instantly, remembering the current display value for a later show()
	hide: function( speed, easing, callback ) {
		if ( speed || speed === 0 ) {
			return this.animate( genFx("hide", 3), speed, easing, callback);

		} else {
			for ( var i = 0, j = this.length; i < j; i++ ) {
				if ( this[i].style ) {
					var display = jQuery.css( this[i], "display" );

					// Only remember displays that aren't already "none"
					if ( display !== "none" && !jQuery._data( this[i], "olddisplay" ) ) {
						jQuery._data( this[i], "olddisplay", display );
					}
				}
			}

			// Set the display of the elements in a second loop
			// to avoid the constant reflow
			for ( i = 0; i < j; i++ ) {
				if ( this[i].style ) {
					this[i].style.display = "none";
				}
			}

			return this;
		}
	},
+
	// Save the old toggle function
	_toggle: jQuery.fn.toggle,

	// Three-way toggle:
	//  - (fn, fn2, ...): delegates to the saved event toggle
	//  - (boolean) or no args: flips visibility instantly
	//  - (speed[, easing, callback]): animates between show and hide
	toggle: function( fn, fn2, callback ) {
		var bool = typeof fn === "boolean";

		if ( jQuery.isFunction(fn) && jQuery.isFunction(fn2) ) {
			this._toggle.apply( this, arguments );

		} else if ( fn == null || bool ) {
			this.each(function() {
				// Explicit boolean wins; otherwise invert the current state
				var state = bool ? fn : jQuery(this).is(":hidden");
				jQuery(this)[ state ? "show" : "hide" ]();
			});

		} else {
			this.animate(genFx("toggle", 3), fn, fn2, callback);
		}

		return this;
	},
+
+ fadeTo: function( speed, to, easing, callback ) {
+ return this.filter(":hidden").css("opacity", 0).show().end()
+ .animate({opacity: to}, speed, easing, callback);
+ },
+
	// Animate the given CSS properties on every matched element.
	// prop maps property names to target values (or "show"/"hide"/"toggle",
	// or [ value, easing ] pairs); speed/easing/callback are normalized
	// by jQuery.speed().
	animate: function( prop, speed, easing, callback ) {
		var optall = jQuery.speed(speed, easing, callback);

		if ( jQuery.isEmptyObject( prop ) ) {
			return this.each( optall.complete, [ false ] );
		}

		// Do not change referenced properties as per-property easing will be lost
		prop = jQuery.extend( {}, prop );

		return this[ optall.queue === false ? "each" : "queue" ](function() {
			// XXX 'this' does not always have a nodeName when running the
			// test suite

			if ( optall.queue === false ) {
				jQuery._mark( this );
			}

			var opt = jQuery.extend( {}, optall ),
				isElement = this.nodeType === 1,
				hidden = isElement && jQuery(this).is(":hidden"),
				name, val, p,
				display, e,
				parts, start, end, unit;

			// will store per property easing and be used to determine when an animation is complete
			opt.animatedProperties = {};

			for ( p in prop ) {

				// property name normalization
				name = jQuery.camelCase( p );
				if ( p !== name ) {
					prop[ name ] = prop[ p ];
					delete prop[ p ];
				}

				val = prop[ name ];

				// easing resolution: per property > opt.specialEasing > opt.easing > 'swing' (default)
				if ( jQuery.isArray( val ) ) {
					opt.animatedProperties[ name ] = val[ 1 ];
					val = prop[ name ] = val[ 0 ];
				} else {
					opt.animatedProperties[ name ] = opt.specialEasing && opt.specialEasing[ name ] || opt.easing || 'swing';
				}

				// Nothing to do if the element is already in the target state
				if ( val === "hide" && hidden || val === "show" && !hidden ) {
					return opt.complete.call( this );
				}

				if ( isElement && ( name === "height" || name === "width" ) ) {
					// Make sure that nothing sneaks out
					// Record all 3 overflow attributes because IE does not
					// change the overflow attribute when overflowX and
					// overflowY are set to the same value
					opt.overflow = [ this.style.overflow, this.style.overflowX, this.style.overflowY ];

					// Set display property to inline-block for height/width
					// animations on inline elements that are having width/height
					// animated
					if ( jQuery.css( this, "display" ) === "inline" &&
							jQuery.css( this, "float" ) === "none" ) {
						if ( !jQuery.support.inlineBlockNeedsLayout ) {
							this.style.display = "inline-block";

						} else {
							display = defaultDisplay( this.nodeName );

							// inline-level elements accept inline-block;
							// block-level elements need to be inline with layout
							if ( display === "inline" ) {
								this.style.display = "inline-block";

							} else {
								this.style.display = "inline";
								this.style.zoom = 1;
							}
						}
					}
				}
			}

			if ( opt.overflow != null ) {
				this.style.overflow = "hidden";
			}

			// Kick off one jQuery.fx per animated property
			for ( p in prop ) {
				e = new jQuery.fx( this, opt, p );
				val = prop[ p ];

				if ( rfxtypes.test(val) ) {
					e[ val === "toggle" ? hidden ? "show" : "hide" : val ]();

				} else {
					parts = rfxnum.exec( val );
					start = e.cur();

					if ( parts ) {
						end = parseFloat( parts[2] );
						unit = parts[3] || ( jQuery.cssNumber[ p ] ? "" : "px" );

						// We need to compute starting value
						if ( unit !== "px" ) {
							jQuery.style( this, p, (end || 1) + unit);
							start = ((end || 1) / e.cur()) * start;
							jQuery.style( this, p, start + unit);
						}

						// If a +=/-= token was provided, we're doing a relative animation
						if ( parts[1] ) {
							end = ( (parts[ 1 ] === "-=" ? -1 : 1) * end ) + start;
						}

						e.custom( start, end, unit );

					} else {
						e.custom( start, val, "" );
					}
				}
			}

			// For JS strict compliance
			return true;
		});
	},
+
	// Stop running animations on the matched elements.
	// clearQueue empties the fx queue; gotoEnd jumps each animation to its
	// final frame (running its completion logic) before removing its timer.
	stop: function( clearQueue, gotoEnd ) {
		if ( clearQueue ) {
			this.queue([]);
		}

		this.each(function() {
			var timers = jQuery.timers,
				i = timers.length;
			// clear marker counters if we know they won't be
			if ( !gotoEnd ) {
				jQuery._unmark( true, this );
			}
			// Iterate backwards so splicing doesn't skip entries
			while ( i-- ) {
				if ( timers[i].elem === this ) {
					if (gotoEnd) {
						// force the next step to be the last
						timers[i](true);
					}

					timers.splice(i, 1);
				}
			}
		});

		// start the next in the queue if the last step wasn't forced
		if ( !gotoEnd ) {
			this.dequeue();
		}

		return this;
	}
+
+});
+
// Animations created synchronously will run synchronously
function createFxNow() {
	// Cache the current timestamp for this tick and schedule the cache to
	// be invalidated as soon as the call stack unwinds
	setTimeout( clearFxNow, 0 );
	fxNow = jQuery.now();
	return fxNow;
}
+
// Invoked via setTimeout from createFxNow: drops the cached timestamp so
// the next animation step samples a fresh one
function clearFxNow() {
	fxNow = undefined;
}
+
// Generate parameters to create a standard animation:
// maps every CSS property affected by the first `num` fxAttrs groups
// to the given animation type ("show", "hide" or "toggle")
function genFx( type, num ) {
	var obj = {},
		attrs = fxAttrs.concat.apply( [], fxAttrs.slice( 0, num ) ),
		i;

	for ( i = 0; i < attrs.length; i++ ) {
		obj[ attrs[ i ] ] = type;
	}

	return obj;
}
+
// Generate shortcuts for custom animations
(function() {
	var shortcuts = {
			slideDown: genFx("show", 1),
			slideUp: genFx("hide", 1),
			slideToggle: genFx("toggle", 1),
			fadeIn: { opacity: "show" },
			fadeOut: { opacity: "hide" },
			fadeToggle: { opacity: "toggle" }
		},
		name;

	for ( name in shortcuts ) {
		// Capture the property set per iteration so every method
		// animates its own props object
		(function( props ) {
			jQuery.fn[ name ] = function( speed, easing, callback ) {
				return this.animate( props, speed, easing, callback );
			};
		})( shortcuts[ name ] );
	}
})();
+
jQuery.extend({
	// Normalize (speed, easing, fn) arguments into an options object.
	// Any argument may be omitted; an object passed first is cloned and
	// used directly.
	speed: function( speed, easing, fn ) {
		var opt = speed && typeof speed === "object" ? jQuery.extend({}, speed) : {
			complete: fn || !fn && easing ||
				jQuery.isFunction( speed ) && speed,
			duration: speed,
			easing: fn && easing || easing && !jQuery.isFunction(easing) && easing
		};

		// Resolve named speeds; jQuery.fx.off forces instant animations
		opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration :
			opt.duration in jQuery.fx.speeds ? jQuery.fx.speeds[opt.duration] : jQuery.fx.speeds._default;

		// Queueing
		opt.old = opt.complete;
		opt.complete = function( noUnmark ) {
			if ( jQuery.isFunction( opt.old ) ) {
				opt.old.call( this );
			}

			if ( opt.queue !== false ) {
				jQuery.dequeue( this );
			} else if ( noUnmark !== false ) {
				jQuery._unmark( this );
			}
		};

		return opt;
	},

	// Easing functions: map progress p in [0,1] onto an eased position
	easing: {
		linear: function( p, n, firstNum, diff ) {
			return firstNum + diff * p;
		},
		swing: function( p, n, firstNum, diff ) {
			return ((-Math.cos(p*Math.PI)/2) + 0.5) * diff + firstNum;
		}
	},

	// Active animation step functions, driven by jQuery.fx.tick
	timers: [],

	// Per-property animation controller; see jQuery.fx.prototype below
	fx: function( elem, options, prop ) {
		this.options = options;
		this.elem = elem;
		this.prop = prop;

		options.orig = options.orig || {};
	}

});
+
+jQuery.fx.prototype = {
+ // Simple function for setting a style value
+ update: function() {
+ if ( this.options.step ) {
+ this.options.step.call( this.elem, this.now, this );
+ }
+
+ (jQuery.fx.step[this.prop] || jQuery.fx.step._default)( this );
+ },
+
+ // Get the current size
+ cur: function() {
+ if ( this.elem[this.prop] != null && (!this.elem.style || this.elem.style[this.prop] == null) ) {
+ return this.elem[ this.prop ];
+ }
+
+ var parsed,
+ r = jQuery.css( this.elem, this.prop );
+ // Empty strings, null, undefined and "auto" are converted to 0,
+ // complex values such as "rotate(1rad)" are returned as is,
+ // simple values such as "10px" are parsed to Float.
+ return isNaN( parsed = parseFloat( r ) ) ? !r || r === "auto" ? 0 : r : parsed;
+ },
+
	// Start an animation from one number to another
	custom: function( from, to, unit ) {
		var self = this,
			fx = jQuery.fx;

		// fxNow keeps all animations started within the same tick in sync
		this.startTime = fxNow || createFxNow();
		this.start = from;
		this.end = to;
		this.unit = unit || this.unit || ( jQuery.cssNumber[ this.prop ] ? "" : "px" );
		this.now = this.start;
		this.pos = this.state = 0;

		function t( gotoEnd ) {
			return self.step(gotoEnd);
		}

		t.elem = this.elem;

		// Run the first step immediately; only register it and start the
		// shared interval timer if the animation isn't already finished
		if ( t() && jQuery.timers.push(t) && !timerId ) {
			timerId = setInterval( fx.tick, fx.interval );
		}
	},
+
	// Simple 'show' function
	show: function() {
		// Remember where we started, so that we can go back to it later
		this.options.orig[this.prop] = jQuery.style( this.elem, this.prop );
		// Flag consumed by step() when the animation completes
		this.options.show = true;

		// Begin the animation
		// Make sure that we start at a small width/height to avoid any
		// flash of content
		this.custom(this.prop === "width" || this.prop === "height" ? 1 : 0, this.cur());

		// Start by showing the element
		jQuery( this.elem ).show();
	},
+
	// Simple 'hide' function
	hide: function() {
		// Remember where we started, so that we can go back to it later
		this.options.orig[this.prop] = jQuery.style( this.elem, this.prop );
		// Flag consumed by step() when the animation completes
		this.options.hide = true;

		// Begin the animation
		this.custom(this.cur(), 0);
	},
+
	// Each step of an animation.
	// Returns true while the animation is still running, false once this
	// property has reached its final frame.
	step: function( gotoEnd ) {
		var t = fxNow || createFxNow(),
			done = true,
			elem = this.elem,
			options = this.options,
			i, n;

		if ( gotoEnd || t >= options.duration + this.startTime ) {
			this.now = this.end;
			this.pos = this.state = 1;
			this.update();

			options.animatedProperties[ this.prop ] = true;

			// The whole animation is only complete once every property is
			for ( i in options.animatedProperties ) {
				if ( options.animatedProperties[i] !== true ) {
					done = false;
				}
			}

			if ( done ) {
				// Reset the overflow
				if ( options.overflow != null && !jQuery.support.shrinkWrapBlocks ) {

					jQuery.each( [ "", "X", "Y" ], function (index, value) {
						elem.style[ "overflow" + value ] = options.overflow[index];
					});
				}

				// Hide the element if the "hide" operation was done
				if ( options.hide ) {
					jQuery(elem).hide();
				}

				// Reset the properties, if the item has been hidden or shown
				if ( options.hide || options.show ) {
					for ( var p in options.animatedProperties ) {
						jQuery.style( elem, p, options.orig[p] );
					}
				}

				// Execute the complete function
				options.complete.call( elem );
			}

			return false;

		} else {
			// classical easing cannot be used with an Infinity duration
			if ( options.duration == Infinity ) {
				this.now = t;
			} else {
				n = t - this.startTime;
				this.state = n / options.duration;

				// Perform the easing function, defaults to swing
				this.pos = jQuery.easing[ options.animatedProperties[ this.prop ] ]( this.state, n, 0, 1, options.duration );
				this.now = this.start + ((this.end - this.start) * this.pos);
			}
			// Perform the next step of the animation
			this.update();
		}

		return true;
	}
+};
+
+// Static fx helpers: the shared animation timer (tick/stop), the tick
+// interval, named speed presets, and the per-property 'step' writers that
+// push fx.now onto the element.
+jQuery.extend( jQuery.fx, {
+ // Runs every pending timer function; a falsy return means that animation
+ // finished and its timer is spliced out. When no timers remain the shared
+ // interval is stopped.
+ tick: function() {
+ for ( var timers = jQuery.timers, i = 0 ; i < timers.length ; ++i ) {
+ if ( !timers[i]() ) {
+ timers.splice(i--, 1);
+ }
+ }
+
+ if ( !timers.length ) {
+ jQuery.fx.stop();
+ }
+ },
+
+ // Milliseconds between animation ticks
+ interval: 13,
+
+ // Clears the shared interval timer started elsewhere via setInterval
+ stop: function() {
+ clearInterval( timerId );
+ timerId = null;
+ },
+
+ // Named durations (ms) accepted by the animation API
+ speeds: {
+ slow: 600,
+ fast: 200,
+ // Default speed
+ _default: 400
+ },
+
+ step: {
+ // Opacity goes through jQuery.style so any cross-browser handling applies
+ opacity: function( fx ) {
+ jQuery.style( fx.elem, "opacity", fx.now );
+ },
+
+ _default: function( fx ) {
+ // Style property if present (width/height clamped to >= 0),
+ // otherwise write the value directly on the element (e.g. scrollTop)
+ if ( fx.elem.style && fx.elem.style[ fx.prop ] != null ) {
+ fx.elem.style[ fx.prop ] = (fx.prop === "width" || fx.prop === "height" ? Math.max(0, fx.now) : fx.now) + fx.unit;
+ } else {
+ fx.elem[ fx.prop ] = fx.now;
+ }
+ }
+ }
+});
+
+// Register the :animated selector filter (when the selector engine is
+// present): an element matches if any pending timer in jQuery.timers
+// belongs to it.
+if ( jQuery.expr && jQuery.expr.filters ) {
+ jQuery.expr.filters.animated = function( elem ) {
+ return jQuery.grep(jQuery.timers, function( fn ) {
+ return elem === fn.elem;
+ }).length;
+ };
+}
+
+// Try to restore the default display value of an element
+// Try to restore the default display value of an element.
+// Results are memoized per nodeName in the module-level elemdisplay map.
+// First tries a throwaway element in the page body; if page CSS hides that
+// tag ("none"/"") it falls back to measuring inside a clean temp iframe so
+// stylesheet rules can't skew the answer.
+function defaultDisplay( nodeName ) {
+
+ if ( !elemdisplay[ nodeName ] ) {
+
+ var body = document.body,
+ elem = jQuery( "<" + nodeName + ">" ).appendTo( body ),
+ display = elem.css( "display" );
+
+ elem.remove();
+
+ // If the simple way fails,
+ // get element's real default display by attaching it to a temp iframe
+ if ( display === "none" || display === "" ) {
+ // No iframe to use yet, so create it
+ if ( !iframe ) {
+ iframe = document.createElement( "iframe" );
+ iframe.frameBorder = iframe.width = iframe.height = 0;
+ }
+
+ body.appendChild( iframe );
+
+ // Create a cacheable copy of the iframe document on first call.
+ // IE and Opera will allow us to reuse the iframeDoc without re-writing the fake HTML
+ // document to it; WebKit & Firefox won't allow reusing the iframe document.
+ if ( !iframeDoc || !iframe.createElement ) {
+ iframeDoc = ( iframe.contentWindow || iframe.contentDocument ).document;
+ iframeDoc.write( ( document.compatMode === "CSS1Compat" ? "<!doctype html>" : "" ) + "<html><body>" );
+ iframeDoc.close();
+ }
+
+ elem = iframeDoc.createElement( nodeName );
+
+ iframeDoc.body.appendChild( elem );
+
+ display = jQuery.css( elem, "display" );
+
+ body.removeChild( iframe );
+ }
+
+ // Store the correct default display
+ elemdisplay[ nodeName ] = display;
+ }
+
+ return elemdisplay[ nodeName ];
+}
+
+
+
+
+// rtable matches table/td/th node names; rroot matches body/html
+var rtable = /^t(?:able|d|h)$/i,
+ rroot = /^(?:body|html)$/i;
+
+// .offset([options]): get the element's position relative to the document,
+// or set it (delegated to jQuery.offset.setOffset) when options is given.
+// Two implementations are chosen at load time: a fast one based on
+// getBoundingClientRect, and a manual offsetParent walk for browsers
+// without it.
+if ( "getBoundingClientRect" in document.documentElement ) {
+ jQuery.fn.offset = function( options ) {
+ var elem = this[0], box;
+
+ // Setter form: apply the given coordinates to each element
+ if ( options ) {
+ return this.each(function( i ) {
+ jQuery.offset.setOffset( this, options, i );
+ });
+ }
+
+ if ( !elem || !elem.ownerDocument ) {
+ return null;
+ }
+
+ if ( elem === elem.ownerDocument.body ) {
+ return jQuery.offset.bodyOffset( elem );
+ }
+
+ // getBoundingClientRect can throw (e.g. on detached nodes in some
+ // browsers), so guard it; box stays undefined on failure
+ try {
+ box = elem.getBoundingClientRect();
+ } catch(e) {}
+
+ var doc = elem.ownerDocument,
+ docElem = doc.documentElement;
+
+ // Make sure we're not dealing with a disconnected DOM node
+ if ( !box || !jQuery.contains( docElem, elem ) ) {
+ return box ? { top: box.top, left: box.left } : { top: 0, left: 0 };
+ }
+
+ // Viewport-relative rect + scroll offsets - root client borders
+ // = document-relative coordinates
+ var body = doc.body,
+ win = getWindow(doc),
+ clientTop = docElem.clientTop || body.clientTop || 0,
+ clientLeft = docElem.clientLeft || body.clientLeft || 0,
+ scrollTop = win.pageYOffset || jQuery.support.boxModel && docElem.scrollTop || body.scrollTop,
+ scrollLeft = win.pageXOffset || jQuery.support.boxModel && docElem.scrollLeft || body.scrollLeft,
+ top = box.top + scrollTop - clientTop,
+ left = box.left + scrollLeft - clientLeft;
+
+ return { top: top, left: left };
+ };
+
+} else {
+ // Fallback: accumulate offsetTop/offsetLeft while walking up the
+ // offsetParent chain, compensating for browser quirks detected by
+ // jQuery.offset.initialize()
+ jQuery.fn.offset = function( options ) {
+ var elem = this[0];
+
+ // Setter form: apply the given coordinates to each element
+ if ( options ) {
+ return this.each(function( i ) {
+ jQuery.offset.setOffset( this, options, i );
+ });
+ }
+
+ if ( !elem || !elem.ownerDocument ) {
+ return null;
+ }
+
+ if ( elem === elem.ownerDocument.body ) {
+ return jQuery.offset.bodyOffset( elem );
+ }
+
+ // Lazily run the quirk-detection probes (no-op after first call)
+ jQuery.offset.initialize();
+
+ var computedStyle,
+ offsetParent = elem.offsetParent,
+ prevOffsetParent = elem,
+ doc = elem.ownerDocument,
+ docElem = doc.documentElement,
+ body = doc.body,
+ defaultView = doc.defaultView,
+ prevComputedStyle = defaultView ? defaultView.getComputedStyle( elem, null ) : elem.currentStyle,
+ top = elem.offsetTop,
+ left = elem.offsetLeft;
+
+ while ( (elem = elem.parentNode) && elem !== body && elem !== docElem ) {
+ // Fixed-position ancestors end the walk; scrolling above them
+ // doesn't move the element
+ if ( jQuery.offset.supportsFixedPosition && prevComputedStyle.position === "fixed" ) {
+ break;
+ }
+
+ computedStyle = defaultView ? defaultView.getComputedStyle(elem, null) : elem.currentStyle;
+ top -= elem.scrollTop;
+ left -= elem.scrollLeft;
+
+ if ( elem === offsetParent ) {
+ top += elem.offsetTop;
+ left += elem.offsetLeft;
+
+ // Some browsers exclude the offsetParent's border from
+ // offsetTop/Left (except for tables/cells in some of them)
+ if ( jQuery.offset.doesNotAddBorder && !(jQuery.offset.doesAddBorderForTableAndCells && rtable.test(elem.nodeName)) ) {
+ top += parseFloat( computedStyle.borderTopWidth ) || 0;
+ left += parseFloat( computedStyle.borderLeftWidth ) || 0;
+ }
+
+ prevOffsetParent = offsetParent;
+ offsetParent = elem.offsetParent;
+ }
+
+ if ( jQuery.offset.subtractsBorderForOverflowNotVisible && computedStyle.overflow !== "visible" ) {
+ top += parseFloat( computedStyle.borderTopWidth ) || 0;
+ left += parseFloat( computedStyle.borderLeftWidth ) || 0;
+ }
+
+ prevComputedStyle = computedStyle;
+ }
+
+ if ( prevComputedStyle.position === "relative" || prevComputedStyle.position === "static" ) {
+ top += body.offsetTop;
+ left += body.offsetLeft;
+ }
+
+ if ( jQuery.offset.supportsFixedPosition && prevComputedStyle.position === "fixed" ) {
+ top += Math.max( docElem.scrollTop, body.scrollTop );
+ left += Math.max( docElem.scrollLeft, body.scrollLeft );
+ }
+
+ return { top: top, left: left };
+ };
+}
+
+// Support probes and helpers for the offset getter/setter above.
+jQuery.offset = {
+ // Builds a hidden probe container in the body and measures it to detect
+ // per-browser offset quirks, recording the results as boolean flags on
+ // this object. Replaces itself with jQuery.noop so it runs at most once.
+ initialize: function() {
+ var body = document.body, container = document.createElement("div"), innerDiv, checkDiv, table, td, bodyMarginTop = parseFloat( jQuery.css(body, "marginTop") ) || 0,
+ html = "<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";
+
+ jQuery.extend( container.style, { position: "absolute", top: 0, left: 0, margin: 0, border: 0, width: "1px", height: "1px", visibility: "hidden" } );
+
+ container.innerHTML = html;
+ body.insertBefore( container, body.firstChild );
+ innerDiv = container.firstChild;
+ checkDiv = innerDiv.firstChild;
+ td = innerDiv.nextSibling.firstChild.firstChild;
+
+ // Parent has a 5px border; if the child's offsetTop isn't 5 the
+ // browser does not include the parent border in offsetTop
+ this.doesNotAddBorder = (checkDiv.offsetTop !== 5);
+ this.doesAddBorderForTableAndCells = (td.offsetTop === 5);
+
+ checkDiv.style.position = "fixed";
+ checkDiv.style.top = "20px";
+
+ // safari subtracts parent border width here which is 5px
+ this.supportsFixedPosition = (checkDiv.offsetTop === 20 || checkDiv.offsetTop === 15);
+ checkDiv.style.position = checkDiv.style.top = "";
+
+ innerDiv.style.overflow = "hidden";
+ innerDiv.style.position = "relative";
+
+ this.subtractsBorderForOverflowNotVisible = (checkDiv.offsetTop === -5);
+
+ this.doesNotIncludeMarginInBodyOffset = (body.offsetTop !== bodyMarginTop);
+
+ body.removeChild( container );
+ // Run the probes only once per page
+ jQuery.offset.initialize = jQuery.noop;
+ },
+
+ // Document offset of the body element itself, adding its margins back
+ // in browsers where body.offsetTop/Left exclude them
+ bodyOffset: function( body ) {
+ var top = body.offsetTop,
+ left = body.offsetLeft;
+
+ jQuery.offset.initialize();
+
+ if ( jQuery.offset.doesNotIncludeMarginInBodyOffset ) {
+ top += parseFloat( jQuery.css(body, "marginTop") ) || 0;
+ left += parseFloat( jQuery.css(body, "marginLeft") ) || 0;
+ }
+
+ return { top: top, left: left };
+ },
+
+ // Setter backing .offset({top, left}): moves elem so its document
+ // offset becomes the requested coordinates by adjusting its CSS
+ // top/left relative to where it currently is. options may also be a
+ // function (called with index and current offset) or carry a 'using'
+ // callback that receives the computed props instead of applying them.
+ setOffset: function( elem, options, i ) {
+ var position = jQuery.css( elem, "position" );
+
+ // set position first, in-case top/left are set even on static elem
+ if ( position === "static" ) {
+ elem.style.position = "relative";
+ }
+
+ var curElem = jQuery( elem ),
+ curOffset = curElem.offset(),
+ curCSSTop = jQuery.css( elem, "top" ),
+ curCSSLeft = jQuery.css( elem, "left" ),
+ calculatePosition = (position === "absolute" || position === "fixed") && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1,
+ props = {}, curPosition = {}, curTop, curLeft;
+
+ // need to be able to calculate position if either top or left is auto and position is either absolute or fixed
+ if ( calculatePosition ) {
+ curPosition = curElem.position();
+ curTop = curPosition.top;
+ curLeft = curPosition.left;
+ } else {
+ curTop = parseFloat( curCSSTop ) || 0;
+ curLeft = parseFloat( curCSSLeft ) || 0;
+ }
+
+ if ( jQuery.isFunction( options ) ) {
+ options = options.call( elem, i, curOffset );
+ }
+
+ if (options.top != null) {
+ props.top = (options.top - curOffset.top) + curTop;
+ }
+ if (options.left != null) {
+ props.left = (options.left - curOffset.left) + curLeft;
+ }
+
+ if ( "using" in options ) {
+ options.using.call( elem, props );
+ } else {
+ curElem.css( props );
+ }
+ }
+};
+
+
+jQuery.fn.extend({
+ // .position(): coordinates of the first element relative to its
+ // offsetParent's padding box — document offset minus the element's own
+ // margins minus the offsetParent's document offset plus its borders.
+ position: function() {
+ if ( !this[0] ) {
+ return null;
+ }
+
+ var elem = this[0],
+
+ // Get *real* offsetParent
+ offsetParent = this.offsetParent(),
+
+ // Get correct offsets
+ offset = this.offset(),
+ parentOffset = rroot.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset();
+
+ // Subtract element margins
+ // note: when an element has margin: auto the offsetLeft and marginLeft
+ // are the same in Safari causing offset.left to incorrectly be 0
+ offset.top -= parseFloat( jQuery.css(elem, "marginTop") ) || 0;
+ offset.left -= parseFloat( jQuery.css(elem, "marginLeft") ) || 0;
+
+ // Add offsetParent borders
+ parentOffset.top += parseFloat( jQuery.css(offsetParent[0], "borderTopWidth") ) || 0;
+ parentOffset.left += parseFloat( jQuery.css(offsetParent[0], "borderLeftWidth") ) || 0;
+
+ // Subtract the two offsets
+ return {
+ top: offset.top - parentOffset.top,
+ left: offset.left - parentOffset.left
+ };
+ },
+
+ // .offsetParent(): nearest ancestor (per the DOM offsetParent chain)
+ // that is positioned (non-static), stopping at body/html; falls back
+ // to document.body when the element has no offsetParent.
+ offsetParent: function() {
+ return this.map(function() {
+ var offsetParent = this.offsetParent || document.body;
+ while ( offsetParent && (!rroot.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) {
+ offsetParent = offsetParent.offsetParent;
+ }
+ return offsetParent;
+ });
+ }
+});
+
+
+// Create scrollLeft and scrollTop methods
+// Generates .scrollLeft() and .scrollTop(): with no argument they return
+// the first element's scroll offset (using pageX/YOffset for windows when
+// available, with documentElement/body fallbacks); with a value they set
+// the offset on every element, via window.scrollTo for windows.
+jQuery.each( ["Left", "Top"], function( i, name ) {
+ var method = "scroll" + name;
+
+ jQuery.fn[ method ] = function( val ) {
+ var elem, win;
+
+ // Getter form
+ if ( val === undefined ) {
+ elem = this[ 0 ];
+
+ if ( !elem ) {
+ return null;
+ }
+
+ win = getWindow( elem );
+
+ // Return the scroll offset
+ // i is 0 for "Left", 1 for "Top", selecting the matching axis
+ return win ? ("pageXOffset" in win) ? win[ i ? "pageYOffset" : "pageXOffset" ] :
+ jQuery.support.boxModel && win.document.documentElement[ method ] ||
+ win.document.body[ method ] :
+ elem[ method ];
+ }
+
+ // Set the scroll offset
+ return this.each(function() {
+ win = getWindow( this );
+
+ if ( win ) {
+ // scrollTo needs both axes; keep the other axis unchanged
+ win.scrollTo(
+ !i ? val : jQuery( win ).scrollLeft(),
+ i ? val : jQuery( win ).scrollTop()
+ );
+
+ } else {
+ this[ method ] = val;
+ }
+ });
+ };
+});
+
+// Resolves elem to its window: the element itself if it already is a
+// window, the owning window for a document node (nodeType 9, with the IE
+// parentWindow fallback), and false for anything else.
+function getWindow( elem ) {
+ return jQuery.isWindow( elem ) ?
+ elem :
+ elem.nodeType === 9 ?
+ elem.defaultView || elem.parentWindow :
+ false;
+}
+
+
+
+
+// Create width, height, innerHeight, innerWidth, outerHeight and outerWidth methods
+// Generates the six dimension methods for both axes:
+// .innerHeight/.innerWidth (content + padding),
+// .outerHeight/.outerWidth (+ border, optionally + margin),
+// and .height/.width as getter/setter with special-casing for windows
+// and documents.
+jQuery.each([ "Height", "Width" ], function( i, name ) {
+
+ var type = name.toLowerCase();
+
+ // innerHeight and innerWidth
+ jQuery.fn[ "inner" + name ] = function() {
+ var elem = this[0];
+ return elem && elem.style ?
+ parseFloat( jQuery.css( elem, type, "padding" ) ) :
+ null;
+ };
+
+ // outerHeight and outerWidth
+ jQuery.fn[ "outer" + name ] = function( margin ) {
+ var elem = this[0];
+ return elem && elem.style ?
+ parseFloat( jQuery.css( elem, type, margin ? "margin" : "border" ) ) :
+ null;
+ };
+
+ // .height()/.width(): getter on the first element, setter on all.
+ // size may be a number (treated as px), a CSS string, or a function
+ // called per element with (index, currentValue).
+ jQuery.fn[ type ] = function( size ) {
+ // Get window width or height
+ var elem = this[0];
+ if ( !elem ) {
+ return size == null ? null : this;
+ }
+
+ if ( jQuery.isFunction( size ) ) {
+ return this.each(function( i ) {
+ var self = jQuery( this );
+ self[ type ]( size.call( this, i, self[ type ]() ) );
+ });
+ }
+
+ if ( jQuery.isWindow( elem ) ) {
+ // Everyone else use document.documentElement or document.body depending on Quirks vs Standards mode
+ // 3rd condition allows Nokia support, as it supports the docElem prop but not CSS1Compat
+ var docElemProp = elem.document.documentElement[ "client" + name ],
+ body = elem.document.body;
+ return elem.document.compatMode === "CSS1Compat" && docElemProp ||
+ body && body[ "client" + name ] || docElemProp;
+
+ // Get document width or height
+ } else if ( elem.nodeType === 9 ) {
+ // Either scroll[Width/Height] or offset[Width/Height], whichever is greater
+ return Math.max(
+ elem.documentElement["client" + name],
+ elem.body["scroll" + name], elem.documentElement["scroll" + name],
+ elem.body["offset" + name], elem.documentElement["offset" + name]
+ );
+
+ // Get or set width or height on the element
+ } else if ( size === undefined ) {
+ // Fall back to the raw CSS string (e.g. "auto") when it isn't numeric
+ var orig = jQuery.css( elem, type ),
+ ret = parseFloat( orig );
+
+ return jQuery.isNaN( ret ) ? orig : ret;
+
+ // Set the width or height on the element (default to pixels if value is unitless)
+ } else {
+ return this.css( type, typeof size === "string" ? size : size + "px" );
+ }
+ };
+
+});
+
+
+// Expose jQuery to the global object
+window.jQuery = window.$ = jQuery;
+})(window);
--- /dev/null
+/*! jQuery v1.6.4 http://jquery.com/ | http://jquery.org/license */
+(function(a,b){function cu(a){return f.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:!1}function cr(a){if(!cg[a]){var b=c.body,d=f("<"+a+">").appendTo(b),e=d.css("display");d.remove();if(e==="none"||e===""){ch||(ch=c.createElement("iframe"),ch.frameBorder=ch.width=ch.height=0),b.appendChild(ch);if(!ci||!ch.createElement)ci=(ch.contentWindow||ch.contentDocument).document,ci.write((c.compatMode==="CSS1Compat"?"<!doctype html>":"")+"<html><body>"),ci.close();d=ci.createElement(a),ci.body.appendChild(d),e=f.css(d,"display"),b.removeChild(ch)}cg[a]=e}return cg[a]}function cq(a,b){var c={};f.each(cm.concat.apply([],cm.slice(0,b)),function(){c[this]=a});return c}function cp(){cn=b}function co(){setTimeout(cp,0);return cn=f.now()}function cf(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}function ce(){try{return new a.XMLHttpRequest}catch(b){}}function b$(a,c){a.dataFilter&&(c=a.dataFilter(c,a.dataType));var d=a.dataTypes,e={},g,h,i=d.length,j,k=d[0],l,m,n,o,p;for(g=1;g<i;g++){if(g===1)for(h in a.converters)typeof h=="string"&&(e[h.toLowerCase()]=a.converters[h]);l=k,k=d[g];if(k==="*")k=l;else if(l!=="*"&&l!==k){m=l+" "+k,n=e[m]||e["* "+k];if(!n){p=b;for(o in e){j=o.split(" ");if(j[0]===l||j[0]==="*"){p=e[j[1]+" "+k];if(p){o=e[o],o===!0?n=p:p===!0&&(n=o);break}}}}!n&&!p&&f.error("No conversion from "+m.replace(" "," to ")),n!==!0&&(c=n?n(c):p(o(c)))}}return c}function bZ(a,c,d){var e=a.contents,f=a.dataTypes,g=a.responseFields,h,i,j,k;for(i in g)i in d&&(c[g[i]]=d[i]);while(f[0]==="*")f.shift(),h===b&&(h=a.mimeType||c.getResponseHeader("content-type"));if(h)for(i in e)if(e[i]&&e[i].test(h)){f.unshift(i);break}if(f[0]in d)j=f[0];else{for(i in d){if(!f[0]||a.converters[i+" "+f[0]]){j=i;break}k||(k=i)}j=j||k}if(j){j!==f[0]&&f.unshift(j);return d[j]}}function bY(a,b,c,d){if(f.isArray(b))f.each(b,function(b,e){c||bA.test(a)?d(a,e):bY(a+"["+(typeof e=="object"||f.isArray(e)?b:"")+"]",e,c,d)});else if(!c&&b!=null&&typeof b=="object")for(var e in 
b)bY(a+"["+e+"]",b[e],c,d);else d(a,b)}function bX(a,c){var d,e,g=f.ajaxSettings.flatOptions||{};for(d in c)c[d]!==b&&((g[d]?a:e||(e={}))[d]=c[d]);e&&f.extend(!0,a,e)}function bW(a,c,d,e,f,g){f=f||c.dataTypes[0],g=g||{},g[f]=!0;var h=a[f],i=0,j=h?h.length:0,k=a===bP,l;for(;i<j&&(k||!l);i++)l=h[i](c,d,e),typeof l=="string"&&(!k||g[l]?l=b:(c.dataTypes.unshift(l),l=bW(a,c,d,e,l,g)));(k||!l)&&!g["*"]&&(l=bW(a,c,d,e,"*",g));return l}function bV(a){return function(b,c){typeof b!="string"&&(c=b,b="*");if(f.isFunction(c)){var d=b.toLowerCase().split(bL),e=0,g=d.length,h,i,j;for(;e<g;e++)h=d[e],j=/^\+/.test(h),j&&(h=h.substr(1)||"*"),i=a[h]=a[h]||[],i[j?"unshift":"push"](c)}}}function by(a,b,c){var d=b==="width"?a.offsetWidth:a.offsetHeight,e=b==="width"?bt:bu;if(d>0){c!=="border"&&f.each(e,function(){c||(d-=parseFloat(f.css(a,"padding"+this))||0),c==="margin"?d+=parseFloat(f.css(a,c+this))||0:d-=parseFloat(f.css(a,"border"+this+"Width"))||0});return d+"px"}d=bv(a,b,b);if(d<0||d==null)d=a.style[b]||0;d=parseFloat(d)||0,c&&f.each(e,function(){d+=parseFloat(f.css(a,"padding"+this))||0,c!=="padding"&&(d+=parseFloat(f.css(a,"border"+this+"Width"))||0),c==="margin"&&(d+=parseFloat(f.css(a,c+this))||0)});return d+"px"}function bl(a,b){b.src?f.ajax({url:b.src,async:!1,dataType:"script"}):f.globalEval((b.text||b.textContent||b.innerHTML||"").replace(bd,"/*$0*/")),b.parentNode&&b.parentNode.removeChild(b)}function bk(a){f.nodeName(a,"input")?bj(a):"getElementsByTagName"in a&&f.grep(a.getElementsByTagName("input"),bj)}function bj(a){if(a.type==="checkbox"||a.type==="radio")a.defaultChecked=a.checked}function bi(a){return"getElementsByTagName"in a?a.getElementsByTagName("*"):"querySelectorAll"in a?a.querySelectorAll("*"):[]}function bh(a,b){var c;if(b.nodeType===1){b.clearAttributes&&b.clearAttributes(),b.mergeAttributes&&b.mergeAttributes(a),c=b.nodeName.toLowerCase();if(c==="object")b.outerHTML=a.outerHTML;else 
if(c!=="input"||a.type!=="checkbox"&&a.type!=="radio"){if(c==="option")b.selected=a.defaultSelected;else if(c==="input"||c==="textarea")b.defaultValue=a.defaultValue}else a.checked&&(b.defaultChecked=b.checked=a.checked),b.value!==a.value&&(b.value=a.value);b.removeAttribute(f.expando)}}function bg(a,b){if(b.nodeType===1&&!!f.hasData(a)){var c=f.expando,d=f.data(a),e=f.data(b,d);if(d=d[c]){var g=d.events;e=e[c]=f.extend({},d);if(g){delete e.handle,e.events={};for(var h in g)for(var i=0,j=g[h].length;i<j;i++)f.event.add(b,h+(g[h][i].namespace?".":"")+g[h][i].namespace,g[h][i],g[h][i].data)}}}}function bf(a,b){return f.nodeName(a,"table")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function V(a,b,c){b=b||0;if(f.isFunction(b))return f.grep(a,function(a,d){var e=!!b.call(a,d,a);return e===c});if(b.nodeType)return f.grep(a,function(a,d){return a===b===c});if(typeof b=="string"){var d=f.grep(a,function(a){return a.nodeType===1});if(Q.test(b))return f.filter(b,d,!c);b=f.filter(b,d)}return f.grep(a,function(a,d){return f.inArray(a,b)>=0===c})}function U(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function M(a,b){return(a&&a!=="*"?a+".":"")+b.replace(y,"`").replace(z,"&")}function L(a){var b,c,d,e,g,h,i,j,k,l,m,n,o,p=[],q=[],r=f._data(this,"events");if(!(a.liveFired===this||!r||!r.live||a.target.disabled||a.button&&a.type==="click")){a.namespace&&(n=new RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)")),a.liveFired=this;var 
s=r.live.slice(0);for(i=0;i<s.length;i++)g=s[i],g.origType.replace(w,"")===a.type?q.push(g.selector):s.splice(i--,1);e=f(a.target).closest(q,a.currentTarget);for(j=0,k=e.length;j<k;j++){m=e[j];for(i=0;i<s.length;i++){g=s[i];if(m.selector===g.selector&&(!n||n.test(g.namespace))&&!m.elem.disabled){h=m.elem,d=null;if(g.preType==="mouseenter"||g.preType==="mouseleave")a.type=g.preType,d=f(a.relatedTarget).closest(g.selector)[0],d&&f.contains(h,d)&&(d=h);(!d||d!==h)&&p.push({elem:h,handleObj:g,level:m.level})}}}for(j=0,k=p.length;j<k;j++){e=p[j];if(c&&e.level>c)break;a.currentTarget=e.elem,a.data=e.handleObj.data,a.handleObj=e.handleObj,o=e.handleObj.origHandler.apply(e.elem,arguments);if(o===!1||a.isPropagationStopped()){c=e.level,o===!1&&(b=!1);if(a.isImmediatePropagationStopped())break}}return b}}function J(a,c,d){var e=f.extend({},d[0]);e.type=a,e.originalEvent={},e.liveFired=b,f.event.handle.call(c,e),e.isDefaultPrevented()&&d[0].preventDefault()}function D(){return!0}function C(){return!1}function m(a,c,d){var e=c+"defer",g=c+"queue",h=c+"mark",i=f.data(a,e,b,!0);i&&(d==="queue"||!f.data(a,g,b,!0))&&(d==="mark"||!f.data(a,h,b,!0))&&setTimeout(function(){!f.data(a,g,b,!0)&&!f.data(a,h,b,!0)&&(f.removeData(a,e,!0),i.resolve())},0)}function l(a){for(var b in a)if(b!=="toJSON")return!1;return!0}function k(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(j,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:f.isNaN(d)?i.test(d)?f.parseJSON(d):d:parseFloat(d)}catch(g){}f.data(a,c,d)}else d=b}return d}var c=a.document,d=a.navigator,e=a.location,f=function(){function K(){if(!e.isReady){try{c.documentElement.doScroll("left")}catch(a){setTimeout(K,1);return}e.ready()}}var e=function(a,b){return new 
e.fn.init(a,b,h)},f=a.jQuery,g=a.$,h,i=/^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,j=/\S/,k=/^\s+/,l=/\s+$/,m=/\d/,n=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,o=/^[\],:{}\s]*$/,p=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,q=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,r=/(?:^|:|,)(?:\s*\[)+/g,s=/(webkit)[ \/]([\w.]+)/,t=/(opera)(?:.*version)?[ \/]([\w.]+)/,u=/(msie) ([\w.]+)/,v=/(mozilla)(?:.*? rv:([\w.]+))?/,w=/-([a-z]|[0-9])/ig,x=/^-ms-/,y=function(a,b){return(b+"").toUpperCase()},z=d.userAgent,A,B,C,D=Object.prototype.toString,E=Object.prototype.hasOwnProperty,F=Array.prototype.push,G=Array.prototype.slice,H=String.prototype.trim,I=Array.prototype.indexOf,J={};e.fn=e.prototype={constructor:e,init:function(a,d,f){var g,h,j,k;if(!a)return this;if(a.nodeType){this.context=this[0]=a,this.length=1;return this}if(a==="body"&&!d&&c.body){this.context=c,this[0]=c.body,this.selector=a,this.length=1;return this}if(typeof a=="string"){a.charAt(0)!=="<"||a.charAt(a.length-1)!==">"||a.length<3?g=i.exec(a):g=[null,a,null];if(g&&(g[1]||!d)){if(g[1]){d=d instanceof e?d[0]:d,k=d?d.ownerDocument||d:c,j=n.exec(a),j?e.isPlainObject(d)?(a=[c.createElement(j[1])],e.fn.attr.call(a,d,!0)):a=[k.createElement(j[1])]:(j=e.buildFragment([g[1]],[k]),a=(j.cacheable?e.clone(j.fragment):j.fragment).childNodes);return e.merge(this,a)}h=c.getElementById(g[2]);if(h&&h.parentNode){if(h.id!==g[2])return f.find(a);this.length=1,this[0]=h}this.context=c,this.selector=a;return this}return!d||d.jquery?(d||f).find(a):this.constructor(d).find(a)}if(e.isFunction(a))return f.ready(a);a.selector!==b&&(this.selector=a.selector,this.context=a.context);return e.makeArray(a,this)},selector:"",jquery:"1.6.4",length:0,size:function(){return this.length},toArray:function(){return G.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var 
d=this.constructor();e.isArray(a)?F.apply(d,a):e.merge(d,a),d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" ":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")");return d},each:function(a,b){return e.each(this,a,b)},ready:function(a){e.bindReady(),B.done(a);return this},eq:function(a){return a===-1?this.slice(a):this.slice(a,+a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(G.apply(this,arguments),"slice",G.call(arguments).join(","))},map:function(a){return this.pushStack(e.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:F,sort:[].sort,splice:[].splice},e.fn.init.prototype=e.fn,e.extend=e.fn.extend=function(){var a,c,d,f,g,h,i=arguments[0]||{},j=1,k=arguments.length,l=!1;typeof i=="boolean"&&(l=i,i=arguments[1]||{},j=2),typeof i!="object"&&!e.isFunction(i)&&(i={}),k===j&&(i=this,--j);for(;j<k;j++)if((a=arguments[j])!=null)for(c in a){d=i[c],f=a[c];if(i===f)continue;l&&f&&(e.isPlainObject(f)||(g=e.isArray(f)))?(g?(g=!1,h=d&&e.isArray(d)?d:[]):h=d&&e.isPlainObject(d)?d:{},i[c]=e.extend(l,h,f)):f!==b&&(i[c]=f)}return i},e.extend({noConflict:function(b){a.$===e&&(a.$=g),b&&a.jQuery===e&&(a.jQuery=f);return e},isReady:!1,readyWait:1,holdReady:function(a){a?e.readyWait++:e.ready(!0)},ready:function(a){if(a===!0&&!--e.readyWait||a!==!0&&!e.isReady){if(!c.body)return setTimeout(e.ready,1);e.isReady=!0;if(a!==!0&&--e.readyWait>0)return;B.resolveWith(c,[e]),e.fn.trigger&&e(c).trigger("ready").unbind("ready")}},bindReady:function(){if(!B){B=e._Deferred();if(c.readyState==="complete")return setTimeout(e.ready,1);if(c.addEventListener)c.addEventListener("DOMContentLoaded",C,!1),a.addEventListener("load",e.ready,!1);else if(c.attachEvent){c.attachEvent("onreadystatechange",C),a.attachEvent("onload",e.ready);var 
b=!1;try{b=a.frameElement==null}catch(d){}c.documentElement.doScroll&&b&&K()}}},isFunction:function(a){return e.type(a)==="function"},isArray:Array.isArray||function(a){return e.type(a)==="array"},isWindow:function(a){return a&&typeof a=="object"&&"setInterval"in a},isNaN:function(a){return a==null||!m.test(a)||isNaN(a)},type:function(a){return a==null?String(a):J[D.call(a)]||"object"},isPlainObject:function(a){if(!a||e.type(a)!=="object"||a.nodeType||e.isWindow(a))return!1;try{if(a.constructor&&!E.call(a,"constructor")&&!E.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return d===b||E.call(a,d)},isEmptyObject:function(a){for(var b in a)return!1;return!0},error:function(a){throw a},parseJSON:function(b){if(typeof b!="string"||!b)return null;b=e.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(o.test(b.replace(p,"@").replace(q,"]").replace(r,"")))return(new Function("return "+b))();e.error("Invalid JSON: "+b)},parseXML:function(c){var d,f;try{a.DOMParser?(f=new DOMParser,d=f.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(g){d=b}(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&e.error("Invalid XML: "+c);return d},noop:function(){},globalEval:function(b){b&&j.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(x,"ms-").replace(w,y)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,c,d){var f,g=0,h=a.length,i=h===b||e.isFunction(a);if(d){if(i){for(f in a)if(c.apply(a[f],d)===!1)break}else for(;g<h;)if(c.apply(a[g++],d)===!1)break}else if(i){for(f in a)if(c.call(a[f],f,a[f])===!1)break}else for(;g<h;)if(c.call(a[g],g,a[g++])===!1)break;return a},trim:H?function(a){return a==null?"":H.call(a)}:function(a){return a==null?"":(a+"").replace(k,"").replace(l,"")},makeArray:function(a,b){var c=b||[];if(a!=null){var 
d=e.type(a);a.length==null||d==="string"||d==="function"||d==="regexp"||e.isWindow(a)?F.call(c,a):e.merge(c,a)}return c},inArray:function(a,b){if(!b)return-1;if(I)return I.call(b,a);for(var c=0,d=b.length;c<d;c++)if(b[c]===a)return c;return-1},merge:function(a,c){var d=a.length,e=0;if(typeof c.length=="number")for(var f=c.length;e<f;e++)a[d++]=c[e];else while(c[e]!==b)a[d++]=c[e++];a.length=d;return a},grep:function(a,b,c){var d=[],e;c=!!c;for(var f=0,g=a.length;f<g;f++)e=!!b(a[f],f),c!==e&&d.push(a[f]);return d},map:function(a,c,d){var f,g,h=[],i=0,j=a.length,k=a instanceof e||j!==b&&typeof j=="number"&&(j>0&&a[0]&&a[j-1]||j===0||e.isArray(a));if(k)for(;i<j;i++)f=c(a[i],i,d),f!=null&&(h[h.length]=f);else for(g in a)f=c(a[g],g,d),f!=null&&(h[h.length]=f);return h.concat.apply([],h)},guid:1,proxy:function(a,c){if(typeof c=="string"){var d=a[c];c=a,a=d}if(!e.isFunction(a))return b;var f=G.call(arguments,2),g=function(){return a.apply(c,f.concat(G.call(arguments)))};g.guid=a.guid=a.guid||g.guid||e.guid++;return g},access:function(a,c,d,f,g,h){var i=a.length;if(typeof c=="object"){for(var j in c)e.access(a,j,c[j],f,g,d);return a}if(d!==b){f=!h&&f&&e.isFunction(d);for(var k=0;k<i;k++)g(a[k],c,f?d.call(a[k],k,g(a[k],c)):d,h);return a}return i?g(a[0],c):b},now:function(){return(new Date).getTime()},uaMatch:function(a){a=a.toLowerCase();var b=s.exec(a)||t.exec(a)||u.exec(a)||a.indexOf("compatible")<0&&v.exec(a)||[];return{browser:b[1]||"",version:b[2]||"0"}},sub:function(){function a(b,c){return new a.fn.init(b,c)}e.extend(!0,a,this),a.superclass=this,a.fn=a.prototype=this(),a.fn.constructor=a,a.sub=this.sub,a.fn.init=function(d,f){f&&f instanceof e&&!(f instanceof a)&&(f=a(f));return e.fn.init.call(this,d,f,b)},a.fn.init.prototype=a.fn;var b=a(c);return a},browser:{}}),e.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(a,b){J["[object 
"+b+"]"]=b.toLowerCase()}),A=e.uaMatch(z),A.browser&&(e.browser[A.browser]=!0,e.browser.version=A.version),e.browser.webkit&&(e.browser.safari=!0),j.test("Â ")&&(k=/^[\s\xA0]+/,l=/[\s\xA0]+$/),h=e(c),c.addEventListener?C=function(){c.removeEventListener("DOMContentLoaded",C,!1),e.ready()}:c.attachEvent&&(C=function(){c.readyState==="complete"&&(c.detachEvent("onreadystatechange",C),e.ready())});return e}(),g="done fail isResolved isRejected promise then always pipe".split(" "),h=[].slice;f.extend({_Deferred:function(){var a=[],b,c,d,e={done:function(){if(!d){var c=arguments,g,h,i,j,k;b&&(k=b,b=0);for(g=0,h=c.length;g<h;g++)i=c[g],j=f.type(i),j==="array"?e.done.apply(e,i):j==="function"&&a.push(i);k&&e.resolveWith(k[0],k[1])}return this},resolveWith:function(e,f){if(!d&&!b&&!c){f=f||[],c=1;try{while(a[0])a.shift().apply(e,f)}finally{b=[e,f],c=0}}return this},resolve:function(){e.resolveWith(this,arguments);return this},isResolved:function(){return!!c||!!b},cancel:function(){d=1,a=[];return this}};return e},Deferred:function(a){var b=f._Deferred(),c=f._Deferred(),d;f.extend(b,{then:function(a,c){b.done(a).fail(c);return this},always:function(){return b.done.apply(b,arguments).fail.apply(this,arguments)},fail:c.done,rejectWith:c.resolveWith,reject:c.resolve,isRejected:c.isResolved,pipe:function(a,c){return f.Deferred(function(d){f.each({done:[a,"resolve"],fail:[c,"reject"]},function(a,c){var e=c[0],g=c[1],h;f.isFunction(e)?b[a](function(){h=e.apply(this,arguments),h&&f.isFunction(h.promise)?h.promise().then(d.resolve,d.reject):d[g+"With"](this===b?d:this,[h])}):b[a](d[g])})}).promise()},promise:function(a){if(a==null){if(d)return d;d=a={}}var c=g.length;while(c--)a[g[c]]=b[g[c]];return a}}),b.done(c.cancel).fail(b.cancel),delete b.cancel,a&&a.call(b,b);return b},when:function(a){function i(a){return function(c){b[a]=arguments.length>1?h.call(arguments,0):c,--e||g.resolveWith(g,h.call(b,0))}}var 
b=arguments,c=0,d=b.length,e=d,g=d<=1&&a&&f.isFunction(a.promise)?a:f.Deferred();if(d>1){for(;c<d;c++)b[c]&&f.isFunction(b[c].promise)?b[c].promise().then(i(c),g.reject):--e;e||g.resolveWith(g,b)}else g!==a&&g.resolveWith(g,d?[a]:[]);return g.promise()}}),f.support=function(){var a=c.createElement("div"),b=c.documentElement,d,e,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u;a.setAttribute("className","t"),a.innerHTML=" <link/><table></table><a href='/a' style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>",d=a.getElementsByTagName("*"),e=a.getElementsByTagName("a")[0];if(!d||!d.length||!e)return{};g=c.createElement("select"),h=g.appendChild(c.createElement("option")),i=a.getElementsByTagName("input")[0],k={leadingWhitespace:a.firstChild.nodeType===3,tbody:!a.getElementsByTagName("tbody").length,htmlSerialize:!!a.getElementsByTagName("link").length,style:/top/.test(e.getAttribute("style")),hrefNormalized:e.getAttribute("href")==="/a",opacity:/^0.55$/.test(e.style.opacity),cssFloat:!!e.style.cssFloat,checkOn:i.value==="on",optSelected:h.selected,getSetAttribute:a.className!=="t",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0},i.checked=!0,k.noCloneChecked=i.cloneNode(!0).checked,g.disabled=!0,k.optDisabled=!h.disabled;try{delete 
a.test}catch(v){k.deleteExpando=!1}!a.addEventListener&&a.attachEvent&&a.fireEvent&&(a.attachEvent("onclick",function(){k.noCloneEvent=!1}),a.cloneNode(!0).fireEvent("onclick")),i=c.createElement("input"),i.value="t",i.setAttribute("type","radio"),k.radioValue=i.value==="t",i.setAttribute("checked","checked"),a.appendChild(i),l=c.createDocumentFragment(),l.appendChild(a.firstChild),k.checkClone=l.cloneNode(!0).cloneNode(!0).lastChild.checked,a.innerHTML="",a.style.width=a.style.paddingLeft="1px",m=c.getElementsByTagName("body")[0],o=c.createElement(m?"div":"body"),p={visibility:"hidden",width:0,height:0,border:0,margin:0,background:"none"},m&&f.extend(p,{position:"absolute",left:"-1000px",top:"-1000px"});for(t in p)o.style[t]=p[t];o.appendChild(a),n=m||b,n.insertBefore(o,n.firstChild),k.appendChecked=i.checked,k.boxModel=a.offsetWidth===2,"zoom"in a.style&&(a.style.display="inline",a.style.zoom=1,k.inlineBlockNeedsLayout=a.offsetWidth===2,a.style.display="",a.innerHTML="<div style='width:4px;'></div>",k.shrinkWrapBlocks=a.offsetWidth!==2),a.innerHTML="<table><tr><td style='padding:0;border:0;display:none'></td><td>t</td></tr></table>",q=a.getElementsByTagName("td"),u=q[0].offsetHeight===0,q[0].style.display="",q[1].style.display="none",k.reliableHiddenOffsets=u&&q[0].offsetHeight===0,a.innerHTML="",c.defaultView&&c.defaultView.getComputedStyle&&(j=c.createElement("div"),j.style.width="0",j.style.marginRight="0",a.appendChild(j),k.reliableMarginRight=(parseInt((c.defaultView.getComputedStyle(j,null)||{marginRight:0}).marginRight,10)||0)===0),o.innerHTML="",n.removeChild(o);if(a.attachEvent)for(t in{submit:1,change:1,focusin:1})s="on"+t,u=s in a,u||(a.setAttribute(s,"return;"),u=typeof a[s]=="function"),k[t+"Bubbles"]=u;o=l=g=h=m=j=a=i=null;return k}(),f.boxModel=f.support.boxModel;var 
i=/^(?:\{.*\}|\[.*\])$/,j=/([A-Z])/g;f.extend({cache:{},uuid:0,expando:"jQuery"+(f.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){a=a.nodeType?f.cache[a[f.expando]]:a[f.expando];return!!a&&!l(a)},data:function(a,c,d,e){if(!!f.acceptData(a)){var g,h,i=f.expando,j=typeof c=="string",k=a.nodeType,l=k?f.cache:a,m=k?a[f.expando]:a[f.expando]&&f.expando;if((!m||e&&m&&l[m]&&!l[m][i])&&j&&d===b)return;m||(k?a[f.expando]=m=++f.uuid:m=f.expando),l[m]||(l[m]={},k||(l[m].toJSON=f.noop));if(typeof c=="object"||typeof c=="function")e?l[m][i]=f.extend(l[m][i],c):l[m]=f.extend(l[m],c);g=l[m],e&&(g[i]||(g[i]={}),g=g[i]),d!==b&&(g[f.camelCase(c)]=d);if(c==="events"&&!g[c])return g[i]&&g[i].events;j?(h=g[c],h==null&&(h=g[f.camelCase(c)])):h=g;return h}},removeData:function(a,b,c){if(!!f.acceptData(a)){var d,e=f.expando,g=a.nodeType,h=g?f.cache:a,i=g?a[f.expando]:f.expando;if(!h[i])return;if(b){d=c?h[i][e]:h[i];if(d){d[b]||(b=f.camelCase(b)),delete d[b];if(!l(d))return}}if(c){delete h[i][e];if(!l(h[i]))return}var j=h[i][e];f.support.deleteExpando||!h.setInterval?delete h[i]:h[i]=null,j?(h[i]={},g||(h[i].toJSON=f.noop),h[i][e]=j):g&&(f.support.deleteExpando?delete a[f.expando]:a.removeAttribute?a.removeAttribute(f.expando):a[f.expando]=null)}},_data:function(a,b,c){return f.data(a,b,c,!0)},acceptData:function(a){if(a.nodeName){var b=f.noData[a.nodeName.toLowerCase()];if(b)return b!==!0&&a.getAttribute("classid")===b}return!0}}),f.fn.extend({data:function(a,c){var d=null;if(typeof a=="undefined"){if(this.length){d=f.data(this[0]);if(this[0].nodeType===1){var e=this[0].attributes,g;for(var h=0,i=e.length;h<i;h++)g=e[h].name,g.indexOf("data-")===0&&(g=f.camelCase(g.substring(5)),k(this[0],g,d[g]))}}return d}if(typeof a=="object")return this.each(function(){f.data(this,a)});var 
j=a.split(".");j[1]=j[1]?"."+j[1]:"";if(c===b){d=this.triggerHandler("getData"+j[1]+"!",[j[0]]),d===b&&this.length&&(d=f.data(this[0],a),d=k(this[0],a,d));return d===b&&j[1]?this.data(j[0]):d}return this.each(function(){var b=f(this),d=[j[0],c];b.triggerHandler("setData"+j[1]+"!",d),f.data(this,a,c),b.triggerHandler("changeData"+j[1]+"!",d)})},removeData:function(a){return this.each(function(){f.removeData(this,a)})}}),f.extend({_mark:function(a,c){a&&(c=(c||"fx")+"mark",f.data(a,c,(f.data(a,c,b,!0)||0)+1,!0))},_unmark:function(a,c,d){a!==!0&&(d=c,c=a,a=!1);if(c){d=d||"fx";var e=d+"mark",g=a?0:(f.data(c,e,b,!0)||1)-1;g?f.data(c,e,g,!0):(f.removeData(c,e,!0),m(c,d,"mark"))}},queue:function(a,c,d){if(a){c=(c||"fx")+"queue";var e=f.data(a,c,b,!0);d&&(!e||f.isArray(d)?e=f.data(a,c,f.makeArray(d),!0):e.push(d));return e||[]}},dequeue:function(a,b){b=b||"fx";var c=f.queue(a,b),d=c.shift(),e;d==="inprogress"&&(d=c.shift()),d&&(b==="fx"&&c.unshift("inprogress"),d.call(a,function(){f.dequeue(a,b)})),c.length||(f.removeData(a,b+"queue",!0),m(a,b,"queue"))}}),f.fn.extend({queue:function(a,c){typeof a!="string"&&(c=a,a="fx");if(c===b)return f.queue(this[0],a);return this.each(function(){var b=f.queue(this,a,c);a==="fx"&&b[0]!=="inprogress"&&f.dequeue(this,a)})},dequeue:function(a){return this.each(function(){f.dequeue(this,a)})},delay:function(a,b){a=f.fx?f.fx.speeds[a]||a:a,b=b||"fx";return this.queue(b,function(){var c=this;setTimeout(function(){f.dequeue(c,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,c){function m(){--h||d.resolveWith(e,[e])}typeof a!="string"&&(c=a,a=b),a=a||"fx";var d=f.Deferred(),e=this,g=e.length,h=1,i=a+"defer",j=a+"queue",k=a+"mark",l;while(g--)if(l=f.data(e[g],i,b,!0)||(f.data(e[g],j,b,!0)||f.data(e[g],k,b,!0))&&f.data(e[g],i,f._Deferred(),!0))h++,l.done(m);m();return d.promise()}});var 
n=/[\n\t\r]/g,o=/\s+/,p=/\r/g,q=/^(?:button|input)$/i,r=/^(?:button|input|object|select|textarea)$/i,s=/^a(?:rea)?$/i,t=/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,u,v;f.fn.extend({attr:function(a,b){return f.access(this,a,b,!0,f.attr)},removeAttr:function(a){return this.each(function(){f.removeAttr(this,a)})},prop:function(a,b){return f.access(this,a,b,!0,f.prop)},removeProp:function(a){a=f.propFix[a]||a;return this.each(function(){try{this[a]=b,delete this[a]}catch(c){}})},addClass:function(a){var b,c,d,e,g,h,i;if(f.isFunction(a))return this.each(function(b){f(this).addClass(a.call(this,b,this.className))});if(a&&typeof a=="string"){b=a.split(o);for(c=0,d=this.length;c<d;c++){e=this[c];if(e.nodeType===1)if(!e.className&&b.length===1)e.className=a;else{g=" "+e.className+" ";for(h=0,i=b.length;h<i;h++)~g.indexOf(" "+b[h]+" ")||(g+=b[h]+" ");e.className=f.trim(g)}}}return this},removeClass:function(a){var c,d,e,g,h,i,j;if(f.isFunction(a))return this.each(function(b){f(this).removeClass(a.call(this,b,this.className))});if(a&&typeof a=="string"||a===b){c=(a||"").split(o);for(d=0,e=this.length;d<e;d++){g=this[d];if(g.nodeType===1&&g.className)if(a){h=(" "+g.className+" ").replace(n," ");for(i=0,j=c.length;i<j;i++)h=h.replace(" "+c[i]+" "," ");g.className=f.trim(h)}else g.className=""}}return this},toggleClass:function(a,b){var c=typeof a,d=typeof b=="boolean";if(f.isFunction(a))return this.each(function(c){f(this).toggleClass(a.call(this,c,this.className,b),b)});return this.each(function(){if(c==="string"){var e,g=0,h=f(this),i=b,j=a.split(o);while(e=j[g++])i=d?i:!h.hasClass(e),h[i?"addClass":"removeClass"](e)}else if(c==="undefined"||c==="boolean")this.className&&f._data(this,"__className__",this.className),this.className=this.className||a===!1?"":f._data(this,"__className__")||""})},hasClass:function(a){var b=" "+a+" ";for(var c=0,d=this.length;c<d;c++)if(this[c].nodeType===1&&(" 
"+this[c].className+" ").replace(n," ").indexOf(b)>-1)return!0;return!1},val:function(a){var c,d,e=this[0];if(!arguments.length){if(e){c=f.valHooks[e.nodeName.toLowerCase()]||f.valHooks[e.type];if(c&&"get"in c&&(d=c.get(e,"value"))!==b)return d;d=e.value;return typeof d=="string"?d.replace(p,""):d==null?"":d}return b}var g=f.isFunction(a);return this.each(function(d){var e=f(this),h;if(this.nodeType===1){g?h=a.call(this,d,e.val()):h=a,h==null?h="":typeof h=="number"?h+="":f.isArray(h)&&(h=f.map(h,function(a){return a==null?"":a+""})),c=f.valHooks[this.nodeName.toLowerCase()]||f.valHooks[this.type];if(!c||!("set"in c)||c.set(this,h,"value")===b)this.value=h}})}}),f.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c=a.selectedIndex,d=[],e=a.options,g=a.type==="select-one";if(c<0)return null;for(var h=g?c:0,i=g?c+1:e.length;h<i;h++){var j=e[h];if(j.selected&&(f.support.optDisabled?!j.disabled:j.getAttribute("disabled")===null)&&(!j.parentNode.disabled||!f.nodeName(j.parentNode,"optgroup"))){b=f(j).val();if(g)return b;d.push(b)}}if(g&&!d.length&&e.length)return f(e[c]).val();return d},set:function(a,b){var c=f.makeArray(b);f(a).find("option").each(function(){this.selected=f.inArray(f(this).val(),c)>=0}),c.length||(a.selectedIndex=-1);return c}}},attrFn:{val:!0,css:!0,html:!0,text:!0,data:!0,width:!0,height:!0,offset:!0},attrFix:{tabindex:"tabIndex"},attr:function(a,c,d,e){var g=a.nodeType;if(!a||g===3||g===8||g===2)return b;if(e&&c in f.attrFn)return f(a)[c](d);if(!("getAttribute"in a))return f.prop(a,c,d);var h,i,j=g!==1||!f.isXMLDoc(a);j&&(c=f.attrFix[c]||c,i=f.attrHooks[c],i||(t.test(c)?i=v:u&&(i=u)));if(d!==b){if(d===null){f.removeAttr(a,c);return b}if(i&&"set"in i&&j&&(h=i.set(a,d,c))!==b)return h;a.setAttribute(c,""+d);return d}if(i&&"get"in i&&j&&(h=i.get(a,c))!==null)return h;h=a.getAttribute(c);return h===null?b:h},removeAttr:function(a,b){var 
c;a.nodeType===1&&(b=f.attrFix[b]||b,f.attr(a,b,""),a.removeAttribute(b),t.test(b)&&(c=f.propFix[b]||b)in a&&(a[c]=!1))},attrHooks:{type:{set:function(a,b){if(q.test(a.nodeName)&&a.parentNode)f.error("type property can't be changed");else if(!f.support.radioValue&&b==="radio"&&f.nodeName(a,"input")){var c=a.value;a.setAttribute("type",b),c&&(a.value=c);return b}}},value:{get:function(a,b){if(u&&f.nodeName(a,"button"))return u.get(a,b);return b in a?a.value:null},set:function(a,b,c){if(u&&f.nodeName(a,"button"))return u.set(a,b,c);a.value=b}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(a,c,d){var e=a.nodeType;if(!a||e===3||e===8||e===2)return b;var g,h,i=e!==1||!f.isXMLDoc(a);i&&(c=f.propFix[c]||c,h=f.propHooks[c]);return d!==b?h&&"set"in h&&(g=h.set(a,d,c))!==b?g:a[c]=d:h&&"get"in h&&(g=h.get(a,c))!==null?g:a[c]},propHooks:{tabIndex:{get:function(a){var c=a.getAttributeNode("tabindex");return c&&c.specified?parseInt(c.value,10):r.test(a.nodeName)||s.test(a.nodeName)&&a.href?0:b}}}}),f.attrHooks.tabIndex=f.propHooks.tabIndex,v={get:function(a,c){var d;return f.prop(a,c)===!0||(d=a.getAttributeNode(c))&&d.nodeValue!==!1?c.toLowerCase():b},set:function(a,b,c){var d;b===!1?f.removeAttr(a,c):(d=f.propFix[c]||c,d in a&&(a[d]=!0),a.setAttribute(c,c.toLowerCase()));return c}},f.support.getSetAttribute||(u=f.valHooks.button={get:function(a,c){var d;d=a.getAttributeNode(c);return d&&d.nodeValue!==""?d.nodeValue:b},set:function(a,b,d){var e=a.getAttributeNode(d);e||(e=c.createAttribute(d),a.setAttributeNode(e));return e.nodeValue=b+""}},f.each(["width","height"],function(a,b){f.attrHooks[b]=f.extend(f.attrHooks[b],{set:function(a,c){if(c===""){a.setAttribute(b,"auto");return 
c}}})})),f.support.hrefNormalized||f.each(["href","src","width","height"],function(a,c){f.attrHooks[c]=f.extend(f.attrHooks[c],{get:function(a){var d=a.getAttribute(c,2);return d===null?b:d}})}),f.support.style||(f.attrHooks.style={get:function(a){return a.style.cssText.toLowerCase()||b},set:function(a,b){return a.style.cssText=""+b}}),f.support.optSelected||(f.propHooks.selected=f.extend(f.propHooks.selected,{get:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex);return null}})),f.support.checkOn||f.each(["radio","checkbox"],function(){f.valHooks[this]={get:function(a){return a.getAttribute("value")===null?"on":a.value}}}),f.each(["radio","checkbox"],function(){f.valHooks[this]=f.extend(f.valHooks[this],{set:function(a,b){if(f.isArray(b))return a.checked=f.inArray(f(a).val(),b)>=0}})});var w=/\.(.*)$/,x=/^(?:textarea|input|select)$/i,y=/\./g,z=/ /g,A=/[^\w\s.|`]/g,B=function(a){return a.replace(A,"\\$&")};f.event={add:function(a,c,d,e){if(a.nodeType!==3&&a.nodeType!==8){if(d===!1)d=C;else if(!d)return;var g,h;d.handler&&(g=d,d=g.handler),d.guid||(d.guid=f.guid++);var i=f._data(a);if(!i)return;var j=i.events,k=i.handle;j||(i.events=j={}),k||(i.handle=k=function(a){return typeof f!="undefined"&&(!a||f.event.triggered!==a.type)?f.event.handle.apply(k.elem,arguments):b}),k.elem=a,c=c.split(" ");var l,m=0,n;while(l=c[m++]){h=g?f.extend({},g):{handler:d,data:e},l.indexOf(".")>-1?(n=l.split("."),l=n.shift(),h.namespace=n.slice(0).sort().join(".")):(n=[],h.namespace=""),h.type=l,h.guid||(h.guid=d.guid);var o=j[l],p=f.event.special[l]||{};if(!o){o=j[l]=[];if(!p.setup||p.setup.call(a,e,n,k)===!1)a.addEventListener?a.addEventListener(l,k,!1):a.attachEvent&&a.attachEvent("on"+l,k)}p.add&&(p.add.call(a,h),h.handler.guid||(h.handler.guid=d.guid)),o.push(h),f.event.global[l]=!0}a=null}},global:{},remove:function(a,c,d,e){if(a.nodeType!==3&&a.nodeType!==8){d===!1&&(d=C);var 
g,h,i,j,k=0,l,m,n,o,p,q,r,s=f.hasData(a)&&f._data(a),t=s&&s.events;if(!s||!t)return;c&&c.type&&(d=c.handler,c=c.type);if(!c||typeof c=="string"&&c.charAt(0)==="."){c=c||"";for(h in t)f.event.remove(a,h+c);return}c=c.split(" ");while(h=c[k++]){r=h,q=null,l=h.indexOf(".")<0,m=[],l||(m=h.split("."),h=m.shift(),n=new RegExp("(^|\\.)"+f.map(m.slice(0).sort(),B).join("\\.(?:.*\\.)?")+"(\\.|$)")),p=t[h];if(!p)continue;if(!d){for(j=0;j<p.length;j++){q=p[j];if(l||n.test(q.namespace))f.event.remove(a,r,q.handler,j),p.splice(j--,1)}continue}o=f.event.special[h]||{};for(j=e||0;j<p.length;j++){q=p[j];if(d.guid===q.guid){if(l||n.test(q.namespace))e==null&&p.splice(j--,1),o.remove&&o.remove.call(a,q);if(e!=null)break}}if(p.length===0||e!=null&&p.length===1)(!o.teardown||o.teardown.call(a,m)===!1)&&f.removeEvent(a,h,s.handle),g=null,delete
+t[h]}if(f.isEmptyObject(t)){var u=s.handle;u&&(u.elem=null),delete s.events,delete s.handle,f.isEmptyObject(s)&&f.removeData(a,b,!0)}}},customEvent:{getData:!0,setData:!0,changeData:!0},trigger:function(c,d,e,g){var h=c.type||c,i=[],j;h.indexOf("!")>=0&&(h=h.slice(0,-1),j=!0),h.indexOf(".")>=0&&(i=h.split("."),h=i.shift(),i.sort());if(!!e&&!f.event.customEvent[h]||!!f.event.global[h]){c=typeof c=="object"?c[f.expando]?c:new f.Event(h,c):new f.Event(h),c.type=h,c.exclusive=j,c.namespace=i.join("."),c.namespace_re=new RegExp("(^|\\.)"+i.join("\\.(?:.*\\.)?")+"(\\.|$)");if(g||!e)c.preventDefault(),c.stopPropagation();if(!e){f.each(f.cache,function(){var a=f.expando,b=this[a];b&&b.events&&b.events[h]&&f.event.trigger(c,d,b.handle.elem)});return}if(e.nodeType===3||e.nodeType===8)return;c.result=b,c.target=e,d=d!=null?f.makeArray(d):[],d.unshift(c);var k=e,l=h.indexOf(":")<0?"on"+h:"";do{var m=f._data(k,"handle");c.currentTarget=k,m&&m.apply(k,d),l&&f.acceptData(k)&&k[l]&&k[l].apply(k,d)===!1&&(c.result=!1,c.preventDefault()),k=k.parentNode||k.ownerDocument||k===c.target.ownerDocument&&a}while(k&&!c.isPropagationStopped());if(!c.isDefaultPrevented()){var n,o=f.event.special[h]||{};if((!o._default||o._default.call(e.ownerDocument,c)===!1)&&(h!=="click"||!f.nodeName(e,"a"))&&f.acceptData(e)){try{l&&e[h]&&(n=e[l],n&&(e[l]=null),f.event.triggered=h,e[h]())}catch(p){}n&&(e[l]=n),f.event.triggered=b}}return c.result}},handle:function(c){c=f.event.fix(c||a.event);var d=((f._data(this,"events")||{})[c.type]||[]).slice(0),e=!c.exclusive&&!c.namespace,g=Array.prototype.slice.call(arguments,0);g[0]=c,c.currentTarget=this;for(var h=0,i=d.length;h<i;h++){var j=d[h];if(e||c.namespace_re.test(j.namespace)){c.handler=j.handler,c.data=j.data,c.handleObj=j;var k=j.handler.apply(this,g);k!==b&&(c.result=k,k===!1&&(c.preventDefault(),c.stopPropagation()));if(c.isImmediatePropagationStopped())break}}return c.result},props:"altKey attrChange attrName bubbles button cancelable charCode 
clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),fix:function(a){if(a[f.expando])return a;var d=a;a=f.Event(d);for(var e=this.props.length,g;e;)g=this.props[--e],a[g]=d[g];a.target||(a.target=a.srcElement||c),a.target.nodeType===3&&(a.target=a.target.parentNode),!a.relatedTarget&&a.fromElement&&(a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement);if(a.pageX==null&&a.clientX!=null){var h=a.target.ownerDocument||c,i=h.documentElement,j=h.body;a.pageX=a.clientX+(i&&i.scrollLeft||j&&j.scrollLeft||0)-(i&&i.clientLeft||j&&j.clientLeft||0),a.pageY=a.clientY+(i&&i.scrollTop||j&&j.scrollTop||0)-(i&&i.clientTop||j&&j.clientTop||0)}a.which==null&&(a.charCode!=null||a.keyCode!=null)&&(a.which=a.charCode!=null?a.charCode:a.keyCode),!a.metaKey&&a.ctrlKey&&(a.metaKey=a.ctrlKey),!a.which&&a.button!==b&&(a.which=a.button&1?1:a.button&2?3:a.button&4?2:0);return a},guid:1e8,proxy:f.proxy,special:{ready:{setup:f.bindReady,teardown:f.noop},live:{add:function(a){f.event.add(this,M(a.origType,a.selector),f.extend({},a,{handler:L,guid:a.handler.guid}))},remove:function(a){f.event.remove(this,M(a.origType,a.selector),a)}},beforeunload:{setup:function(a,b,c){f.isWindow(this)&&(this.onbeforeunload=c)},teardown:function(a,b){this.onbeforeunload===b&&(this.onbeforeunload=null)}}}},f.removeEvent=c.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){a.detachEvent&&a.detachEvent("on"+b,c)},f.Event=function(a,b){if(!this.preventDefault)return new 
f.Event(a,b);a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||a.returnValue===!1||a.getPreventDefault&&a.getPreventDefault()?D:C):this.type=a,b&&f.extend(this,b),this.timeStamp=f.now(),this[f.expando]=!0},f.Event.prototype={preventDefault:function(){this.isDefaultPrevented=D;var a=this.originalEvent;!a||(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){this.isPropagationStopped=D;var a=this.originalEvent;!a||(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=D,this.stopPropagation()},isDefaultPrevented:C,isPropagationStopped:C,isImmediatePropagationStopped:C};var E=function(a){var b=a.relatedTarget,c=!1,d=a.type;a.type=a.data,b!==this&&(b&&(c=f.contains(this,b)),c||(f.event.handle.apply(this,arguments),a.type=d))},F=function(a){a.type=a.data,f.event.handle.apply(this,arguments)};f.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){f.event.special[a]={setup:function(c){f.event.add(this,b,c&&c.selector?F:E,a)},teardown:function(a){f.event.remove(this,b,a&&a.selector?F:E)}}}),f.support.submitBubbles||(f.event.special.submit={setup:function(a,b){if(!f.nodeName(this,"form"))f.event.add(this,"click.specialSubmit",function(a){var b=a.target,c=f.nodeName(b,"input")||f.nodeName(b,"button")?b.type:"";(c==="submit"||c==="image")&&f(b).closest("form").length&&J("submit",this,arguments)}),f.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,c=f.nodeName(b,"input")||f.nodeName(b,"button")?b.type:"";(c==="text"||c==="password")&&f(b).closest("form").length&&a.keyCode===13&&J("submit",this,arguments)});else return!1},teardown:function(a){f.event.remove(this,".specialSubmit")}});if(!f.support.changeBubbles){var G,H=function(a){var b=f.nodeName(a,"input")?a.type:"",c=a.value;b==="radio"||b==="checkbox"?c=a.checked:b==="select-multiple"?c=a.selectedIndex>-1?f.map(a.options,function(a){return 
a.selected}).join("-"):"":f.nodeName(a,"select")&&(c=a.selectedIndex);return c},I=function(c){var d=c.target,e,g;if(!!x.test(d.nodeName)&&!d.readOnly){e=f._data(d,"_change_data"),g=H(d),(c.type!=="focusout"||d.type!=="radio")&&f._data(d,"_change_data",g);if(e===b||g===e)return;if(e!=null||g)c.type="change",c.liveFired=b,f.event.trigger(c,arguments[1],d)}};f.event.special.change={filters:{focusout:I,beforedeactivate:I,click:function(a){var b=a.target,c=f.nodeName(b,"input")?b.type:"";(c==="radio"||c==="checkbox"||f.nodeName(b,"select"))&&I.call(this,a)},keydown:function(a){var b=a.target,c=f.nodeName(b,"input")?b.type:"";(a.keyCode===13&&!f.nodeName(b,"textarea")||a.keyCode===32&&(c==="checkbox"||c==="radio")||c==="select-multiple")&&I.call(this,a)},beforeactivate:function(a){var b=a.target;f._data(b,"_change_data",H(b))}},setup:function(a,b){if(this.type==="file")return!1;for(var c in G)f.event.add(this,c+".specialChange",G[c]);return x.test(this.nodeName)},teardown:function(a){f.event.remove(this,".specialChange");return x.test(this.nodeName)}},G=f.event.special.change.filters,G.focus=G.beforeactivate}f.support.focusinBubbles||f.each({focus:"focusin",blur:"focusout"},function(a,b){function e(a){var c=f.event.fix(a);c.type=b,c.originalEvent={},f.event.trigger(c,null,c.target),c.isDefaultPrevented()&&a.preventDefault()}var d=0;f.event.special[b]={setup:function(){d++===0&&c.addEventListener(a,e,!0)},teardown:function(){--d===0&&c.removeEventListener(a,e,!0)}}}),f.each(["bind","one"],function(a,c){f.fn[c]=function(a,d,e){var g;if(typeof a=="object"){for(var h in a)this[c](h,d,a[h],e);return this}if(arguments.length===2||d===!1)e=d,d=b;c==="one"?(g=function(a){f(this).unbind(a,g);return e.apply(this,arguments)},g.guid=e.guid||f.guid++):g=e;if(a==="unload"&&c!=="one")this.one(a,d,e);else for(var i=0,j=this.length;i<j;i++)f.event.add(this[i],a,g,d);return this}}),f.fn.extend({unbind:function(a,b){if(typeof a=="object"&&!a.preventDefault)for(var c in 
a)this.unbind(c,a[c]);else for(var d=0,e=this.length;d<e;d++)f.event.remove(this[d],a,b);return this},delegate:function(a,b,c,d){return this.live(b,c,d,a)},undelegate:function(a,b,c){return arguments.length===0?this.unbind("live"):this.die(b,null,c,a)},trigger:function(a,b){return this.each(function(){f.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0])return f.event.trigger(a,b,this[0],!0)},toggle:function(a){var b=arguments,c=a.guid||f.guid++,d=0,e=function(c){var e=(f.data(this,"lastToggle"+a.guid)||0)%d;f.data(this,"lastToggle"+a.guid,e+1),c.preventDefault();return b[e].apply(this,arguments)||!1};e.guid=c;while(d<b.length)b[d++].guid=c;return this.click(e)},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var K={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};f.each(["live","die"],function(a,c){f.fn[c]=function(a,d,e,g){var h,i=0,j,k,l,m=g||this.selector,n=g?this:f(this.context);if(typeof a=="object"&&!a.preventDefault){for(var o in a)n[c](o,d,a[o],m);return this}if(c==="die"&&!a&&g&&g.charAt(0)==="."){n.unbind(g);return this}if(d===!1||f.isFunction(d))e=d||C,d=b;a=(a||"").split(" ");while((h=a[i++])!=null){j=w.exec(h),k="",j&&(k=j[0],h=h.replace(w,""));if(h==="hover"){a.push("mouseenter"+k,"mouseleave"+k);continue}l=h,K[h]?(a.push(K[h]+k),h=h+k):h=(K[h]||h)+k;if(c==="live")for(var p=0,q=n.length;p<q;p++)f.event.add(n[p],"live."+M(h,m),{data:d,selector:m,handler:e,origType:h,origHandler:e,preType:l});else n.unbind("live."+M(h,m),e)}return this}}),f.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),function(a,b){f.fn[b]=function(a,c){c==null&&(c=a,a=null);return arguments.length>0?this.bind(b,a,c):this.trigger(b)},f.attrFn&&(f.attrFn[b]=!0)}),function(){function u(a,b,c,d,e,f){for(var g=0,h=d.length;g<h;g++){var i=d[g];if(i){var 
j=!1;i=i[a];while(i){if(i.sizcache===c){j=d[i.sizset];break}if(i.nodeType===1){f||(i.sizcache=c,i.sizset=g);if(typeof b!="string"){if(i===b){j=!0;break}}else if(k.filter(b,[i]).length>0){j=i;break}}i=i[a]}d[g]=j}}}function t(a,b,c,d,e,f){for(var g=0,h=d.length;g<h;g++){var i=d[g];if(i){var j=!1;i=i[a];while(i){if(i.sizcache===c){j=d[i.sizset];break}i.nodeType===1&&!f&&(i.sizcache=c,i.sizset=g);if(i.nodeName.toLowerCase()===b){j=i;break}i=i[a]}d[g]=j}}}var a=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,d=0,e=Object.prototype.toString,g=!1,h=!0,i=/\\/g,j=/\W/;[0,0].sort(function(){h=!1;return 0});var k=function(b,d,f,g){f=f||[],d=d||c;var h=d;if(d.nodeType!==1&&d.nodeType!==9)return[];if(!b||typeof b!="string")return f;var i,j,n,o,q,r,s,t,u=!0,w=k.isXML(d),x=[],y=b;do{a.exec(""),i=a.exec(y);if(i){y=i[3],x.push(i[1]);if(i[2]){o=i[3];break}}}while(i);if(x.length>1&&m.exec(b))if(x.length===2&&l.relative[x[0]])j=v(x[0]+x[1],d);else{j=l.relative[x[0]]?[d]:k(x.shift(),d);while(x.length)b=x.shift(),l.relative[b]&&(b+=x.shift()),j=v(b,j)}else{!g&&x.length>1&&d.nodeType===9&&!w&&l.match.ID.test(x[0])&&!l.match.ID.test(x[x.length-1])&&(q=k.find(x.shift(),d,w),d=q.expr?k.filter(q.expr,q.set)[0]:q.set[0]);if(d){q=g?{expr:x.pop(),set:p(g)}:k.find(x.pop(),x.length===1&&(x[0]==="~"||x[0]==="+")&&d.parentNode?d.parentNode:d,w),j=q.expr?k.filter(q.expr,q.set):q.set,x.length>0?n=p(j):u=!1;while(x.length)r=x.pop(),s=r,l.relative[r]?s=x.pop():r="",s==null&&(s=d),l.relative[r](n,s,w)}else n=x=[]}n||(n=j),n||k.error(r||b);if(e.call(n)==="[object Array]")if(!u)f.push.apply(f,n);else if(d&&d.nodeType===1)for(t=0;n[t]!=null;t++)n[t]&&(n[t]===!0||n[t].nodeType===1&&k.contains(d,n[t]))&&f.push(j[t]);else for(t=0;n[t]!=null;t++)n[t]&&n[t].nodeType===1&&f.push(j[t]);else p(n,f);o&&(k(o,h,f,g),k.uniqueSort(f));return f};k.uniqueSort=function(a){if(r){g=h,a.sort(r);if(g)for(var 
b=1;b<a.length;b++)a[b]===a[b-1]&&a.splice(b--,1)}return a},k.matches=function(a,b){return k(a,null,null,b)},k.matchesSelector=function(a,b){return k(b,null,null,[a]).length>0},k.find=function(a,b,c){var d;if(!a)return[];for(var e=0,f=l.order.length;e<f;e++){var g,h=l.order[e];if(g=l.leftMatch[h].exec(a)){var j=g[1];g.splice(1,1);if(j.substr(j.length-1)!=="\\"){g[1]=(g[1]||"").replace(i,""),d=l.find[h](g,b,c);if(d!=null){a=a.replace(l.match[h],"");break}}}}d||(d=typeof b.getElementsByTagName!="undefined"?b.getElementsByTagName("*"):[]);return{set:d,expr:a}},k.filter=function(a,c,d,e){var f,g,h=a,i=[],j=c,m=c&&c[0]&&k.isXML(c[0]);while(a&&c.length){for(var n in l.filter)if((f=l.leftMatch[n].exec(a))!=null&&f[2]){var o,p,q=l.filter[n],r=f[1];g=!1,f.splice(1,1);if(r.substr(r.length-1)==="\\")continue;j===i&&(i=[]);if(l.preFilter[n]){f=l.preFilter[n](f,j,d,i,e,m);if(!f)g=o=!0;else if(f===!0)continue}if(f)for(var s=0;(p=j[s])!=null;s++)if(p){o=q(p,f,s,j);var t=e^!!o;d&&o!=null?t?g=!0:j[s]=!1:t&&(i.push(p),g=!0)}if(o!==b){d||(j=i),a=a.replace(l.match[n],"");if(!g)return[];break}}if(a===h)if(g==null)k.error(a);else break;h=a}return j},k.error=function(a){throw"Syntax error, unrecognized expression: "+a};var l=k.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(a){return a.getAttribute("href")},type:function(a){return 
a.getAttribute("type")}},relative:{"+":function(a,b){var c=typeof b=="string",d=c&&!j.test(b),e=c&&!d;d&&(b=b.toLowerCase());for(var f=0,g=a.length,h;f<g;f++)if(h=a[f]){while((h=h.previousSibling)&&h.nodeType!==1);a[f]=e||h&&h.nodeName.toLowerCase()===b?h||!1:h===b}e&&k.filter(b,a,!0)},">":function(a,b){var c,d=typeof b=="string",e=0,f=a.length;if(d&&!j.test(b)){b=b.toLowerCase();for(;e<f;e++){c=a[e];if(c){var g=c.parentNode;a[e]=g.nodeName.toLowerCase()===b?g:!1}}}else{for(;e<f;e++)c=a[e],c&&(a[e]=d?c.parentNode:c.parentNode===b);d&&k.filter(b,a,!0)}},"":function(a,b,c){var e,f=d++,g=u;typeof b=="string"&&!j.test(b)&&(b=b.toLowerCase(),e=b,g=t),g("parentNode",b,f,a,e,c)},"~":function(a,b,c){var e,f=d++,g=u;typeof b=="string"&&!j.test(b)&&(b=b.toLowerCase(),e=b,g=t),g("previousSibling",b,f,a,e,c)}},find:{ID:function(a,b,c){if(typeof b.getElementById!="undefined"&&!c){var d=b.getElementById(a[1]);return d&&d.parentNode?[d]:[]}},NAME:function(a,b){if(typeof b.getElementsByName!="undefined"){var c=[],d=b.getElementsByName(a[1]);for(var e=0,f=d.length;e<f;e++)d[e].getAttribute("name")===a[1]&&c.push(d[e]);return c.length===0?null:c}},TAG:function(a,b){if(typeof b.getElementsByTagName!="undefined")return b.getElementsByTagName(a[1])}},preFilter:{CLASS:function(a,b,c,d,e,f){a=" "+a[1].replace(i,"")+" ";if(f)return a;for(var g=0,h;(h=b[g])!=null;g++)h&&(e^(h.className&&(" "+h.className+" ").replace(/[\t\n\r]/g," ").indexOf(a)>=0)?c||d.push(h):c&&(b[g]=!1));return!1},ID:function(a){return a[1].replace(i,"")},TAG:function(a,b){return a[1].replace(i,"").toLowerCase()},CHILD:function(a){if(a[1]==="nth"){a[2]||k.error(a[0]),a[2]=a[2].replace(/^\+|\s*/g,"");var b=/(-?)(\d*)(?:n([+\-]?\d*))?/.exec(a[2]==="even"&&"2n"||a[2]==="odd"&&"2n+1"||!/\D/.test(a[2])&&"0n+"+a[2]||a[2]);a[2]=b[1]+(b[2]||1)-0,a[3]=b[3]-0}else a[2]&&k.error(a[0]);a[0]=d++;return a},ATTR:function(a,b,c,d,e,f){var 
g=a[1]=a[1].replace(i,"");!f&&l.attrMap[g]&&(a[1]=l.attrMap[g]),a[4]=(a[4]||a[5]||"").replace(i,""),a[2]==="~="&&(a[4]=" "+a[4]+" ");return a},PSEUDO:function(b,c,d,e,f){if(b[1]==="not")if((a.exec(b[3])||"").length>1||/^\w/.test(b[3]))b[3]=k(b[3],null,null,c);else{var g=k.filter(b[3],c,d,!0^f);d||e.push.apply(e,g);return!1}else if(l.match.POS.test(b[0])||l.match.CHILD.test(b[0]))return!0;return b},POS:function(a){a.unshift(!0);return a}},filters:{enabled:function(a){return a.disabled===!1&&a.type!=="hidden"},disabled:function(a){return a.disabled===!0},checked:function(a){return a.checked===!0},selected:function(a){a.parentNode&&a.parentNode.selectedIndex;return a.selected===!0},parent:function(a){return!!a.firstChild},empty:function(a){return!a.firstChild},has:function(a,b,c){return!!k(c[3],a).length},header:function(a){return/h\d/i.test(a.nodeName)},text:function(a){var b=a.getAttribute("type"),c=a.type;return a.nodeName.toLowerCase()==="input"&&"text"===c&&(b===c||b===null)},radio:function(a){return a.nodeName.toLowerCase()==="input"&&"radio"===a.type},checkbox:function(a){return a.nodeName.toLowerCase()==="input"&&"checkbox"===a.type},file:function(a){return a.nodeName.toLowerCase()==="input"&&"file"===a.type},password:function(a){return a.nodeName.toLowerCase()==="input"&&"password"===a.type},submit:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"submit"===a.type},image:function(a){return a.nodeName.toLowerCase()==="input"&&"image"===a.type},reset:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"reset"===a.type},button:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&"button"===a.type||b==="button"},input:function(a){return/input|select|textarea|button/i.test(a.nodeName)},focus:function(a){return a===a.ownerDocument.activeElement}},setFilters:{first:function(a,b){return b===0},last:function(a,b,c,d){return b===d.length-1},even:function(a,b){return b%2===0},odd:function(a,b){return 
b%2===1},lt:function(a,b,c){return b<c[3]-0},gt:function(a,b,c){return b>c[3]-0},nth:function(a,b,c){return c[3]-0===b},eq:function(a,b,c){return c[3]-0===b}},filter:{PSEUDO:function(a,b,c,d){var e=b[1],f=l.filters[e];if(f)return f(a,c,b,d);if(e==="contains")return(a.textContent||a.innerText||k.getText([a])||"").indexOf(b[3])>=0;if(e==="not"){var g=b[3];for(var h=0,i=g.length;h<i;h++)if(g[h]===a)return!1;return!0}k.error(e)},CHILD:function(a,b){var c=b[1],d=a;switch(c){case"only":case"first":while(d=d.previousSibling)if(d.nodeType===1)return!1;if(c==="first")return!0;d=a;case"last":while(d=d.nextSibling)if(d.nodeType===1)return!1;return!0;case"nth":var e=b[2],f=b[3];if(e===1&&f===0)return!0;var g=b[0],h=a.parentNode;if(h&&(h.sizcache!==g||!a.nodeIndex)){var i=0;for(d=h.firstChild;d;d=d.nextSibling)d.nodeType===1&&(d.nodeIndex=++i);h.sizcache=g}var j=a.nodeIndex-f;return e===0?j===0:j%e===0&&j/e>=0}},ID:function(a,b){return a.nodeType===1&&a.getAttribute("id")===b},TAG:function(a,b){return b==="*"&&a.nodeType===1||a.nodeName.toLowerCase()===b},CLASS:function(a,b){return(" "+(a.className||a.getAttribute("class"))+" ").indexOf(b)>-1},ATTR:function(a,b){var c=b[1],d=l.attrHandle[c]?l.attrHandle[c](a):a[c]!=null?a[c]:a.getAttribute(c),e=d+"",f=b[2],g=b[4];return d==null?f==="!=":f==="="?e===g:f==="*="?e.indexOf(g)>=0:f==="~="?(" "+e+" ").indexOf(g)>=0:g?f==="!="?e!==g:f==="^="?e.indexOf(g)===0:f==="$="?e.substr(e.length-g.length)===g:f==="|="?e===g||e.substr(0,g.length+1)===g+"-":!1:e&&d!==!1},POS:function(a,b,c,d){var e=b[2],f=l.setFilters[e];if(f)return f(a,c,b,d)}}},m=l.match.POS,n=function(a,b){return"\\"+(b-0+1)};for(var o in l.match)l.match[o]=new RegExp(l.match[o].source+/(?![^\[]*\])(?![^\(]*\))/.source),l.leftMatch[o]=new RegExp(/(^(?:.|\r|\n)*?)/.source+l.match[o].source.replace(/\\(\d+)/g,n));var p=function(a,b){a=Array.prototype.slice.call(a,0);if(b){b.push.apply(b,a);return b}return 
a};try{Array.prototype.slice.call(c.documentElement.childNodes,0)[0].nodeType}catch(q){p=function(a,b){var c=0,d=b||[];if(e.call(a)==="[object Array]")Array.prototype.push.apply(d,a);else if(typeof a.length=="number")for(var f=a.length;c<f;c++)d.push(a[c]);else for(;a[c];c++)d.push(a[c]);return d}}var r,s;c.documentElement.compareDocumentPosition?r=function(a,b){if(a===b){g=!0;return 0}if(!a.compareDocumentPosition||!b.compareDocumentPosition)return a.compareDocumentPosition?-1:1;return a.compareDocumentPosition(b)&4?-1:1}:(r=function(a,b){if(a===b){g=!0;return 0}if(a.sourceIndex&&b.sourceIndex)return a.sourceIndex-b.sourceIndex;var c,d,e=[],f=[],h=a.parentNode,i=b.parentNode,j=h;if(h===i)return s(a,b);if(!h)return-1;if(!i)return 1;while(j)e.unshift(j),j=j.parentNode;j=i;while(j)f.unshift(j),j=j.parentNode;c=e.length,d=f.length;for(var k=0;k<c&&k<d;k++)if(e[k]!==f[k])return s(e[k],f[k]);return k===c?s(a,f[k],-1):s(e[k],b,1)},s=function(a,b,c){if(a===b)return c;var d=a.nextSibling;while(d){if(d===b)return-1;d=d.nextSibling}return 1}),k.getText=function(a){var b="",c;for(var d=0;a[d];d++)c=a[d],c.nodeType===3||c.nodeType===4?b+=c.nodeValue:c.nodeType!==8&&(b+=k.getText(c.childNodes));return b},function(){var a=c.createElement("div"),d="script"+(new Date).getTime(),e=c.documentElement;a.innerHTML="<a name='"+d+"'/>",e.insertBefore(a,e.firstChild),c.getElementById(d)&&(l.find.ID=function(a,c,d){if(typeof c.getElementById!="undefined"&&!d){var e=c.getElementById(a[1]);return e?e.id===a[1]||typeof e.getAttributeNode!="undefined"&&e.getAttributeNode("id").nodeValue===a[1]?[e]:b:[]}},l.filter.ID=function(a,b){var c=typeof a.getAttributeNode!="undefined"&&a.getAttributeNode("id");return a.nodeType===1&&c&&c.nodeValue===b}),e.removeChild(a),e=a=null}(),function(){var a=c.createElement("div");a.appendChild(c.createComment("")),a.getElementsByTagName("*").length>0&&(l.find.TAG=function(a,b){var c=b.getElementsByTagName(a[1]);if(a[1]==="*"){var d=[];for(var 
e=0;c[e];e++)c[e].nodeType===1&&d.push(c[e]);c=d}return c}),a.innerHTML="<a href='#'></a>",a.firstChild&&typeof a.firstChild.getAttribute!="undefined"&&a.firstChild.getAttribute("href")!=="#"&&(l.attrHandle.href=function(a){return a.getAttribute("href",2)}),a=null}(),c.querySelectorAll&&function(){var a=k,b=c.createElement("div"),d="__sizzle__";b.innerHTML="<p class='TEST'></p>";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){k=function(b,e,f,g){e=e||c;if(!g&&!k.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return p(e.getElementsByTagName(b),f);if(h[2]&&l.find.CLASS&&e.getElementsByClassName)return p(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return p([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return p([],f);if(i.id===h[3])return p([i],f)}try{return p(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var m=e,n=e.getAttribute("id"),o=n||d,q=e.parentNode,r=/^\s*[+~]/.test(b);n?o=o.replace(/'/g,"\\$&"):e.setAttribute("id",o),r&&q&&(e=e.parentNode);try{if(!r||q)return p(e.querySelectorAll("[id='"+o+"'] "+b),f)}catch(s){}finally{n||m.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)k[e]=a[e];b=null}}(),function(){var a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}k.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!k.isXML(a))try{if(e||!l.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return k(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="<div class='test e'></div><div 
class='test'></div>";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;l.order.splice(1,0,"CLASS"),l.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?k.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?k.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:k.contains=function(){return!1},k.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var v=function(a,b){var c,d=[],e="",f=b.nodeType?[b]:b;while(c=l.match.PSEUDO.exec(a))e+=c[0],a=a.replace(l.match.PSEUDO,"");a=l.relative[a]?a+"*":a;for(var g=0,h=f.length;g<h;g++)k(a,f[g],d);return k.filter(e,d)};f.find=k,f.expr=k.selectors,f.expr[":"]=f.expr.filters,f.unique=k.uniqueSort,f.text=k.getText,f.isXMLDoc=k.isXML,f.contains=k.contains}();var N=/Until$/,O=/^(?:parents|prevUntil|prevAll)/,P=/,/,Q=/^.[^:#\[\.,]*$/,R=Array.prototype.slice,S=f.expr.match.POS,T={children:!0,contents:!0,next:!0,prev:!0};f.fn.extend({find:function(a){var b=this,c,d;if(typeof a!="string")return f(a).filter(function(){for(c=0,d=b.length;c<d;c++)if(f.contains(b[c],this))return!0});var e=this.pushStack("","find",a),g,h,i;for(c=0,d=this.length;c<d;c++){g=e.length,f.find(a,this[c],e);if(c>0)for(h=g;h<e.length;h++)for(i=0;i<g;i++)if(e[i]===e[h]){e.splice(h--,1);break}}return e},has:function(a){var b=f(a);return this.filter(function(){for(var a=0,c=b.length;a<c;a++)if(f.contains(this,b[a]))return!0})},not:function(a){return this.pushStack(V(this,a,!1),"not",a)},filter:function(a){return this.pushStack(V(this,a,!0),"filter",a)},is:function(a){return!!a&&(typeof a=="string"?f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var 
h,i,j={},k=1;if(g&&a.length){for(d=0,e=a.length;d<e;d++)i=a[d],j[i]||(j[i]=S.test(i)?f(i,b||this.context):i);while(g&&g.ownerDocument&&g!==b){for(i in j)h=j[i],(h.jquery?h.index(g)>-1:f(g).is(h))&&c.push({selector:i,elem:g,level:k});g=g.parentNode,k++}}return c}var l=S.test(a)||typeof a!="string"?f(a,b||this.context):0;for(d=0,e=this.length;d<e;d++){g=this[d];while(g){if(l?l.index(g)>-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(U(c[0])||U(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling(a.parentNode.firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c),g=R.call(arguments);N.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!T[a]?f.unique(e):e,(this.length>1||P.test(d))&&O.test(a)&&(e=e.reverse());return 
this.pushStack(e,a,g.join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/<tbody/i,_=/<|&#?\w+;/,ba=/<(?:script|object|embed|option|style)/i,bb=/checked\s*(?:[^=]|=\s*.checked.)/i,bc=/\/(java|ecma)script/i,bd=/^\s*<!(?:\[CDATA\[|\-\-)/,be={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]};be.optgroup=be.option,be.tbody=be.tfoot=be.colgroup=be.caption=be.thead,be.th=be.td,f.support.htmlSerialize||(be._default=[1,"div<div>","</div>"]),f.fn.extend({text:function(a){if(f.isFunction(a))return this.each(function(b){var c=f(this);c.text(a.call(this,b,c.text()))});if(typeof a!="object"&&a!==b)return this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a));return f.text(this)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return 
this.each(function(){var b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){f(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f(arguments[0]).toArray());return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){if(a===b)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!be[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1></$2>");try{for(var c=0,d=this.length;c<d;c++)this[c].nodeType===1&&(f.cleanData(this[c].getElementsByTagName("*")),this[c].innerHTML=a)}catch(e){this.empty().append(a)}}else f.isFunction(a)?this.each(function(b){var 
c=f(this);c.html(a.call(this,b,c.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(f.isFunction(a))return this.each(function(b){var c=f(this),d=c.html();c.replaceWith(a.call(this,b,d))});typeof a!="string"&&(a=f(a).detach());return this.each(function(){var b=this.nextSibling,c=this.parentNode;f(this).remove(),b?f(b).before(a):f(c).append(a)})}return this.length?this.pushStack(f(f.isFunction(a)?a():a),"replaceWith",a):this},detach:function(a){return this.remove(a,!0)},domManip:function(a,c,d){var e,g,h,i,j=a[0],k=[];if(!f.support.checkClone&&arguments.length===3&&typeof j=="string"&&bb.test(j))return this.each(function(){f(this).domManip(a,c,d,!0)});if(f.isFunction(j))return this.each(function(e){var g=f(this);a[0]=j.call(this,e,c?g.html():b),g.domManip(a,c,d)});if(this[0]){i=j&&j.parentNode,f.support.parentNode&&i&&i.nodeType===11&&i.childNodes.length===this.length?e={fragment:i}:e=f.buildFragment(a,this,k),h=e.fragment,h.childNodes.length===1?g=h=h.firstChild:g=h.firstChild;if(g){c=c&&f.nodeName(g,"tr");for(var l=0,m=this.length,n=m-1;l<m;l++)d.call(c?bf(this[l],g):this[l],e.cacheable||m>1&&l<n?f.clone(h,!0,!0):h)}k.length&&f.each(k,bl)}return this}}),f.buildFragment=function(a,b,d){var e,g,h,i;b&&b[0]&&(i=b[0].ownerDocument||b[0]),i.createDocumentFragment||(i=c),a.length===1&&typeof a[0]=="string"&&a[0].length<512&&i===c&&a[0].charAt(0)==="<"&&!ba.test(a[0])&&(f.support.checkClone||!bb.test(a[0]))&&(g=!0,h=f.fragments[a[0]],h&&h!==1&&(e=h)),e||(e=i.createDocumentFragment(),f.clean
+(a,i,e,d)),g&&(f.fragments[a[0]]=h?e:1);return{fragment:e,cacheable:g}},f.fragments={},f.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){f.fn[a]=function(c){var d=[],e=f(c),g=this.length===1&&this[0].parentNode;if(g&&g.nodeType===11&&g.childNodes.length===1&&e.length===1){e[b](this[0]);return this}for(var h=0,i=e.length;h<i;h++){var j=(h>0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d=a.cloneNode(!0),e,g,h;if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bh(a,d),e=bi(a),g=bi(d);for(h=0;e[h];++h)g[h]&&bh(e[h],g[h])}if(b){bg(a,d);if(c){e=bi(a),g=bi(d);for(h=0;e[h];++h)bg(e[h],g[h])}}e=g=null;return d},clean:function(a,b,d,e){var g;b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);var h=[],i;for(var j=0,k;(k=a[j])!=null;j++){typeof k=="number"&&(k+="");if(!k)continue;if(typeof k=="string")if(!_.test(k))k=b.createTextNode(k);else{k=k.replace(Y,"<$1></$2>");var l=(Z.exec(k)||["",""])[1].toLowerCase(),m=be[l]||be._default,n=m[0],o=b.createElement("div");o.innerHTML=m[1]+k+m[2];while(n--)o=o.lastChild;if(!f.support.tbody){var p=$.test(k),q=l==="table"&&!p?o.firstChild&&o.firstChild.childNodes:m[1]==="<table>"&&!p?o.childNodes:[];for(i=q.length-1;i>=0;--i)f.nodeName(q[i],"tbody")&&!q[i].childNodes.length&&q[i].parentNode.removeChild(q[i])}!f.support.leadingWhitespace&&X.test(k)&&o.insertBefore(b.createTextNode(X.exec(k)[0]),o.firstChild),k=o.childNodes}var r;if(!f.support.appendChecked)if(k[0]&&typeof (r=k.length)=="number")for(i=0;i<r;i++)bk(k[i]);else 
bk(k);k.nodeType?h.push(k):h=f.merge(h,k)}if(d){g=function(a){return!a.type||bc.test(a.type)};for(j=0;h[j];j++)if(e&&f.nodeName(h[j],"script")&&(!h[j].type||h[j].type.toLowerCase()==="text/javascript"))e.push(h[j].parentNode?h[j].parentNode.removeChild(h[j]):h[j]);else{if(h[j].nodeType===1){var s=f.grep(h[j].getElementsByTagName("script"),g);h.splice.apply(h,[j+1,0].concat(s))}d.appendChild(h[j])}}return h},cleanData:function(a){var b,c,d=f.cache,e=f.expando,g=f.event.special,h=f.support.deleteExpando;for(var i=0,j;(j=a[i])!=null;i++){if(j.nodeName&&f.noData[j.nodeName.toLowerCase()])continue;c=j[f.expando];if(c){b=d[c]&&d[c][e];if(b&&b.events){for(var k in b.events)g[k]?f.event.remove(j,k):f.removeEvent(j,k,b.handle);b.handle&&(b.handle.elem=null)}h?delete j[f.expando]:j.removeAttribute&&j.removeAttribute(f.expando),delete d[c]}}}});var bm=/alpha\([^)]*\)/i,bn=/opacity=([^)]*)/,bo=/([A-Z]|^ms)/g,bp=/^-?\d+(?:px)?$/i,bq=/^-?\d/,br=/^([\-+])=([\-+.\de]+)/,bs={position:"absolute",visibility:"hidden",display:"block"},bt=["Left","Right"],bu=["Top","Bottom"],bv,bw,bx;f.fn.css=function(a,c){if(arguments.length===2&&c===b)return this;return f.access(this,a,c,!0,function(a,c,d){return d!==b?f.style(a,c,d):f.css(a,c)})},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=bv(a,"opacity","opacity");return c===""?"1":c}return a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=br.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in 
k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in g&&(e=g.get(a,!0,d))!==b)return e;if(bv)return bv(a,c)},swap:function(a,b,c){var d={};for(var e in b)d[e]=a.style[e],a.style[e]=b[e];c.call(a);for(e in b)a.style[e]=d[e]}}),f.curCSS=f.css,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){var e;if(c){if(a.offsetWidth!==0)return by(a,b,d);f.swap(a,bs,function(){e=by(a,b,d)});return e}},set:function(a,b){if(!bp.test(b))return b;b=parseFloat(b);if(b>=0)return b+"px"}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return bn.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=f.isNaN(b)?"":"alpha(opacity="+b*100+")",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bm,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bm.test(g)?g.replace(bm,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){var c;f.swap(a,{display:"inline-block"},function(){b?c=bv(a,"margin-right","marginRight"):c=a.style.marginRight});return c}})}),c.defaultView&&c.defaultView.getComputedStyle&&(bw=function(a,c){var d,e,g;c=c.replace(bo,"-$1").toLowerCase();if(!(e=a.ownerDocument.defaultView))return b;if(g=e.getComputedStyle(a,null))d=g.getPropertyValue(c),d===""&&!f.contains(a.ownerDocument.documentElement,a)&&(d=f.style(a,c));return d}),c.documentElement.currentStyle&&(bx=function(a,b){var c,d=a.currentStyle&&a.currentStyle[b],e=a.runtimeStyle&&a.runtimeStyle[b],f=a.style;!bp.test(d)&&bq.test(d)&&(c=f.left,e&&(a.runtimeStyle.left=a.currentStyle.left),f.left=b==="fontSize"?"1em":d||0,d=f.pixelLeft+"px",f.left=c,e&&(a.runtimeStyle.left=e));return d===""?"auto":d}),bv=bw||bx,f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var 
b=a.offsetWidth,c=a.offsetHeight;return b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)});var bz=/%20/g,bA=/\[\]$/,bB=/\r?\n/g,bC=/#.*$/,bD=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bE=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bF=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bG=/^(?:GET|HEAD)$/,bH=/^\/\//,bI=/\?/,bJ=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,bK=/^(?:select|textarea)/i,bL=/\s+/,bM=/([?&])_=[^&]*/,bN=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bO=f.fn.load,bP={},bQ={},bR,bS,bT=["*/"]+["*"];try{bR=e.href}catch(bU){bR=c.createElement("a"),bR.href="",bR=bR.href}bS=bN.exec(bR.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bO)return bO.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("<div>").append(c.replace(bJ,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bK.test(this.nodeName)||bE.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bB,"\r\n")}}):{name:b.name,value:c.replace(bB,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return 
this.bind(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?bX(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),bX(a,b);return a},ajaxSettings:{url:bR,isLocal:bF.test(bS[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bT},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bV(bP),ajaxTransport:bV(bQ),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?bZ(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=b$(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.resolveWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f._Deferred(),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var 
c;if(s===2){if(!o){o={};while(c=bD.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.done,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bC,"").replace(bH,bS[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bL),d.crossDomain==null&&(r=bN.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bS[1]&&r[2]==bS[2]&&(r[3]||(r[1]==="http:"?80:443))==(bS[3]||(bS[1]==="http:"?80:443)))),d.data&&d.processData&&typeof d.data!="string"&&(d.data=f.param(d.data,d.traditional)),bW(bP,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bG.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bI.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bM,"$1_="+x);d.url=y+(y===d.url?(bI.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bT+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=bW(bQ,d,c,v);if(!p)w(-1,"No Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){s<2?w(-1,z):f.error(z)}}return 
v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)bY(g,a[g],c,e);return d.join("&").replace(bz,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var b_=f.now(),ca=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+b_++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=b.contentType==="application/x-www-form-urlencoded"&&typeof b.data=="string";if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(ca.test(b.url)||e&&ca.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(ca,l),b.url===j&&(e&&(k=k.replace(ca,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var 
d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var cb=a.ActiveXObject?function(){for(var a in cd)cd[a](0,1)}:!1,cc=0,cd;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ce()||cf()}:ce,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,cb&&delete cd[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n),m.text=h.responseText;try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cc,cb&&(cd||(cd={},f(a).unload(cb)),cd[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var 
cg={},ch,ci,cj=/^(?:toggle|show|hide)$/,ck=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,cl,cm=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cn;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(cq("show",3),a,b,c);for(var g=0,h=this.length;g<h;g++)d=this[g],d.style&&(e=d.style.display,!f._data(d,"olddisplay")&&e==="none"&&(e=d.style.display=""),e===""&&f.css(d,"display")==="none"&&f._data(d,"olddisplay",cr(d.nodeName)));for(g=0;g<h;g++){d=this[g];if(d.style){e=d.style.display;if(e===""||e==="none")d.style.display=f._data(d,"olddisplay")||""}}return this},hide:function(a,b,c){if(a||a===0)return this.animate(cq("hide",3),a,b,c);for(var d=0,e=this.length;d<e;d++)if(this[d].style){var g=f.css(this[d],"display");g!=="none"&&!f._data(this[d],"olddisplay")&&f._data(this[d],"olddisplay",g)}for(d=0;d<e;d++)this[d].style&&(this[d].style.display="none");return this},_toggle:f.fn.toggle,toggle:function(a,b,c){var d=typeof a=="boolean";f.isFunction(a)&&f.isFunction(b)?this._toggle.apply(this,arguments):a==null||d?this.each(function(){var b=d?a:f(this).is(":hidden");f(this)[b?"show":"hide"]()}):this.animate(cq("toggle",3),a,b,c);return this},fadeTo:function(a,b,c,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=f.speed(b,c,d);if(f.isEmptyObject(a))return this.each(e.complete,[!1]);a=f.extend({},a);return this[e.queue===!1?"each":"queue"](function(){e.queue===!1&&f._mark(this);var b=f.extend({},e),c=this.nodeType===1,d=c&&f(this).is(":hidden"),g,h,i,j,k,l,m,n,o;b.animatedProperties={};for(i in a){g=f.camelCase(i),i!==g&&(a[g]=a[i],delete a[i]),h=a[g],f.isArray(h)?(b.animatedProperties[g]=h[1],h=a[g]=h[0]):b.animatedProperties[g]=b.specialEasing&&b.specialEasing[g]||b.easing||"swing";if(h==="hide"&&d||h==="show"&&!d)return 
b.complete.call(this);c&&(g==="height"||g==="width")&&(b.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY],f.css(this,"display")==="inline"&&f.css(this,"float")==="none"&&(f.support.inlineBlockNeedsLayout?(j=cr(this.nodeName),j==="inline"?this.style.display="inline-block":(this.style.display="inline",this.style.zoom=1)):this.style.display="inline-block"))}b.overflow!=null&&(this.style.overflow="hidden");for(i in a)k=new f.fx(this,b,i),h=a[i],cj.test(h)?k[h==="toggle"?d?"show":"hide":h]():(l=ck.exec(h),m=k.cur(),l?(n=parseFloat(l[2]),o=l[3]||(f.cssNumber[i]?"":"px"),o!=="px"&&(f.style(this,i,(n||1)+o),m=(n||1)/k.cur()*m,f.style(this,i,m+o)),l[1]&&(n=(l[1]==="-="?-1:1)*n+m),k.custom(m,n,o)):k.custom(m,h,""));return!0})},stop:function(a,b){a&&this.queue([]),this.each(function(){var a=f.timers,c=a.length;b||f._unmark(!0,this);while(c--)a[c].elem===this&&(b&&a[c](!0),a.splice(c,1))}),b||this.dequeue();return this}}),f.each({slideDown:cq("show",1),slideUp:cq("hide",1),slideToggle:cq("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){f.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),f.extend({speed:function(a,b,c){var d=a&&typeof a=="object"?f.extend({},a):{complete:c||!c&&b||f.isFunction(a)&&a,duration:a,easing:c&&b||b&&!f.isFunction(b)&&b};d.duration=f.fx.off?0:typeof d.duration=="number"?d.duration:d.duration in f.fx.speeds?f.fx.speeds[d.duration]:f.fx.speeds._default,d.old=d.complete,d.complete=function(a){f.isFunction(d.old)&&d.old.call(this),d.queue!==!1?f.dequeue(this):a!==!1&&f._unmark(this)};return d},easing:{linear:function(a,b,c,d){return 
c+d*a},swing:function(a,b,c,d){return(-Math.cos(a*Math.PI)/2+.5)*d+c}},timers:[],fx:function(a,b,c){this.options=b,this.elem=a,this.prop=c,b.orig=b.orig||{}}}),f.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this),(f.fx.step[this.prop]||f.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a,b=f.css(this.elem,this.prop);return isNaN(a=parseFloat(b))?!b||b==="auto"?0:b:a},custom:function(a,b,c){function g(a){return d.step(a)}var d=this,e=f.fx;this.startTime=cn||co(),this.start=a,this.end=b,this.unit=c||this.unit||(f.cssNumber[this.prop]?"":"px"),this.now=this.start,this.pos=this.state=0,g.elem=this.elem,g()&&f.timers.push(g)&&!cl&&(cl=setInterval(e.tick,e.interval))},show:function(){this.options.orig[this.prop]=f.style(this.elem,this.prop),this.options.show=!0,this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur()),f(this.elem).show()},hide:function(){this.options.orig[this.prop]=f.style(this.elem,this.prop),this.options.hide=!0,this.custom(this.cur(),0)},step:function(a){var b=cn||co(),c=!0,d=this.elem,e=this.options,g,h;if(a||b>=e.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),e.animatedProperties[this.prop]=!0;for(g in e.animatedProperties)e.animatedProperties[g]!==!0&&(c=!1);if(c){e.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){d.style["overflow"+b]=e.overflow[a]}),e.hide&&f(d).hide();if(e.hide||e.show)for(var i in e.animatedProperties)f.style(d,i,e.orig[i]);e.complete.call(d)}return!1}e.duration==Infinity?this.now=b:(h=b-this.startTime,this.state=h/e.duration,this.pos=f.easing[e.animatedProperties[this.prop]](this.state,h,0,1,e.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){for(var 
a=f.timers,b=0;b<a.length;++b)a[b]()||a.splice(b--,1);a.length||f.fx.stop()},interval:13,stop:function(){clearInterval(cl),cl=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){f.style(a.elem,"opacity",a.now)},_default:function(a){a.elem.style&&a.elem.style[a.prop]!=null?a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit:a.elem[a.prop]=a.now}}}),f.expr&&f.expr.filters&&(f.expr.filters.animated=function(a){return f.grep(f.timers,function(b){return a===b.elem}).length});var cs=/^t(?:able|d|h)$/i,ct=/^(?:body|html)$/i;"getBoundingClientRect"in c.documentElement?f.fn.offset=function(a){var b=this[0],c;if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);try{c=b.getBoundingClientRect()}catch(d){}var e=b.ownerDocument,g=e.documentElement;if(!c||!f.contains(g,b))return c?{top:c.top,left:c.left}:{top:0,left:0};var h=e.body,i=cu(e),j=g.clientTop||h.clientTop||0,k=g.clientLeft||h.clientLeft||0,l=i.pageYOffset||f.support.boxModel&&g.scrollTop||h.scrollTop,m=i.pageXOffset||f.support.boxModel&&g.scrollLeft||h.scrollLeft,n=c.top+l-j,o=c.left+m-k;return{top:n,left:o}}:f.fn.offset=function(a){var b=this[0];if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);f.offset.initialize();var 
c,d=b.offsetParent,e=b,g=b.ownerDocument,h=g.documentElement,i=g.body,j=g.defaultView,k=j?j.getComputedStyle(b,null):b.currentStyle,l=b.offsetTop,m=b.offsetLeft;while((b=b.parentNode)&&b!==i&&b!==h){if(f.offset.supportsFixedPosition&&k.position==="fixed")break;c=j?j.getComputedStyle(b,null):b.currentStyle,l-=b.scrollTop,m-=b.scrollLeft,b===d&&(l+=b.offsetTop,m+=b.offsetLeft,f.offset.doesNotAddBorder&&(!f.offset.doesAddBorderForTableAndCells||!cs.test(b.nodeName))&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),e=d,d=b.offsetParent),f.offset.subtractsBorderForOverflowNotVisible&&c.overflow!=="visible"&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),k=c}if(k.position==="relative"||k.position==="static")l+=i.offsetTop,m+=i.offsetLeft;f.offset.supportsFixedPosition&&k.position==="fixed"&&(l+=Math.max(h.scrollTop,i.scrollTop),m+=Math.max(h.scrollLeft,i.scrollLeft));return{top:l,left:m}},f.offset={initialize:function(){var a=c.body,b=c.createElement("div"),d,e,g,h,i=parseFloat(f.css(a,"marginTop"))||0,j="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' 
cellspacing='0'><tr><td></td></tr></table>";f.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"}),b.innerHTML=j,a.insertBefore(b,a.firstChild),d=b.firstChild,e=d.firstChild,h=d.nextSibling.firstChild.firstChild,this.doesNotAddBorder=e.offsetTop!==5,this.doesAddBorderForTableAndCells=h.offsetTop===5,e.style.position="fixed",e.style.top="20px",this.supportsFixedPosition=e.offsetTop===20||e.offsetTop===15,e.style.position=e.style.top="",d.style.overflow="hidden",d.style.position="relative",this.subtractsBorderForOverflowNotVisible=e.offsetTop===-5,this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==i,a.removeChild(b),f.offset.initialize=f.noop},bodyOffset:function(a){var b=a.offsetTop,c=a.offsetLeft;f.offset.initialize(),f.offset.doesNotIncludeMarginInBodyOffset&&(b+=parseFloat(f.css(a,"marginTop"))||0,c+=parseFloat(f.css(a,"marginLeft"))||0);return{top:b,left:c}},setOffset:function(a,b,c){var d=f.css(a,"position");d==="static"&&(a.style.position="relative");var e=f(a),g=e.offset(),h=f.css(a,"top"),i=f.css(a,"left"),j=(d==="absolute"||d==="fixed")&&f.inArray("auto",[h,i])>-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=ct.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!ct.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return 
a})}}),f.each(["Left","Top"],function(a,c){var d="scroll"+c;f.fn[d]=function(c){var e,g;if(c===b){e=this[0];if(!e)return null;g=cu(e);return g?"pageXOffset"in g?g[a?"pageYOffset":"pageXOffset"]:f.support.boxModel&&g.document.documentElement[d]||g.document.body[d]:e[d]}return this.each(function(){g=cu(this),g?g.scrollTo(a?f(g).scrollLeft():c,a?c:f(g).scrollTop()):this[d]=c})}}),f.each(["Height","Width"],function(a,c){var d=c.toLowerCase();f.fn["inner"+c]=function(){var a=this[0];return a&&a.style?parseFloat(f.css(a,d,"padding")):null},f.fn["outer"+c]=function(a){var b=this[0];return b&&b.style?parseFloat(f.css(b,d,a?"margin":"border")):null},f.fn[d]=function(a){var e=this[0];if(!e)return a==null?null:this;if(f.isFunction(a))return this.each(function(b){var c=f(this);c[d](a.call(this,b,c[d]()))});if(f.isWindow(e)){var g=e.document.documentElement["client"+c],h=e.document.body;return e.document.compatMode==="CSS1Compat"&&g||h&&h["client"+c]||g}if(e.nodeType===9)return Math.max(e.documentElement["client"+c],e.body["scroll"+c],e.documentElement["scroll"+c],e.body["offset"+c],e.documentElement["offset"+c]);if(a===b){var i=f.css(e,d),j=parseFloat(i);return f.isNaN(j)?i:j}return this.css(d,typeof a=="string"?a:a+"px")}}),a.jQuery=a.$=f})(window);
--- /dev/null
+/* Javascript plotting library for jQuery, version 0.8.1.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+*/
+
+// first an inline dependency, jquery.colorhelpers.js, we inline it here
+// for convenience
+
+/* Plugin for jQuery for working with colors.
+ *
+ * Version 1.1.
+ *
+ * Inspiration from jQuery color animation plugin by John Resig.
+ *
+ * Released under the MIT license by Ole Laursen, October 2009.
+ *
+ * Examples:
+ *
+ * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
+ * var c = $.color.extract($("#mydiv"), 'background-color');
+ * console.log(c.r, c.g, c.b, c.a);
+ * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
+ *
+ * Note that .scale() and .add() return the same modified object
+ * instead of making a new one.
+ *
+ * V. 1.1: Fix error handling so e.g. parsing an empty string does
+ * produce a color rather than just crashing.
+ */
+// Inline jquery.colorhelpers.js (minified, vendored). Fix applied here:
+// .clone() previously passed (r, b, g, a) to make(), silently swapping the
+// green and blue channels of the copied color; it now preserves channel
+// order (r, g, b, a) so clone() returns an exact copy.
+(function(B){B.color={};B.color.make=function(F,E,C,D){var G={};G.r=F||0;G.g=E||0;G.b=C||0;G.a=D!=null?D:1;G.add=function(J,I){for(var H=0;H<J.length;++H){G[J.charAt(H)]+=I}return G.normalize()};G.scale=function(J,I){for(var H=0;H<J.length;++H){G[J.charAt(H)]*=I}return G.normalize()};G.toString=function(){if(G.a>=1){return"rgb("+[G.r,G.g,G.b].join(",")+")"}else{return"rgba("+[G.r,G.g,G.b,G.a].join(",")+")"}};G.normalize=function(){function H(J,K,I){return K<J?J:(K>I?I:K)}G.r=H(0,parseInt(G.r),255);G.g=H(0,parseInt(G.g),255);G.b=H(0,parseInt(G.b),255);G.a=H(0,G.a,1);return G};G.clone=function(){return B.color.make(G.r,G.g,G.b,G.a)};return G.normalize()};B.color.extract=function(D,C){var E;do{E=D.css(C).toLowerCase();if(E!=""&&E!="transparent"){break}D=D.parent()}while(!B.nodeName(D.get(0),"body"));if(E=="rgba(0, 0, 0, 0)"){E="transparent"}return B.color.parse(E)};B.color.parse=function(F){var E,C=B.color.make;if(E=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(F)){return C(parseInt(E[1],10),parseInt(E[2],10),parseInt(E[3],10))}if(E=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(F)){return C(parseInt(E[1],10),parseInt(E[2],10),parseInt(E[3],10),parseFloat(E[4]))}if(E=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(F)){return C(parseFloat(E[1])*2.55,parseFloat(E[2])*2.55,parseFloat(E[3])*2.55)}if(E=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(F)){return C(parseFloat(E[1])*2.55,parseFloat(E[2])*2.55,parseFloat(E[3])*2.55,parseFloat(E[4]))}if(E=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(F)){return C(parseInt(E[1],16),parseInt(E[2],16),parseInt(E[3],16))}if(E=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(F)){return C(parseInt(E[1]+E[1],16),parseInt(E[2]+E[2],16),parseInt(E[3]+E[3],16))}var 
D=B.trim(F).toLowerCase();if(D=="transparent"){return C(255,255,255,0)}else{E=A[D]||[0,0,0];return C(E[0],E[1],E[2])}};var A={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery);
+
+// the actual Flot code
+(function($) {
+
+ // Cache the prototype hasOwnProperty for faster access
+
+ var hasOwnProperty = Object.prototype.hasOwnProperty;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The Canvas object is a wrapper around an HTML5 <canvas> tag.
+ //
+ // @constructor
+ // @param {string} cls List of classes to apply to the canvas.
+ // @param {element} container Element onto which to append the canvas.
+ //
+ // Requiring a container is a little iffy, but unfortunately canvas
+ // operations don't work unless the canvas is attached to the DOM.
+
+ function Canvas(cls, container) {
+
+ // Reuse a canvas already carrying this class inside the container (e.g.
+ // when the same placeholder is re-plotted); children(...)[0] yields
+ // undefined when none exists, which the loose == null test below matches.
+ var element = container.children("." + cls)[0];
+
+ if (element == null) {
+
+ element = document.createElement("canvas");
+ element.className = cls;
+
+ $(element).css({ direction: "ltr", position: "absolute", left: 0, top: 0 })
+ .appendTo(container);
+
+ // If HTML5 Canvas isn't available, fall back to [Ex|Flash]canvas
+
+ if (!element.getContext) {
+ if (window.G_vmlCanvasManager) {
+ element = window.G_vmlCanvasManager.initElement(element);
+ } else {
+ throw new Error("Canvas is not available. If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");
+ }
+ }
+ }
+
+ this.element = element;
+
+ var context = this.context = element.getContext("2d");
+
+ // Determine the screen's ratio of physical to device-independent
+ // pixels. This is the ratio between the canvas width that the browser
+ // advertises and the number of pixels actually present in that space.
+
+ // The iPhone 4, for example, has a device-independent width of 320px,
+ // but its screen is actually 640px wide. It therefore has a pixel
+ // ratio of 2, while most normal devices have a ratio of 1.
+
+ // Vendor-prefixed backingStorePixelRatio fields are probed because no
+ // single unprefixed property was reliable across browsers at the time.
+ var devicePixelRatio = window.devicePixelRatio || 1,
+ backingStoreRatio =
+ context.webkitBackingStorePixelRatio ||
+ context.mozBackingStorePixelRatio ||
+ context.msBackingStorePixelRatio ||
+ context.oBackingStorePixelRatio ||
+ context.backingStorePixelRatio || 1;
+
+ this.pixelRatio = devicePixelRatio / backingStoreRatio;
+
+ // Size the canvas to match the internal dimensions of its container
+
+ this.resize(container.width(), container.height());
+
+ // Collection of HTML div layers for text overlaid onto the canvas
+
+ this.textContainer = null;
+ this.text = {};
+
+ // Cache of text fragments and metrics, so we can avoid expensively
+ // re-calculating them when the plot is re-rendered in a loop.
+
+ this._textCache = {};
+ }
+
+ // Resizes the canvas to the given dimensions.
+ //
+ // @param {number} width New width of the canvas, in pixels.
+ // @param {number} height New height of the canvas, in pixels.
+
+ Canvas.prototype.resize = function(width, height) {
+
+ // width/height are logical CSS pixels; this.width/this.height cache
+ // them so that calls with unchanged dimensions skip the reset below.
+ if (width <= 0 || height <= 0) {
+ throw new Error("Invalid dimensions for plot, width = " + width + ", height = " + height);
+ }
+
+ var element = this.element,
+ context = this.context,
+ pixelRatio = this.pixelRatio;
+
+ // Resize the canvas, increasing its density based on the display's
+ // pixel ratio; basically giving it more pixels without increasing the
+ // size of its element, to take advantage of the fact that retina
+ // displays have that many more pixels in the same advertised space.
+
+ // Resizing should reset the state (excanvas seems to be buggy though)
+
+ if (this.width != width) {
+ // Backing store gets width * pixelRatio pixels; CSS size stays at
+ // the logical width, so the element's on-screen size is unchanged.
+ element.width = width * pixelRatio;
+ element.style.width = width + "px";
+ this.width = width;
+ }
+
+ if (this.height != height) {
+ element.height = height * pixelRatio;
+ element.style.height = height + "px";
+ this.height = height;
+ }
+
+ // Save the context, so we can reset in case we get replotted. The
+ // restore ensure that we're really back at the initial state, and
+ // should be safe even if we haven't saved the initial state yet.
+
+ context.restore();
+ context.save();
+
+ // Scale the coordinate space to match the display density; so even though we
+ // may have twice as many pixels, we still want lines and other drawing to
+ // appear at the same size; the extra pixels will just make them crisper.
+
+ context.scale(pixelRatio, pixelRatio);
+ };
+
+ // Clears the entire canvas area, not including any overlaid HTML text
+
+ Canvas.prototype.clear = function() {
+ // this.width/this.height are the logical (CSS-pixel) dimensions set in
+ // resize(); the context scale applied there maps this rectangle over
+ // the whole backing store, so the full drawing surface is cleared.
+ this.context.clearRect(0, 0, this.width, this.height);
+ };
+
+ // Finishes rendering the canvas, including managing the text overlay.
+
+ Canvas.prototype.render = function() {
+
+ var cache = this._textCache;
+
+ // For each text layer, add elements marked as active that haven't
+ // already been rendered, and remove those that are no longer active.
+
+ // Cache structure: layer key -> style key -> text key -> info with a
+ // positions array; see getTextInfo for how entries are created.
+ for (var layerKey in cache) {
+ if (hasOwnProperty.call(cache, layerKey)) {
+
+ var layer = this.getTextLayer(layerKey),
+ layerCache = cache[layerKey];
+
+ // Layer stays hidden while its children are mutated; it is
+ // shown again at the bottom of this loop iteration.
+ layer.hide();
+
+ for (var styleKey in layerCache) {
+ if (hasOwnProperty.call(layerCache, styleKey)) {
+ var styleCache = layerCache[styleKey];
+ for (var key in styleCache) {
+ if (hasOwnProperty.call(styleCache, key)) {
+
+ var positions = styleCache[key].positions;
+
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.active) {
+ if (!position.rendered) {
+ layer.append(position.element);
+ position.rendered = true;
+ }
+ } else {
+ // splice removes the current entry and shifts the
+ // rest down; step the index back so the entry now
+ // occupying this slot isn't skipped.
+ positions.splice(i--, 1);
+ if (position.rendered) {
+ position.element.detach();
+ }
+ }
+ }
+
+ // Drop cache entries whose every position went inactive.
+ if (positions.length == 0) {
+ delete styleCache[key];
+ }
+ }
+ }
+ }
+ }
+
+ layer.show();
+ }
+ }
+ };
+
+ // Creates (if necessary) and returns the text overlay container.
+ //
+ // @param {string} classes String of space-separated CSS classes used to
+ // uniquely identify the text layer.
+ // @return {object} The jQuery-wrapped text-layer div.
+
+ Canvas.prototype.getTextLayer = function(classes) {
+
+ // 'classes' doubles as the cache key in this.text and as the CSS class
+ // list applied to the layer div created below.
+ var layer = this.text[classes];
+
+ // Create the text layer if it doesn't exist
+
+ if (layer == null) {
+
+ // Create the text layer container, if it doesn't exist
+
+ if (this.textContainer == null) {
+ // Inserted as a sibling immediately after the canvas element,
+ // stretched over the same area (top/left/bottom/right all 0).
+ this.textContainer = $("<div class='flot-text'></div>")
+ .css({
+ position: "absolute",
+ top: 0,
+ left: 0,
+ bottom: 0,
+ right: 0,
+ 'font-size': "smaller",
+ color: "#545454"
+ })
+ .insertAfter(this.element);
+ }
+
+ layer = this.text[classes] = $("<div></div>")
+ .addClass(classes)
+ .css({
+ position: "absolute",
+ top: 0,
+ left: 0,
+ bottom: 0,
+ right: 0
+ })
+ .appendTo(this.textContainer);
+ }
+
+ return layer;
+ };
+
+ // Creates (if necessary) and returns a text info object.
+ //
+ // The object looks like this:
+ //
+ // {
+ // width: Width of the text's wrapper div.
+ // height: Height of the text's wrapper div.
+ // element: The jQuery-wrapped HTML div containing the text.
+ // positions: Array of positions at which this text is drawn.
+ // }
+ //
+ // The positions array contains objects that look like this:
+ //
+ // {
+ // active: Flag indicating whether the text should be visible.
+ // rendered: Flag indicating whether the text is currently visible.
+ // element: The jQuery-wrapped HTML div containing the text.
+ // x: X coordinate at which to draw the text.
+ // y: Y coordinate at which to draw the text.
+ // }
+ //
+ // Each position after the first receives a clone of the original element.
+ //
+ // The idea is that that the width, height, and general 'identity' of the
+ // text is constant no matter where it is placed; the placements are a
+ // secondary property.
+ //
+ // Canvas maintains a cache of recently-used text info objects; getTextInfo
+ // either returns the cached element or creates a new entry.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {string} text Text string to retrieve info for.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which to rotate the text, in degrees.
+ // Angle is currently unused, it will be implemented in the future.
+ // @param {number=} width Maximum width of the text before it wraps.
+ // @return {object} a text info object.
+
+ Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) {
+
+ // Cache lookup is keyed layer -> resolved font/style string -> text.
+ // 'angle' is accepted but not used in the lookup (rotation unimplemented).
+ var textStyle, layerCache, styleCache, info;
+
+ // Cast the value to a string, in case we were given a number or such
+
+ text = "" + text;
+
+ // If the font is a font-spec object, generate a CSS font definition
+
+ if (typeof font === "object") {
+ textStyle = font.style + " " + font.variant + " " + font.weight + " " + font.size + "px/" + font.lineHeight + "px " + font.family;
+ } else {
+ textStyle = font;
+ }
+
+ // Retrieve (or create) the cache for the text's layer and styles
+
+ layerCache = this._textCache[layer];
+
+ if (layerCache == null) {
+ layerCache = this._textCache[layer] = {};
+ }
+
+ styleCache = layerCache[textStyle];
+
+ if (styleCache == null) {
+ styleCache = layerCache[textStyle] = {};
+ }
+
+ info = styleCache[text];
+
+ // If we can't find a matching element in our cache, create a new one
+
+ if (info == null) {
+
+ // The div is parked off-screen (top: -9999) and appended to its
+ // layer so outerWidth/outerHeight below measure with real styles.
+ var element = $("<div></div>").html(text)
+ .css({
+ position: "absolute",
+ 'max-width': width,
+ top: -9999
+ })
+ .appendTo(this.getTextLayer(layer));
+
+ if (typeof font === "object") {
+ element.css({
+ font: textStyle,
+ color: font.color
+ });
+ } else if (typeof font === "string") {
+ element.addClass(font);
+ }
+
+ // outerWidth/outerHeight(true) include margins in the cached metrics.
+ info = styleCache[text] = {
+ width: element.outerWidth(true),
+ height: element.outerHeight(true),
+ element: element,
+ positions: []
+ };
+
+ // Detach after measuring; render() re-appends the element once a
+ // position referring to it becomes active.
+ element.detach();
+ }
+
+ return info;
+ };
+
+ // Adds a text string to the canvas text overlay.
+ //
+ // The text isn't drawn immediately; it is marked as rendering, which will
+ // result in its addition to the canvas on the next render pass.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {number} x X coordinate at which to draw the text.
+ // @param {number} y Y coordinate at which to draw the text.
+ // @param {string} text Text string to draw.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which to rotate the text, in degrees.
+ // Angle is currently unused, it will be implemented in the future.
+ // @param {number=} width Maximum width of the text before it wraps.
+ // @param {string=} halign Horizontal alignment of the text; either "left",
+ // "center" or "right".
+ // @param {string=} valign Vertical alignment of the text; either "top",
+ // "middle" or "bottom".
+
+ Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign) {
+
+ var info = this.getTextInfo(layer, text, font, angle, width),
+ positions = info.positions;
+
+ // Tweak the div's position to match the text's alignment
+
+ // Note: x/y are adjusted for alignment *before* the position lookup
+ // below, so a cached position is only reused for the same alignment.
+ if (halign == "center") {
+ x -= info.width / 2;
+ } else if (halign == "right") {
+ x -= info.width;
+ }
+
+ if (valign == "middle") {
+ y -= info.height / 2;
+ } else if (valign == "bottom") {
+ y -= info.height;
+ }
+
+ // Determine whether this text already exists at this position.
+ // If so, mark it for inclusion in the next render pass.
+
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.x == x && position.y == y) {
+ position.active = true;
+ return;
+ }
+ }
+
+ // If the text doesn't exist at this position, create a new entry
+
+ // For the very first position we'll re-use the original element,
+ // while for subsequent ones we'll clone it.
+
+ position = {
+ active: true,
+ rendered: false,
+ element: positions.length ? info.element.clone() : info.element,
+ x: x,
+ y: y
+ }
+
+ // NOTE(review): no semicolon after the object literal above — this
+ // relies on automatic semicolon insertion.
+ positions.push(position);
+
+ // Move the element to its final position within the container
+
+ position.element.css({
+ top: Math.round(y),
+ left: Math.round(x),
+ 'text-align': halign // In case the text wraps
+ });
+ };
+
+ // Removes one or more text strings from the canvas text overlay.
+ //
+ // If no parameters are given, all text within the layer is removed.
+ //
+ // Note that the text is not immediately removed; it is simply marked as
+ // inactive, which will result in its removal on the next render pass.
+ // This avoids the performance penalty for 'clear and redraw' behavior,
+ // where we potentially get rid of all text on a layer, but will likely
+ // add back most or all of it later, as when redrawing axes, for example.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {number=} x X coordinate of the text.
+ // @param {number=} y Y coordinate of the text.
+ // @param {string=} text Text string to remove.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which the text is rotated, in degrees.
+ // Angle is currently unused, it will be implemented in the future.
+
+ Canvas.prototype.removeText = function(layer, x, y, text, font, angle) {
+ // With no text given, every position in the layer is merely marked
+ // inactive; the actual DOM removal happens on the next render() pass.
+ if (text == null) {
+ var layerCache = this._textCache[layer];
+ if (layerCache != null) {
+ for (var styleKey in layerCache) {
+ if (hasOwnProperty.call(layerCache, styleKey)) {
+ var styleCache = layerCache[styleKey];
+ for (var key in styleCache) {
+ if (hasOwnProperty.call(styleCache, key)) {
+ var positions = styleCache[key].positions;
+ for (var i = 0, position; position = positions[i]; i++) {
+ position.active = false;
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ // Only the position matching both coordinates exactly is deactivated;
+ // x/y must match the alignment-adjusted values stored by addText.
+ var positions = this.getTextInfo(layer, text, font, angle).positions;
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.x == x && position.y == y) {
+ position.active = false;
+ }
+ }
+ }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The top-level container for the entire plot.
+
+ function Plot(placeholder, data_, options_, plugins) {
+ // data is on the form:
+ // [ series1, series2 ... ]
+ // where series is either just the data as [ [x1, y1], [x2, y2], ... ]
+ // or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... }
+
+ var series = [],
+ options = {
+ // the color theme used for graphs
+ colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"],
+ legend: {
+ show: true,
+ noColumns: 1, // number of colums in legend table
+ labelFormatter: null, // fn: string -> string
+ labelBoxBorderColor: "#ccc", // border color for the little label boxes
+ container: null, // container (as jQuery object) to put legend in, null means default on top of graph
+ position: "ne", // position of default legend container within plot
+ margin: 5, // distance from grid edge to default legend container within plot
+ backgroundColor: null, // null means auto-detect
+ backgroundOpacity: 0.85, // set to 0 to avoid background
+ sorted: null // default to no legend sorting
+ },
+ xaxis: {
+ show: null, // null = auto-detect, true = always, false = never
+ position: "bottom", // or "top"
+ mode: null, // null or "time"
+ font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" }
+ color: null, // base color, labels, ticks
+ tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)"
+ transform: null, // null or f: number -> number to transform axis
+ inverseTransform: null, // if transform is set, this should be the inverse function
+ min: null, // min. value to show, null means set automatically
+ max: null, // max. value to show, null means set automatically
+ autoscaleMargin: null, // margin in % to add if auto-setting min/max
+ ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or app. number of ticks for auto-ticks
+ tickFormatter: null, // fn: number -> string
+ labelWidth: null, // size of tick labels in pixels
+ labelHeight: null,
+ reserveSpace: null, // whether to reserve space even if axis isn't shown
+ tickLength: null, // size in pixels of ticks, or "full" for whole line
+ alignTicksWithAxis: null, // axis number or null for no sync
+ tickDecimals: null, // no. of decimals, null means auto
+ tickSize: null, // number or [number, "unit"]
+ minTickSize: null // number or [number, "unit"]
+ },
+ yaxis: {
+ autoscaleMargin: 0.02,
+ position: "left" // or "right"
+ },
+ xaxes: [],
+ yaxes: [],
+ series: {
+ points: {
+ show: false,
+ radius: 3,
+ lineWidth: 2, // in pixels
+ fill: true,
+ fillColor: "#ffffff",
+ symbol: "circle" // or callback
+ },
+ lines: {
+ // we don't put in show: false so we can see
+ // whether lines were actively disabled
+ lineWidth: 2, // in pixels
+ fill: false,
+ fillColor: null,
+ steps: false
+ // Omit 'zero', so we can later default its value to
+ // match that of the 'fill' option.
+ },
+ bars: {
+ show: false,
+ lineWidth: 2, // in pixels
+ barWidth: 1, // in units of the x axis
+ fill: true,
+ fillColor: null,
+ align: "left", // "left", "right", or "center"
+ horizontal: false,
+ zero: true
+ },
+ shadowSize: 3,
+ highlightColor: null
+ },
+ grid: {
+ show: true,
+ aboveData: false,
+ color: "#545454", // primary color used for outline and labels
+ backgroundColor: null, // null for transparent, else color
+ borderColor: null, // set if different from the grid color
+ tickColor: null, // color for the ticks, e.g. "rgba(0,0,0,0.15)"
+ margin: 0, // distance from the canvas edge to the grid
+ labelMargin: 5, // in pixels
+ axisMargin: 8, // in pixels
+ borderWidth: 2, // in pixels
+ minBorderMargin: null, // in pixels, null means taken from points radius
+ markings: null, // array of ranges or fn: axes -> array of ranges
+ markingsColor: "#f4f4f4",
+ markingsLineWidth: 2,
+ // interactive stuff
+ clickable: false,
+ hoverable: false,
+ autoHighlight: true, // highlight in case mouse is near
+ mouseActiveRadius: 10 // how far the mouse can be away to activate an item
+ },
+ interaction: {
+ redrawOverlayInterval: 1000/60 // time between updates, -1 means in same flow
+ },
+ hooks: {}
+ },
+ surface = null, // the canvas for the plot itself
+ overlay = null, // canvas for interactive stuff on top of plot
+ eventHolder = null, // jQuery object that events should be bound to
+ ctx = null, octx = null,
+ xaxes = [], yaxes = [],
+ plotOffset = { left: 0, right: 0, top: 0, bottom: 0},
+ plotWidth = 0, plotHeight = 0,
+ hooks = {
+ processOptions: [],
+ processRawData: [],
+ processDatapoints: [],
+ processOffset: [],
+ drawBackground: [],
+ drawSeries: [],
+ draw: [],
+ bindEvents: [],
+ drawOverlay: [],
+ shutdown: []
+ },
+ plot = this;
+
+ // public functions
+ plot.setData = setData;
+ plot.setupGrid = setupGrid;
+ plot.draw = draw;
+ plot.getPlaceholder = function() { return placeholder; };
+ plot.getCanvas = function() { return surface.element; };
+ plot.getPlotOffset = function() { return plotOffset; };
+ plot.width = function () { return plotWidth; };
+ plot.height = function () { return plotHeight; };
+ plot.offset = function () {
+ var o = eventHolder.offset();
+ o.left += plotOffset.left;
+ o.top += plotOffset.top;
+ return o;
+ };
+ plot.getData = function () { return series; };
+ plot.getAxes = function () {
+ var res = {}, i;
+ $.each(xaxes.concat(yaxes), function (_, axis) {
+ if (axis)
+ res[axis.direction + (axis.n != 1 ? axis.n : "") + "axis"] = axis;
+ });
+ return res;
+ };
+ plot.getXAxes = function () { return xaxes; };
+ plot.getYAxes = function () { return yaxes; };
+ plot.c2p = canvasToAxisCoords;
+ plot.p2c = axisToCanvasCoords;
+ plot.getOptions = function () { return options; };
+ plot.highlight = highlight;
+ plot.unhighlight = unhighlight;
+ plot.triggerRedrawOverlay = triggerRedrawOverlay;
+ plot.pointOffset = function(point) {
+ return {
+ left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10),
+ top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10)
+ };
+ };
+ plot.shutdown = shutdown;
+ plot.resize = function () {
+ var width = placeholder.width(),
+ height = placeholder.height();
+ surface.resize(width, height);
+ overlay.resize(width, height);
+ };
+
+ // public attributes
+ plot.hooks = hooks;
+
+ // initialize
+ initPlugins(plot);
+ parseOptions(options_);
+ setupCanvases();
+ setData(data_);
+ setupGrid();
+ draw();
+ bindEvents();
+
+
+ function executeHooks(hook, args) {
+ args = [plot].concat(args);
+ for (var i = 0; i < hook.length; ++i)
+ hook[i].apply(this, args);
+ }
+
+ function initPlugins() {
+
+ // References to key classes, allowing plugins to modify them
+
+ var classes = {
+ Canvas: Canvas
+ };
+
+ for (var i = 0; i < plugins.length; ++i) {
+ var p = plugins[i];
+ p.init(plot, classes);
+ if (p.options)
+ $.extend(true, options, p.options);
+ }
+ }
+
// Merge user-supplied options into the defaults and normalize the
// result: resolve inherited colors, expand per-axis option objects,
// translate deprecated top-level options, and register option hooks.
function parseOptions(opts) {

    $.extend(true, options, opts);

    // $.extend merges arrays, rather than replacing them. When less
    // colors are provided than the size of the default palette, we
    // end up with those colors plus the remaining defaults, which is
    // not expected behavior; avoid it by replacing them here.

    if (opts && opts.colors) {
        options.colors = opts.colors;
    }

    // axis and tick colors default to a washed-out (22% alpha)
    // version of the overall grid color
    if (options.xaxis.color == null)
        options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
    if (options.yaxis.color == null)
        options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();

    if (options.xaxis.tickColor == null) // grid.tickColor for back-compatibility
        options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color;
    if (options.yaxis.tickColor == null) // grid.tickColor for back-compatibility
        options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color;

    if (options.grid.borderColor == null)
        options.grid.borderColor = options.grid.color;
    if (options.grid.tickColor == null)
        options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString();

    // Fill in defaults for axis options, including any unspecified
    // font-spec fields, if a font-spec was provided.

    // If no x/y axis options were provided, create one of each anyway,
    // since the rest of the code assumes that they exist.

    var i, axisOptions, axisCount,
        fontDefaults = {
            style: placeholder.css("font-style"),
            size: Math.round(0.8 * (+placeholder.css("font-size").replace("px", "") || 13)),
            variant: placeholder.css("font-variant"),
            weight: placeholder.css("font-weight"),
            family: placeholder.css("font-family")
        };

    fontDefaults.lineHeight = fontDefaults.size * 1.15;

    axisCount = options.xaxes.length || 1;
    for (i = 0; i < axisCount; ++i) {

        axisOptions = options.xaxes[i];
        if (axisOptions && !axisOptions.tickColor) {
            axisOptions.tickColor = axisOptions.color;
        }

        // layer the generic xaxis defaults under the per-axis options
        axisOptions = $.extend(true, {}, options.xaxis, axisOptions);
        options.xaxes[i] = axisOptions;

        if (axisOptions.font) {
            axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
            if (!axisOptions.font.color) {
                axisOptions.font.color = axisOptions.color;
            }
        }
    }

    axisCount = options.yaxes.length || 1;
    for (i = 0; i < axisCount; ++i) {

        axisOptions = options.yaxes[i];
        if (axisOptions && !axisOptions.tickColor) {
            axisOptions.tickColor = axisOptions.color;
        }

        // layer the generic yaxis defaults under the per-axis options
        axisOptions = $.extend(true, {}, options.yaxis, axisOptions);
        options.yaxes[i] = axisOptions;

        if (axisOptions.font) {
            axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
            if (!axisOptions.font.color) {
                axisOptions.font.color = axisOptions.color;
            }
        }
    }

    // backwards compatibility, to be removed in future
    if (options.xaxis.noTicks && options.xaxis.ticks == null)
        options.xaxis.ticks = options.xaxis.noTicks;
    if (options.yaxis.noTicks && options.yaxis.ticks == null)
        options.yaxis.ticks = options.yaxis.noTicks;
    if (options.x2axis) {
        options.xaxes[1] = $.extend(true, {}, options.xaxis, options.x2axis);
        options.xaxes[1].position = "top";
    }
    if (options.y2axis) {
        options.yaxes[1] = $.extend(true, {}, options.yaxis, options.y2axis);
        options.yaxes[1].position = "right";
    }
    if (options.grid.coloredAreas)
        options.grid.markings = options.grid.coloredAreas;
    if (options.grid.coloredAreasColor)
        options.grid.markingsColor = options.grid.coloredAreasColor;
    if (options.lines)
        $.extend(true, options.series.lines, options.lines);
    if (options.points)
        $.extend(true, options.series.points, options.points);
    if (options.bars)
        $.extend(true, options.series.bars, options.bars);
    if (options.shadowSize != null)
        options.series.shadowSize = options.shadowSize;
    if (options.highlightColor != null)
        options.series.highlightColor = options.highlightColor;

    // save options on axes for future reference
    for (i = 0; i < options.xaxes.length; ++i)
        getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i];
    for (i = 0; i < options.yaxes.length; ++i)
        getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i];

    // add hooks from options
    for (var n in hooks)
        if (options.hooks[n] && options.hooks[n].length)
            hooks[n] = hooks[n].concat(options.hooks[n]);

    executeHooks(hooks.processOptions, [options]);
}
+
// Replace the plot's data: normalize the raw input into series
// objects, fill in per-series defaults (colors, axes), then recompute
// the flat datapoints representation.
function setData(d) {
    series = parseData(d);
    fillInSeriesOptions();
    processData();
}
+
// Normalize the raw input into an array of series objects, each one
// seeded with a deep copy of the default series options.
function parseData(d) {
    var res = [];
    for (var i = 0; i < d.length; ++i) {
        var s = $.extend(true, {}, options.series);

        if (d[i].data != null) {
            // entry is a series object: temporarily detach its
            // (possibly large) data array so $.extend doesn't deep-copy it
            s.data = d[i].data; // move the data instead of deep-copy
            delete d[i].data;

            $.extend(true, s, d[i]);

            // reattach so the caller's object is left as we found it
            d[i].data = s.data;
        }
        else
            // entry is a bare array of points
            s.data = d[i];
        res.push(s);
    }

    return res;
}
+
// Resolve the 1-based axis number for coordinate "x" or "y" on a
// series/point spec. The spec may hold a concrete axis object (with
// an .n field) or a plain number; anything else falls back to the
// first axis.
function axisNumber(obj, coord) {
    var n = obj[coord + "axis"];
    if (typeof n == "object") {
        // a real axis object was stored; pull out its number
        n = n.n;
    }
    return typeof n == "number" ? n : 1;
}
+
// Return a flat list of every axis that actually exists, x axes
// first, skipping the empty (falsy) slots in the sparse arrays.
function allAxes() {
    var axes = [], i;
    for (i = 0; i < xaxes.length; ++i)
        if (xaxes[i])
            axes.push(xaxes[i]);
    for (i = 0; i < yaxes.length; ++i)
        if (yaxes[i])
            axes.push(yaxes[i]);
    return axes;
}
+
// Translate a canvas-pixel position {left, top} into data coordinates
// on every axis in use (keys "x1", "y2", ...), with plain x/y aliases
// for the first axes.
function canvasToAxisCoords(pos) {
    // return an object with x/y corresponding to all used axes
    var res = {}, i, axis;
    for (i = 0; i < xaxes.length; ++i) {
        axis = xaxes[i];
        if (axis && axis.used)
            res["x" + axis.n] = axis.c2p(pos.left);
    }

    for (i = 0; i < yaxes.length; ++i) {
        axis = yaxes[i];
        if (axis && axis.used)
            res["y" + axis.n] = axis.c2p(pos.top);
    }

    // convenience aliases for the first axes
    if (res.x1 !== undefined)
        res.x = res.x1;
    if (res.y1 !== undefined)
        res.y = res.y1;

    return res;
}
+
// Translate data coordinates back into canvas pixels, using the first
// x and the first y value found in pos (e.g. {x: ...} or {x2: ...}).
function axisToCanvasCoords(pos) {
    // get canvas coords from the first pair of x/y found in pos
    var res = {}, i, axis, key;

    for (i = 0; i < xaxes.length; ++i) {
        axis = xaxes[i];
        if (axis && axis.used) {
            key = "x" + axis.n;
            if (pos[key] == null && axis.n == 1)
                key = "x"; // plain "x" addresses the first x axis

            if (pos[key] != null) {
                res.left = axis.p2c(pos[key]);
                break;
            }
        }
    }

    for (i = 0; i < yaxes.length; ++i) {
        axis = yaxes[i];
        if (axis && axis.used) {
            key = "y" + axis.n;
            if (pos[key] == null && axis.n == 1)
                key = "y"; // plain "y" addresses the first y axis

            if (pos[key] != null) {
                res.top = axis.p2c(pos[key]);
                break;
            }
        }
    }

    return res;
}
+
// Fetch axis object `number` (1-based) from the given xaxes/yaxes
// array, lazily creating it with the generic axis defaults on first
// access.
function getOrCreateAxis(axes, number) {
    if (!axes[number - 1])
        axes[number - 1] = {
            n: number, // save the number for future reference
            direction: axes == xaxes ? "x" : "y",
            options: $.extend(true, {}, axes == xaxes ? options.xaxis : options.yaxis)
        };

    return axes[number - 1];
}
+
// Assign a color to every series that lacks one (generating palette
// variations when the configured colors run out), enable line
// rendering for series with nothing else enabled, and bind each
// series to its x/y axis objects.
function fillInSeriesOptions() {

    var neededColors = series.length, maxIndex = -1, i;

    // Subtract the number of series that already have fixed colors or
    // color indexes from the number that we still need to generate.

    for (i = 0; i < series.length; ++i) {
        var sc = series[i].color;
        if (sc != null) {
            neededColors--;
            if (typeof sc == "number" && sc > maxIndex) {
                maxIndex = sc;
            }
        }
    }

    // If any of the series have fixed color indexes, then we need to
    // generate at least as many colors as the highest index.

    if (neededColors <= maxIndex) {
        neededColors = maxIndex + 1;
    }

    // Generate all the colors, using first the option colors and then
    // variations on those colors once they're exhausted.

    var c, colors = [], colorPool = options.colors,
        colorPoolSize = colorPool.length, variation = 0;

    for (i = 0; i < neededColors; i++) {

        c = $.color.parse(colorPool[i % colorPoolSize] || "#666");

        // Each time we exhaust the colors in the pool we adjust
        // a scaling factor used to produce more variations on
        // those colors. The factor alternates negative/positive
        // to produce lighter/darker colors.

        // Reset the variation after every few cycles, or else
        // it will end up producing only white or black colors.

        if (i % colorPoolSize == 0 && i) {
            if (variation >= 0) {
                if (variation < 0.5) {
                    variation = -variation - 0.2;
                } else variation = 0;
            } else variation = -variation;
        }

        colors[i] = c.scale('rgb', 1 + variation);
    }

    // Finalize the series options, filling in their colors

    var colori = 0, s;
    for (i = 0; i < series.length; ++i) {
        s = series[i];

        // assign colors
        if (s.color == null) {
            s.color = colors[colori].toString();
            ++colori;
        }
        else if (typeof s.color == "number")
            // numeric color means "use palette entry N"
            s.color = colors[s.color].toString();

        // turn on lines automatically in case nothing is set
        if (s.lines.show == null) {
            var v, show = true;
            // any sub-option object with .show set (points, bars, ...)
            // means the user chose an explicit rendering style
            for (v in s)
                if (s[v] && s[v].show) {
                    show = false;
                    break;
                }
            if (show)
                s.lines.show = true;
        }

        // If nothing was provided for lines.zero, default it to match
        // lines.fill, since areas by default should extend to zero.

        if (s.lines.zero == null) {
            s.lines.zero = !!s.lines.fill;
        }

        // setup axes
        s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x"));
        s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y"));
    }
}
+
// Convert every series' raw data into the flat, typed "datapoints"
// format (a single array with `pointsize` slots per point) and
// compute per-axis data extremes (datamin/datamax) for autoscaling.
function processData() {
    var topSentry = Number.POSITIVE_INFINITY,
        bottomSentry = Number.NEGATIVE_INFINITY,
        // real infinities are mapped to this sentinel so they can take
        // part in comparisons without poisoning the autoscale result
        fakeInfinity = Number.MAX_VALUE,
        i, j, k, m, length,
        s, points, ps, x, y, axis, val, f, p,
        data, format;

    // widen an axis' recorded data range, ignoring the fake infinities
    function updateAxis(axis, min, max) {
        if (min < axis.datamin && min != -fakeInfinity)
            axis.datamin = min;
        if (max > axis.datamax && max != fakeInfinity)
            axis.datamax = max;
    }

    $.each(allAxes(), function (_, axis) {
        // init axis
        axis.datamin = topSentry;
        axis.datamax = bottomSentry;
        axis.used = false;
    });

    for (i = 0; i < series.length; ++i) {
        s = series[i];
        s.datapoints = { points: [] };

        executeHooks(hooks.processRawData, [ s, s.data, s.datapoints ]);
    }

    // first pass: clean and copy data
    for (i = 0; i < series.length; ++i) {
        s = series[i];

        data = s.data;
        format = s.datapoints.format;

        if (!format) {
            format = [];
            // find out how to copy
            format.push({ x: true, number: true, required: true });
            format.push({ y: true, number: true, required: true });

            // bars and filled lines carry a third coordinate: the
            // bottom of the bar/area, defaulting to zero
            if (s.bars.show || (s.lines.show && s.lines.fill)) {
                var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
                format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
                if (s.bars.horizontal) {
                    delete format[format.length - 1].y;
                    format[format.length - 1].x = true;
                }
            }

            s.datapoints.format = format;
        }

        if (s.datapoints.pointsize != null)
            continue; // already filled in

        s.datapoints.pointsize = format.length;

        ps = s.datapoints.pointsize;
        points = s.datapoints.points;

        var insertSteps = s.lines.show && s.lines.steps;
        s.xaxis.used = s.yaxis.used = true;

        for (j = k = 0; j < data.length; ++j, k += ps) {
            p = data[j];

            // a null input point becomes a run of nulls in the output,
            // which the drawing code treats as a gap in the line
            var nullify = p == null;
            if (!nullify) {
                for (m = 0; m < ps; ++m) {
                    val = p[m];
                    f = format[m];

                    if (f) {
                        if (f.number && val != null) {
                            val = +val; // convert to number
                            if (isNaN(val))
                                val = null;
                            else if (val == Infinity)
                                val = fakeInfinity;
                            else if (val == -Infinity)
                                val = -fakeInfinity;
                        }

                        if (val == null) {
                            if (f.required)
                                nullify = true;

                            if (f.defaultValue != null)
                                val = f.defaultValue;
                        }
                    }

                    points[k + m] = val;
                }
            }

            if (nullify) {
                for (m = 0; m < ps; ++m) {
                    val = points[k + m];
                    if (val != null) {
                        f = format[m];
                        // extract min/max info
                        if (f.autoscale) {
                            if (f.x) {
                                updateAxis(s.xaxis, val, val);
                            }
                            if (f.y) {
                                updateAxis(s.yaxis, val, val);
                            }
                        }
                    }
                    points[k + m] = null;
                }
            }
            else {
                // a little bit of line specific stuff that
                // perhaps shouldn't be here, but lacking
                // better means...
                if (insertSteps && k > 0
                    && points[k - ps] != null
                    && points[k - ps] != points[k]
                    && points[k - ps + 1] != points[k + 1]) {
                    // copy the point to make room for a middle point
                    for (m = 0; m < ps; ++m)
                        points[k + ps + m] = points[k + m];

                    // middle point has same y
                    points[k + 1] = points[k - ps + 1];

                    // we've added a point, better reflect that
                    k += ps;
                }
            }
        }
    }

    // give the hooks a chance to run
    for (i = 0; i < series.length; ++i) {
        s = series[i];

        executeHooks(hooks.processDatapoints, [ s, s.datapoints]);
    }

    // second pass: find datamax/datamin for auto-scaling
    for (i = 0; i < series.length; ++i) {
        s = series[i];
        points = s.datapoints.points;
        ps = s.datapoints.pointsize;
        format = s.datapoints.format;

        var xmin = topSentry, ymin = topSentry,
            xmax = bottomSentry, ymax = bottomSentry;

        for (j = 0; j < points.length; j += ps) {
            if (points[j] == null)
                continue;

            for (m = 0; m < ps; ++m) {
                val = points[j + m];
                f = format[m];
                if (!f || f.autoscale === false || val == fakeInfinity || val == -fakeInfinity)
                    continue;

                if (f.x) {
                    if (val < xmin)
                        xmin = val;
                    if (val > xmax)
                        xmax = val;
                }
                if (f.y) {
                    if (val < ymin)
                        ymin = val;
                    if (val > ymax)
                        ymax = val;
                }
            }
        }

        if (s.bars.show) {
            // make sure we got room for the bar on the dancing floor
            var delta;

            switch (s.bars.align) {
                case "left":
                    delta = 0;
                    break;
                case "right":
                    delta = -s.bars.barWidth;
                    break;
                case "center":
                    delta = -s.bars.barWidth / 2;
                    break;
                default:
                    throw new Error("Invalid bar alignment: " + s.bars.align);
            }

            if (s.bars.horizontal) {
                ymin += delta;
                ymax += delta + s.bars.barWidth;
            }
            else {
                xmin += delta;
                xmax += delta + s.bars.barWidth;
            }
        }

        updateAxis(s.xaxis, xmin, xmax);
        updateAxis(s.yaxis, ymin, ymax);
    }

    $.each(allAxes(), function (_, axis) {
        // sentries left untouched mean "no data on this axis"
        if (axis.datamin == topSentry)
            axis.datamin = null;
        if (axis.datamax == bottomSentry)
            axis.datamax = null;
    });
}
+
// (Re)create the base and overlay canvases inside the placeholder
// element, shutting down any previous plot bound to the same element.
function setupCanvases() {

    // Make sure the placeholder is clear of everything except canvases
    // from a previous plot in this container that we'll try to re-use.

    placeholder.css("padding", 0) // padding messes up the positioning
        .children(":not(.flot-base,.flot-overlay)").remove();

    if (placeholder.css("position") == 'static')
        placeholder.css("position", "relative"); // for positioning labels and overlay

    surface = new Canvas("flot-base", placeholder);
    overlay = new Canvas("flot-overlay", placeholder); // overlay canvas for interactive features

    ctx = surface.context;
    octx = overlay.context;

    // define which element we're listening for events on
    eventHolder = $(overlay.element).unbind();

    // If we're re-using a plot object, shut down the old one

    var existing = placeholder.data("plot");

    if (existing) {
        existing.shutdown();
        overlay.clear();
    }

    // save in case we get replotted
    placeholder.data("plot", plot);
}
+
// Hook up mouse interaction on the overlay canvas according to the
// grid.hoverable/grid.clickable options, then let plugins bind theirs.
function bindEvents() {
    // bind events
    if (options.grid.hoverable) {
        eventHolder.mousemove(onMouseMove);

        // Use bind, rather than .mouseleave, because we officially
        // still support jQuery 1.2.6, which doesn't define a shortcut
        // for mouseenter or mouseleave. This was a bug/oversight that
        // was fixed somewhere around 1.3.x. We can return to using
        // .mouseleave when we drop support for 1.2.6.

        eventHolder.bind("mouseleave", onMouseLeave);
    }

    if (options.grid.clickable)
        eventHolder.click(onClick);

    executeHooks(hooks.bindEvents, [eventHolder]);
}
+
// Tear down the plot: cancel any pending overlay redraw, detach the
// mouse handlers installed by bindEvents, and give plugins a chance
// to clean up via the shutdown hook.
function shutdown() {
    if (redrawTimeout)
        clearTimeout(redrawTimeout);

    eventHolder.unbind("mousemove", onMouseMove);
    eventHolder.unbind("mouseleave", onMouseLeave);
    eventHolder.unbind("click", onClick);

    executeHooks(hooks.shutdown, [eventHolder]);
}
+
// Attach p2c (point-to-canvas) and c2p (canvas-to-point) conversion
// functions to the axis, honoring any user-supplied transform /
// inverseTransform. Assumes plotWidth/plotHeight are already set.
function setTransformationHelpers(axis) {
    // set helper functions on the axis, assumes plot area
    // has been computed already

    function identity(x) { return x; }

    var s, m, t = axis.options.transform || identity,
        it = axis.options.inverseTransform;

    // precompute how much the axis is scaling a point
    // in canvas space
    if (axis.direction == "x") {
        s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min));
        m = Math.min(t(axis.max), t(axis.min));
    }
    else {
        s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min));
        // canvas y grows downwards, so flip the sign
        s = -s;
        m = Math.max(t(axis.max), t(axis.min));
    }

    // data point to canvas coordinate
    if (t == identity) // slight optimization
        axis.p2c = function (p) { return (p - m) * s; };
    else
        axis.p2c = function (p) { return (t(p) - m) * s; };
    // canvas coordinate to data point
    if (!it)
        axis.c2p = function (c) { return m + c / s; };
    else
        axis.c2p = function (c) { return it(m + c / s); };
}
+
// Compute axis.labelWidth/labelHeight as the bounding box of the
// axis' tick labels (explicit options.labelWidth/labelHeight win).
function measureTickLabels(axis) {

    var opts = axis.options,
        ticks = axis.ticks || [],
        labelWidth = opts.labelWidth || 0,
        labelHeight = opts.labelHeight || 0,
        // x-axis labels may wrap, so cap their width at an equal share
        // of the canvas. Note: the expression is parenthesized and the
        // line ends with a comma — the original ended it with ';',
        // which both applied the ternary to (labelWidth || direction)
        // because of operator precedence, and terminated the var list,
        // leaking legacyStyles/layer/font into the global scope.
        maxWidth = labelWidth || (axis.direction == "x" ? Math.floor(surface.width / (ticks.length || 1)) : null),
        legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
        layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
        font = opts.font || "flot-tick-label tickLabel";

    for (var i = 0; i < ticks.length; ++i) {

        var t = ticks[i];

        if (!t.label)
            continue;

        // measure (and cache) the rendered size of this label
        var info = surface.getTextInfo(layer, t.label, font, null, maxWidth);

        labelWidth = Math.max(labelWidth, info.width);
        labelHeight = Math.max(labelHeight, info.height);
    }

    // explicit options always override the measured values
    axis.labelWidth = opts.labelWidth || labelWidth;
    axis.labelHeight = opts.labelHeight || labelHeight;
}
+
// Reserve space for one axis along its own dimension by growing the
// plot offset and recording the axis' box; the cross dimension is
// fixed up later by allocateAxisBoxSecondPhase.
function allocateAxisBoxFirstPhase(axis) {
    // find the bounding box of the axis by looking at label
    // widths/heights and ticks, make room by diminishing the
    // plotOffset; this first phase only looks at one
    // dimension per axis, the other dimension depends on the
    // other axes so will have to wait

    var lw = axis.labelWidth,
        lh = axis.labelHeight,
        pos = axis.options.position,
        tickLength = axis.options.tickLength,
        axisMargin = options.grid.axisMargin,
        padding = options.grid.labelMargin,
        all = axis.direction == "x" ? xaxes : yaxes,
        index, innermost;

    // determine axis margin
    var samePosition = $.grep(all, function (a) {
        return a && a.options.position == pos && a.reserveSpace;
    });
    if ($.inArray(axis, samePosition) == samePosition.length - 1)
        axisMargin = 0; // outermost

    // determine tick length - if we're innermost, we can use "full"
    if (tickLength == null) {
        var sameDirection = $.grep(all, function (a) {
            return a && a.reserveSpace;
        });

        innermost = $.inArray(axis, sameDirection) == 0;
        if (innermost)
            tickLength = "full";
        else
            tickLength = 5;
    }

    // numeric tick lengths eat into the label padding
    if (!isNaN(+tickLength))
        padding += +tickLength;

    // compute box
    if (axis.direction == "x") {
        lh += padding;

        if (pos == "bottom") {
            plotOffset.bottom += lh + axisMargin;
            axis.box = { top: surface.height - plotOffset.bottom, height: lh };
        }
        else {
            axis.box = { top: plotOffset.top + axisMargin, height: lh };
            plotOffset.top += lh + axisMargin;
        }
    }
    else {
        lw += padding;

        if (pos == "left") {
            axis.box = { left: plotOffset.left + axisMargin, width: lw };
            plotOffset.left += lw + axisMargin;
        }
        else {
            plotOffset.right += lw + axisMargin;
            axis.box = { left: surface.width - plotOffset.right, width: lw };
        }
    }

    // save for future reference
    axis.position = pos;
    axis.tickLength = tickLength;
    axis.box.padding = padding;
    axis.innermost = innermost;
}
+
// Phase two: with the plot offset now final, extend each axis box
// along the cross dimension so it spans the whole plot area (plus
// half a label on each side for overhanging end labels).
function allocateAxisBoxSecondPhase(axis) {
    // now that all axis boxes have been placed in one
    // dimension, we can set the remaining dimension coordinates
    if (axis.direction == "x") {
        axis.box.left = plotOffset.left - axis.labelWidth / 2;
        axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth;
    }
    else {
        axis.box.top = plotOffset.top - axis.labelHeight / 2;
        axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight;
    }
}
+
// Grow the plot offset so point symbols and half-overhanging axis end
// labels are not clipped at the canvas edge.
function adjustLayoutForThingsStickingOut() {
    // possibly adjust plot offset to ensure everything stays
    // inside the canvas and isn't clipped off

    var minMargin = options.grid.minBorderMargin,
        margins = { x: 0, y: 0 }, i, axis;

    // check stuff from the plot (FIXME: this should just read
    // a value from the series, otherwise it's impossible to
    // customize)
    if (minMargin == null) {
        minMargin = 0;
        for (i = 0; i < series.length; ++i)
            minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth/2));
    }

    margins.x = margins.y = Math.ceil(minMargin);

    // check axis labels, note we don't check the actual
    // labels but instead use the overall width/height to not
    // jump as much around with replots
    $.each(allAxes(), function (_, axis) {
        var dir = axis.direction;
        if (axis.reserveSpace)
            margins[dir] = Math.ceil(Math.max(margins[dir], (dir == "x" ? axis.labelWidth : axis.labelHeight) / 2));
    });

    plotOffset.left = Math.max(margins.x, plotOffset.left);
    plotOffset.right = Math.max(margins.x, plotOffset.right);
    plotOffset.top = Math.max(margins.y, plotOffset.top);
    plotOffset.bottom = Math.max(margins.y, plotOffset.bottom);
}
+
// Compute the plot offset and axis boxes, generate ticks when the
// grid is visible, and install the final axis scaling helpers; ends
// by drawing axis labels and (re)inserting the legend.
function setupGrid() {
    var i, axes = allAxes(), showGrid = options.grid.show;

    // Initialize the plot's offset from the edge of the canvas

    for (var a in plotOffset) {
        var margin = options.grid.margin || 0;
        // margin may be a single number or a per-side object
        plotOffset[a] = typeof margin == "number" ? margin : margin[a] || 0;
    }

    executeHooks(hooks.processOffset, [plotOffset]);

    // If the grid is visible, add its border width to the offset

    for (var a in plotOffset) {
        if(typeof(options.grid.borderWidth) == "object") {
            plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0;
        }
        else {
            plotOffset[a] += showGrid ? options.grid.borderWidth : 0;
        }
    }

    // init axes
    $.each(axes, function (_, axis) {
        axis.show = axis.options.show;
        if (axis.show == null)
            axis.show = axis.used; // by default an axis is visible if it's got data

        axis.reserveSpace = axis.show || axis.options.reserveSpace;

        setRange(axis);
    });

    if (showGrid) {

        var allocatedAxes = $.grep(axes, function (axis) { return axis.reserveSpace; });

        $.each(allocatedAxes, function (_, axis) {
            // make the ticks
            setupTickGeneration(axis);
            setTicks(axis);
            snapRangeToTicks(axis, axis.ticks);
            // find labelWidth/Height for axis
            measureTickLabels(axis);
        });

        // with all dimensions calculated, we can compute the
        // axis bounding boxes, start from the outside
        // (reverse order)
        for (i = allocatedAxes.length - 1; i >= 0; --i)
            allocateAxisBoxFirstPhase(allocatedAxes[i]);

        // make sure we've got enough space for things that
        // might stick out
        adjustLayoutForThingsStickingOut();

        $.each(allocatedAxes, function (_, axis) {
            allocateAxisBoxSecondPhase(axis);
        });
    }

    plotWidth = surface.width - plotOffset.left - plotOffset.right;
    plotHeight = surface.height - plotOffset.bottom - plotOffset.top;

    // now we got the proper plot dimensions, we can compute the scaling
    $.each(axes, function (_, axis) {
        setTransformationHelpers(axis);
    });

    if (showGrid) {
        drawAxisLabels();
    }

    insertLegend();
}
+
// Compute the final axis.min/axis.max from the explicit option values
// and the data extremes, widening degenerate (min == max) ranges and
// applying the autoscale margin to endpoints the user didn't pin.
function setRange(axis) {
    var opts = axis.options;
    var min = +(opts.min != null ? opts.min : axis.datamin);
    var max = +(opts.max != null ? opts.max : axis.datamax);
    var delta = max - min;

    if (delta == 0) {
        // Degenerate range: nudge the endpoints apart. Around zero a
        // relative amount would be useless, so use an absolute one.
        var widen = max == 0 ? 1 : 0.01;

        if (opts.min == null)
            min = min - widen;
        // Always widen max when min was pinned by the user, so we
        // never end up with min == max (which the transform helpers
        // cannot handle).
        if (opts.max == null || opts.min != null)
            max = max + widen;
    }
    else if (opts.autoscaleMargin != null) {
        var margin = opts.autoscaleMargin;

        if (opts.min == null) {
            min = min - delta * margin;
            // don't dip below zero when all the data is non-negative
            if (min < 0 && axis.datamin != null && axis.datamin >= 0)
                min = 0;
        }
        if (opts.max == null) {
            max = max + delta * margin;
            // mirror image: don't rise above zero for all-negative data
            if (max > 0 && axis.datamax != null && axis.datamax <= 0)
                max = 0;
        }
    }

    axis.min = min;
    axis.max = max;
}
+
// Choose a "nice" tick spacing and decimal count for the axis and
// install the default tickGenerator/tickFormatter (unless a plugin or
// user already supplied them); also supports aligning this axis'
// ticks with another axis via alignTicksWithAxis.
function setupTickGeneration(axis) {
    var opts = axis.options;

    // estimate number of ticks
    var noTicks;
    if (typeof opts.ticks == "number" && opts.ticks > 0)
        noTicks = opts.ticks;
    else
        // heuristic based on the model a*sqrt(x) fitted to
        // some data points that seemed reasonable
        noTicks = 0.3 * Math.sqrt(axis.direction == "x" ? surface.width : surface.height);

    var delta = (axis.max - axis.min) / noTicks,
        dec = -Math.floor(Math.log(delta) / Math.LN10),
        maxDec = opts.tickDecimals;

    if (maxDec != null && dec > maxDec) {
        dec = maxDec;
    }

    var magn = Math.pow(10, -dec),
        norm = delta / magn, // norm is between 1.0 and 10.0
        size;

    // snap the spacing to 1, 2, 2.5, 5 or 10 times a power of ten
    if (norm < 1.5) {
        size = 1;
    } else if (norm < 3) {
        size = 2;
        // special case for 2.5, requires an extra decimal
        if (norm > 2.25 && (maxDec == null || dec + 1 <= maxDec)) {
            size = 2.5;
            ++dec;
        }
    } else if (norm < 7.5) {
        size = 5;
    } else {
        size = 10;
    }

    size *= magn;

    if (opts.minTickSize != null && size < opts.minTickSize) {
        size = opts.minTickSize;
    }

    axis.delta = delta;
    axis.tickDecimals = Math.max(0, maxDec != null ? maxDec : dec);
    axis.tickSize = opts.tickSize || size;

    // Time mode was moved to a plug-in in 0.8, but since so many people use this
    // we'll add an especially friendly make sure they remembered to include it.

    if (opts.mode == "time" && !axis.tickGenerator) {
        throw new Error("Time mode requires the flot.time plugin.");
    }

    // Flot supports base-10 axes; any other mode else is handled by a plug-in,
    // like flot.time.js.

    if (!axis.tickGenerator) {

        axis.tickGenerator = function (axis) {

            var ticks = [],
                start = floorInBase(axis.min, axis.tickSize),
                i = 0,
                v = Number.NaN,
                prev;

            do {
                // "v != prev" guards against an infinite loop if the
                // tick size underflows and v stops advancing
                prev = v;
                v = start + i * axis.tickSize;
                ticks.push(v);
                ++i;
            } while (v < axis.max && v != prev);
            return ticks;
        };

        axis.tickFormatter = function (value, axis) {

            var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1;
            var formatted = "" + Math.round(value * factor) / factor;

            // If tickDecimals was specified, ensure that we have exactly that
            // much precision; otherwise default to the value's own precision.

            if (axis.tickDecimals != null) {
                var decimal = formatted.indexOf(".");
                var precision = decimal == -1 ? 0 : formatted.length - decimal - 1;
                if (precision < axis.tickDecimals) {
                    // pad with zeros borrowed from the factor's digits
                    return (precision ? formatted : formatted + ".") + ("" + factor).substr(1, axis.tickDecimals - precision);
                }
            }

            return formatted;
        };
    }

    if ($.isFunction(opts.tickFormatter))
        axis.tickFormatter = function (v, axis) { return "" + opts.tickFormatter(v, axis); };

    if (opts.alignTicksWithAxis != null) {
        var otherAxis = (axis.direction == "x" ? xaxes : yaxes)[opts.alignTicksWithAxis - 1];
        if (otherAxis && otherAxis.used && otherAxis != axis) {
            // consider snapping min/max to outermost nice ticks
            var niceTicks = axis.tickGenerator(axis);
            if (niceTicks.length > 0) {
                if (opts.min == null)
                    axis.min = Math.min(axis.min, niceTicks[0]);
                if (opts.max == null && niceTicks.length > 1)
                    axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]);
            }

            axis.tickGenerator = function (axis) {
                // copy ticks, scaled to this axis
                var ticks = [], v, i;
                for (i = 0; i < otherAxis.ticks.length; ++i) {
                    v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min);
                    v = axis.min + v * (axis.max - axis.min);
                    ticks.push(v);
                }
                return ticks;
            };

            // we might need an extra decimal since forced
            // ticks don't necessarily fit naturally
            if (!axis.mode && opts.tickDecimals == null) {
                var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1),
                    ts = axis.tickGenerator(axis);

                // only proceed if the tick interval rounded
                // with an extra decimal doesn't give us a
                // zero at end
                if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec))))
                    axis.tickDecimals = extraDec;
            }
        }
    }
}
+
// Build axis.ticks as a clean list of {v, label} pairs, taking the
// raw ticks either from the generator or from the user's option
// (an array of values/[value, label] pairs, or a function).
function setTicks(axis) {
    var oticks = axis.options.ticks, ticks = [];
    if (oticks == null || (typeof oticks == "number" && oticks > 0))
        ticks = axis.tickGenerator(axis);
    else if (oticks) {
        if ($.isFunction(oticks))
            // generate the ticks
            ticks = oticks(axis);
        else
            ticks = oticks;
    }

    // clean up/labelify the supplied ticks, copy them over
    var i, v;
    axis.ticks = [];
    for (i = 0; i < ticks.length; ++i) {
        var label = null;
        var t = ticks[i];
        if (typeof t == "object") {
            // tick supplied as [value, label]
            v = +t[0];
            if (t.length > 1)
                label = t[1];
        }
        else
            v = +t;
        if (label == null)
            label = axis.tickFormatter(v, axis);
        if (!isNaN(v))
            axis.ticks.push({ v: v, label: label });
    }
}
+
// Widen the axis range so it lands exactly on the outermost generated
// ticks — but only when autoscaling; an explicit option min/max wins.
function snapRangeToTicks(axis, ticks) {
    var opts = axis.options;

    if (!opts.autoscaleMargin || ticks.length == 0)
        return;

    if (opts.min == null)
        axis.min = Math.min(axis.min, ticks[0].v);
    if (opts.max == null && ticks.length > 1)
        axis.max = Math.max(axis.max, ticks[ticks.length - 1].v);
}
+
// Full redraw of the base canvas: background, grid (below or above
// the data as configured), every series, then the draw hooks, and
// finally a request to refresh the interactive overlay.
function draw() {

    surface.clear();

    executeHooks(hooks.drawBackground, [ctx]);

    var grid = options.grid;

    // draw background, if any
    if (grid.show && grid.backgroundColor)
        drawBackground();

    if (grid.show && !grid.aboveData) {
        drawGrid();
    }

    for (var i = 0; i < series.length; ++i) {
        executeHooks(hooks.drawSeries, [ctx, series[i]]);
        drawSeries(series[i]);
    }

    executeHooks(hooks.draw, [ctx]);

    if (grid.show && grid.aboveData) {
        drawGrid();
    }

    surface.render();

    // A draw implies that either the axes or data have changed, so we
    // should probably update the overlay highlights as well.

    triggerRedrawOverlay();
}
+
// Pull a {from, to, axis} range for one coordinate ("x" or "y") out
// of a markings/selection-style ranges object, supporting both the
// "xNaxis" keys and the legacy "x1"/"x2" spellings; from/to are
// swapped if given in reverse order.
function extractRange(ranges, coord) {
    var axis, from, to, key, axes = allAxes();

    for (var i = 0; i < axes.length; ++i) {
        axis = axes[i];
        if (axis.direction == coord) {
            key = coord + axis.n + "axis";
            if (!ranges[key] && axis.n == 1)
                key = coord + "axis"; // support x1axis as xaxis
            if (ranges[key]) {
                from = ranges[key].from;
                to = ranges[key].to;
                break;
            }
        }
    }

    // backwards-compat stuff - to be removed in future
    if (!ranges[key]) {
        axis = coord == "x" ? xaxes[0] : yaxes[0];
        from = ranges[coord + "1"];
        to = ranges[coord + "2"];
    }

    // auto-reverse as an added bonus
    if (from != null && to != null && from > to) {
        var tmp = from;
        from = to;
        to = tmp;
    }

    return { from: from, to: to, axis: axis };
}
+
// Fill the plot area with the configured background color/gradient.
function drawBackground() {
    ctx.save();
    ctx.translate(plotOffset.left, plotOffset.top);

    ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)");
    ctx.fillRect(0, 0, plotWidth, plotHeight);
    ctx.restore();
}
+
+ function drawGrid() {
+ var i, axes, bw, bc;
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ // draw markings
+ var markings = options.grid.markings;
+ if (markings) {
+ if ($.isFunction(markings)) {
+ axes = plot.getAxes();
+ // xmin etc. is backwards compatibility, to be
+ // removed in the future
+ axes.xmin = axes.xaxis.min;
+ axes.xmax = axes.xaxis.max;
+ axes.ymin = axes.yaxis.min;
+ axes.ymax = axes.yaxis.max;
+
+ markings = markings(axes);
+ }
+
+ for (i = 0; i < markings.length; ++i) {
+ var m = markings[i],
+ xrange = extractRange(m, "x"),
+ yrange = extractRange(m, "y");
+
+ // fill in missing
+ if (xrange.from == null)
+ xrange.from = xrange.axis.min;
+ if (xrange.to == null)
+ xrange.to = xrange.axis.max;
+ if (yrange.from == null)
+ yrange.from = yrange.axis.min;
+ if (yrange.to == null)
+ yrange.to = yrange.axis.max;
+
+ // clip
+ if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max ||
+ yrange.to < yrange.axis.min || yrange.from > yrange.axis.max)
+ continue;
+
+ xrange.from = Math.max(xrange.from, xrange.axis.min);
+ xrange.to = Math.min(xrange.to, xrange.axis.max);
+ yrange.from = Math.max(yrange.from, yrange.axis.min);
+ yrange.to = Math.min(yrange.to, yrange.axis.max);
+
+ if (xrange.from == xrange.to && yrange.from == yrange.to)
+ continue;
+
+ // then draw
+ xrange.from = xrange.axis.p2c(xrange.from);
+ xrange.to = xrange.axis.p2c(xrange.to);
+ yrange.from = yrange.axis.p2c(yrange.from);
+ yrange.to = yrange.axis.p2c(yrange.to);
+
+ if (xrange.from == xrange.to || yrange.from == yrange.to) {
+ // draw line
+ ctx.beginPath();
+ ctx.strokeStyle = m.color || options.grid.markingsColor;
+ ctx.lineWidth = m.lineWidth || options.grid.markingsLineWidth;
+ ctx.moveTo(xrange.from, yrange.from);
+ ctx.lineTo(xrange.to, yrange.to);
+ ctx.stroke();
+ }
+ else {
+ // fill area
+ ctx.fillStyle = m.color || options.grid.markingsColor;
+ ctx.fillRect(xrange.from, yrange.to,
+ xrange.to - xrange.from,
+ yrange.from - yrange.to);
+ }
+ }
+ }
+
+ // draw the ticks
+ axes = allAxes();
+ bw = options.grid.borderWidth;
+
+ for (var j = 0; j < axes.length; ++j) {
+ var axis = axes[j], box = axis.box,
+ t = axis.tickLength, x, y, xoff, yoff;
+ if (!axis.show || axis.ticks.length == 0)
+ continue;
+
+ ctx.lineWidth = 1;
+
+ // find the edges
+ if (axis.direction == "x") {
+ x = 0;
+ if (t == "full")
+ y = (axis.position == "top" ? 0 : plotHeight);
+ else
+ y = box.top - plotOffset.top + (axis.position == "top" ? box.height : 0);
+ }
+ else {
+ y = 0;
+ if (t == "full")
+ x = (axis.position == "left" ? 0 : plotWidth);
+ else
+ x = box.left - plotOffset.left + (axis.position == "left" ? box.width : 0);
+ }
+
+ // draw tick bar
+ if (!axis.innermost) {
+ ctx.strokeStyle = axis.options.color;
+ ctx.beginPath();
+ xoff = yoff = 0;
+ if (axis.direction == "x")
+ xoff = plotWidth + 1;
+ else
+ yoff = plotHeight + 1;
+
+ if (ctx.lineWidth == 1) {
+ if (axis.direction == "x") {
+ y = Math.floor(y) + 0.5;
+ } else {
+ x = Math.floor(x) + 0.5;
+ }
+ }
+
+ ctx.moveTo(x, y);
+ ctx.lineTo(x + xoff, y + yoff);
+ ctx.stroke();
+ }
+
+ // draw ticks
+
+ ctx.strokeStyle = axis.options.tickColor;
+
+ ctx.beginPath();
+ for (i = 0; i < axis.ticks.length; ++i) {
+ var v = axis.ticks[i].v;
+
+ xoff = yoff = 0;
+
+ if (isNaN(v) || v < axis.min || v > axis.max
+ // skip those lying on the axes if we got a border
+ || (t == "full"
+ && ((typeof bw == "object" && bw[axis.position] > 0) || bw > 0)
+ && (v == axis.min || v == axis.max)))
+ continue;
+
+ if (axis.direction == "x") {
+ x = axis.p2c(v);
+ yoff = t == "full" ? -plotHeight : t;
+
+ if (axis.position == "top")
+ yoff = -yoff;
+ }
+ else {
+ y = axis.p2c(v);
+ xoff = t == "full" ? -plotWidth : t;
+
+ if (axis.position == "left")
+ xoff = -xoff;
+ }
+
+ if (ctx.lineWidth == 1) {
+ if (axis.direction == "x")
+ x = Math.floor(x) + 0.5;
+ else
+ y = Math.floor(y) + 0.5;
+ }
+
+ ctx.moveTo(x, y);
+ ctx.lineTo(x + xoff, y + yoff);
+ }
+
+ ctx.stroke();
+ }
+
+
+ // draw border
+ if (bw) {
+ // If either borderWidth or borderColor is an object, then draw the border
+ // line by line instead of as one rectangle
+ bc = options.grid.borderColor;
+ if(typeof bw == "object" || typeof bc == "object") {
+ if (typeof bw !== "object") {
+ bw = {top: bw, right: bw, bottom: bw, left: bw};
+ }
+ if (typeof bc !== "object") {
+ bc = {top: bc, right: bc, bottom: bc, left: bc};
+ }
+
+ if (bw.top > 0) {
+ ctx.strokeStyle = bc.top;
+ ctx.lineWidth = bw.top;
+ ctx.beginPath();
+ ctx.moveTo(0 - bw.left, 0 - bw.top/2);
+ ctx.lineTo(plotWidth, 0 - bw.top/2);
+ ctx.stroke();
+ }
+
+ if (bw.right > 0) {
+ ctx.strokeStyle = bc.right;
+ ctx.lineWidth = bw.right;
+ ctx.beginPath();
+ ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top);
+ ctx.lineTo(plotWidth + bw.right / 2, plotHeight);
+ ctx.stroke();
+ }
+
+ if (bw.bottom > 0) {
+ ctx.strokeStyle = bc.bottom;
+ ctx.lineWidth = bw.bottom;
+ ctx.beginPath();
+ ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2);
+ ctx.lineTo(0, plotHeight + bw.bottom / 2);
+ ctx.stroke();
+ }
+
+ if (bw.left > 0) {
+ ctx.strokeStyle = bc.left;
+ ctx.lineWidth = bw.left;
+ ctx.beginPath();
+ ctx.moveTo(0 - bw.left/2, plotHeight + bw.bottom);
+ ctx.lineTo(0- bw.left/2, 0);
+ ctx.stroke();
+ }
+ }
+ else {
+ ctx.lineWidth = bw;
+ ctx.strokeStyle = options.grid.borderColor;
+ ctx.strokeRect(-bw/2, -bw/2, plotWidth + bw, plotHeight + bw);
+ }
+ }
+
+ ctx.restore();
+ }
+
function drawAxisLabels() {
    // Lay out tick labels for every visible axis on the text layer,
    // positioned against the axis box computed during layout.
    var axes = allAxes();

    for (var k = 0; k < axes.length; ++k) {
        var axis = axes[k];
        if (!axis.show || axis.ticks.length == 0)
            continue;

        var box = axis.box,
            legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
            layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
            font = axis.options.font || "flot-tick-label tickLabel";

        // Clear out labels left over from a previous draw of this axis.
        surface.removeText(layer);

        for (var i = 0; i < axis.ticks.length; ++i) {
            var tick = axis.ticks[i];
            // Skip unlabeled ticks and ticks outside the visible range.
            if (!tick.label || tick.v < axis.min || tick.v > axis.max)
                continue;

            var x, y, halign, valign;
            if (axis.direction == "x") {
                halign = "center";
                x = plotOffset.left + axis.p2c(tick.v);
                if (axis.position == "bottom") {
                    y = box.top + box.padding;
                } else {
                    y = box.top + box.height - box.padding;
                    valign = "bottom";
                }
            } else {
                valign = "middle";
                y = plotOffset.top + axis.p2c(tick.v);
                if (axis.position == "left") {
                    x = box.left + box.width - box.padding;
                    halign = "right";
                } else {
                    x = box.left + box.padding;
                }
            }

            surface.addText(layer, x, y, tick.label, font, null, null, halign, valign);
        }
    }
}
+
function drawSeries(series) {
    // Render each enabled representation of the series, back to front:
    // lines first, then bars, then points on top.
    var renderers = [
        ["lines", drawSeriesLines],
        ["bars", drawSeriesBars],
        ["points", drawSeriesPoints]
    ];

    for (var i = 0; i < renderers.length; ++i) {
        if (series[renderers[i][0]].show)
            renderers[i][1](series);
    }
}
+
// Draw a line series: optional drop shadow, optional filled area under
// the curve, then the line itself. The canvas context is translated by
// the plot offset, so all drawing below works in plot-area coordinates.
function drawSeriesLines(series) {
    // Stroke the polyline through the data points, clipping each segment
    // against the axis ranges. xoffset/yoffset shift the whole line in
    // canvas pixels (used to offset the shadow passes).
    function plotLine(datapoints, xoffset, yoffset, axisx, axisy) {
        var points = datapoints.points,
            ps = datapoints.pointsize,
            prevx = null, prevy = null;

        ctx.beginPath();
        for (var i = ps; i < points.length; i += ps) {
            var x1 = points[i - ps], y1 = points[i - ps + 1],
                x2 = points[i], y2 = points[i + 1];

            // a null x marks a gap in the data; skip that segment
            if (x1 == null || x2 == null)
                continue;

            // clip with ymin
            if (y1 <= y2 && y1 < axisy.min) {
                if (y2 < axisy.min)
                    continue; // line segment is outside
                // compute new intersection point
                x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
                y1 = axisy.min;
            }
            else if (y2 <= y1 && y2 < axisy.min) {
                if (y1 < axisy.min)
                    continue;
                x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
                y2 = axisy.min;
            }

            // clip with ymax
            if (y1 >= y2 && y1 > axisy.max) {
                if (y2 > axisy.max)
                    continue;
                x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
                y1 = axisy.max;
            }
            else if (y2 >= y1 && y2 > axisy.max) {
                if (y1 > axisy.max)
                    continue;
                x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
                y2 = axisy.max;
            }

            // clip with xmin
            if (x1 <= x2 && x1 < axisx.min) {
                if (x2 < axisx.min)
                    continue;
                y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
                x1 = axisx.min;
            }
            else if (x2 <= x1 && x2 < axisx.min) {
                if (x1 < axisx.min)
                    continue;
                y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
                x2 = axisx.min;
            }

            // clip with xmax
            if (x1 >= x2 && x1 > axisx.max) {
                if (x2 > axisx.max)
                    continue;
                y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
                x1 = axisx.max;
            }
            else if (x2 >= x1 && x2 > axisx.max) {
                if (x1 > axisx.max)
                    continue;
                y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
                x2 = axisx.max;
            }

            // only moveTo when this segment does not continue the
            // previous one (clipping or gaps break the path)
            if (x1 != prevx || y1 != prevy)
                ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset);

            prevx = x2;
            prevy = y2;
            ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset);
        }
        ctx.stroke();
    }

    // Fill the area between the line and the axis (y = 0, clamped into
    // the visible range). Each run of contiguous points is traced
    // forward along its top edge and then backward along the bottom;
    // for pointsize 2 the bottom is the flat baseline, for pointsize 3
    // (e.g. stacked data) it is the per-point base value at index 2.
    function plotLineArea(datapoints, axisx, axisy) {
        var points = datapoints.points,
            ps = datapoints.pointsize,
            bottom = Math.min(Math.max(0, axisy.min), axisy.max),
            i = 0, top, areaOpen = false,
            ypos = 1, segmentStart = 0, segmentEnd = 0;

        // we process each segment in two turns, first forward
        // direction to sketch out top, then once we hit the
        // end we go backwards to sketch the bottom
        while (true) {
            if (ps > 0 && i > points.length + ps)
                break;

            i += ps; // ps is negative if going backwards

            var x1 = points[i - ps],
                y1 = points[i - ps + ypos],
                x2 = points[i], y2 = points[i + ypos];

            if (areaOpen) {
                if (ps > 0 && x1 != null && x2 == null) {
                    // at turning point
                    segmentEnd = i;
                    ps = -ps;
                    ypos = 2;
                    continue;
                }

                if (ps < 0 && i == segmentStart + ps) {
                    // done with the reverse sweep
                    ctx.fill();
                    areaOpen = false;
                    ps = -ps;
                    ypos = 1;
                    i = segmentStart = segmentEnd + ps;
                    continue;
                }
            }

            if (x1 == null || x2 == null)
                continue;

            // clip x values

            // clip with xmin
            if (x1 <= x2 && x1 < axisx.min) {
                if (x2 < axisx.min)
                    continue;
                y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
                x1 = axisx.min;
            }
            else if (x2 <= x1 && x2 < axisx.min) {
                if (x1 < axisx.min)
                    continue;
                y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
                x2 = axisx.min;
            }

            // clip with xmax
            if (x1 >= x2 && x1 > axisx.max) {
                if (x2 > axisx.max)
                    continue;
                y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
                x1 = axisx.max;
            }
            else if (x2 >= x1 && x2 > axisx.max) {
                if (x1 > axisx.max)
                    continue;
                y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
                x2 = axisx.max;
            }

            if (!areaOpen) {
                // open area
                ctx.beginPath();
                ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom));
                areaOpen = true;
            }

            // now first check the case where both is outside
            if (y1 >= axisy.max && y2 >= axisy.max) {
                ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max));
                ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max));
                continue;
            }
            else if (y1 <= axisy.min && y2 <= axisy.min) {
                ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min));
                ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min));
                continue;
            }

            // else it's a bit more complicated, there might
            // be a flat maxed out rectangle first, then a
            // triangular cutout or reverse; to find these
            // keep track of the current x values
            var x1old = x1, x2old = x2;

            // clip the y values, without shortcutting, we
            // go through all cases in turn

            // clip with ymin
            if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) {
                x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
                y1 = axisy.min;
            }
            else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) {
                x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
                y2 = axisy.min;
            }

            // clip with ymax
            if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) {
                x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
                y1 = axisy.max;
            }
            else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) {
                x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
                y2 = axisy.max;
            }

            // if the x value was changed we got a rectangle
            // to fill
            if (x1 != x1old) {
                ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1));
                // it goes to (x1, y1), but we fill that below
            }

            // fill triangular section, this sometimes result
            // in redundant points if (x1, y1) hasn't changed
            // from previous line to, but we just ignore that
            ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1));
            ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));

            // fill the other rectangle if it's there
            if (x2 != x2old) {
                ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
                ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2));
            }
        }
    }

    ctx.save();
    ctx.translate(plotOffset.left, plotOffset.top);
    ctx.lineJoin = "round";

    var lw = series.lines.lineWidth,
        sw = series.shadowSize;
    // FIXME: consider another form of shadow when filling is turned on
    if (lw > 0 && sw > 0) {
        // draw shadow as a thick and thin line with transparency
        ctx.lineWidth = sw;
        ctx.strokeStyle = "rgba(0,0,0,0.1)";
        // position shadow at angle from the mid of line
        var angle = Math.PI/18;
        plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/2), Math.cos(angle) * (lw/2 + sw/2), series.xaxis, series.yaxis);
        ctx.lineWidth = sw/2;
        plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/4), Math.cos(angle) * (lw/2 + sw/4), series.xaxis, series.yaxis);
    }

    ctx.lineWidth = lw;
    ctx.strokeStyle = series.color;
    // fill under the line first so the stroke draws over it
    var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight);
    if (fillStyle) {
        ctx.fillStyle = fillStyle;
        plotLineArea(series.datapoints, series.xaxis, series.yaxis);
    }

    if (lw > 0)
        plotLine(series.datapoints, 0, 0, series.xaxis, series.yaxis);
    ctx.restore();
}
+
function drawSeriesPoints(series) {
    // Draw each visible point either as a circle or via a custom symbol
    // callback. offset shifts points vertically in canvas pixels (used
    // for the shadow passes); in shadow mode circles draw only a half
    // arc.
    function plotPoints(datapoints, radius, fillStyle, offset, shadow, axisx, axisy, symbol) {
        var points = datapoints.points,
            ps = datapoints.pointsize;

        for (var i = 0; i < points.length; i += ps) {
            var x = points[i], y = points[i + 1];

            // Skip gaps (null x) and points outside the axis ranges.
            if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
                continue;

            ctx.beginPath();
            var cx = axisx.p2c(x),
                cy = axisy.p2c(y) + offset;
            if (symbol == "circle")
                ctx.arc(cx, cy, radius, 0, shadow ? Math.PI : Math.PI * 2, false);
            else
                symbol(ctx, cx, cy, radius, shadow);
            ctx.closePath();

            if (fillStyle) {
                ctx.fillStyle = fillStyle;
                ctx.fill();
            }
            ctx.stroke();
        }
    }

    ctx.save();
    ctx.translate(plotOffset.left, plotOffset.top);

    var lw = series.points.lineWidth,
        sw = series.shadowSize,
        radius = series.points.radius,
        symbol = series.points.symbol;

    // A lineWidth of 0 would fall back to the canvas default of 1, so
    // substitute a tiny value instead; this keeps the shadow settings
    // usable even when the point outline is effectively invisible.
    if (lw == 0)
        lw = 0.0001;

    if (lw > 0 && sw > 0) {
        // Two-pass shadow: a wider, fainter pass followed by a
        // narrower, darker one.
        var w = sw / 2;
        ctx.lineWidth = w;
        ctx.strokeStyle = "rgba(0,0,0,0.1)";
        plotPoints(series.datapoints, radius, null, w + w / 2, true,
            series.xaxis, series.yaxis, symbol);

        ctx.strokeStyle = "rgba(0,0,0,0.2)";
        plotPoints(series.datapoints, radius, null, w / 2, true,
            series.xaxis, series.yaxis, symbol);
    }

    ctx.lineWidth = lw;
    ctx.strokeStyle = series.color;
    plotPoints(series.datapoints, radius,
        getFillStyle(series.points, series.color), 0, false,
        series.xaxis, series.yaxis, symbol);
    ctx.restore();
}
+
function drawBar(x, y, b, barLeft, barRight, offset, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) {
    // Render one bar on context c, spanning from the base value b to the
    // data value, clipped against the axis ranges. barLeft/barRight
    // offset the bar's sides from the data point; offset shifts the
    // outline (used for shadows). Edges that are clipped away, or that
    // sit at the bar's base, are not stroked.
    var left, right, bottom, top,
        drawLeft, drawRight, drawTop, drawBottom;

    if (horizontal) {
        // Horizontal bars run from the base b to x; the thickness spans
        // y + barLeft .. y + barRight. The base edge (left) stays open.
        drawBottom = drawRight = drawTop = true;
        drawLeft = false;
        left = b;
        right = x;
        top = y + barLeft;
        bottom = y + barRight;

        if (right < left) {
            // Negative value: mirror the extents and the open edge.
            var hflip = right;
            right = left;
            left = hflip;
            drawLeft = true;
            drawRight = false;
        }
    } else {
        // Vertical bars run from the base b up to y; the thickness spans
        // x + barLeft .. x + barRight. The base edge (bottom) stays open.
        drawLeft = drawRight = drawTop = true;
        drawBottom = false;
        left = x + barLeft;
        right = x + barRight;
        bottom = b;
        top = y;

        if (top < bottom) {
            var vflip = top;
            top = bottom;
            bottom = vflip;
            drawBottom = true;
            drawTop = false;
        }
    }

    // Entirely outside the plot area: nothing to draw.
    if (right < axisx.min || left > axisx.max ||
        top < axisy.min || bottom > axisy.max)
        return;

    // Clamp each side to the axis range; a clamped side is not stroked.
    if (left < axisx.min) {
        left = axisx.min;
        drawLeft = false;
    }
    if (right > axisx.max) {
        right = axisx.max;
        drawRight = false;
    }
    if (bottom < axisy.min) {
        bottom = axisy.min;
        drawBottom = false;
    }
    if (top > axisy.max) {
        top = axisy.max;
        drawTop = false;
    }

    // Convert from data to canvas coordinates.
    left = axisx.p2c(left);
    bottom = axisy.p2c(bottom);
    right = axisx.p2c(right);
    top = axisy.p2c(top);

    // Fill the interior first so the outline draws on top of it.
    if (fillStyleCallback) {
        c.beginPath();
        c.moveTo(left, bottom);
        c.lineTo(left, top);
        c.lineTo(right, top);
        c.lineTo(right, bottom);
        c.fillStyle = fillStyleCallback(bottom, top);
        c.fill();
    }

    // Stroke only the surviving edges, walking the rectangle from the
    // bottom-left corner: left side, top, right side, bottom.
    if (lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) {
        c.beginPath();

        // FIXME: inline moveTo is buggy with excanvas
        c.moveTo(left, bottom + offset);
        if (drawLeft)
            c.lineTo(left, top + offset);
        else
            c.moveTo(left, top + offset);
        if (drawTop)
            c.lineTo(right, top + offset);
        else
            c.moveTo(right, top + offset);
        if (drawRight)
            c.lineTo(right, bottom + offset);
        else
            c.moveTo(right, bottom + offset);
        if (drawBottom)
            c.lineTo(left, bottom + offset);
        else
            c.moveTo(left, bottom + offset);
        c.stroke();
    }
}
+
function drawSeriesBars(series) {
    // Draw one bar per (x, y, base) triple in the datapoints; a null x
    // marks a gap and is skipped.
    function plotBars(datapoints, barLeft, barRight, offset, fillStyleCallback, axisx, axisy) {
        var points = datapoints.points,
            ps = datapoints.pointsize;

        for (var i = 0; i < points.length; i += ps) {
            if (points[i] == null)
                continue;
            drawBar(points[i], points[i + 1], points[i + 2],
                barLeft, barRight, offset, fillStyleCallback,
                axisx, axisy, ctx, series.bars.horizontal,
                series.bars.lineWidth);
        }
    }

    ctx.save();
    ctx.translate(plotOffset.left, plotOffset.top);

    // FIXME: figure out a way to add shadows (for instance along the right edge)
    ctx.lineWidth = series.bars.lineWidth;
    ctx.strokeStyle = series.color;

    // Translate the alignment option into the offset of the bar's left
    // edge relative to the data point.
    var barLeft;
    if (series.bars.align == "left")
        barLeft = 0;
    else if (series.bars.align == "right")
        barLeft = -series.bars.barWidth;
    else if (series.bars.align == "center")
        barLeft = -series.bars.barWidth / 2;
    else
        throw new Error("Invalid bar alignment: " + series.bars.align);

    var fillStyleCallback = series.bars.fill
        ? function (bottom, top) {
            return getFillStyle(series.bars, series.color, bottom, top);
        }
        : null;

    plotBars(series.datapoints, barLeft, barLeft + series.bars.barWidth, 0,
        fillStyleCallback, series.xaxis, series.yaxis);
    ctx.restore();
}
+
function getFillStyle(filloptions, seriesColor, bottom, top) {
    // Resolve the fill style for a series: null when filling is off, an
    // explicit fillColor (possibly a gradient spec), or otherwise the
    // series color made translucent.
    var fill = filloptions.fill;
    if (!fill)
        return null;

    if (filloptions.fillColor)
        return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor);

    // fill: true uses the default alpha; fill: <number> sets it directly.
    var alpha = typeof fill == "number" ? fill : 0.4;
    var c = $.color.parse(seriesColor);
    c.a = alpha;
    c.normalize();
    return c.toString();
}
+
// Build and insert the legend: one entry per labeled series, rendered
// as an HTML table either inside a user-supplied container or as an
// absolutely positioned overlay on the placeholder.
function insertLegend() {

    // remove any legend from a previous draw
    placeholder.find(".legend").remove();

    if (!options.legend.show)
        return;

    var fragments = [], entries = [], rowStarted = false,
        lf = options.legend.labelFormatter, s, label;

    // Build a list of legend entries, with each having a label and a color

    for (var i = 0; i < series.length; ++i) {
        s = series[i];
        if (s.label) {
            // a formatter returning a falsy value suppresses the entry
            label = lf ? lf(s.label, s) : s.label;
            if (label) {
                entries.push({
                    label: label,
                    color: s.color
                });
            }
        }
    }

    // Sort the legend using either the default or a custom comparator

    if (options.legend.sorted) {
        if ($.isFunction(options.legend.sorted)) {
            entries.sort(options.legend.sorted);
        } else if (options.legend.sorted == "reverse") {
            entries.reverse();
        } else {
            var ascending = options.legend.sorted != "descending";
            entries.sort(function(a, b) {
                return a.label == b.label ? 0 : (
                    (a.label < b.label) != ascending ? 1 : -1 // Logical XOR
                );
            });
        }
    }

    // Generate markup for the list of entries, in their final order.
    // NOTE(review): entry.label is interpolated into HTML verbatim; if
    // labels can come from untrusted input they should be escaped by a
    // labelFormatter — confirm against callers.

    for (var i = 0; i < entries.length; ++i) {

        var entry = entries[i];

        // start a new table row every noColumns entries
        if (i % options.legend.noColumns == 0) {
            if (rowStarted)
                fragments.push('</tr>');
            fragments.push('<tr>');
            rowStarted = true;
        }

        fragments.push(
            '<td class="legendColorBox"><div style="border:1px solid ' + options.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + entry.color + ';overflow:hidden"></div></div></td>' +
            '<td class="legendLabel">' + entry.label + '</td>'
        );
    }

    if (rowStarted)
        fragments.push('</tr>');

    if (fragments.length == 0)
        return;

    var table = '<table style="font-size:smaller;color:' + options.grid.color + '">' + fragments.join("") + '</table>';
    if (options.legend.container != null)
        $(options.legend.container).html(table);
    else {
        // position the legend inside the plot area from the compass
        // direction in options.legend.position ("ne", "sw", ...)
        var pos = "",
            p = options.legend.position,
            m = options.legend.margin;
        // a scalar margin applies to both axes
        if (m[0] == null)
            m = [m, m];
        if (p.charAt(0) == "n")
            pos += 'top:' + (m[1] + plotOffset.top) + 'px;';
        else if (p.charAt(0) == "s")
            pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;';
        if (p.charAt(1) == "e")
            pos += 'right:' + (m[0] + plotOffset.right) + 'px;';
        else if (p.charAt(1) == "w")
            pos += 'left:' + (m[0] + plotOffset.left) + 'px;';
        var legend = $('<div class="legend">' + table.replace('style="', 'style="position:absolute;' + pos +';') + '</div>').appendTo(placeholder);
        if (options.legend.backgroundOpacity != 0.0) {
            // put in the transparent background
            // separately to avoid blended labels and
            // label boxes
            var c = options.legend.backgroundColor;
            if (c == null) {
                // fall back to the grid background, or whatever
                // background the legend element inherited
                c = options.grid.backgroundColor;
                if (c && typeof c == "string")
                    c = $.color.parse(c);
                else
                    c = $.color.extract(legend, 'background-color');
                c.a = 1;
                c = c.toString();
            }
            var div = legend.children();
            $('<div style="position:absolute;width:' + div.width() + 'px;height:' + div.height() + 'px;' + pos +'background-color:' + c + ';"> </div>').prependTo(legend).css('opacity', options.legend.backgroundOpacity);
        }
    }
}
+
+
// interactive features

// Active highlight descriptors ({ series, point, auto }) and the handle
// of a pending overlay redraw timer (null when none is scheduled).
var highlights = [],
    redrawTimeout = null;
+
// returns the data item the mouse is over, or null if none is found
// (mouseX/mouseY are canvas coordinates relative to the plot area;
// seriesFilter decides which series participate in the search)
function findNearbyItem(mouseX, mouseY, seriesFilter) {
    var maxDistance = options.grid.mouseActiveRadius,
        smallestDistance = maxDistance * maxDistance + 1,
        item = null, foundPoint = false, i, j, ps;
    // NOTE(review): foundPoint is never read — looks like dead state.

    // scan series back to front so later (topmost) series win ties
    for (i = series.length - 1; i >= 0; --i) {
        if (!seriesFilter(series[i]))
            continue;

        var s = series[i],
            axisx = s.xaxis,
            axisy = s.yaxis,
            points = s.datapoints.points,
            mx = axisx.c2p(mouseX), // precompute some stuff to make the loop faster
            my = axisy.c2p(mouseY),
            maxx = maxDistance / axisx.scale,
            maxy = maxDistance / axisy.scale;

        ps = s.datapoints.pointsize;
        // with inverse transforms, we can't use the maxx/maxy
        // optimization, sadly
        if (axisx.options.inverseTransform)
            maxx = Number.MAX_VALUE;
        if (axisy.options.inverseTransform)
            maxy = Number.MAX_VALUE;

        if (s.lines.show || s.points.show) {
            for (j = 0; j < points.length; j += ps) {
                var x = points[j], y = points[j + 1];
                if (x == null)
                    continue;

                // For points and lines, the cursor must be within a
                // certain distance to the data point
                if (x - mx > maxx || x - mx < -maxx ||
                    y - my > maxy || y - my < -maxy)
                    continue;

                // We have to calculate distances in pixels, not in
                // data units, because the scales of the axes may be different
                var dx = Math.abs(axisx.p2c(x) - mouseX),
                    dy = Math.abs(axisy.p2c(y) - mouseY),
                    dist = dx * dx + dy * dy; // we save the sqrt

                // use <= to ensure last point takes precedence
                // (last generally means on top of)
                // NOTE(review): the comment says "<=" but the code uses
                // "<", so on an exact tie the earlier point wins —
                // confirm which is intended.
                if (dist < smallestDistance) {
                    smallestDistance = dist;
                    item = [i, j / ps];
                }
            }
        }

        if (s.bars.show && !item) { // no other point can be nearby
            // NOTE(review): this treats any non-"left" alignment as
            // "center"; a "right"-aligned series would hit-test at the
            // wrong offset — compare with drawSeriesBars' switch.
            var barLeft = s.bars.align == "left" ? 0 : -s.bars.barWidth/2,
                barRight = barLeft + s.bars.barWidth;

            for (j = 0; j < points.length; j += ps) {
                var x = points[j], y = points[j + 1], b = points[j + 2];
                if (x == null)
                    continue;

                // for a bar graph, the cursor must be inside the bar
                if (series[i].bars.horizontal ?
                    (mx <= Math.max(b, x) && mx >= Math.min(b, x) &&
                     my >= y + barLeft && my <= y + barRight) :
                    (mx >= x + barLeft && mx <= x + barRight &&
                     my >= Math.min(b, y) && my <= Math.max(b, y)))
                    item = [i, j / ps];
            }
        }
    }

    if (item) {
        i = item[0];
        j = item[1];
        ps = series[i].datapoints.pointsize;

        // expand the compact [seriesIndex, dataIndex] pair into the
        // public item shape delivered to event listeners
        return { datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps),
                 dataIndex: j,
                 series: series[i],
                 seriesIndex: i };
    }

    return null;
}
+
function onMouseMove(e) {
    // Hover events fire only when the grid is hoverable; individual
    // series can opt out via hoverable: false.
    if (!options.grid.hoverable)
        return;
    triggerClickHoverEvent("plothover", e, function (s) {
        return s["hoverable"] != false;
    });
}
+
function onMouseLeave(e) {
    // Leaving the plot clears hover state: the filter rejects every
    // series, so no item is found and auto-highlights go away.
    if (!options.grid.hoverable)
        return;
    triggerClickHoverEvent("plothover", e, function (s) {
        return false;
    });
}
+
function onClick(e) {
    // Clicks are always delivered; series can opt out via
    // clickable: false.
    var clickableFilter = function (s) {
        return s["clickable"] != false;
    };
    triggerClickHoverEvent("plotclick", e, clickableFilter);
}
+
// trigger click or hover event (they send the same parameters
// so we share their code)
function triggerClickHoverEvent(eventname, event, seriesFilter) {
    // translate the page-space mouse position into canvas-space
    // (relative to the plot area) and then into axis coordinates
    var offset = eventHolder.offset(),
        canvasX = event.pageX - offset.left - plotOffset.left,
        canvasY = event.pageY - offset.top - plotOffset.top,
        pos = canvasToAxisCoords({ left: canvasX, top: canvasY });

    pos.pageX = event.pageX;
    pos.pageY = event.pageY;

    // locate the nearest matching data item, if any
    var item = findNearbyItem(canvasX, canvasY, seriesFilter);

    if (item) {
        // fill in mouse pos for any listeners out there
        item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left + plotOffset.left, 10);
        item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top + plotOffset.top, 10);
    }

    if (options.grid.autoHighlight) {
        // clear auto-highlights: drop any highlight added by this event
        // type that no longer matches the current item
        // NOTE(review): unhighlight() splices from highlights while this
        // loop indexes it, so the entry after a removal is skipped this
        // pass; it gets cleaned up on a subsequent event — confirm this
        // is acceptable.
        for (var i = 0; i < highlights.length; ++i) {
            var h = highlights[i];
            if (h.auto == eventname &&
                !(item && h.series == item.series &&
                  h.point[0] == item.datapoint[0] &&
                  h.point[1] == item.datapoint[1]))
                unhighlight(h.series, h.point);
        }

        if (item)
            highlight(item.series, item.datapoint, eventname);
    }

    // notify listeners bound on the placeholder with the cursor
    // position and the (possibly null) item under it
    placeholder.trigger(eventname, [ pos, item ]);
}
+
function triggerRedrawOverlay() {
    // Schedule a deferred overlay redraw, coalescing repeated requests
    // into one timer. An interval of -1 means "draw synchronously,
    // skipping the event queue".
    var interval = options.interaction.redrawOverlayInterval;
    if (interval == -1) {
        drawOverlay();
        return;
    }

    if (!redrawTimeout)
        redrawTimeout = setTimeout(drawOverlay, interval);
}
+
function drawOverlay() {
    // The pending timer (if any) has fired; allow a new one.
    redrawTimeout = null;

    // Repaint the overlay canvas with the current highlight set.
    octx.save();
    overlay.clear();
    octx.translate(plotOffset.left, plotOffset.top);

    for (var k = 0; k < highlights.length; ++k) {
        var entry = highlights[k];
        if (entry.series.bars.show)
            drawBarHighlight(entry.series, entry.point);
        else
            drawPointHighlight(entry.series, entry.point);
    }
    octx.restore();

    // Give plugins a chance to draw on the overlay too.
    executeHooks(hooks.drawOverlay, [octx]);
}
+
function highlight(s, point, auto) {
    // Accept a series index in place of the series object.
    if (typeof s == "number")
        s = series[s];

    // Accept a datapoint index in place of the point values.
    if (typeof point == "number") {
        var ps = s.datapoints.pointsize;
        point = s.datapoints.points.slice(ps * point, ps * (point + 1));
    }

    var idx = indexOfHighlight(s, point);
    if (idx != -1) {
        // Already highlighted; a manual call (auto falsy) pins it so
        // auto-unhighlighting leaves it alone.
        if (!auto)
            highlights[idx].auto = false;
        return;
    }

    highlights.push({ series: s, point: point, auto: auto });
    triggerRedrawOverlay();
}
+
function unhighlight(s, point) {
    // Called with no arguments: drop every highlight at once.
    if (s == null && point == null) {
        highlights = [];
        triggerRedrawOverlay();
        return;
    }

    // Accept a series index in place of the series object.
    if (typeof s == "number")
        s = series[s];

    // Accept a datapoint index in place of the point values.
    if (typeof point == "number") {
        var ps = s.datapoints.pointsize;
        point = s.datapoints.points.slice(ps * point, ps * (point + 1));
    }

    var idx = indexOfHighlight(s, point);
    if (idx == -1)
        return;

    highlights.splice(idx, 1);
    triggerRedrawOverlay();
}
+
function indexOfHighlight(s, p) {
    // Linear scan over the active highlights: a match requires the same
    // series object and equal x/y coordinates.
    var count = highlights.length;
    for (var idx = 0; idx < count; ++idx) {
        var entry = highlights[idx];
        if (entry.series == s && entry.point[0] == p[0] && entry.point[1] == p[1])
            return idx;
    }
    return -1;
}
+
function drawPointHighlight(series, point) {
    // Draw a glow ring around a single data point on the overlay.
    var x = point[0], y = point[1],
        axisx = series.xaxis, axisy = series.yaxis,
        // Default highlight is the series color at half opacity.
        highlightColor = typeof series.highlightColor === "string"
            ? series.highlightColor
            : $.color.parse(series.color).scale('a', 0.5).toString();

    // Skip points scrolled or zoomed out of the visible range.
    if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
        return;

    var pointRadius = series.points.radius + series.points.lineWidth / 2;
    octx.lineWidth = pointRadius;
    octx.strokeStyle = highlightColor;
    var radius = 1.5 * pointRadius;
    x = axisx.p2c(x);
    y = axisy.p2c(y);

    octx.beginPath();
    if (series.points.symbol == "circle")
        octx.arc(x, y, radius, 0, 2 * Math.PI, false);
    else
        series.points.symbol(octx, x, y, radius, false);
    octx.closePath();
    octx.stroke();
}
+
function drawBarHighlight(series, point) {
    // Redraw a bar on the overlay in the highlight color, filled and
    // outlined with the same geometry as the original bar.
    var highlightColor = typeof series.highlightColor === "string"
        ? series.highlightColor
        : $.color.parse(series.color).scale('a', 0.5).toString();
    var barLeft = series.bars.align == "left" ? 0 : -series.bars.barWidth / 2;

    octx.lineWidth = series.bars.lineWidth;
    octx.strokeStyle = highlightColor;

    drawBar(point[0], point[1], point[2] || 0,
        barLeft, barLeft + series.bars.barWidth, 0,
        function () { return highlightColor; },
        series.xaxis, series.yaxis, octx,
        series.bars.horizontal, series.bars.lineWidth);
}
+
function getColorOrGradient(spec, bottom, top, defaultColor) {
    // A plain string is already a usable color.
    if (typeof spec == "string")
        return spec;

    // Otherwise treat spec as a gradient description ({ colors: [...] }).
    // Only a simple vertical gradient is supported — IE (via excanvas)
    // can't do better, so neither do we.
    var gradient = ctx.createLinearGradient(0, top, 0, bottom);
    var stops = spec.colors.length;

    for (var i = 0; i < stops; ++i) {
        var c = spec.colors[i];
        if (typeof c != "string") {
            // Non-string stops derive from the series color, with
            // optional brightness and opacity adjustments.
            var co = $.color.parse(defaultColor);
            if (c.brightness != null)
                co = co.scale('rgb', c.brightness);
            if (c.opacity != null)
                co.a *= c.opacity;
            c = co.toString();
        }
        gradient.addColorStop(i / (stops - 1), c);
    }

    return gradient;
}
+ }
+
// Add the plot function to the top level of the jQuery object

$.plot = function(placeholder, data, options) {
    // Build a Plot over the placeholder (element or selector) using the
    // globally registered plugins.
    return new Plot($(placeholder), data, options, $.plot.plugins);
};
+
// Library version, exposed so plugins and callers can feature-detect.
$.plot.version = "0.8.1";

// Global plugin registry; every entry is handed to each new Plot.
$.plot.plugins = [];

// Also add the plot function as a chainable property
+
$.fn.plot = function(data, options) {
    // jQuery-chainable form: plot every matched element and hand the
    // original selection back for further chaining.
    return this.each(function () {
        $.plot(this, data, options);
    });
};
+
// Round n down to the nearest lower multiple of base (e.g. 7, 2 -> 6).
function floorInBase(n, base) {
    var multiples = Math.floor(n / base);
    return multiples * base;
}
+
+})(jQuery);
--- /dev/null
+/* Javascript plotting library for jQuery, version 0.8.1.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+*/// first an inline dependency, jquery.colorhelpers.js, we inline it here
+// for convenience
+/* Plugin for jQuery for working with colors.
+ *
+ * Version 1.1.
+ *
+ * Inspiration from jQuery color animation plugin by John Resig.
+ *
+ * Released under the MIT license by Ole Laursen, October 2009.
+ *
+ * Examples:
+ *
+ * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
+ * var c = $.color.extract($("#mydiv"), 'background-color');
+ * console.log(c.r, c.g, c.b, c.a);
+ * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
+ *
+ * Note that .scale() and .add() return the same modified object
+ * instead of making a new one.
+ *
+ * V. 1.1: Fix error handling so e.g. parsing an empty string does
+ * produce a color rather than just crashing.
+ */(function(e){e.color={},e.color.make=function(t,n,r,i){var s={};return s.r=t||0,s.g=n||0,s.b=r||0,s.a=i!=null?i:1,s.add=function(e,t){for(var n=0;n<e.length;++n)s[e.charAt(n)]+=t;return s.normalize()},s.scale=function(e,t){for(var n=0;n<e.length;++n)s[e.charAt(n)]*=t;return s.normalize()},s.toString=function(){return s.a>=1?"rgb("+[s.r,s.g,s.b].join(",")+")":"rgba("+[s.r,s.g,s.b,s.a].join(",")+")"},s.normalize=function(){function e(e,t,n){return t<e?e:t>n?n:t}return s.r=e(0,parseInt(s.r),255),s.g=e(0,parseInt(s.g),255),s.b=e(0,parseInt(s.b),255),s.a=e(0,s.a,1),s},s.clone=function(){return e.color.make(s.r,s.b,s.g,s.a)},s.normalize()},e.color.extract=function(t,n){var r;do{r=t.css(n).toLowerCase();if(r!=""&&r!="transparent")break;t=t.parent()}while(!e.nodeName(t.get(0),"body"));return r=="rgba(0, 0, 0, 0)"&&(r="transparent"),e.color.parse(r)},e.color.parse=function(n){var r,i=e.color.make;if(r=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(n))return i(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10));if(r=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(n))return i(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10),parseFloat(r[4]));if(r=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(n))return i(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55);if(r=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(n))return i(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55,parseFloat(r[4]));if(r=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(n))return i(parseInt(r[1],16),parseInt(r[2],16),parseInt(r[3],16));if(r=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(n))return i(parseInt(r[1]+r[1],16),parseInt(r[2]+r[2],16),parseInt(r[3]+r[3],16));var s=e.trim(n).toLowerCase();return 
s=="transparent"?i(255,255,255,0):(r=t[s]||[0,0,0],i(r[0],r[1],r[2]))};var t={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery),function(e){function n(t,n){var r=n.children("."+t)[0];if(r==null){r=document.createElement("canvas"),r.className=t,e(r).css({direction:"ltr",position:"absolute",left:0,top:0}).appendTo(n);if(!r.getContext){if(!window.G_vmlCanvasManager)throw new Error("Canvas is not available. 
If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");r=window.G_vmlCanvasManager.initElement(r)}}this.element=r;var i=this.context=r.getContext("2d"),s=window.devicePixelRatio||1,o=i.webkitBackingStorePixelRatio||i.mozBackingStorePixelRatio||i.msBackingStorePixelRatio||i.oBackingStorePixelRatio||i.backingStorePixelRatio||1;this.pixelRatio=s/o,this.resize(n.width(),n.height()),this.textContainer=null,this.text={},this._textCache={}}function r(t,r,s,o){function E(e,t){t=[w].concat(t);for(var n=0;n<e.length;++n)e[n].apply(this,t)}function S(){var t={Canvas:n};for(var r=0;r<o.length;++r){var i=o[r];i.init(w,t),i.options&&e.extend(!0,a,i.options)}}function x(n){e.extend(!0,a,n),n&&n.colors&&(a.colors=n.colors),a.xaxis.color==null&&(a.xaxis.color=e.color.parse(a.grid.color).scale("a",.22).toString()),a.yaxis.color==null&&(a.yaxis.color=e.color.parse(a.grid.color).scale("a",.22).toString()),a.xaxis.tickColor==null&&(a.xaxis.tickColor=a.grid.tickColor||a.xaxis.color),a.yaxis.tickColor==null&&(a.yaxis.tickColor=a.grid.tickColor||a.yaxis.color),a.grid.borderColor==null&&(a.grid.borderColor=a.grid.color),a.grid.tickColor==null&&(a.grid.tickColor=e.color.parse(a.grid.color).scale("a",.22).toString());var 
r,i,s,o={style:t.css("font-style"),size:Math.round(.8*(+t.css("font-size").replace("px","")||13)),variant:t.css("font-variant"),weight:t.css("font-weight"),family:t.css("font-family")};o.lineHeight=o.size*1.15,s=a.xaxes.length||1;for(r=0;r<s;++r)i=a.xaxes[r],i&&!i.tickColor&&(i.tickColor=i.color),i=e.extend(!0,{},a.xaxis,i),a.xaxes[r]=i,i.font&&(i.font=e.extend({},o,i.font),i.font.color||(i.font.color=i.color));s=a.yaxes.length||1;for(r=0;r<s;++r)i=a.yaxes[r],i&&!i.tickColor&&(i.tickColor=i.color),i=e.extend(!0,{},a.yaxis,i),a.yaxes[r]=i,i.font&&(i.font=e.extend({},o,i.font),i.font.color||(i.font.color=i.color));a.xaxis.noTicks&&a.xaxis.ticks==null&&(a.xaxis.ticks=a.xaxis.noTicks),a.yaxis.noTicks&&a.yaxis.ticks==null&&(a.yaxis.ticks=a.yaxis.noTicks),a.x2axis&&(a.xaxes[1]=e.extend(!0,{},a.xaxis,a.x2axis),a.xaxes[1].position="top"),a.y2axis&&(a.yaxes[1]=e.extend(!0,{},a.yaxis,a.y2axis),a.yaxes[1].position="right"),a.grid.coloredAreas&&(a.grid.markings=a.grid.coloredAreas),a.grid.coloredAreasColor&&(a.grid.markingsColor=a.grid.coloredAreasColor),a.lines&&e.extend(!0,a.series.lines,a.lines),a.points&&e.extend(!0,a.series.points,a.points),a.bars&&e.extend(!0,a.series.bars,a.bars),a.shadowSize!=null&&(a.series.shadowSize=a.shadowSize),a.highlightColor!=null&&(a.series.highlightColor=a.highlightColor);for(r=0;r<a.xaxes.length;++r)O(d,r+1).options=a.xaxes[r];for(r=0;r<a.yaxes.length;++r)O(v,r+1).options=a.yaxes[r];for(var u in b)a.hooks[u]&&a.hooks[u].length&&(b[u]=b[u].concat(a.hooks[u]));E(b.processOptions,[a])}function T(e){u=N(e),M(),_()}function N(t){var n=[];for(var r=0;r<t.length;++r){var i=e.extend(!0,{},a.series);t[r].data!=null?(i.data=t[r].data,delete t[r].data,e.extend(!0,i,t[r]),t[r].data=i.data):i.data=t[r],n.push(i)}return n}function C(e,t){var n=e[t+"axis"];return typeof n=="object"&&(n=n.n),typeof n!="number"&&(n=1),n}function k(){return e.grep(d.concat(v),function(e){return e})}function L(e){var 
t={},n,r;for(n=0;n<d.length;++n)r=d[n],r&&r.used&&(t["x"+r.n]=r.c2p(e.left));for(n=0;n<v.length;++n)r=v[n],r&&r.used&&(t["y"+r.n]=r.c2p(e.top));return t.x1!==undefined&&(t.x=t.x1),t.y1!==undefined&&(t.y=t.y1),t}function A(e){var t={},n,r,i;for(n=0;n<d.length;++n){r=d[n];if(r&&r.used){i="x"+r.n,e[i]==null&&r.n==1&&(i="x");if(e[i]!=null){t.left=r.p2c(e[i]);break}}}for(n=0;n<v.length;++n){r=v[n];if(r&&r.used){i="y"+r.n,e[i]==null&&r.n==1&&(i="y");if(e[i]!=null){t.top=r.p2c(e[i]);break}}}return t}function O(t,n){return t[n-1]||(t[n-1]={n:n,direction:t==d?"x":"y",options:e.extend(!0,{},t==d?a.xaxis:a.yaxis)}),t[n-1]}function M(){var t=u.length,n=-1,r;for(r=0;r<u.length;++r){var i=u[r].color;i!=null&&(t--,typeof i=="number"&&i>n&&(n=i))}t<=n&&(t=n+1);var s,o=[],f=a.colors,l=f.length,c=0;for(r=0;r<t;r++)s=e.color.parse(f[r%l]||"#666"),r%l==0&&r&&(c>=0?c<.5?c=-c-.2:c=0:c=-c),o[r]=s.scale("rgb",1+c);var h=0,p;for(r=0;r<u.length;++r){p=u[r],p.color==null?(p.color=o[h].toString(),++h):typeof p.color=="number"&&(p.color=o[p.color].toString());if(p.lines.show==null){var m,g=!0;for(m in p)if(p[m]&&p[m].show){g=!1;break}g&&(p.lines.show=!0)}p.lines.zero==null&&(p.lines.zero=!!p.lines.fill),p.xaxis=O(d,C(p,"x")),p.yaxis=O(v,C(p,"y"))}}function _(){function x(e,t,n){t<e.datamin&&t!=-r&&(e.datamin=t),n>e.datamax&&n!=r&&(e.datamax=n)}var t=Number.POSITIVE_INFINITY,n=Number.NEGATIVE_INFINITY,r=Number.MAX_VALUE,i,s,o,a,f,l,c,h,p,d,v,m,g,y,w,S;e.each(k(),function(e,r){r.datamin=t,r.datamax=n,r.used=!1});for(i=0;i<u.length;++i)l=u[i],l.datapoints={points:[]},E(b.processRawData,[l,l.data,l.datapoints]);for(i=0;i<u.length;++i){l=u[i],w=l.data,S=l.datapoints.format;if(!S){S=[],S.push({x:!0,number:!0,required:!0}),S.push({y:!0,number:!0,required:!0});if(l.bars.show||l.lines.show&&l.lines.fill){var T=!!(l.bars.show&&l.bars.zero||l.lines.show&&l.lines.zero);S.push({y:!0,number:!0,required:!1,defaultValue:0,autoscale:T}),l.bars.horizontal&&(delete 
S[S.length-1].y,S[S.length-1].x=!0)}l.datapoints.format=S}if(l.datapoints.pointsize!=null)continue;l.datapoints.pointsize=S.length,h=l.datapoints.pointsize,c=l.datapoints.points;var N=l.lines.show&&l.lines.steps;l.xaxis.used=l.yaxis.used=!0;for(s=o=0;s<w.length;++s,o+=h){y=w[s];var C=y==null;if(!C)for(a=0;a<h;++a)m=y[a],g=S[a],g&&(g.number&&m!=null&&(m=+m,isNaN(m)?m=null:m==Infinity?m=r:m==-Infinity&&(m=-r)),m==null&&(g.required&&(C=!0),g.defaultValue!=null&&(m=g.defaultValue))),c[o+a]=m;if(C)for(a=0;a<h;++a)m=c[o+a],m!=null&&(g=S[a],g.autoscale&&(g.x&&x(l.xaxis,m,m),g.y&&x(l.yaxis,m,m))),c[o+a]=null;else if(N&&o>0&&c[o-h]!=null&&c[o-h]!=c[o]&&c[o-h+1]!=c[o+1]){for(a=0;a<h;++a)c[o+h+a]=c[o+a];c[o+1]=c[o-h+1],o+=h}}}for(i=0;i<u.length;++i)l=u[i],E(b.processDatapoints,[l,l.datapoints]);for(i=0;i<u.length;++i){l=u[i],c=l.datapoints.points,h=l.datapoints.pointsize,S=l.datapoints.format;var L=t,A=t,O=n,M=n;for(s=0;s<c.length;s+=h){if(c[s]==null)continue;for(a=0;a<h;++a){m=c[s+a],g=S[a];if(!g||g.autoscale===!1||m==r||m==-r)continue;g.x&&(m<L&&(L=m),m>O&&(O=m)),g.y&&(m<A&&(A=m),m>M&&(M=m))}}if(l.bars.show){var _;switch(l.bars.align){case"left":_=0;break;case"right":_=-l.bars.barWidth;break;case"center":_=-l.bars.barWidth/2;break;default:throw new Error("Invalid bar alignment: "+l.bars.align)}l.bars.horizontal?(A+=_,M+=_+l.bars.barWidth):(L+=_,O+=_+l.bars.barWidth)}x(l.xaxis,L,O),x(l.yaxis,A,M)}e.each(k(),function(e,r){r.datamin==t&&(r.datamin=null),r.datamax==n&&(r.datamax=null)})}function D(){t.css("padding",0).children(":not(.flot-base,.flot-overlay)").remove(),t.css("position")=="static"&&t.css("position","relative"),f=new n("flot-base",t),l=new n("flot-overlay",t),h=f.context,p=l.context,c=e(l.element).unbind();var r=t.data("plot");r&&(r.shutdown(),l.clear()),t.data("plot",w)}function P(){a.grid.hoverable&&(c.mousemove(at),c.bind("mouseleave",ft)),a.grid.clickable&&c.click(lt),E(b.bindEvents,[c])}function 
H(){ot&&clearTimeout(ot),c.unbind("mousemove",at),c.unbind("mouseleave",ft),c.unbind("click",lt),E(b.shutdown,[c])}function B(e){function t(e){return e}var n,r,i=e.options.transform||t,s=e.options.inverseTransform;e.direction=="x"?(n=e.scale=g/Math.abs(i(e.max)-i(e.min)),r=Math.min(i(e.max),i(e.min))):(n=e.scale=y/Math.abs(i(e.max)-i(e.min)),n=-n,r=Math.max(i(e.max),i(e.min))),i==t?e.p2c=function(e){return(e-r)*n}:e.p2c=function(e){return(i(e)-r)*n},s?e.c2p=function(e){return s(r+e/n)}:e.c2p=function(e){return r+e/n}}function j(e){var t=e.options,n=e.ticks||[],r=t.labelWidth||0,i=t.labelHeight||0,s=r||e.direction=="x"?Math.floor(f.width/(n.length||1)):null;legacyStyles=e.direction+"Axis "+e.direction+e.n+"Axis",layer="flot-"+e.direction+"-axis flot-"+e.direction+e.n+"-axis "+legacyStyles,font=t.font||"flot-tick-label tickLabel";for(var o=0;o<n.length;++o){var u=n[o];if(!u.label)continue;var a=f.getTextInfo(layer,u.label,font,null,s);r=Math.max(r,a.width),i=Math.max(i,a.height)}e.labelWidth=t.labelWidth||r,e.labelHeight=t.labelHeight||i}function F(t){var n=t.labelWidth,r=t.labelHeight,i=t.options.position,s=t.options.tickLength,o=a.grid.axisMargin,u=a.grid.labelMargin,l=t.direction=="x"?d:v,c,h,p=e.grep(l,function(e){return e&&e.options.position==i&&e.reserveSpace});e.inArray(t,p)==p.length-1&&(o=0);if(s==null){var g=e.grep(l,function(e){return e&&e.reserveSpace});h=e.inArray(t,g)==0,h?s="full":s=5}isNaN(+s)||(u+=+s),t.direction=="x"?(r+=u,i=="bottom"?(m.bottom+=r+o,t.box={top:f.height-m.bottom,height:r}):(t.box={top:m.top+o,height:r},m.top+=r+o)):(n+=u,i=="left"?(t.box={left:m.left+o,width:n},m.left+=n+o):(m.right+=n+o,t.box={left:f.width-m.right,width:n})),t.position=i,t.tickLength=s,t.box.padding=u,t.innermost=h}function I(e){e.direction=="x"?(e.box.left=m.left-e.labelWidth/2,e.box.width=f.width-m.left-m.right+e.labelWidth):(e.box.top=m.top-e.labelHeight/2,e.box.height=f.height-m.bottom-m.top+e.labelHeight)}function q(){var 
t=a.grid.minBorderMargin,n={x:0,y:0},r,i;if(t==null){t=0;for(r=0;r<u.length;++r)t=Math.max(t,2*(u[r].points.radius+u[r].points.lineWidth/2))}n.x=n.y=Math.ceil(t),e.each(k(),function(e,t){var r=t.direction;t.reserveSpace&&(n[r]=Math.ceil(Math.max(n[r],(r=="x"?t.labelWidth:t.labelHeight)/2)))}),m.left=Math.max(n.x,m.left),m.right=Math.max(n.x,m.right),m.top=Math.max(n.y,m.top),m.bottom=Math.max(n.y,m.bottom)}function R(){var t,n=k(),r=a.grid.show;for(var i in m){var s=a.grid.margin||0;m[i]=typeof s=="number"?s:s[i]||0}E(b.processOffset,[m]);for(var i in m)typeof a.grid.borderWidth=="object"?m[i]+=r?a.grid.borderWidth[i]:0:m[i]+=r?a.grid.borderWidth:0;e.each(n,function(e,t){t.show=t.options.show,t.show==null&&(t.show=t.used),t.reserveSpace=t.show||t.options.reserveSpace,U(t)});if(r){var o=e.grep(n,function(e){return e.reserveSpace});e.each(o,function(e,t){z(t),W(t),X(t,t.ticks),j(t)});for(t=o.length-1;t>=0;--t)F(o[t]);q(),e.each(o,function(e,t){I(t)})}g=f.width-m.left-m.right,y=f.height-m.bottom-m.top,e.each(n,function(e,t){B(t)}),r&&G(),it()}function U(e){var t=e.options,n=+(t.min!=null?t.min:e.datamin),r=+(t.max!=null?t.max:e.datamax),i=r-n;if(i==0){var s=r==0?1:.01;t.min==null&&(n-=s);if(t.max==null||t.min!=null)r+=s}else{var o=t.autoscaleMargin;o!=null&&(t.min==null&&(n-=i*o,n<0&&e.datamin!=null&&e.datamin>=0&&(n=0)),t.max==null&&(r+=i*o,r>0&&e.datamax!=null&&e.datamax<=0&&(r=0)))}e.min=n,e.max=r}function z(t){var n=t.options,r;typeof n.ticks=="number"&&n.ticks>0?r=n.ticks:r=.3*Math.sqrt(t.direction=="x"?f.width:f.height);var s=(t.max-t.min)/r,o=-Math.floor(Math.log(s)/Math.LN10),u=n.tickDecimals;u!=null&&o>u&&(o=u);var a=Math.pow(10,-o),l=s/a,c;l<1.5?c=1:l<3?(c=2,l>2.25&&(u==null||o+1<=u)&&(c=2.5,++o)):l<7.5?c=5:c=10,c*=a,n.minTickSize!=null&&c<n.minTickSize&&(c=n.minTickSize),t.delta=s,t.tickDecimals=Math.max(0,u!=null?u:o),t.tickSize=n.tickSize||c;if(n.mode=="time"&&!t.tickGenerator)throw new Error("Time mode requires the flot.time 
plugin.");t.tickGenerator||(t.tickGenerator=function(e){var t=[],n=i(e.min,e.tickSize),r=0,s=Number.NaN,o;do o=s,s=n+r*e.tickSize,t.push(s),++r;while(s<e.max&&s!=o);return t},t.tickFormatter=function(e,t){var n=t.tickDecimals?Math.pow(10,t.tickDecimals):1,r=""+Math.round(e*n)/n;if(t.tickDecimals!=null){var i=r.indexOf("."),s=i==-1?0:r.length-i-1;if(s<t.tickDecimals)return(s?r:r+".")+(""+n).substr(1,t.tickDecimals-s)}return r}),e.isFunction(n.tickFormatter)&&(t.tickFormatter=function(e,t){return""+n.tickFormatter(e,t)});if(n.alignTicksWithAxis!=null){var h=(t.direction=="x"?d:v)[n.alignTicksWithAxis-1];if(h&&h.used&&h!=t){var p=t.tickGenerator(t);p.length>0&&(n.min==null&&(t.min=Math.min(t.min,p[0])),n.max==null&&p.length>1&&(t.max=Math.max(t.max,p[p.length-1]))),t.tickGenerator=function(e){var t=[],n,r;for(r=0;r<h.ticks.length;++r)n=(h.ticks[r].v-h.min)/(h.max-h.min),n=e.min+n*(e.max-e.min),t.push(n);return t};if(!t.mode&&n.tickDecimals==null){var m=Math.max(0,-Math.floor(Math.log(t.delta)/Math.LN10)+1),g=t.tickGenerator(t);g.length>1&&/\..*0$/.test((g[1]-g[0]).toFixed(m))||(t.tickDecimals=m)}}}}function W(t){var n=t.options.ticks,r=[];n==null||typeof n=="number"&&n>0?r=t.tickGenerator(t):n&&(e.isFunction(n)?r=n(t):r=n);var i,s;t.ticks=[];for(i=0;i<r.length;++i){var o=null,u=r[i];typeof u=="object"?(s=+u[0],u.length>1&&(o=u[1])):s=+u,o==null&&(o=t.tickFormatter(s,t)),isNaN(s)||t.ticks.push({v:s,label:o})}}function X(e,t){e.options.autoscaleMargin&&t.length>0&&(e.options.min==null&&(e.min=Math.min(e.min,t[0].v)),e.options.max==null&&t.length>1&&(e.max=Math.max(e.max,t[t.length-1].v)))}function V(){f.clear(),E(b.drawBackground,[h]);var e=a.grid;e.show&&e.backgroundColor&&K(),e.show&&!e.aboveData&&Q();for(var t=0;t<u.length;++t)E(b.drawSeries,[h,u[t]]),Y(u[t]);E(b.draw,[h]),e.show&&e.aboveData&&Q(),f.render(),ht()}function J(e,t){var n,r,i,s,o=k();for(var 
u=0;u<o.length;++u){n=o[u];if(n.direction==t){s=t+n.n+"axis",!e[s]&&n.n==1&&(s=t+"axis");if(e[s]){r=e[s].from,i=e[s].to;break}}}e[s]||(n=t=="x"?d[0]:v[0],r=e[t+"1"],i=e[t+"2"]);if(r!=null&&i!=null&&r>i){var a=r;r=i,i=a}return{from:r,to:i,axis:n}}function K(){h.save(),h.translate(m.left,m.top),h.fillStyle=bt(a.grid.backgroundColor,y,0,"rgba(255, 255, 255, 0)"),h.fillRect(0,0,g,y),h.restore()}function Q(){var t,n,r,i;h.save(),h.translate(m.left,m.top);var s=a.grid.markings;if(s){e.isFunction(s)&&(n=w.getAxes(),n.xmin=n.xaxis.min,n.xmax=n.xaxis.max,n.ymin=n.yaxis.min,n.ymax=n.yaxis.max,s=s(n));for(t=0;t<s.length;++t){var o=s[t],u=J(o,"x"),f=J(o,"y");u.from==null&&(u.from=u.axis.min),u.to==null&&(u.to=u.axis.max),f.from==null&&(f.from=f.axis.min),f.to==null&&(f.to=f.axis.max);if(u.to<u.axis.min||u.from>u.axis.max||f.to<f.axis.min||f.from>f.axis.max)continue;u.from=Math.max(u.from,u.axis.min),u.to=Math.min(u.to,u.axis.max),f.from=Math.max(f.from,f.axis.min),f.to=Math.min(f.to,f.axis.max);if(u.from==u.to&&f.from==f.to)continue;u.from=u.axis.p2c(u.from),u.to=u.axis.p2c(u.to),f.from=f.axis.p2c(f.from),f.to=f.axis.p2c(f.to),u.from==u.to||f.from==f.to?(h.beginPath(),h.strokeStyle=o.color||a.grid.markingsColor,h.lineWidth=o.lineWidth||a.grid.markingsLineWidth,h.moveTo(u.from,f.from),h.lineTo(u.to,f.to),h.stroke()):(h.fillStyle=o.color||a.grid.markingsColor,h.fillRect(u.from,f.to,u.to-u.from,f.from-f.to))}}n=k(),r=a.grid.borderWidth;for(var l=0;l<n.length;++l){var 
c=n[l],p=c.box,d=c.tickLength,v,b,E,S;if(!c.show||c.ticks.length==0)continue;h.lineWidth=1,c.direction=="x"?(v=0,d=="full"?b=c.position=="top"?0:y:b=p.top-m.top+(c.position=="top"?p.height:0)):(b=0,d=="full"?v=c.position=="left"?0:g:v=p.left-m.left+(c.position=="left"?p.width:0)),c.innermost||(h.strokeStyle=c.options.color,h.beginPath(),E=S=0,c.direction=="x"?E=g+1:S=y+1,h.lineWidth==1&&(c.direction=="x"?b=Math.floor(b)+.5:v=Math.floor(v)+.5),h.moveTo(v,b),h.lineTo(v+E,b+S),h.stroke()),h.strokeStyle=c.options.tickColor,h.beginPath();for(t=0;t<c.ticks.length;++t){var x=c.ticks[t].v;E=S=0;if(isNaN(x)||x<c.min||x>c.max||d=="full"&&(typeof r=="object"&&r[c.position]>0||r>0)&&(x==c.min||x==c.max))continue;c.direction=="x"?(v=c.p2c(x),S=d=="full"?-y:d,c.position=="top"&&(S=-S)):(b=c.p2c(x),E=d=="full"?-g:d,c.position=="left"&&(E=-E)),h.lineWidth==1&&(c.direction=="x"?v=Math.floor(v)+.5:b=Math.floor(b)+.5),h.moveTo(v,b),h.lineTo(v+E,b+S)}h.stroke()}r&&(i=a.grid.borderColor,typeof r=="object"||typeof i=="object"?(typeof r!="object"&&(r={top:r,right:r,bottom:r,left:r}),typeof i!="object"&&(i={top:i,right:i,bottom:i,left:i}),r.top>0&&(h.strokeStyle=i.top,h.lineWidth=r.top,h.beginPath(),h.moveTo(0-r.left,0-r.top/2),h.lineTo(g,0-r.top/2),h.stroke()),r.right>0&&(h.strokeStyle=i.right,h.lineWidth=r.right,h.beginPath(),h.moveTo(g+r.right/2,0-r.top),h.lineTo(g+r.right/2,y),h.stroke()),r.bottom>0&&(h.strokeStyle=i.bottom,h.lineWidth=r.bottom,h.beginPath(),h.moveTo(g+r.right,y+r.bottom/2),h.lineTo(0,y+r.bottom/2),h.stroke()),r.left>0&&(h.strokeStyle=i.left,h.lineWidth=r.left,h.beginPath(),h.moveTo(0-r.left/2,y+r.bottom),h.lineTo(0-r.left/2,0),h.stroke())):(h.lineWidth=r,h.strokeStyle=a.grid.borderColor,h.strokeRect(-r/2,-r/2,g+r,y+r))),h.restore()}function G(){e.each(k(),function(e,t){if(!t.show||t.ticks.length==0)return;var n=t.box,r=t.direction+"Axis "+t.direction+t.n+"Axis",i="flot-"+t.direction+"-axis flot-"+t.direction+t.n+"-axis "+r,s=t.options.font||"flot-tick-label 
tickLabel",o,u,a,l,c;f.removeText(i);for(var h=0;h<t.ticks.length;++h){o=t.ticks[h];if(!o.label||o.v<t.min||o.v>t.max)continue;t.direction=="x"?(l="center",u=m.left+t.p2c(o.v),t.position=="bottom"?a=n.top+n.padding:(a=n.top+n.height-n.padding,c="bottom")):(c="middle",a=m.top+t.p2c(o.v),t.position=="left"?(u=n.left+n.width-n.padding,l="right"):u=n.left+n.padding),f.addText(i,u,a,o.label,s,null,null,l,c)}})}function Y(e){e.lines.show&&Z(e),e.bars.show&&nt(e),e.points.show&&et(e)}function Z(e){function t(e,t,n,r,i){var s=e.points,o=e.pointsize,u=null,a=null;h.beginPath();for(var f=o;f<s.length;f+=o){var l=s[f-o],c=s[f-o+1],p=s[f],d=s[f+1];if(l==null||p==null)continue;if(c<=d&&c<i.min){if(d<i.min)continue;l=(i.min-c)/(d-c)*(p-l)+l,c=i.min}else if(d<=c&&d<i.min){if(c<i.min)continue;p=(i.min-c)/(d-c)*(p-l)+l,d=i.min}if(c>=d&&c>i.max){if(d>i.max)continue;l=(i.max-c)/(d-c)*(p-l)+l,c=i.max}else if(d>=c&&d>i.max){if(c>i.max)continue;p=(i.max-c)/(d-c)*(p-l)+l,d=i.max}if(l<=p&&l<r.min){if(p<r.min)continue;c=(r.min-l)/(p-l)*(d-c)+c,l=r.min}else if(p<=l&&p<r.min){if(l<r.min)continue;d=(r.min-l)/(p-l)*(d-c)+c,p=r.min}if(l>=p&&l>r.max){if(p>r.max)continue;c=(r.max-l)/(p-l)*(d-c)+c,l=r.max}else if(p>=l&&p>r.max){if(l>r.max)continue;d=(r.max-l)/(p-l)*(d-c)+c,p=r.max}(l!=u||c!=a)&&h.moveTo(r.p2c(l)+t,i.p2c(c)+n),u=p,a=d,h.lineTo(r.p2c(p)+t,i.p2c(d)+n)}h.stroke()}function n(e,t,n){var r=e.points,i=e.pointsize,s=Math.min(Math.max(0,n.min),n.max),o=0,u,a=!1,f=1,l=0,c=0;for(;;){if(i>0&&o>r.length+i)break;o+=i;var p=r[o-i],d=r[o-i+f],v=r[o],m=r[o+f];if(a){if(i>0&&p!=null&&v==null){c=o,i=-i,f=2;continue}if(i<0&&o==l+i){h.fill(),a=!1,i=-i,f=1,o=l=c+i;continue}}if(p==null||v==null)continue;if(p<=v&&p<t.min){if(v<t.min)continue;d=(t.min-p)/(v-p)*(m-d)+d,p=t.min}else if(v<=p&&v<t.min){if(p<t.min)continue;m=(t.min-p)/(v-p)*(m-d)+d,v=t.min}if(p>=v&&p>t.max){if(v>t.max)continue;d=(t.max-p)/(v-p)*(m-d)+d,p=t.max}else 
if(v>=p&&v>t.max){if(p>t.max)continue;m=(t.max-p)/(v-p)*(m-d)+d,v=t.max}a||(h.beginPath(),h.moveTo(t.p2c(p),n.p2c(s)),a=!0);if(d>=n.max&&m>=n.max){h.lineTo(t.p2c(p),n.p2c(n.max)),h.lineTo(t.p2c(v),n.p2c(n.max));continue}if(d<=n.min&&m<=n.min){h.lineTo(t.p2c(p),n.p2c(n.min)),h.lineTo(t.p2c(v),n.p2c(n.min));continue}var g=p,y=v;d<=m&&d<n.min&&m>=n.min?(p=(n.min-d)/(m-d)*(v-p)+p,d=n.min):m<=d&&m<n.min&&d>=n.min&&(v=(n.min-d)/(m-d)*(v-p)+p,m=n.min),d>=m&&d>n.max&&m<=n.max?(p=(n.max-d)/(m-d)*(v-p)+p,d=n.max):m>=d&&m>n.max&&d<=n.max&&(v=(n.max-d)/(m-d)*(v-p)+p,m=n.max),p!=g&&h.lineTo(t.p2c(g),n.p2c(d)),h.lineTo(t.p2c(p),n.p2c(d)),h.lineTo(t.p2c(v),n.p2c(m)),v!=y&&(h.lineTo(t.p2c(v),n.p2c(m)),h.lineTo(t.p2c(y),n.p2c(m)))}}h.save(),h.translate(m.left,m.top),h.lineJoin="round";var r=e.lines.lineWidth,i=e.shadowSize;if(r>0&&i>0){h.lineWidth=i,h.strokeStyle="rgba(0,0,0,0.1)";var s=Math.PI/18;t(e.datapoints,Math.sin(s)*(r/2+i/2),Math.cos(s)*(r/2+i/2),e.xaxis,e.yaxis),h.lineWidth=i/2,t(e.datapoints,Math.sin(s)*(r/2+i/4),Math.cos(s)*(r/2+i/4),e.xaxis,e.yaxis)}h.lineWidth=r,h.strokeStyle=e.color;var o=rt(e.lines,e.color,0,y);o&&(h.fillStyle=o,n(e.datapoints,e.xaxis,e.yaxis)),r>0&&t(e.datapoints,0,0,e.xaxis,e.yaxis),h.restore()}function et(e){function t(e,t,n,r,i,s,o,u){var a=e.points,f=e.pointsize;for(var l=0;l<a.length;l+=f){var c=a[l],p=a[l+1];if(c==null||c<s.min||c>s.max||p<o.min||p>o.max)continue;h.beginPath(),c=s.p2c(c),p=o.p2c(p)+r,u=="circle"?h.arc(c,p,t,0,i?Math.PI:Math.PI*2,!1):u(h,c,p,t,i),h.closePath(),n&&(h.fillStyle=n,h.fill()),h.stroke()}}h.save(),h.translate(m.left,m.top);var n=e.points.lineWidth,r=e.shadowSize,i=e.points.radius,s=e.points.symbol;n==0&&(n=1e-4);if(n>0&&r>0){var 
o=r/2;h.lineWidth=o,h.strokeStyle="rgba(0,0,0,0.1)",t(e.datapoints,i,null,o+o/2,!0,e.xaxis,e.yaxis,s),h.strokeStyle="rgba(0,0,0,0.2)",t(e.datapoints,i,null,o/2,!0,e.xaxis,e.yaxis,s)}h.lineWidth=n,h.strokeStyle=e.color,t(e.datapoints,i,rt(e.points,e.color),0,!1,e.xaxis,e.yaxis,s),h.restore()}function tt(e,t,n,r,i,s,o,u,a,f,l,c){var h,p,d,v,m,g,y,b,w;l?(b=g=y=!0,m=!1,h=n,p=e,v=t+r,d=t+i,p<h&&(w=p,p=h,h=w,m=!0,g=!1)):(m=g=y=!0,b=!1,h=e+r,p=e+i,d=n,v=t,v<d&&(w=v,v=d,d=w,b=!0,y=!1));if(p<u.min||h>u.max||v<a.min||d>a.max)return;h<u.min&&(h=u.min,m=!1),p>u.max&&(p=u.max,g=!1),d<a.min&&(d=a.min,b=!1),v>a.max&&(v=a.max,y=!1),h=u.p2c(h),d=a.p2c(d),p=u.p2c(p),v=a.p2c(v),o&&(f.beginPath(),f.moveTo(h,d),f.lineTo(h,v),f.lineTo(p,v),f.lineTo(p,d),f.fillStyle=o(d,v),f.fill()),c>0&&(m||g||y||b)&&(f.beginPath(),f.moveTo(h,d+s),m?f.lineTo(h,v+s):f.moveTo(h,v+s),y?f.lineTo(p,v+s):f.moveTo(p,v+s),g?f.lineTo(p,d+s):f.moveTo(p,d+s),b?f.lineTo(h,d+s):f.moveTo(h,d+s),f.stroke())}function nt(e){function t(t,n,r,i,s,o,u){var a=t.points,f=t.pointsize;for(var l=0;l<a.length;l+=f){if(a[l]==null)continue;tt(a[l],a[l+1],a[l+2],n,r,i,s,o,u,h,e.bars.horizontal,e.bars.lineWidth)}}h.save(),h.translate(m.left,m.top),h.lineWidth=e.bars.lineWidth,h.strokeStyle=e.color;var n;switch(e.bars.align){case"left":n=0;break;case"right":n=-e.bars.barWidth;break;case"center":n=-e.bars.barWidth/2;break;default:throw new Error("Invalid bar alignment: "+e.bars.align)}var r=e.bars.fill?function(t,n){return rt(e.bars,e.color,t,n)}:null;t(e.datapoints,n,n+e.bars.barWidth,0,r,e.xaxis,e.yaxis),h.restore()}function rt(t,n,r,i){var s=t.fill;if(!s)return null;if(t.fillColor)return bt(t.fillColor,r,i,n);var o=e.color.parse(n);return o.a=typeof s=="number"?s:.4,o.normalize(),o.toString()}function it(){t.find(".legend").remove();if(!a.legend.show)return;var n=[],r=[],i=!1,s=a.legend.labelFormatter,o,f;for(var 
l=0;l<u.length;++l)o=u[l],o.label&&(f=s?s(o.label,o):o.label,f&&r.push({label:f,color:o.color}));if(a.legend.sorted)if(e.isFunction(a.legend.sorted))r.sort(a.legend.sorted);else if(a.legend.sorted=="reverse")r.reverse();else{var c=a.legend.sorted!="descending";r.sort(function(e,t){return e.label==t.label?0:e.label<t.label!=c?1:-1})}for(var l=0;l<r.length;++l){var h=r[l];l%a.legend.noColumns==0&&(i&&n.push("</tr>"),n.push("<tr>"),i=!0),n.push('<td class="legendColorBox"><div style="border:1px solid '+a.legend.labelBoxBorderColor+';padding:1px"><div style="width:4px;height:0;border:5px solid '+h.color+';overflow:hidden"></div></div></td>'+'<td class="legendLabel">'+h.label+"</td>")}i&&n.push("</tr>");if(n.length==0)return;var p='<table style="font-size:smaller;color:'+a.grid.color+'">'+n.join("")+"</table>";if(a.legend.container!=null)e(a.legend.container).html(p);else{var d="",v=a.legend.position,g=a.legend.margin;g[0]==null&&(g=[g,g]),v.charAt(0)=="n"?d+="top:"+(g[1]+m.top)+"px;":v.charAt(0)=="s"&&(d+="bottom:"+(g[1]+m.bottom)+"px;"),v.charAt(1)=="e"?d+="right:"+(g[0]+m.right)+"px;":v.charAt(1)=="w"&&(d+="left:"+(g[0]+m.left)+"px;");var y=e('<div class="legend">'+p.replace('style="','style="position:absolute;'+d+";")+"</div>").appendTo(t);if(a.legend.backgroundOpacity!=0){var b=a.legend.backgroundColor;b==null&&(b=a.grid.backgroundColor,b&&typeof b=="string"?b=e.color.parse(b):b=e.color.extract(y,"background-color"),b.a=1,b=b.toString());var w=y.children();e('<div style="position:absolute;width:'+w.width()+"px;height:"+w.height()+"px;"+d+"background-color:"+b+';"> </div>').prependTo(y).css("opacity",a.legend.backgroundOpacity)}}}function ut(e,t,n){var r=a.grid.mouseActiveRadius,i=r*r+1,s=null,o=!1,f,l,c;for(f=u.length-1;f>=0;--f){if(!n(u[f]))continue;var 
h=u[f],p=h.xaxis,d=h.yaxis,v=h.datapoints.points,m=p.c2p(e),g=d.c2p(t),y=r/p.scale,b=r/d.scale;c=h.datapoints.pointsize,p.options.inverseTransform&&(y=Number.MAX_VALUE),d.options.inverseTransform&&(b=Number.MAX_VALUE);if(h.lines.show||h.points.show)for(l=0;l<v.length;l+=c){var w=v[l],E=v[l+1];if(w==null)continue;if(w-m>y||w-m<-y||E-g>b||E-g<-b)continue;var S=Math.abs(p.p2c(w)-e),x=Math.abs(d.p2c(E)-t),T=S*S+x*x;T<i&&(i=T,s=[f,l/c])}if(h.bars.show&&!s){var N=h.bars.align=="left"?0:-h.bars.barWidth/2,C=N+h.bars.barWidth;for(l=0;l<v.length;l+=c){var w=v[l],E=v[l+1],k=v[l+2];if(w==null)continue;if(u[f].bars.horizontal?m<=Math.max(k,w)&&m>=Math.min(k,w)&&g>=E+N&&g<=E+C:m>=w+N&&m<=w+C&&g>=Math.min(k,E)&&g<=Math.max(k,E))s=[f,l/c]}}}return s?(f=s[0],l=s[1],c=u[f].datapoints.pointsize,{datapoint:u[f].datapoints.points.slice(l*c,(l+1)*c),dataIndex:l,series:u[f],seriesIndex:f}):null}function at(e){a.grid.hoverable&&ct("plothover",e,function(e){return e["hoverable"]!=0})}function ft(e){a.grid.hoverable&&ct("plothover",e,function(e){return!1})}function lt(e){ct("plotclick",e,function(e){return e["clickable"]!=0})}function ct(e,n,r){var i=c.offset(),s=n.pageX-i.left-m.left,o=n.pageY-i.top-m.top,u=L({left:s,top:o});u.pageX=n.pageX,u.pageY=n.pageY;var f=ut(s,o,r);f&&(f.pageX=parseInt(f.series.xaxis.p2c(f.datapoint[0])+i.left+m.left,10),f.pageY=parseInt(f.series.yaxis.p2c(f.datapoint[1])+i.top+m.top,10));if(a.grid.autoHighlight){for(var l=0;l<st.length;++l){var h=st[l];h.auto==e&&(!f||h.series!=f.series||h.point[0]!=f.datapoint[0]||h.point[1]!=f.datapoint[1])&&vt(h.series,h.point)}f&&dt(f.series,f.datapoint,e)}t.trigger(e,[u,f])}function ht(){var e=a.interaction.redrawOverlayInterval;if(e==-1){pt();return}ot||(ot=setTimeout(pt,e))}function pt(){ot=null,p.save(),l.clear(),p.translate(m.left,m.top);var e,t;for(e=0;e<st.length;++e)t=st[e],t.series.bars.show?yt(t.series,t.point):gt(t.series,t.point);p.restore(),E(b.drawOverlay,[p])}function dt(e,t,n){typeof 
e=="number"&&(e=u[e]);if(typeof t=="number"){var r=e.datapoints.pointsize;t=e.datapoints.points.slice(r*t,r*(t+1))}var i=mt(e,t);i==-1?(st.push({series:e,point:t,auto:n}),ht()):n||(st[i].auto=!1)}function vt(e,t){if(e==null&&t==null){st=[],ht();return}typeof e=="number"&&(e=u[e]);if(typeof t=="number"){var n=e.datapoints.pointsize;t=e.datapoints.points.slice(n*t,n*(t+1))}var r=mt(e,t);r!=-1&&(st.splice(r,1),ht())}function mt(e,t){for(var n=0;n<st.length;++n){var r=st[n];if(r.series==e&&r.point[0]==t[0]&&r.point[1]==t[1])return n}return-1}function gt(t,n){var r=n[0],i=n[1],s=t.xaxis,o=t.yaxis,u=typeof t.highlightColor=="string"?t.highlightColor:e.color.parse(t.color).scale("a",.5).toString();if(r<s.min||r>s.max||i<o.min||i>o.max)return;var a=t.points.radius+t.points.lineWidth/2;p.lineWidth=a,p.strokeStyle=u;var f=1.5*a;r=s.p2c(r),i=o.p2c(i),p.beginPath(),t.points.symbol=="circle"?p.arc(r,i,f,0,2*Math.PI,!1):t.points.symbol(p,r,i,f,!1),p.closePath(),p.stroke()}function yt(t,n){var r=typeof t.highlightColor=="string"?t.highlightColor:e.color.parse(t.color).scale("a",.5).toString(),i=r,s=t.bars.align=="left"?0:-t.bars.barWidth/2;p.lineWidth=t.bars.lineWidth,p.strokeStyle=r,tt(n[0],n[1],n[2]||0,s,s+t.bars.barWidth,0,function(){return i},t.xaxis,t.yaxis,p,t.bars.horizontal,t.bars.lineWidth)}function bt(t,n,r,i){if(typeof t=="string")return t;var s=h.createLinearGradient(0,r,0,n);for(var o=0,u=t.colors.length;o<u;++o){var a=t.colors[o];if(typeof a!="string"){var f=e.color.parse(i);a.brightness!=null&&(f=f.scale("rgb",a.brightness)),a.opacity!=null&&(f.a*=a.opacity),a=f.toString()}s.addColorStop(o/(u-1),a)}return s}var 
u=[],a={colors:["#edc240","#afd8f8","#cb4b4b","#4da74d","#9440ed"],legend:{show:!0,noColumns:1,labelFormatter:null,labelBoxBorderColor:"#ccc",container:null,position:"ne",margin:5,backgroundColor:null,backgroundOpacity:.85,sorted:null},xaxis:{show:null,position:"bottom",mode:null,font:null,color:null,tickColor:null,transform:null,inverseTransform:null,min:null,max:null,autoscaleMargin:null,ticks:null,tickFormatter:null,labelWidth:null,labelHeight:null,reserveSpace:null,tickLength:null,alignTicksWithAxis:null,tickDecimals:null,tickSize:null,minTickSize:null},yaxis:{autoscaleMargin:.02,position:"left"},xaxes:[],yaxes:[],series:{points:{show:!1,radius:3,lineWidth:2,fill:!0,fillColor:"#ffffff",symbol:"circle"},lines:{lineWidth:2,fill:!1,fillColor:null,steps:!1},bars:{show:!1,lineWidth:2,barWidth:1,fill:!0,fillColor:null,align:"left",horizontal:!1,zero:!0},shadowSize:3,highlightColor:null},grid:{show:!0,aboveData:!1,color:"#545454",backgroundColor:null,borderColor:null,tickColor:null,margin:0,labelMargin:5,axisMargin:8,borderWidth:2,minBorderMargin:null,markings:null,markingsColor:"#f4f4f4",markingsLineWidth:2,clickable:!1,hoverable:!1,autoHighlight:!0,mouseActiveRadius:10},interaction:{redrawOverlayInterval:1e3/60},hooks:{}},f=null,l=null,c=null,h=null,p=null,d=[],v=[],m={left:0,right:0,top:0,bottom
+:0},g=0,y=0,b={processOptions:[],processRawData:[],processDatapoints:[],processOffset:[],drawBackground:[],drawSeries:[],draw:[],bindEvents:[],drawOverlay:[],shutdown:[]},w=this;w.setData=T,w.setupGrid=R,w.draw=V,w.getPlaceholder=function(){return t},w.getCanvas=function(){return f.element},w.getPlotOffset=function(){return m},w.width=function(){return g},w.height=function(){return y},w.offset=function(){var e=c.offset();return e.left+=m.left,e.top+=m.top,e},w.getData=function(){return u},w.getAxes=function(){var t={},n;return e.each(d.concat(v),function(e,n){n&&(t[n.direction+(n.n!=1?n.n:"")+"axis"]=n)}),t},w.getXAxes=function(){return d},w.getYAxes=function(){return v},w.c2p=L,w.p2c=A,w.getOptions=function(){return a},w.highlight=dt,w.unhighlight=vt,w.triggerRedrawOverlay=ht,w.pointOffset=function(e){return{left:parseInt(d[C(e,"x")-1].p2c(+e.x)+m.left,10),top:parseInt(v[C(e,"y")-1].p2c(+e.y)+m.top,10)}},w.shutdown=H,w.resize=function(){var e=t.width(),n=t.height();f.resize(e,n),l.resize(e,n)},w.hooks=b,S(w),x(s),D(),T(r),R(),V(),P();var st=[],ot=null}function i(e,t){return t*Math.floor(e/t)}var t=Object.prototype.hasOwnProperty;n.prototype.resize=function(e,t){if(e<=0||t<=0)throw new Error("Invalid dimensions for plot, width = "+e+", height = "+t);var n=this.element,r=this.context,i=this.pixelRatio;this.width!=e&&(n.width=e*i,n.style.width=e+"px",this.width=e),this.height!=t&&(n.height=t*i,n.style.height=t+"px",this.height=t),r.restore(),r.save(),r.scale(i,i)},n.prototype.clear=function(){this.context.clearRect(0,0,this.width,this.height)},n.prototype.render=function(){var e=this._textCache;for(var n in e)if(t.call(e,n)){var r=this.getTextLayer(n),i=e[n];r.hide();for(var s in i)if(t.call(i,s)){var o=i[s];for(var u in o)if(t.call(o,u)){var a=o[u].positions;for(var f=0,l;l=a[f];f++)l.active?l.rendered||(r.append(l.element),l.rendered=!0):(a.splice(f--,1),l.rendered&&l.element.detach());a.length==0&&delete o[u]}}r.show()}},n.prototype.getTextLayer=function(t){var 
n=this.text[t];return n==null&&(this.textContainer==null&&(this.textContainer=e("<div class='flot-text'></div>").css({position:"absolute",top:0,left:0,bottom:0,right:0,"font-size":"smaller",color:"#545454"}).insertAfter(this.element)),n=this.text[t]=e("<div></div>").addClass(t).css({position:"absolute",top:0,left:0,bottom:0,right:0}).appendTo(this.textContainer)),n},n.prototype.getTextInfo=function(t,n,r,i,s){var o,u,a,f;n=""+n,typeof r=="object"?o=r.style+" "+r.variant+" "+r.weight+" "+r.size+"px/"+r.lineHeight+"px "+r.family:o=r,u=this._textCache[t],u==null&&(u=this._textCache[t]={}),a=u[o],a==null&&(a=u[o]={}),f=a[n];if(f==null){var l=e("<div></div>").html(n).css({position:"absolute","max-width":s,top:-9999}).appendTo(this.getTextLayer(t));typeof r=="object"?l.css({font:o,color:r.color}):typeof r=="string"&&l.addClass(r),f=a[n]={width:l.outerWidth(!0),height:l.outerHeight(!0),element:l,positions:[]},l.detach()}return f},n.prototype.addText=function(e,t,n,r,i,s,o,u,a){var f=this.getTextInfo(e,r,i,s,o),l=f.positions;u=="center"?t-=f.width/2:u=="right"&&(t-=f.width),a=="middle"?n-=f.height/2:a=="bottom"&&(n-=f.height);for(var c=0,h;h=l[c];c++)if(h.x==t&&h.y==n){h.active=!0;return}h={active:!0,rendered:!1,element:l.length?f.element.clone():f.element,x:t,y:n},l.push(h),h.element.css({top:Math.round(n),left:Math.round(t),"text-align":u})},n.prototype.removeText=function(e,n,r,i,s,o){if(i==null){var u=this._textCache[e];if(u!=null)for(var a in u)if(t.call(u,a)){var f=u[a];for(var l in f)if(t.call(f,l)){var c=f[l].positions;for(var h=0,p;p=c[h];h++)p.active=!1}}}else{var c=this.getTextInfo(e,i,s,o).positions;for(var h=0,p;p=c[h];h++)p.x==n&&p.y==r&&(p.active=!1)}},e.plot=function(t,n,i){var s=new r(e(t),n,i,e.plot.plugins);return s},e.plot.version="0.8.1",e.plot.plugins=[],e.fn.plot=function(t,n){return this.each(function(){e.plot(this,t,n)})}}(jQuery);
\ No newline at end of file
--- /dev/null
+/* Pretty handling of time axes.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+Set axis.mode to "time" to enable. See the section "Time series data" in
+API.txt for details.
+
+*/
+
+(function($) {
+
+ // Default options contributed by this plugin (merged per-axis by Flot).
+ // Note: formatDate also honours an opts.dayNames list, although no
+ // default for it is declared here.
+ var options = {
+ xaxis: {
+ timezone: null, // "browser" for local to the client or timezone for timezone-js
+ timeformat: null, // format string to use
+ twelveHourClock: false, // 12 or 24 time in time mode
+ monthNames: null // list of names of months
+ }
+ };
+
+ // round to nearby lower multiple of base
+
+ function floorInBase(n, base) {
+ return base * Math.floor(n / base);
+ }
+
+ // Returns a string with the date d formatted according to fmt.
+ // A subset of the Open Group's strftime format is supported.
+
+ function formatDate(d, fmt, monthNames, dayNames) {
+
+ if (typeof d.strftime == "function") {
+ return d.strftime(fmt);
+ }
+
+ var leftPad = function(n, pad) {
+ n = "" + n;
+ pad = "" + (pad == null ? "0" : pad);
+ return n.length == 1 ? pad + n : n;
+ };
+
+ var r = [];
+ var escape = false;
+ var hours = d.getHours();
+ var isAM = hours < 12;
+
+ if (monthNames == null) {
+ monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
+ }
+
+ if (dayNames == null) {
+ dayNames = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
+ }
+
+ var hours12;
+
+ if (hours > 12) {
+ hours12 = hours - 12;
+ } else if (hours == 0) {
+ hours12 = 12;
+ } else {
+ hours12 = hours;
+ }
+
+ for (var i = 0; i < fmt.length; ++i) {
+
+ var c = fmt.charAt(i);
+
+ if (escape) {
+ switch (c) {
+ case 'a': c = "" + dayNames[d.getDay()]; break;
+ case 'b': c = "" + monthNames[d.getMonth()]; break;
+ case 'd': c = leftPad(d.getDate()); break;
+ case 'e': c = leftPad(d.getDate(), " "); break;
+ case 'h': // For back-compat with 0.7; remove in 1.0
+ case 'H': c = leftPad(hours); break;
+ case 'I': c = leftPad(hours12); break;
+ case 'l': c = leftPad(hours12, " "); break;
+ case 'm': c = leftPad(d.getMonth() + 1); break;
+ case 'M': c = leftPad(d.getMinutes()); break;
+ // quarters not in Open Group's strftime specification
+ case 'q':
+ c = "" + (Math.floor(d.getMonth() / 3) + 1); break;
+ case 'S': c = leftPad(d.getSeconds()); break;
+ case 'y': c = leftPad(d.getFullYear() % 100); break;
+ case 'Y': c = "" + d.getFullYear(); break;
+ case 'p': c = (isAM) ? ("" + "am") : ("" + "pm"); break;
+ case 'P': c = (isAM) ? ("" + "AM") : ("" + "PM"); break;
+ case 'w': c = "" + d.getDay(); break;
+ }
+ r.push(c);
+ escape = false;
+ } else {
+ if (c == "%") {
+ escape = true;
+ } else {
+ r.push(c);
+ }
+ }
+ }
+
+ return r.join("");
+ }
+
+ // To have a consistent view of time-based data independent of which time
+ // zone the client happens to be in we need a date-like object independent
+ // of time zones. This is done through a wrapper that only calls the UTC
+ // versions of the accessor methods.
+
+ function makeUtcWrapper(d) {
+
+ function addProxyMethod(sourceObj, sourceMethod, targetObj, targetMethod) {
+ sourceObj[sourceMethod] = function() {
+ return targetObj[targetMethod].apply(targetObj, arguments);
+ };
+ };
+
+ var utc = {
+ date: d
+ };
+
+ // support strftime, if found
+
+ if (d.strftime != undefined) {
+ addProxyMethod(utc, "strftime", d, "strftime");
+ }
+
+ addProxyMethod(utc, "getTime", d, "getTime");
+ addProxyMethod(utc, "setTime", d, "setTime");
+
+ var props = ["Date", "Day", "FullYear", "Hours", "Milliseconds", "Minutes", "Month", "Seconds"];
+
+ for (var p = 0; p < props.length; p++) {
+ addProxyMethod(utc, "get" + props[p], d, "getUTC" + props[p]);
+ addProxyMethod(utc, "set" + props[p], d, "setUTC" + props[p]);
+ }
+
+ return utc;
+ };
+
+ // select time zone strategy. This returns a date-like object tied to the
+ // desired timezone
+
+ function dateGenerator(ts, opts) {
+ if (opts.timezone == "browser") {
+ return new Date(ts);
+ } else if (!opts.timezone || opts.timezone == "utc") {
+ return makeUtcWrapper(new Date(ts));
+ } else if (typeof timezoneJS != "undefined" && typeof timezoneJS.Date != "undefined") {
+ var d = new timezoneJS.Date();
+ // timezone-js is fickle, so be sure to set the time zone before
+ // setting the time.
+ d.setTimezone(opts.timezone);
+ d.setTime(ts);
+ return d;
+ } else {
+ return makeUtcWrapper(new Date(ts));
+ }
+ }
+
+ // map of app. size of time units in milliseconds
+ // (month/quarter/year are nominal sizes — 30 days, 90 days and the mean
+ // Gregorian year — used only for choosing tick spacing, not arithmetic)
+
+ var timeUnitSize = {
+ "second": 1000,
+ "minute": 60 * 1000,
+ "hour": 60 * 60 * 1000,
+ "day": 24 * 60 * 60 * 1000,
+ "month": 30 * 24 * 60 * 60 * 1000,
+ "quarter": 3 * 30 * 24 * 60 * 60 * 1000,
+ "year": 365.2425 * 24 * 60 * 60 * 1000
+ };
+
+ // the allowed tick sizes, after 1 year we use
+ // an integer algorithm
+
+ var baseSpec = [
+ [1, "second"], [2, "second"], [5, "second"], [10, "second"],
+ [30, "second"],
+ [1, "minute"], [2, "minute"], [5, "minute"], [10, "minute"],
+ [30, "minute"],
+ [1, "hour"], [2, "hour"], [4, "hour"],
+ [8, "hour"], [12, "hour"],
+ [1, "day"], [2, "day"], [3, "day"],
+ [0.25, "month"], [0.5, "month"], [1, "month"],
+ [2, "month"]
+ ];
+
+ // we don't know which variant(s) we'll need yet, but generating both is
+ // cheap
+
+ var specMonths = baseSpec.concat([[3, "month"], [6, "month"],
+ [1, "year"]]);
+ var specQuarters = baseSpec.concat([[1, "quarter"], [2, "quarter"],
+ [1, "year"]]);
+
+ // Plugin entry point, run once per plot via the processOptions hook:
+ // for every axis whose mode is "time", install a tickGenerator that
+ // chooses human-friendly time steps and a tickFormatter that renders
+ // the ticks with an appropriate date format.
+ function init(plot) {
+ plot.hooks.processOptions.push(function (plot, options) {
+ $.each(plot.getAxes(), function(axisName, axis) {
+
+ var opts = axis.options;
+
+ if (opts.mode == "time") {
+ axis.tickGenerator = function(axis) {
+
+ var ticks = [];
+ var d = dateGenerator(axis.min, opts);
+ var minSize = 0;
+
+ // make quarter use a possibility if quarters are
+ // mentioned in either of these options
+
+ var spec = (opts.tickSize && opts.tickSize[1] ===
+ "quarter") ||
+ (opts.minTickSize && opts.minTickSize[1] ===
+ "quarter") ? specQuarters : specMonths;
+
+ if (opts.minTickSize != null) {
+ if (typeof opts.tickSize == "number") {
+ minSize = opts.tickSize;
+ } else {
+ minSize = opts.minTickSize[0] * timeUnitSize[opts.minTickSize[1]];
+ }
+ }
+
+ for (var i = 0; i < spec.length - 1; ++i) {
+ if (axis.delta < (spec[i][0] * timeUnitSize[spec[i][1]]
+ + spec[i + 1][0] * timeUnitSize[spec[i + 1][1]]) / 2
+ && spec[i][0] * timeUnitSize[spec[i][1]] >= minSize) {
+ break;
+ }
+ }
+
+ // note: i deliberately survives the loop (var scoping);
+ // if no entry was small enough, the largest spec is used
+ var size = spec[i][0];
+ var unit = spec[i][1];
+
+ // special-case the possibility of several years
+
+ if (unit == "year") {
+
+ // if given a minTickSize in years, just use it,
+ // ensuring that it's an integer
+
+ if (opts.minTickSize != null && opts.minTickSize[1] == "year") {
+ size = Math.floor(opts.minTickSize[0]);
+ } else {
+
+ var magn = Math.pow(10, Math.floor(Math.log(axis.delta / timeUnitSize.year) / Math.LN10));
+ var norm = (axis.delta / timeUnitSize.year) / magn;
+
+ if (norm < 1.5) {
+ size = 1;
+ } else if (norm < 3) {
+ size = 2;
+ } else if (norm < 7.5) {
+ size = 5;
+ } else {
+ size = 10;
+ }
+
+ size *= magn;
+ }
+
+ // minimum size for years is 1
+
+ if (size < 1) {
+ size = 1;
+ }
+ }
+
+ axis.tickSize = opts.tickSize || [size, unit];
+ var tickSize = axis.tickSize[0];
+ unit = axis.tickSize[1];
+
+ var step = tickSize * timeUnitSize[unit];
+
+ // snap the first tick down to a whole multiple of the
+ // tick size in the chosen unit
+ if (unit == "second") {
+ d.setSeconds(floorInBase(d.getSeconds(), tickSize));
+ } else if (unit == "minute") {
+ d.setMinutes(floorInBase(d.getMinutes(), tickSize));
+ } else if (unit == "hour") {
+ d.setHours(floorInBase(d.getHours(), tickSize));
+ } else if (unit == "month") {
+ d.setMonth(floorInBase(d.getMonth(), tickSize));
+ } else if (unit == "quarter") {
+ d.setMonth(3 * floorInBase(d.getMonth() / 3,
+ tickSize));
+ } else if (unit == "year") {
+ d.setFullYear(floorInBase(d.getFullYear(), tickSize));
+ }
+
+ // reset smaller components
+
+ d.setMilliseconds(0);
+
+ if (step >= timeUnitSize.minute) {
+ d.setSeconds(0);
+ }
+ if (step >= timeUnitSize.hour) {
+ d.setMinutes(0);
+ }
+ if (step >= timeUnitSize.day) {
+ d.setHours(0);
+ }
+ if (step >= timeUnitSize.day * 4) {
+ d.setDate(1);
+ }
+ if (step >= timeUnitSize.month * 2) {
+ d.setMonth(floorInBase(d.getMonth(), 3));
+ }
+ if (step >= timeUnitSize.quarter * 2) {
+ d.setMonth(floorInBase(d.getMonth(), 6));
+ }
+ if (step >= timeUnitSize.year) {
+ d.setMonth(0);
+ }
+
+ var carry = 0;
+ var v = Number.NaN;
+ var prev;
+
+ do {
+
+ prev = v;
+ v = d.getTime();
+ ticks.push(v);
+
+ if (unit == "month" || unit == "quarter") {
+ if (tickSize < 1) {
+
+ // a bit complicated - we'll divide the
+ // month/quarter up but we need to take
+ // care of fractions so we don't end up in
+ // the middle of a day
+
+ d.setDate(1);
+ var start = d.getTime();
+ d.setMonth(d.getMonth() +
+ (unit == "quarter" ? 3 : 1));
+ var end = d.getTime();
+ d.setTime(v + carry * timeUnitSize.hour + (end - start) * tickSize);
+ carry = d.getHours();
+ d.setHours(0);
+ } else {
+ d.setMonth(d.getMonth() +
+ tickSize * (unit == "quarter" ? 3 : 1));
+ }
+ } else if (unit == "year") {
+ d.setFullYear(d.getFullYear() + tickSize);
+ } else {
+ d.setTime(v + step);
+ }
+ } while (v < axis.max && v != prev);
+
+ return ticks;
+ };
+
+ axis.tickFormatter = function (v, axis) {
+
+ var d = dateGenerator(v, axis.options);
+
+ // first check global format
+
+ if (opts.timeformat != null) {
+ return formatDate(d, opts.timeformat, opts.monthNames, opts.dayNames);
+ }
+
+ // possibly use quarters if quarters are mentioned in
+ // any of these places
+
+ var useQuarters = (axis.options.tickSize &&
+ axis.options.tickSize[1] == "quarter") ||
+ (axis.options.minTickSize &&
+ axis.options.minTickSize[1] == "quarter");
+
+ var t = axis.tickSize[0] * timeUnitSize[axis.tickSize[1]];
+ var span = axis.max - axis.min;
+ var suffix = (opts.twelveHourClock) ? " %p" : "";
+ var hourCode = (opts.twelveHourClock) ? "%I" : "%H";
+ var fmt;
+
+ // pick a format appropriate to the tick size and the
+ // overall span of the axis
+ if (t < timeUnitSize.minute) {
+ fmt = hourCode + ":%M:%S" + suffix;
+ } else if (t < timeUnitSize.day) {
+ if (span < 2 * timeUnitSize.day) {
+ fmt = hourCode + ":%M" + suffix;
+ } else {
+ fmt = "%b %d " + hourCode + ":%M" + suffix;
+ }
+ } else if (t < timeUnitSize.month) {
+ fmt = "%b %d";
+ } else if ((useQuarters && t < timeUnitSize.quarter) ||
+ (!useQuarters && t < timeUnitSize.year)) {
+ if (span < timeUnitSize.year) {
+ fmt = "%b";
+ } else {
+ fmt = "%b %Y";
+ }
+ } else if (useQuarters && t < timeUnitSize.year) {
+ if (span < timeUnitSize.year) {
+ fmt = "Q%q";
+ } else {
+ fmt = "Q%q %Y";
+ }
+ } else {
+ fmt = "%Y";
+ }
+
+ var rt = formatDate(d, fmt, opts.monthNames, opts.dayNames);
+
+ return rt;
+ };
+ }
+ });
+ });
+ }
+
+ // Register with Flot so init runs for every new plot.
+ $.plot.plugins.push({
+ init: init,
+ options: options,
+ name: 'time',
+ version: '1.0'
+ });
+
+ // Time-axis support used to be in Flot core, which exposed the
+ // formatDate function on the plot object. Various plugins depend
+ // on the function, so we need to re-expose it here.
+
+ $.plot.formatDate = formatDate;
+
+})(jQuery);
--- /dev/null
+/* Pretty handling of time axes.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+Set axis.mode to "time" to enable. See the section "Time series data" in
+API.txt for details.
+
+NOTE: this is the minified build of jquery.flot.time.js; make changes in
+the readable copy and re-minify rather than editing this file directly.
+
+*/(function(e){function n(e,t){return t*Math.floor(e/t)}function r(e,t,n,r){if(typeof e.strftime=="function")return e.strftime(t);var i=function(e,t){return e=""+e,t=""+(t==null?"0":t),e.length==1?t+e:e},s=[],o=!1,u=e.getHours(),a=u<12;n==null&&(n=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]),r==null&&(r=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"]);var f;u>12?f=u-12:u==0?f=12:f=u;for(var l=0;l<t.length;++l){var c=t.charAt(l);if(o){switch(c){case"a":c=""+r[e.getDay()];break;case"b":c=""+n[e.getMonth()];break;case"d":c=i(e.getDate());break;case"e":c=i(e.getDate()," ");break;case"h":case"H":c=i(u);break;case"I":c=i(f);break;case"l":c=i(f," ");break;case"m":c=i(e.getMonth()+1);break;case"M":c=i(e.getMinutes());break;case"q":c=""+(Math.floor(e.getMonth()/3)+1);break;case"S":c=i(e.getSeconds());break;case"y":c=i(e.getFullYear()%100);break;case"Y":c=""+e.getFullYear();break;case"p":c=a?"am":"pm";break;case"P":c=a?"AM":"PM";break;case"w":c=""+e.getDay()}s.push(c),o=!1}else c=="%"?o=!0:s.push(c)}return s.join("")}function i(e){function t(e,t,n,r){e[t]=function(){return n[r].apply(n,arguments)}}var n={date:e};e.strftime!=undefined&&t(n,"strftime",e,"strftime"),t(n,"getTime",e,"getTime"),t(n,"setTime",e,"setTime");var r=["Date","Day","FullYear","Hours","Milliseconds","Minutes","Month","Seconds"];for(var i=0;i<r.length;i++)t(n,"get"+r[i],e,"getUTC"+r[i]),t(n,"set"+r[i],e,"setUTC"+r[i]);return n}function s(e,t){if(t.timezone=="browser")return new Date(e);if(!t.timezone||t.timezone=="utc")return i(new Date(e));if(typeof timezoneJS!="undefined"&&typeof timezoneJS.Date!="undefined"){var n=new timezoneJS.Date;return n.setTimezone(t.timezone),n.setTime(e),n}return i(new Date(e))}function l(t){t.hooks.processOptions.push(function(t,i){e.each(t.getAxes(),function(e,t){var i=t.options;i.mode=="time"&&(t.tickGenerator=function(e){var 
t=[],r=s(e.min,i),u=0,l=i.tickSize&&i.tickSize[1]==="quarter"||i.minTickSize&&i.minTickSize[1]==="quarter"?f:a;i.minTickSize!=null&&(typeof i.tickSize=="number"?u=i.tickSize:u=i.minTickSize[0]*o[i.minTickSize[1]]);for(var c=0;c<l.length-1;++c)if(e.delta<(l[c][0]*o[l[c][1]]+l[c+1][0]*o[l[c+1][1]])/2&&l[c][0]*o[l[c][1]]>=u)break;var h=l[c][0],p=l[c][1];if(p=="year"){if(i.minTickSize!=null&&i.minTickSize[1]=="year")h=Math.floor(i.minTickSize[0]);else{var d=Math.pow(10,Math.floor(Math.log(e.delta/o.year)/Math.LN10)),v=e.delta/o.year/d;v<1.5?h=1:v<3?h=2:v<7.5?h=5:h=10,h*=d}h<1&&(h=1)}e.tickSize=i.tickSize||[h,p];var m=e.tickSize[0];p=e.tickSize[1];var g=m*o[p];p=="second"?r.setSeconds(n(r.getSeconds(),m)):p=="minute"?r.setMinutes(n(r.getMinutes(),m)):p=="hour"?r.setHours(n(r.getHours(),m)):p=="month"?r.setMonth(n(r.getMonth(),m)):p=="quarter"?r.setMonth(3*n(r.getMonth()/3,m)):p=="year"&&r.setFullYear(n(r.getFullYear(),m)),r.setMilliseconds(0),g>=o.minute&&r.setSeconds(0),g>=o.hour&&r.setMinutes(0),g>=o.day&&r.setHours(0),g>=o.day*4&&r.setDate(1),g>=o.month*2&&r.setMonth(n(r.getMonth(),3)),g>=o.quarter*2&&r.setMonth(n(r.getMonth(),6)),g>=o.year&&r.setMonth(0);var y=0,b=Number.NaN,w;do{w=b,b=r.getTime(),t.push(b);if(p=="month"||p=="quarter")if(m<1){r.setDate(1);var E=r.getTime();r.setMonth(r.getMonth()+(p=="quarter"?3:1));var S=r.getTime();r.setTime(b+y*o.hour+(S-E)*m),y=r.getHours(),r.setHours(0)}else r.setMonth(r.getMonth()+m*(p=="quarter"?3:1));else p=="year"?r.setFullYear(r.getFullYear()+m):r.setTime(b+g)}while(b<e.max&&b!=w);return t},t.tickFormatter=function(e,t){var n=s(e,t.options);if(i.timeformat!=null)return r(n,i.timeformat,i.monthNames,i.dayNames);var u=t.options.tickSize&&t.options.tickSize[1]=="quarter"||t.options.minTickSize&&t.options.minTickSize[1]=="quarter",a=t.tickSize[0]*o[t.tickSize[1]],f=t.max-t.min,l=i.twelveHourClock?" 
%p":"",c=i.twelveHourClock?"%I":"%H",h;a<o.minute?h=c+":%M:%S"+l:a<o.day?f<2*o.day?h=c+":%M"+l:h="%b %d "+c+":%M"+l:a<o.month?h="%b %d":u&&a<o.quarter||!u&&a<o.year?f<o.year?h="%b":h="%b %Y":u&&a<o.year?f<o.year?h="Q%q":h="Q%q %Y":h="%Y";var p=r(n,h,i.monthNames,i.dayNames);return p})})})}var t={xaxis:{timezone:null,timeformat:null,twelveHourClock:!1,monthNames:null}},o={second:1e3,minute:6e4,hour:36e5,day:864e5,month:2592e6,quarter:7776e6,year:525949.2*60*1e3},u=[[1,"second"],[2,"second"],[5,"second"],[10,"second"],[30,"second"],[1,"minute"],[2,"minute"],[5,"minute"],[10,"minute"],[30,"minute"],[1,"hour"],[2,"hour"],[4,"hour"],[8,"hour"],[12,"hour"],[1,"day"],[2,"day"],[3,"day"],[.25,"month"],[.5,"month"],[1,"month"],[2,"month"]],a=u.concat([[3,"month"],[6,"month"],[1,"year"]]),f=u.concat([[1,"quarter"],[2,"quarter"],[1,"year"]]);e.plot.plugins.push({init:l,options:t,name:"time",version:"1.0"}),e.plot.formatDate=r})(jQuery);
\ No newline at end of file
--- /dev/null
+/*
+ http://www.JSON.org/json2.js
+ 2010-08-25
+
+ Public Domain.
+
+ NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+ See http://www.JSON.org/js.html
+
+
+ This code should be minified before deployment.
+ See http://javascript.crockford.com/jsmin.html
+
+ USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
+ NOT CONTROL.
+
+
+ This file creates a global JSON object containing two methods: stringify
+ and parse.
+
+ JSON.stringify(value, replacer, space)
+ value any JavaScript value, usually an object or array.
+
+ replacer an optional parameter that determines how object
+ values are stringified for objects. It can be a
+ function or an array of strings.
+
+ space an optional parameter that specifies the indentation
+ of nested structures. If it is omitted, the text will
+ be packed without extra whitespace. If it is a number,
+ it will specify the number of spaces to indent at each
+ level. If it is a string (such as '\t' or ' '),
+ it contains the characters used to indent at each level.
+
+ This method produces a JSON text from a JavaScript value.
+
+ When an object value is found, if the object contains a toJSON
+ method, its toJSON method will be called and the result will be
+ stringified. A toJSON method does not serialize: it returns the
+ value represented by the name/value pair that should be serialized,
+ or undefined if nothing should be serialized. The toJSON method
+ will be passed the key associated with the value, and this will be
+ bound to the value
+
+ For example, this would serialize Dates as ISO strings.
+
+ Date.prototype.toJSON = function (key) {
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ return this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z';
+ };
+
+ You can provide an optional replacer method. It will be passed the
+ key and value of each member, with this bound to the containing
+ object. The value that is returned from your method will be
+ serialized. If your method returns undefined, then the member will
+ be excluded from the serialization.
+
+ If the replacer parameter is an array of strings, then it will be
+ used to select the members to be serialized. It filters the results
+ such that only members with keys listed in the replacer array are
+ stringified.
+
+ Values that do not have JSON representations, such as undefined or
+ functions, will not be serialized. Such values in objects will be
+ dropped; in arrays they will be replaced with null. You can use
+ a replacer function to replace those with JSON values.
+ JSON.stringify(undefined) returns undefined.
+
+ The optional space parameter produces a stringification of the
+ value that is filled with line breaks and indentation to make it
+ easier to read.
+
+ If the space parameter is a non-empty string, then that string will
+ be used for indentation. If the space parameter is a number, then
+ the indentation will be that many spaces.
+
+ Example:
+
+ text = JSON.stringify(['e', {pluribus: 'unum'}]);
+ // text is '["e",{"pluribus":"unum"}]'
+
+
+ text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t');
+ // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
+
+ text = JSON.stringify([new Date()], function (key, value) {
+ return this[key] instanceof Date ?
+ 'Date(' + this[key] + ')' : value;
+ });
+ // text is '["Date(---current time---)"]'
+
+
+ JSON.parse(text, reviver)
+ This method parses a JSON text to produce an object or array.
+ It can throw a SyntaxError exception.
+
+ The optional reviver parameter is a function that can filter and
+ transform the results. It receives each of the keys and values,
+ and its return value is used instead of the original value.
+ If it returns what it received, then the structure is not modified.
+ If it returns undefined then the member is deleted.
+
+ Example:
+
+ // Parse the text. Values that look like ISO date strings will
+ // be converted to Date objects.
+
+ myData = JSON.parse(text, function (key, value) {
+ var a;
+ if (typeof value === 'string') {
+ a =
+/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
+ if (a) {
+ return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
+ +a[5], +a[6]));
+ }
+ }
+ return value;
+ });
+
+ myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
+ var d;
+ if (typeof value === 'string' &&
+ value.slice(0, 5) === 'Date(' &&
+ value.slice(-1) === ')') {
+ d = new Date(value.slice(5, -1));
+ if (d) {
+ return d;
+ }
+ }
+ return value;
+ });
+
+
+ This is a reference implementation. You are free to copy, modify, or
+ redistribute.
+*/
+
+/*jslint evil: true, strict: false */
+
+/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply,
+ call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
+ getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
+ lastIndex, length, parse, prototype, push, replace, slice, stringify,
+ test, toJSON, toString, valueOf
+*/
+
+
+// Create a JSON object only if one does not already exist. We create the
+// methods in a closure to avoid creating global variables.
+// ("this" at top level is the global object — window in browsers.)
+
+if (!this.JSON) {
+ this.JSON = {};
+}
+
+(function () {
+
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ // Patch in ES5-style toJSON methods where the environment lacks them
+ // (modern engines provide Date.prototype.toJSON natively).
+ if (typeof Date.prototype.toJSON !== 'function') {
+
+ Date.prototype.toJSON = function (key) {
+
+ // Dates serialize as ISO 8601 UTC; invalid dates become null.
+ return isFinite(this.valueOf()) ?
+ this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z' : null;
+ };
+
+ // Wrapper objects serialize as their primitive values.
+ String.prototype.toJSON =
+ Number.prototype.toJSON =
+ Boolean.prototype.toJSON = function (key) {
+ return this.valueOf();
+ };
+ }
+
+ // Shared serializer state: cx matches characters JSON.parse must
+ // sanitise before eval'ing; escapable matches characters quote() must
+ // escape when producing JSON text; gap/indent/rep carry stringify's
+ // pretty-printing and replacer settings into str().
+ var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+ escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+ gap,
+ indent,
+ meta = { // table of character substitutions
+ '\b': '\\b',
+ '\t': '\\t',
+ '\n': '\\n',
+ '\f': '\\f',
+ '\r': '\\r',
+ '"' : '\\"',
+ '\\': '\\\\'
+ },
+ rep;
+
+
+ function quote(string) {
+
+// If the string contains no control characters, no quote characters, and no
+// backslash characters, then we can safely slap some quotes around it.
+// Otherwise we must also replace the offending characters with safe escape
+// sequences.
+
+ escapable.lastIndex = 0;
+ return escapable.test(string) ?
+ '"' + string.replace(escapable, function (a) {
+ var c = meta[a];
+ return typeof c === 'string' ? c :
+ '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ }) + '"' :
+ '"' + string + '"';
+ }
+
+
+ // Serialize holder[key] to a JSON text fragment. Reads the module-level
+ // pretty-printing state (gap/indent) and replacer (rep) set up by
+ // JSON.stringify, and recurses for arrays and objects.
+ function str(key, holder) {
+
+// Produce a string from holder[key].
+
+ var i, // The loop counter.
+ k, // The member key.
+ v, // The member value.
+ length,
+ mind = gap,
+ partial,
+ value = holder[key];
+
+// If the value has a toJSON method, call it to obtain a replacement value.
+
+ if (value && typeof value === 'object' &&
+ typeof value.toJSON === 'function') {
+ value = value.toJSON(key);
+ }
+
+// If we were called with a replacer function, then call the replacer to
+// obtain a replacement value.
+
+ if (typeof rep === 'function') {
+ value = rep.call(holder, key, value);
+ }
+
+// What happens next depends on the value's type.
+
+ switch (typeof value) {
+ case 'string':
+ return quote(value);
+
+ case 'number':
+
+// JSON numbers must be finite. Encode non-finite numbers as null.
+
+ return isFinite(value) ? String(value) : 'null';
+
+ case 'boolean':
+ case 'null':
+
+// If the value is a boolean or null, convert it to a string. Note:
+// typeof null does not produce 'null'. The case is included here in
+// the remote chance that this gets fixed someday.
+
+ return String(value);
+
+// If the type is 'object', we might be dealing with an object or an array or
+// null.
+
+ case 'object':
+
+// Due to a specification blunder in ECMAScript, typeof null is 'object',
+// so watch out for that case.
+
+ if (!value) {
+ return 'null';
+ }
+
+// Make an array to hold the partial results of stringifying this object value.
+
+ gap += indent;
+ partial = [];
+
+// Is the value an array?
+
+ if (Object.prototype.toString.apply(value) === '[object Array]') {
+
+// The value is an array. Stringify every element. Use null as a placeholder
+// for non-JSON values.
+
+ length = value.length;
+ for (i = 0; i < length; i += 1) {
+ partial[i] = str(i, value) || 'null';
+ }
+
+// Join all of the elements together, separated with commas, and wrap them in
+// brackets.
+
+ v = partial.length === 0 ? '[]' :
+ gap ? '[\n' + gap +
+ partial.join(',\n' + gap) + '\n' +
+ mind + ']' :
+ '[' + partial.join(',') + ']';
+ gap = mind;
+ return v;
+ }
+
+// If the replacer is an array, use it to select the members to be stringified.
+
+ if (rep && typeof rep === 'object') {
+ length = rep.length;
+ for (i = 0; i < length; i += 1) {
+ k = rep[i];
+ if (typeof k === 'string') {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (gap ? ': ' : ':') + v);
+ }
+ }
+ }
+ } else {
+
+// Otherwise, iterate through all of the keys in the object.
+
+ for (k in value) {
+ if (Object.hasOwnProperty.call(value, k)) {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (gap ? ': ' : ':') + v);
+ }
+ }
+ }
+ }
+
+// Join all of the member texts together, separated with commas,
+// and wrap them in braces.
+
+ v = partial.length === 0 ? '{}' :
+ gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' +
+ mind + '}' : '{' + partial.join(',') + '}';
+ gap = mind;
+ return v;
+ }
+ }
+
+// If the JSON object does not yet have a stringify method, give it one.
+
+ if (typeof JSON.stringify !== 'function') {
+ JSON.stringify = function (value, replacer, space) {
+
+// The stringify method takes a value and an optional replacer, and an optional
+// space parameter, and returns a JSON text. The replacer can be a function
+// that can replace values, or an array of strings that will select the keys.
+// A default replacer method can be provided. Use of the space parameter can
+// produce text that is more easily readable.
+
+ var i;
+ // gap/indent/rep are module-level and consumed by str() below.
+ gap = '';
+ indent = '';
+
+// If the space parameter is a number, make an indent string containing that
+// many spaces.
+
+ if (typeof space === 'number') {
+ for (i = 0; i < space; i += 1) {
+ indent += ' ';
+ }
+
+// If the space parameter is a string, it will be used as the indent string.
+
+ } else if (typeof space === 'string') {
+ indent = space;
+ }
+
+// If there is a replacer, it must be a function or an array.
+// Otherwise, throw an error.
+
+ rep = replacer;
+ if (replacer && typeof replacer !== 'function' &&
+ (typeof replacer !== 'object' ||
+ typeof replacer.length !== 'number')) {
+ throw new Error('JSON.stringify');
+ }
+
+// Make a fake root object containing our value under the key of ''.
+// Return the result of stringifying the value.
+
+ return str('', {'': value});
+ };
+ }
+
+
+// If the JSON object does not yet have a parse method, give it one.
+
+ if (typeof JSON.parse !== 'function') {
+ JSON.parse = function (text, reviver) {
+
+// The parse method takes a text and an optional reviver function, and returns
+// a JavaScript value if the text is a valid JSON text.
+
+ var j;
+
+ function walk(holder, key) {
+
+// The walk method is used to recursively walk the resulting structure so
+// that modifications can be made.
+
+ var k, v, value = holder[key];
+ if (value && typeof value === 'object') {
+ for (k in value) {
+ if (Object.hasOwnProperty.call(value, k)) {
+ v = walk(value, k);
+ if (v !== undefined) {
+ value[k] = v;
+ } else {
+ delete value[k];
+ }
+ }
+ }
+ }
+ return reviver.call(holder, key, value);
+ }
+
+
+// Parsing happens in four stages. In the first stage, we replace certain
+// Unicode characters with escape sequences. JavaScript handles many characters
+// incorrectly, either silently deleting them, or treating them as line endings.
+
+ text = String(text);
+ cx.lastIndex = 0;
+ if (cx.test(text)) {
+ text = text.replace(cx, function (a) {
+ return '\\u' +
+ ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ });
+ }
+
+// In the second stage, we run the text against regular expressions that look
+// for non-JSON patterns. We are especially concerned with '()' and 'new'
+// because they can cause invocation, and '=' because it can cause mutation.
+// But just to be safe, we want to reject all unexpected forms.
+
+// We split the second stage into 4 regexp operations in order to work around
+// crippling inefficiencies in IE's and Safari's regexp engines. First we
+// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we
+// replace all simple value tokens with ']' characters. Third, we delete all
+// open brackets that follow a colon or comma or that begin the text. Finally,
+// we look to see that the remaining characters are only whitespace or ']' or
+// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval.
+
+ if (/^[\],:{}\s]*$/
+.test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@')
+.replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']')
+.replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
+
+// In the third stage we use the eval function to compile the text into a
+// JavaScript structure. The '{' operator is subject to a syntactic ambiguity
+// in JavaScript: it can begin a block or an object literal. We wrap the text
+// in parens to eliminate the ambiguity.
+
+// NOTE(review): validate-then-eval is the pre-ES5 fallback; engines with a
+// native JSON.parse never reach this code due to the typeof guard above.
+
+ j = eval('(' + text + ')');
+
+// In the optional fourth stage, we recursively walk the new structure, passing
+// each name/value pair to a reviver function for possible transformation.
+
+ return typeof reviver === 'function' ?
+ walk({'': j}, '') : j;
+ }
+
+// If the text is not JSON parseable, then a SyntaxError is thrown.
+
+ throw new SyntaxError('JSON.parse');
+ };
+ }
+}());
--- /dev/null
+// On DOM ready, show the login form and boot the login mini-app.
+$(document).ready(function() {
+ replace_content('outer', format('login', {}));
+ start_app_login();
+});
+
+function dispatcher_add(fun) {
+ dispatcher_modules.push(fun);
+ if (dispatcher_modules.length == extension_count) {
+ start_app();
+ }
+}
+
+function dispatcher() {
+ for (var i in dispatcher_modules) {
+ dispatcher_modules[i](this);
+ }
+}
+
+// Store the credential string (UTF-8, base64-encoded, then URI-encoded)
+// in the "auth" session cookie.
+// NOTE(review): no path/secure/expiry attributes are set — presumably
+// relies on the app being served from a single origin; confirm.
+function set_auth_cookie(userinfo) {
+ var b64 = b64_encode_utf8(userinfo);
+ document.cookie = 'auth=' + encodeURIComponent(b64);
+}
+
+function login_route () {
+ var userpass = '' + this.params['username'] + ':' + this.params['password'],
+ location = window.location.href,
+ hash = window.location.hash;
+ set_auth_cookie(decodeURIComponent(userpass));
+ location = location.substr(0, location.length - hash.length);
+ window.location.replace(location);
+ // because we change url, we don't need to hit check_login as
+ // we'll end up doing that at the bottom of start_app_login after
+ // we've changed url.
+}
+
+// Boot a minimal Sammy application that only knows the login routes,
+// then attempt a cookie-based login if an auth cookie already exists.
+function start_app_login() {
+ app = new Sammy.Application(function () {
+ this.put('#/login', function() {
+ // NOTE(review): username/password are intentionally left global
+ // (no var) — presumably read elsewhere; confirm before scoping.
+ username = this.params['username'];
+ password = this.params['password'];
+ set_auth_cookie(username + ':' + password);
+ check_login();
+ });
+ this.get('#/login/:username/:password', login_route)
+ });
+ app.run();
+ if (get_cookie('auth') != '') {
+ check_login();
+ }
+}
+
+// Ask the server who we are. On failure, expire the auth cookie and show
+// an error on the login form; on success, swap in the main layout and
+// initialise the UI.
+function check_login() {
+ user = JSON.parse(sync_get('/whoami'));
+ if (user == false) {
+ // presumably sync_get returns false when the request is rejected,
+ // so the parsed value is false here — TODO confirm in sync_get.
+ document.cookie = 'auth=; expires=Thu, 01 Jan 1970 00:00:00 GMT';
+ replace_content('login-status', '<p>Login failed</p>');
+ }
+ else {
+ replace_content('outer', format('layout', {}));
+ setup_global_vars();
+ setup_constant_events();
+ update_vhosts();
+ update_interval();
+ setup_extensions();
+ }
+}
+
+// (Re)start the main Sammy application with all extension dispatchers
+// installed, working around a Sammy/MSIE polling bug (see below).
+function start_app() {
+ if (app !== undefined) {
+ app.unload();
+ }
+ // Oh boy. Sammy uses various different methods to determine if
+ // the URL hash has changed. Unsurprisingly this is a native event
+ // in modern browsers, and falls back to an icky polling function
+ // in MSIE. But it looks like there's a bug. The polling function
+ // should get installed when the app is started. But it's guarded
+ // behind if (Sammy.HashLocationProxy._interval != null). And of
+ // course that's not specific to the application; it's pretty
+ // global. So we need to manually clear that in order for links to
+ // work in MSIE.
+ // Filed as https://github.com/quirkey/sammy/issues/171
+ //
+ // Note for when we upgrade: HashLocationProxy has become
+ // DefaultLocationProxy in later versions, but otherwise the issue
+ // remains.
+ Sammy.HashLocationProxy._interval = null;
+ app = new Sammy.Application(dispatcher);
+ app.run();
+ // Called unqualified, so "this" is the global (window) object here.
+ var url = this.location.toString();
+ if (url.indexOf('#') == -1) {
+ this.location = url + '#/';
+ }
+}
+
// Attach handlers for controls that live for the whole session: the
// refresh-interval selector and the vhost filter.
function setup_constant_events() {
    $('#update-every').change(function() {
        var interval = $(this).val();
        store_pref('interval', interval);
        if (interval == '')
            interval = null;    // '' means "do not auto-refresh"
        else
            interval = parseInt(interval);
        set_timer_interval(interval);
    });
    $('#show-vhost').change(function() {
        current_vhost = $(this).val();
        store_pref('vhost', current_vhost);
        update();
    });
    if (!vhosts_interesting) {
        $('#vhost-form').hide();
    }
}
+
// Refresh the vhost selector from the server; only show it when more
// than one vhost exists. Keeps current_vhost pointing at a vhost that
// still exists (option 0 is "all vhosts").
function update_vhosts() {
    var vhosts = JSON.parse(sync_get('/vhosts'));
    vhosts_interesting = vhosts.length > 1;
    if (vhosts_interesting)
        $('#vhost-form').show();
    else
        $('#vhost-form').hide();
    var select = $('#show-vhost').get(0);
    select.options.length = vhosts.length + 1;
    var index = 0;
    for (var i = 0; i < vhosts.length; i++) {
        var vhost = vhosts[i].name;
        select.options[i + 1] = new Option(vhost, vhost);
        if (vhost == current_vhost) index = i + 1;
    }
    select.selectedIndex = index;
    current_vhost = select.options[index].value;
    store_pref('vhost', current_vhost);
}

// Ask the server which management plugins are installed and load each
// one's JavaScript; extension_count lets dispatcher_add() know when
// all of them have registered.
function setup_extensions() {
    var extensions = JSON.parse(sync_get('/extensions'));
    extension_count = extensions.length;
    for (var i in extensions) {
        var extension = extensions[i];
        dynamic_load(extension.javascript);
    }
}
+
// Load an extension's JavaScript file by injecting a <script> tag into
// the document head.
function dynamic_load(filename) {
    var head = document.getElementsByTagName("head")[0];
    var script = document.createElement('script');
    script.setAttribute('type', 'text/javascript');
    script.setAttribute('src', 'js/' + filename);
    head.appendChild(script);
}
+
// Apply the stored refresh-interval preference: no pref means the
// 5-second default, '' means "never", anything else is ms. Also sync
// the dropdown's selection with the stored value.
function update_interval() {
    var intervalStr = get_pref('interval');
    var interval;

    if (intervalStr == null) interval = 5000;
    else if (intervalStr == '') interval = null;
    else interval = parseInt(intervalStr);

    if (isNaN(interval)) interval = null; // Prevent DoS if cookie malformed

    set_timer_interval(interval);

    var select = $('#update-every').get(0);
    var opts = select.options;
    for (var i = 0; i < opts.length; i++) {
        if (opts[i].value == intervalStr) {
            select.selectedIndex = i;
            break;
        }
    }
}

// Navigate the browser to the given URL.
function go_to(url) {
    this.location = url;
}
+
// Record the auto-refresh period (ms, or null for "do not refresh")
// and restart the timer accordingly.
function set_timer_interval(interval) {
    timer_interval = interval;
    reset_timer();
}

// Cancel any pending refresh timer and, if auto-refresh is enabled,
// schedule partial_update() every timer_interval ms.
function reset_timer() {
    clearInterval(timer);
    if (timer_interval != null) {
        // Pass the function itself rather than the string
        // 'partial_update()': the string form is evaluated like eval()
        // at every tick and breaks under minification/CSP.
        timer = setInterval(partial_update, timer_interval);
    }
}
+
// Fetch and render an on-demand (non-auto-refreshed) fragment, e.g.
// the per-node memory breakdown.
function update_manual(div, query) {
    var path;
    var template;
    if (query == 'memory') {
        path = current_reqs['node'] + '?memory=true';
        template = 'memory';
    }
    // NOTE(review): path/template stay undefined for any other query -
    // presumably 'memory' is the only manual query today; confirm
    // before adding more '.update-manual' elements.
    var data = JSON.parse(sync_get(path));

    replace_content(div, format(template, data));
    postprocess_partial();
}

// Remember what the current page needs (requests, template, navigation
// highlight) and draw it.
function render(reqs, template, highlight) {
    current_template = template;
    current_reqs = reqs;
    current_highlight = highlight;
    update();
}
+
// Full re-render of the main pane from the current page's requests.
function update() {
    replace_content('debug', '');
    clearInterval(timer);
    with_update(function(html) {
        update_navigation();
        replace_content('main', html);
        postprocess();
        postprocess_partial();
        render_charts();
        maybe_scroll();
        reset_timer();
    });
}

// Periodic refresh: re-render only the '.updatable' regions in place.
// Every 200 partial updates we force a full page reload instead.
function partial_update() {
    if ($('.updatable').length > 0) {
        if (update_counter >= 200) {
            update_counter = 0;
            full_refresh();
            return;
        }
        with_update(function(html) {
            update_counter++;
            // Render off-screen into #scratch, then swap each updatable
            // region's contents across pairwise.
            replace_content('scratch', html);
            var befores = $('#main .updatable');
            var afters = $('#scratch .updatable');
            if (befores.length != afters.length) {
                throw("before/after mismatch");
            }
            for (var i = 0; i < befores.length; i++) {
                $(befores[i]).empty().append($(afters[i]).contents());
            }
            replace_content('scratch', '');
            postprocess_partial();
            render_charts();
        });
    }
}
+
// Rebuild the top-level tab bar (l1) and, when the selected tab has
// children, the right-hand submenu (l2) from the NAVIGATION table,
// filtered by the current user's tags.
function update_navigation() {
    var l1 = '';
    var l2 = '';
    var descend = null;

    for (var k in NAVIGATION) {
        var val = NAVIGATION[k];
        // A tab links to the first child the user is allowed to see.
        var path = val;
        while (!leaf(path)) {
            path = first_showable_child(path);
        }
        var selected = false;
        if (contains_current_highlight(val)) {
            selected = true;
            if (!leaf(val)) {
                descend = nav(val);
            }
        }
        if (show(path)) {
            l1 += '<li><a href="' + nav(path) + '"' +
                (selected ? ' class="selected"' : '') + '>' + k + '</a></li>';
        }
    }

    if (descend) {
        l2 = obj_to_ul(descend);
        $('#main').addClass('with-rhs');
    }
    else {
        $('#main').removeClass('with-rhs');
    }

    replace_content('tabs', l1);
    replace_content('rhs', l2);
}
+
// NAVIGATION entries are [target, tag] pairs: the target is either a
// '#/...' URL (a leaf) or a nested table of further entries.

// Target (URL or subtree) of a navigation pair.
function nav(pair) {
    return pair[0];
}

// May the current user see this entry? (Its tag must be among the
// user's tags.)
function show(pair) {
    return jQuery.inArray(pair[1], user_tags) != -1;
}

// A leaf links straight to a URL; a non-leaf holds a subtree.
function leaf(pair) {
    return typeof(nav(pair)) == 'string';
}

// First child of a subtree that the user may see.
function first_showable_child(pair) {
    var items = pair[0];
    var ks = keys(items);
    for (var i = 0; i < ks.length; i++) {
        var child = items[ks[i]];
        if (show(child)) return child;
    }
    return items[ks[0]]; // We'll end up not showing it anyway
}
+
// Does this navigation entry (leaf or subtree) contain the currently
// highlighted page? Returns a proper boolean.
function contains_current_highlight(val) {
    if (leaf(val)) {
        return current_highlight == nav(val);
    }
    else {
        // Was 'b |= ...', which coerces the result to the number 0/1;
        // keep a real boolean (still visiting every child).
        var b = false;
        for (var k in val) {
            if (contains_current_highlight(val[k])) b = true;
        }
        return b;
    }
}
+
// Render a navigation subtree as a nested <ul>, marking the currently
// highlighted leaf as selected.
function obj_to_ul(val) {
    var res = '<ul>';
    for (var k in val) {
        var obj = val[k];
        if (show(obj)) {
            res += '<li>';
            if (leaf(obj)) {
                res += '<a href="' + nav(obj) + '"' +
                    (current_highlight == nav(obj) ? ' class="selected"' : '') +
                    '>' + k + '</a>';
            }
            else {
                res += obj_to_ul(nav(obj));
            }
            res += '</li>';
        }
    }
    return res + '</ul>';
}
+
// Reload the whole page, stashing the scroll position in a pref so
// maybe_scroll() can restore it afterwards.
function full_refresh() {
    store_pref('position', x_position() + ',' + y_position());
    location.reload();
}

// After a full_refresh(), restore (and forget) the saved scroll
// position.
function maybe_scroll() {
    var pos = get_pref('position');
    if (pos) {
        clear_pref('position');
        var xy = pos.split(",");
        window.scrollTo(parseInt(xy[0]), parseInt(xy[1]));
    }
}
+
// Horizontal scroll offset, with fallbacks for older IE (document
// element in standards mode, body in quirks mode). A 0 offset is falsy
// and falls through the chain, which still yields 0 overall.
function x_position() {
    return window.pageXOffset ||
        document.documentElement.scrollLeft ||
        document.body.scrollLeft;
}

// Vertical scroll offset; same fallback chain as x_position().
function y_position() {
    return window.pageYOffset ||
        document.documentElement.scrollTop ||
        document.body.scrollTop;
}
+
// Fetch everything the current page needs, render the page template
// with the results, hand the HTML to fun, then mark the status bar OK.
function with_update(fun) {
    with_reqs(apply_state(current_reqs), [], function(json) {
        json.statistics_level = statistics_level;
        var html = format(current_template, json);
        fun(html);
        update_status('ok');
    });
}
+
// Expand a page's request table into concrete API paths: append the
// current vhost where the request supports it, plus sort and
// chart-range query parameters.
function apply_state(reqs) {
    var reqs2 = {};
    for (var k in reqs) {    // 'var' added: k leaked as an implicit global
        var req = reqs[k];
        var options = {};
        if (typeof(req) == "object") {
            options = req.options;
            req = req.path;
        }
        var req2;
        if (options['vhost'] != undefined && current_vhost != '') {
            req2 = req + '/' + esc(current_vhost);
        }
        else {
            req2 = req;
        }

        var qs = [];
        if (options['sort'] != undefined && current_sort != null) {
            qs.push('sort=' + current_sort);
            qs.push('sort_reverse=' + current_sort_reverse);
        }
        if (options['ranges'] != undefined) {
            for (var i in options['ranges']) {    // 'var' added: i leaked as a global
                var type = options['ranges'][i];
                var range = get_pref('chart-range-' + type).split('|');
                // NOTE(review): prefix stays undefined if type matches
                // none of the three patterns below - presumably they
                // cover all range types used; confirm against callers.
                var prefix;
                if (type.substring(0, 8) == 'lengths-') {
                    prefix = 'lengths';
                }
                else if (type.substring(0, 10) == 'msg-rates-') {
                    prefix = 'msg_rates';
                }
                else if (type.substring(0, 11) == 'data-rates-') {
                    prefix = 'data_rates';
                }
                qs.push(prefix + '_age=' + parseInt(range[0]));
                qs.push(prefix + '_incr=' + parseInt(range[1]));
            }
        }
        qs = qs.join('&');
        if (qs != '') qs = '?' + qs;

        reqs2[k] = req2 + qs;
    }
    return reqs2;
}
+
// Show a transient popup of the given type ('info', 'warn', ...); any
// existing popup of the same type is dismissed first. mode 'fade'
// fades in/out, anything else slides.
function show_popup(type, text, mode) {
    var cssClass = '.form-popup-' + type;
    // Dismiss an existing popup of this type, removing it from the DOM
    // once the animation completes.
    function hide() {
        if (mode == 'fade') {
            $(cssClass).fadeOut(200, function() {
                $(this).remove();
            });
        }
        else {
            $(cssClass).slideUp(200, function() {
                $(this).remove();
            });
        }
    }

    hide();
    $('h1').after(format('error-popup', {'type': type, 'text': text}));
    if (mode == 'fade') {
        $(cssClass).fadeIn(200);
    }
    else {
        $(cssClass).center().slideDown(200);
    }
    $(cssClass + ' span').click(function () {
        $('.popup-owner').removeClass('popup-owner');
        hide();
    });
}
+
// Wire up event handlers that must be re-attached every time the #main
// pane is fully re-rendered by update().
function postprocess() {
    // Destructive actions get a confirmation dialog.
    $('form.confirm').submit(function() {
        return confirm("Are you sure? This object cannot be recovered " +
                       "after deletion.");
    });
    $('div.section h2, div.section-hidden h2').click(function() {
        toggle_visibility($(this));
    });
    // Generate ids for label/input pairs that lack them, so clicking a
    // label focuses its input.
    $('label').map(function() {
        if ($(this).attr('for') == '') {
            var id = 'auto-label-' + Math.floor(Math.random()*1000000000);
            var input = $(this).parents('tr').first().find('input, select');
            if (input.attr('id') == '') {
                $(this).attr('for', id);
                input.attr('id', id);
            }
        }
    });
    $('#download-definitions').click(function() {
        var path = 'api/definitions?download=' +
            esc($('#download-filename').val()) +
            '&auth=' + get_cookie('auth');
        window.location = path;
        setTimeout('app.run()');
        return false;
    });
    $('.update-manual').click(function() {
        update_manual($(this).attr('for'), $(this).attr('query'));
    });
    // NOTE(review): .die()/.live() were removed in jQuery 1.9; this
    // assumes the legacy jQuery bundled with the plugin.
    $('input, select').die();
    $('.multifield input').live('keyup', function() {
        update_multifields();
    });
    $('.multifield select').live('change', function() {
        update_multifields();
    });
    // Appearance selectors reveal only the form section matching the
    // selected alternative.
    $('.controls-appearance').change(function() {
        var params = $(this).get(0).options;
        var selected = $(this).val();

        for (i = 0; i < params.length; i++) {
            var param = params[i].value;
            if (param == selected) {
                $('#' + param + '-div').slideDown(100);
            } else {
                $('#' + param + '-div').slideUp(100);
            }
        }
    });
    setup_visibility();
    $('.help').die().live('click', function() {
        help($(this).attr('id'))
    });
    // Clicking a rate-options gadget toggles its popup; clicking a
    // different one moves popup ownership.
    $('.rate-options').die().live('click', function() {
        var remove = $('.popup-owner').length == 1 &&
            $('.popup-owner').get(0) == $(this).get(0);
        $('.popup-owner').removeClass('popup-owner');
        if (remove) {
            $('.form-popup-rate-options').fadeOut(200, function() {
                $(this).remove();
            });
        }
        else {
            $(this).addClass('popup-owner');
            show_popup('rate-options', format('rate-options', {span: $(this)}),
                       'fade');
        }
    });
    $('input, select').live('focus', function() {
        update_counter = 0; // If there's interaction, reset the counter.
    });
    $('.tag-link').click(function() {
        $('#tags').val($(this).attr('tag'));
    });
    $('form.auto-submit select, form.auto-submit input').live('click', function(){
        $(this).parents('form').submit();
    });
    $('#filter').die().live('keyup', debounce(update_filter, 500));
    $('#filter-regex-mode').change(update_filter_regex_mode);
    $('#truncate').die().live('keyup', debounce(update_truncate, 500));
    if (! user_administrator) {
        $('.administrator-only').remove();
    }
    update_multifields();
}
+
// Handlers that must be re-attached after every partial update (the
// elements they bind to live inside '.updatable' regions).
function postprocess_partial() {
    $('.sort').click(function() {
        var sort = $(this).attr('sort');
        if (current_sort == sort) {
            // Clicking the current sort column flips the direction.
            current_sort_reverse = ! current_sort_reverse;
        }
        else {
            current_sort = sort;
            current_sort_reverse = false;
        }
        update();
    });
    $('.help').html('(?)');
    // TODO remove this hack when we get rid of "updatable"
    if ($('#filter-warning-show').length > 0) {
        $('#filter-truncate').addClass('filter-warning');
    }
    else {
        $('#filter-truncate').removeClass('filter-warning');
    }
}
+
// Re-layout every multifield editor on the page.
function update_multifields() {
    $('div.multifield').each(function(index) {
        update_multifield($(this), true);
    });
}

// Maintain one multifield editor. Rows are inputs named
// <id>_<n>_mfkey / _mfvalue / _mftype. 'list'-typed rows get a nested
// sub-multifield; surplus empty rows are pruned to one, and a fresh
// empty row is appended when none remains. dict=true renders
// "key = value" rows, dict=false just values.
function update_multifield(multifield, dict) {
    var largest_id = 0;
    var empty_found = false;
    var name = multifield.attr('id');
    $('#' + name + ' *[name$="_mftype"]').each(function(index) {
        var re = new RegExp(name + '_([0-9]*)_mftype');
        var match = $(this).attr('name').match(re);
        if (!match) return;
        var id = parseInt(match[1]);
        largest_id = Math.max(id, largest_id);
        var prefix = name + '_' + id;
        var type = $(this).val();
        var input = $('#' + prefix + '_mfvalue');
        if (type == 'list') {
            // Swap the scalar value input for a nested editor.
            if (input.size() == 1) {
                input.replaceWith('<div class="multifield-sub" id="' + prefix +
                                  '"></div>');
            }
            update_multifield($('#' + prefix), false);
        }
        else {
            if (input.size() == 1) {
                var key = dict ? $('#' + prefix + '_mfkey').val() : '';
                var value = input.val();
                if (key == '' && value == '') {
                    // Keep at most one trailing empty row.
                    if (empty_found) {
                        $(this).parent().remove();
                    }
                    empty_found = true;
                }
            }
            else {
                // Type changed away from 'list': restore a scalar input.
                $('#' + prefix).replaceWith(multifield_input(prefix, 'value',
                                                             'text'));
            }
        }
    });
    if (!empty_found) {
        var prefix = name + '_' + (largest_id + 1);
        var t = multifield.hasClass('string-only') ? 'hidden' : 'select';
        var val_type = multifield_input(prefix, 'value', 'text') + ' ' +
            multifield_input(prefix, 'type', t);

        if (dict) {
            multifield.append('<table><tr><td>' +
                              multifield_input(prefix, 'key', 'text') +
                              '</td><td class="equals"> = </td><td>' +
                              val_type + '</td></tr></table>');
        }
        else {
            multifield.append('<div>' + val_type + '</div>');
        }
    }
}
+
// Render one multifield form control named <prefix>_mf<suffix>.
// 'hidden' pins the row's type to "string", 'text' is a free-form
// value box, 'select' offers the supported argument types. Any other
// type yields undefined.
function multifield_input(prefix, suffix, type) {
    var ident = prefix + '_mf' + suffix;
    if (type == 'hidden' ) {
        return '<input type="hidden" id="' + ident +
            '" name="' + ident + '" value="string"/>';
    }
    else if (type == 'text' ) {
        return '<input type="text" id="' + ident +
            '" name="' + ident + '" value=""/>';
    }
    else if (type == 'select' ) {
        return '<select id="' + ident + '" name="' + ident + '">' +
            '<option value="string">String</option>' +
            '<option value="number">Number</option>' +
            '<option value="boolean">Boolean</option>' +
            '<option value="list">List</option>' +
            '</select>';
    }
}
+
// Recompile the table-filter regex from current_filter; on a bad
// pattern, show the error inline and leave the regex null.
function update_filter_regex(jElem) {
    current_filter_regex = null;
    jElem.parents('.filter').children('.status-error').remove();
    if (current_filter_regex_on && $.trim(current_filter).length > 0) {
        try {
            current_filter_regex = new RegExp(current_filter,'i');
        } catch (e) {
            jElem.parents('.filter').append('<p class="status-error">' +
                                            e.message + '</p>');
        }
    }
}

// Checkbox handler: toggle regex filtering on/off and refresh.
function update_filter_regex_mode() {
    current_filter_regex_on = $(this).is(':checked');
    update_filter_regex($(this));
    partial_update();
}

// Filter-box keyup handler (debounced): record the filter text, mark
// the table as filtered, and refresh.
function update_filter() {
    current_filter = $(this).val();
    var table = $(this).parents('table').first();
    table.removeClass('filter-active');
    if ($(this).val() != '') {
        table.addClass('filter-active');
    }
    update_filter_regex($(this));
    partial_update();
}

// Truncate-box keyup handler (debounced): strip non-digits, persist
// the row limit and refresh.
function update_truncate() {
    var current_truncate_str =
        $(this).val().replace(new RegExp('\\D', 'g'), '');
    if (current_truncate_str == '')
        current_truncate_str = '0';
    if ($(this).val() != current_truncate_str)
        $(this).val(current_truncate_str);
    current_truncate = parseInt(current_truncate_str, 10);
    store_pref('truncate', current_truncate);
    partial_update();
}
+
// Apply remembered expand/collapse state to every section: 'section'
// defaults to visible, 'section-hidden' to invisible, with a stored
// pref ('t'/'f') overriding the default.
function setup_visibility() {
    $('div.section,div.section-hidden').each(function(_index) {
        var pref = section_pref(current_template,
                                $(this).children('h2').text());
        var show = get_pref(pref);
        if (show == null) {
            show = $(this).hasClass('section');
        }
        else {
            show = show == 't';
        }
        if (show) {
            $(this).addClass('section-visible');
        }
        else {
            $(this).addClass('section-invisible');
        }
    });
}
+
// Expand/collapse a section when its header is clicked, remembering any
// non-default state in prefs (and clearing the pref when back at the
// default for that section kind).
function toggle_visibility(item) {
    var hider = item.next();
    var all = item.parent();
    var pref = section_pref(current_template, item.text());
    // 'hider' was computed and then ignored while item.next() was
    // re-evaluated; reuse the variable.
    hider.slideToggle(100);
    if (all.hasClass('section-visible')) {
        if (all.hasClass('section'))
            store_pref(pref, 'f');
        else
            clear_pref(pref);
        all.removeClass('section-visible');
        all.addClass('section-invisible');
    }
    else {
        if (all.hasClass('section-hidden'))
            store_pref(pref, 't');
        else
            clear_pref(pref);
        all.removeClass('section-invisible');
        all.addClass('section-visible');
    }
}
+
// Publish a message from the 'Publish message' form. Scalar form
// fields become AMQP basic properties (ints parsed where declared) and
// the result popup reports whether the broker could route the message.
function publish_msg(params0) {
    var params = params_magic(params0);
    var path = fill_path_template('/exchanges/:vhost/:name/publish', params);
    params['payload_encoding'] = 'string';
    params['properties'] = {};
    params['properties']['delivery_mode'] = parseInt(params['delivery_mode']);
    if (params['headers'] != '')
        params['properties']['headers'] = params['headers'];
    // [name, type] pairs of the optional basic properties.
    var props = [['content_type', 'str'],
                 ['content_encoding', 'str'],
                 ['correlation_id', 'str'],
                 ['reply_to', 'str'],
                 ['expiration', 'str'],
                 ['message_id', 'str'],
                 ['type', 'str'],
                 ['user_id', 'str'],
                 ['app_id', 'str'],
                 ['cluster_id', 'str'],
                 ['priority', 'int'],
                 ['timestamp', 'int']];
    for (var i in props) {
        var name = props[i][0];
        var type = props[i][1];
        if (params['props'][name] != undefined && params['props'][name] != '') {
            var value = params['props'][name];
            if (type == 'int') value = parseInt(value);
            params['properties'][name] = value;
        }
    }
    with_req('POST', path, JSON.stringify(params), function(resp) {
        var result = jQuery.parseJSON(resp.responseText);
        if (result.routed) {
            show_popup('info', 'Message published.');
        } else {
            show_popup('warn', 'Message published, but not routed.');
        }
    });
}
+
// 'Get messages' form handler: fetch messages from a queue and show
// them (or an 'empty' popup) in the #msg-wrapper pane.
function get_msgs(params) {
    var path = fill_path_template('/queues/:vhost/:name/get', params);
    with_req('POST', path, JSON.stringify(params), function(resp) {
        var msgs = jQuery.parseJSON(resp.responseText);
        if (msgs.length == 0) {
            show_popup('info', 'Queue is empty');
        } else {
            $('#msg-wrapper').slideUp(200);
            replace_content('msg-wrapper', format('messages', {'msgs': msgs}));
            $('#msg-wrapper').slideDown(200);
        }
    });
}
+
// Perform each GET in reqs ({key: path}) sequentially (each response
// triggers the next request), accumulating parsed results in acc under
// the same keys; call fun(acc) once all have completed.
function with_reqs(reqs, acc, fun) {
    if (keys(reqs).length > 0) {
        var key = keys(reqs)[0];
        with_req('GET', reqs[key], null, function(resp) {
            acc[key] = jQuery.parseJSON(resp.responseText);
            var remainder = {};
            for (var k in reqs) {
                if (k != key) remainder[k] = reqs[k];
            }
            with_reqs(remainder, acc, fun);
        });
    }
    else {
        fun(acc);
    }
}
+
// Replace the inner HTML of the element with the given id.
function replace_content(id, html) {
    $("#" + id).html(html);
}
+
// Templates whose EJS source has been fetched at least once; after the
// first fetch we let EJS serve it from its own cache.
var ejs_cached = {};

// Render the named EJS template (js/tmpl/<template>.ejs) with the
// given data. On error, stop the refresh timer and dump the error to
// #debug instead of throwing.
function format(template, json) {
    try {
        var cache = true;
        if (!(template in ejs_cached)) {
            ejs_cached[template] = true;
            cache = false;
        }
        var tmpl = new EJS({url: 'js/tmpl/' + template + '.ejs', cache: cache});
        return tmpl.render(json);
    } catch (err) {
        clearInterval(timer);
        debug(err['name'] + ": " + err['message']);
    }
}
+
// Update the status bar. 'ok' shows the last-update time; 'error' and
// 'timeout' report connection trouble and when we will retry.
function update_status(status) {
    var text;
    if (status == 'ok')
        text = "Last update: " + fmt_date(new Date());
    else if (status == 'error' || status == 'timeout') {
        // 'timeout' is what check_bad_response() passes for HTTP 408;
        // previously it fell into the throw below.
        var next_try = new Date(new Date().getTime() + timer_interval);
        text = "Error: could not connect to server since " +
            fmt_date(last_successful_connect) + ".<br/>Will retry at " +
            fmt_date(next_try) + ".";
    }
    else
        throw("Unknown status " + status);

    var html = format('status', {status: status, text: text});
    replace_content('status', html);
}
+
// Basic authorization header built from the 'auth' cookie set at login.
function auth_header() {
    return "Basic " + decodeURIComponent(get_cookie('auth'));
}
+
// Asynchronous request against the management API; fun is invoked with
// the XHR only on a good response (failures are reported by
// check_bad_response).
function with_req(method, path, body, fun) {
    var json;
    var req = xmlHttpRequest();
    req.open(method, 'api' + path, true );
    req.setRequestHeader('authorization', auth_header());
    req.onreadystatechange = function () {
        if (req.readyState == 4) {
            if (check_bad_response(req, true)) {
                last_successful_connect = new Date();
                fun(req);
            }
        }
    };
    req.send(body);
}
+
// Thin per-verb wrappers over sync_req. The sammy variants take their
// request params from the route.
function sync_get(path) {
    return sync_req('GET', [], path);
}

function sync_put(sammy, path_template) {
    return sync_req('PUT', sammy.params, path_template);
}

function sync_delete(sammy, path_template, options) {
    return sync_req('DELETE', sammy.params, path_template, options);
}

function sync_post(sammy, path_template) {
    return sync_req('POST', sammy.params, path_template);
}
+
// Synchronous AJAX request. Returns the response text for GETs, true
// for other verbs, or false when the request failed (after
// check_bad_response has reported the failure).
function sync_req(type, params0, path_template, options) {
    var params;
    var path;
    try {
        params = params_magic(params0);
        path = fill_path_template(path_template, params);
    } catch (e) {
        show_popup('warn', e);
        return false;
    }
    var req = xmlHttpRequest();
    req.open(type, 'api' + path, false);
    req.setRequestHeader('content-type', 'application/json');
    req.setRequestHeader('authorization', auth_header());

    // Was 'options != undefined || options != null' (and likewise for
    // headers), which only behaved as intended because loose != treats
    // null and undefined alike; say what we mean with &&.
    if (options != null && options.headers != null) {
        jQuery.each(options.headers, function (k, v) {
            req.setRequestHeader(k, v);
        });
    }

    try {
        if (type == 'GET')
            req.send(null);
        else
            req.send(JSON.stringify(params));
    }
    catch (e) {
        if (e.number == 0x80004004) {
            // 0x80004004 means "Operation aborted."
            // http://support.microsoft.com/kb/186063
            // MSIE6 appears to do this in response to HTTP 204.
        }
    }

    if (check_bad_response(req, false)) {
        if (type == 'GET')
            return req.responseText;
        else
            return true;
    }
    else {
        return false;
    }
}
+
// Examine a completed XHR: true for success; otherwise report the
// failure appropriately (404 page, warning popup, or status-bar error)
// and return false.
function check_bad_response(req, full_page_404) {
    // 1223 == 204 - see http://www.enhanceie.com/ie/bugs.asp
    // MSIE7 and 8 appear to do this in response to HTTP 204.
    if ((req.status >= 200 && req.status < 300) || req.status == 1223) {
        return true;
    }
    else if (req.status == 404 && full_page_404) {
        var html = format('404', {});
        replace_content('main', html);
    }
    else if (req.status >= 400 && req.status <= 404) {
        // 4xx responses carry a JSON body with a 'reason' field.
        var reason = JSON.parse(req.responseText).reason;
        if (typeof(reason) != 'string') reason = JSON.stringify(reason);
        show_popup('warn', reason);
    }
    else if (req.status == 408) {
        update_status('timeout');
    }
    else if (req.status == 0) { // Non-MSIE: could not connect
        update_status('error');
    }
    else if (req.status > 12000) { // MSIE: could not connect
        update_status('error');
    }
    else if (req.status == 503) { // Proxy: could not connect
        update_status('error');
    }
    else {
        debug("Got response code " + req.status + " with body " +
              req.responseText);
        clearInterval(timer);
    }

    return false;
}
+
// Substitute each ':name' placeholder in a path template with the
// escaped value of params[name]; a missing or empty value is a usage
// error and is thrown as such.
function fill_path_template(template, params) {
    return template.replace(/:[a-zA-Z_]*/g, function(placeholder) {
        var key = placeholder.substring(1);
        var value = esc(params[key]);
        if (value == '') {
            throw(key + " is required");
        }
        return value;
    });
}
+
// Normalise raw form params into an API request body: collapse
// multifield rows, drop fields hidden by appearance selectors, lift
// known arguments into 'arguments', and validate password fields.
function params_magic(params) {
    return check_password(
        add_known_arguments(
            maybe_remove_fields(
                collapse_multifields(params))));
}
+
// Fold flat multifield form params (<name>_<id>_mfkey / _mfvalue /
// _mftype) into structured values under params[name]. Simple ids
// ('3') become top-level dict entries; compound ids ('3_1') become
// elements appended to the enclosing 'list'-typed row.
function collapse_multifields(params0) {
    function set(x) { return x != '' && x != undefined }

    var params = {};
    var ks = keys(params0);
    var ids = [];
    for (var i in ks) {    // 'var' added: i leaked as an implicit global
        var key = ks[i];
        var match = key.match(/([a-z]*)_([0-9_]*)_mftype/);
        var match2 = key.match(/[a-z]*_[0-9_]*_mfkey/);
        var match3 = key.match(/[a-z]*_[0-9_]*_mfvalue/);
        if (match == null && match2 == null && match3 == null) {
            params[key] = params0[key];
        }
        else if (match == null) {
            // Do nothing, value is handled below
        }
        else {
            var name = match[1];
            var id = match[2];
            ids.push([name, id]);
        }
    }
    // Sort so parents ('3') are processed before children ('3_1').
    ids.sort();
    var id_map = {};
    for (var i in ids) {   // 'var' added: i leaked as an implicit global
        var name = ids[i][0];
        var id = ids[i][1];
        if (params[name] == undefined) {
            params[name] = {};
            id_map[name] = {};
        }
        var id_parts = id.split('_');
        var k = params0[name + '_' + id_parts[0] + '_mfkey'];
        var v = params0[name + '_' + id + '_mfvalue'];
        var t = params0[name + '_' + id + '_mftype'];
        var val = null;
        var top_level = id_parts.length == 1;
        if (t == 'list') {
            val = [];
            id_map[name][id] = val;
        }
        else if ((set(k) && top_level) || set(v)) {
            if (t == 'boolean') {
                if (v != 'true' && v != 'false')
                    throw(k + ' must be "true" or "false"; got ' + v);
                val = (v == 'true');
            }
            else if (t == 'number') {
                var n = parseFloat(v);
                if (isNaN(n))
                    throw(k + ' must be a number; got ' + v);
                val = n;
            }
            else {
                val = v;
            }
        }
        if (val != null) {
            if (top_level) {
                params[name][k] = val;
            }
            else {
                // Nested value: append to the list created for the
                // parent id.
                var prefix = id_parts.slice(0, id_parts.length - 1).join('_');
                id_map[name][prefix].push(val);
            }
        }
    }
    return params;
}
+
// Move recognised argument fields (KNOWN_ARGS) into params.arguments,
// parsing ints where declared; the flat field is removed either way.
// NOTE(review): assumes params.arguments already exists - presumably
// created by the multifield collapsing; confirm against the forms.
function add_known_arguments(params) {
    for (var k in KNOWN_ARGS) {
        var v = params[k];
        if (v != undefined && v != '') {
            var type = KNOWN_ARGS[k].type;
            if (type == 'int') {
                v = parseInt(v);
                if (isNaN(v)) {
                    throw(k + " must be an integer.");
                }
            }
            params.arguments[k] = v;
        }
        delete params[k];
    }

    return params;
}
+
// Validate password fields when present: the password must be
// non-empty and must match its confirmation; the confirmation field is
// then stripped before the params go to the server.
function check_password(params) {
    if (params['password'] == undefined) {
        return params;
    }
    if (params['password'] == '') {
        throw("Please specify a password.");
    }
    if (params['password'] != params['password_confirm']) {
        throw("Passwords do not match.");
    }
    delete params['password_confirm'];
    return params;
}
+
// For each appearance selector on the page, drop the params belonging
// to the non-selected alternatives (and the selector's own field).
function maybe_remove_fields(params) {
    $('.controls-appearance').each(function(index) {
        var options = $(this).get(0).options;
        var selected = $(this).val();

        for (var i = 0; i < options.length; i++) {   // 'var' added: i leaked as a global
            var option = options[i].value;
            if (option != selected) {
                delete params[option];
            }
        }
        delete params[$(this).attr('name')];
    });
    return params;
}
+
// PUT a runtime parameter. Form params are coerced per the *_keys
// lists (empty optional fields dropped, ints parsed, booleans decoded,
// space-separated values split into arrays) and the remainder becomes
// the parameter's 'value'.
function put_parameter(sammy, mandatory_keys, num_keys, bool_keys,
                       arrayable_keys) {
    for (var i in sammy.params) {
        if (i === 'length' || !sammy.params.hasOwnProperty(i)) continue;
        if (sammy.params[i] == '' && jQuery.inArray(i, mandatory_keys) == -1) {
            delete sammy.params[i];
        }
        else if (jQuery.inArray(i, num_keys) != -1) {
            sammy.params[i] = parseInt(sammy.params[i]);
        }
        else if (jQuery.inArray(i, bool_keys) != -1) {
            sammy.params[i] = sammy.params[i] == 'true';
        }
        else if (jQuery.inArray(i, arrayable_keys) != -1) {
            sammy.params[i] = sammy.params[i].split(' ');
            // A single token stays a scalar.
            if (sammy.params[i].length == 1) {
                sammy.params[i] = sammy.params[i][0];
            }
        }
    }
    var params = {"component": sammy.params.component,
                  "vhost": sammy.params.vhost,
                  "name": sammy.params.name,
                  "value": params_magic(sammy.params)};
    // The routing keys must not be duplicated inside the value.
    delete params.value.vhost;
    delete params.value.component;
    delete params.value.name;
    sammy.params = params;
    if (sync_put(sammy, '/parameters/:component/:vhost/:name')) update();
}
+
// PUT a policy, coercing form params the same way as put_parameter
// (minus the arrayable keys).
function put_policy(sammy, mandatory_keys, num_keys, bool_keys) {
    for (var i in sammy.params) {
        if (i === 'length' || !sammy.params.hasOwnProperty(i)) continue;
        if (sammy.params[i] == '' && jQuery.inArray(i, mandatory_keys) == -1) {
            delete sammy.params[i];
        }
        else if (jQuery.inArray(i, num_keys) != -1) {
            sammy.params[i] = parseInt(sammy.params[i]);
        }
        else if (jQuery.inArray(i, bool_keys) != -1) {
            sammy.params[i] = sammy.params[i] == 'true';
        }
    }
    if (sync_put(sammy, '/policies/:vhost/:name')) update();
}
+
// Append a paragraph to the #debug pane.
// NOTE(review): str is interpolated into HTML unescaped - fine for the
// internal messages passed today, but do not feed it user input.
function debug(str) {
    $('<p>' + str + '</p>').appendTo('#debug');
}
+
// Collect the enumerable property names (own and inherited, matching
// for..in semantics) of obj into an array.
function keys(obj) {
    var names = [];
    for (var name in obj) {
        names.push(name);
    }
    return names;
}
+
// Don't use the jQuery AJAX support, it seems to have trouble reporting
// server-down type errors.
// Construct an XHR, falling back to the ActiveX flavour on old MSIE.
function xmlHttpRequest() {
    var res;
    try {
        res = new XMLHttpRequest();
    }
    catch(e) {
        res = new ActiveXObject("Microsoft.XMLHttp");
    }
    return res;
}
+
+// Our base64 library takes a string that is really a byte sequence,
+// and will throw if given a string with chars > 255 (and hence not
+// DTRT for chars > 127). So encode a unicode string as a UTF-8
+// sequence of "bytes".
// Base64-encode a unicode string: first turn it into a UTF-8 byte
// string (see the comment above) so the base64 library never sees a
// char code > 255.
function b64_encode_utf8(str) {
    return base64.encode(encode_utf8(str));
}
+
+// encodeURIComponent handles utf-8, unescape does not. Neat!
// Re-encode a JS (UTF-16) string as a byte string whose char codes are
// its UTF-8 bytes: encodeURIComponent emits %-escaped UTF-8, and
// unescape turns each %XX back into one char.
function encode_utf8(str) {
    var pctEncoded = encodeURIComponent(str);
    return unescape(pctEncoded);
}
+
// jQuery plugin: centre an absolutely-positioned element within the
// window (used for popups), clamping to the viewport's top-left.
(function($){
    $.fn.extend({
        center: function () {
            return this.each(function() {
                var top = ($(window).height() - $(this).outerHeight()) / 2;
                var left = ($(window).width() - $(this).outerWidth()) / 2;
                $(this).css({margin:0, top: (top > 0 ? top : 0)+'px', left: (left > 0 ? left : 0)+'px'});
            });
        }
    });
})(jQuery);
+
// Return a wrapper around f that delays invocation until 'delay' ms
// have passed without another call; only the final call's receiver and
// arguments are used.
function debounce(f, delay) {
    var pending = null;

    return function() {
        var self = this;
        var args = arguments;

        if (pending) clearTimeout(pending);
        pending = setTimeout(function () {
            f.apply(self, args);
            pending = null;
        }, delay);
    }
}
--- /dev/null
+// TODO It would be nice to use DOM storage. When that's available.
+
// Persist one preference into the shared 'm' cookie (keys are hashed
// by short_key to save space).
function store_pref(k, v) {
    var d = parse_cookie();
    d[short_key(k)] = v;
    store_cookie(d);
}

// Remove a preference from the shared cookie.
function clear_pref(k) {
    var d = parse_cookie();
    delete d[short_key(k)];
    store_cookie(d);
}

// Read a preference, falling back to its built-in default.
function get_pref(k) {
    var r = parse_cookie()[short_key(k)];
    return r == undefined ? default_pref(k) : r;
}
+
// Preference key recording whether a given section on a given
// template's page is expanded.
function section_pref(template, name) {
    return ['visible', template, name].join('|');
}
+
+// ---------------------------------------------------------------------------
+
// Built-in defaults for preferences that have not been stored yet;
// null when there is no default.
function default_pref(k) {
    if (k.indexOf('chart-range-') == 0) return '60|5';
    if (k.indexOf('chart-size-') == 0) return 'small';
    if (k.indexOf('rate-mode-') == 0) return 'chart';
    if (k == 'truncate') return '100';
    return null;
}
+
+// ---------------------------------------------------------------------------
+
// Decode the 'm' preferences cookie ("k1:v1|k2:v2|...") into a dict.
function parse_cookie() {
    var c = get_cookie('m');
    var items = c.length == 0 ? [] : c.split('|');

    // (Removed the unused 'var start = 0;' local.)
    var dict = {};
    for (var i in items) {
        var kv = items[i].split(':');
        dict[kv[0]] = unescape(kv[1]);
    }
    return dict;
}
+
// Write the preferences dict back as the 'm' cookie ('k:v|k:v|...'),
// valid for one year. Values are escape()d so '|' and ':' cannot
// corrupt the encoding.
function store_cookie(dict) {
    var enc = [];
    for (var k in dict) {
        enc.push(k + ':' + escape(dict[k]));
    }
    var date = new Date();
    date.setFullYear(date.getFullYear() + 1);
    document.cookie = 'm=' + enc.join('|') + '; expires=' + date.toUTCString();
}

// Return the named cookie's raw value, or '' if absent.
// NOTE(review): split('=') means a value containing '=' would be
// truncated at the second '=' - values stored by this file are escaped
// so that should not arise; confirm for externally-set cookies.
function get_cookie(key) {
    var cookies = document.cookie.split(';');
    for (var i in cookies) {
        var kv = jQuery.trim(cookies[i]).split('=');
        if (kv[0] == key) return kv[1];
    }
    return '';
}
+
+// Try to economise on space since cookies have limited length.
// Compress a preference key to a short hex string (cookies have
// limited length): the low 16 bits of the string hash, in hex.
// Collisions are possible but acceptable for preferences.
function short_key(k) {
    var res = Math.abs(k.hashCode() << 16 >> 16);
    res = res.toString(16);
    return res;
}
+
// Java-style 31-based string hash, truncated to a signed 32-bit
// integer. Used by short_key() to keep cookie keys small.
String.prototype.hashCode = function() {
    var hash = 0;
    // Was 'return code' - a ReferenceError for the empty string; the
    // hash of "" is 0.
    if (this.length == 0) return hash;
    for (var i = 0; i < this.length; i++) {   // 'var' added: i/char leaked as globals
        var c = this.charCodeAt(i);
        hash = 31*hash+c;
        hash = hash & hash; // Convert to 32bit integer
    }
    return hash;
}
--- /dev/null
+// name: sammy
+// version: 0.6.0pre
+
+(function($) {
+
+ var Sammy,
+ PATH_REPLACER = "([^\/]+)",
+ PATH_NAME_MATCHER = /:([\w\d]+)/g,
+ QUERY_STRING_MATCHER = /\?([^#]*)$/,
+ // mainly for making `arguments` an Array
+ _makeArray = function(nonarray) { return Array.prototype.slice.call(nonarray); },
+ // borrowed from jQuery
+ _isFunction = function( obj ) { return Object.prototype.toString.call(obj) === "[object Function]"; },
+ _isArray = function( obj ) { return Object.prototype.toString.call(obj) === "[object Array]"; },
+ _decode = decodeURIComponent,
+ _escapeHTML = function(s) {
+ return s.replace(/&/g,'&').replace(/</g,'<').replace(/>/g,'>');
+ },
+ _routeWrapper = function(verb) {
+ return function(path, callback) { return this.route.apply(this, [verb, path, callback]); };
+ },
+ _template_cache = {},
+ loggers = [];
+
+
+ // `Sammy` (also aliased as $.sammy) is not only the namespace for a
+ // number of prototypes, its also a top level method that allows for easy
+ // creation/management of `Sammy.Application` instances. There are a
+ // number of different forms for `Sammy()` but each returns an instance
+ // of `Sammy.Application`. When a new instance is created using
+ // `Sammy` it is added to an Object called `Sammy.apps`. This
+ // provides for an easy way to get at existing Sammy applications. Only one
+ // instance is allowed per `element_selector` so when calling
+ // `Sammy('selector')` multiple times, the first time will create
+ // the application and the following times will extend the application
+ // already added to that selector.
+ //
+ // ### Example
+ //
+ // // returns the app at #main or a new app
+ // Sammy('#main')
+ //
+ // // equivilent to "new Sammy.Application", except appends to apps
+ // Sammy();
+ // Sammy(function() { ... });
+ //
+ // // extends the app at '#main' with function.
+ // Sammy('#main', function() { ... });
+ //
+ Sammy = function() {
+ var args = _makeArray(arguments),
+ app, selector;
+ // lazily created registry of element_selector -> application
+ Sammy.apps = Sammy.apps || {};
+ if (args.length === 0 || args[0] && _isFunction(args[0])) { // Sammy()
+ // no selector given: re-invoke with the default 'body' selector
+ return Sammy.apply(Sammy, ['body'].concat(args));
+ } else if (typeof (selector = args.shift()) == 'string') { // Sammy('#main')
+ // reuse the app already registered at this selector, if any
+ app = Sammy.apps[selector] || new Sammy.Application();
+ app.element_selector = selector;
+ if (args.length > 0) {
+ // remaining arguments are plugins / app functions, applied via use()
+ $.each(args, function(i, plugin) {
+ app.use(plugin);
+ });
+ }
+ // if the selector changes make sure the refrence in Sammy.apps changes
+ if (app.element_selector != selector) {
+ delete Sammy.apps[selector];
+ }
+ Sammy.apps[app.element_selector] = app;
+ return app;
+ }
+ };
+
+ Sammy.VERSION = '0.6.0';
+
+ // Add to the global logger pool. Takes a function that accepts an
+ // unknown number of arguments and should print them or send them somewhere
+ // The first argument is always a timestamp.
+ Sammy.addLogger = function(logger) {
+ loggers.push(logger);
+ };
+
+ // Sends a log message to each logger listed in the global
+ // loggers pool. Can take any number of arguments.
+ // Also prefixes the arguments with a timestamp.
+ Sammy.log = function() {
+ var args = _makeArray(arguments);
+ args.unshift("[" + Date() + "]");
+ $.each(loggers, function(i, logger) {
+ logger.apply(Sammy, args);
+ });
+ };
+
+ // Register a default logger against the environment's console, if one
+ // exists. Some console implementations do not expose apply on
+ // console.log, so fall back to passing the raw arguments object.
+ if (typeof window.console != 'undefined') {
+ if (_isFunction(console.log.apply)) {
+ Sammy.addLogger(function() {
+ window.console.log.apply(console, arguments);
+ });
+ } else {
+ Sammy.addLogger(function() {
+ window.console.log(arguments);
+ });
+ }
+ } else if (typeof console != 'undefined') {
+ Sammy.addLogger(function() {
+ console.log.apply(console, arguments);
+ });
+ }
+
+ // expose the internal utility helpers on the Sammy namespace
+ $.extend(Sammy, {
+ makeArray: _makeArray,
+ isFunction: _isFunction,
+ isArray: _isArray
+ })
+
+ // Sammy.Object is the base for all other Sammy classes. It provides some useful
+ // functionality, including cloning, iterating, etc.
+ Sammy.Object = function(obj) { // constructor
+ // copies the attributes of `obj` (if given) onto the new instance
+ return $.extend(this, obj || {});
+ };
+
+ $.extend(Sammy.Object.prototype, {
+
+ // Escape HTML in string, use in templates to prevent script injection.
+ // Also aliased as `h()`
+ escapeHTML: _escapeHTML,
+ h: _escapeHTML,
+
+ // Returns a copy of the object with Functions removed.
+ toHash: function() {
+ var json = {};
+ $.each(this, function(k,v) {
+ if (!_isFunction(v)) {
+ json[k] = v;
+ }
+ });
+ return json;
+ },
+
+ // Renders a simple HTML version of this Objects attributes.
+ // Does not render functions.
+ // For example. Given this Sammy.Object:
+ //
+ // var s = new Sammy.Object({first_name: 'Sammy', last_name: 'Davis Jr.'});
+ // s.toHTML() //=> '<strong>first_name</strong> Sammy<br /><strong>last_name</strong> Davis Jr.<br />'
+ //
+ toHTML: function() {
+ var display = "";
+ $.each(this, function(k, v) {
+ if (!_isFunction(v)) {
+ display += "<strong>" + k + "</strong> " + v + "<br />";
+ }
+ });
+ return display;
+ },
+
+ // Returns an array of keys for this object. If `attributes_only`
+ // is true will not return keys that map to a `function()`
+ keys: function(attributes_only) {
+ var keys = [];
+ for (var property in this) {
+ if (!_isFunction(this[property]) || !attributes_only) {
+ keys.push(property);
+ }
+ }
+ return keys;
+ },
+
+ // Checks if the object has a value at `key` and that the value is not empty
+ // (note: returns the falsy value itself, not strictly `false`, when absent)
+ has: function(key) {
+ return this[key] && $.trim(this[key].toString()) != '';
+ },
+
+ // convenience method to join as many arguments as you want
+ // by the first argument - useful for making paths
+ join: function() {
+ var args = _makeArray(arguments);
+ var delimiter = args.shift();
+ return args.join(delimiter);
+ },
+
+ // Shortcut to Sammy.log
+ log: function() {
+ Sammy.log.apply(Sammy, arguments);
+ },
+
+ // Returns a string representation of this object.
+ // if `include_functions` is true, it will also toString() the
+ // methods of this object. By default only prints the attributes.
+ toString: function(include_functions) {
+ var s = [];
+ $.each(this, function(k, v) {
+ if (!_isFunction(v) || include_functions) {
+ s.push('"' + k + '": ' + v.toString());
+ }
+ });
+ return "Sammy.Object: {" + s.join(',') + "}";
+ }
+ });
+
+ // The HashLocationProxy is the default location proxy for all Sammy applications.
+ // A location proxy is a prototype that conforms to a simple interface. The purpose
+ // of a location proxy is to notify the Sammy.Application its bound to when the location
+ // or 'external state' changes. The HashLocationProxy considers the state to be
+ // changed when the 'hash' (window.location.hash / '#') changes. It does this in two
+ // different ways depending on what browser you are using. The newest browsers
+ // (IE, Safari > 4, FF >= 3.6) support a 'onhashchange' DOM event, thats fired whenever
+ // the location.hash changes. In this situation the HashLocationProxy just binds
+ // to this event and delegates it to the application. In the case of older browsers
+ // a poller is set up to track changes to the hash. Unlike Sammy 0.3 or earlier,
+ // the HashLocationProxy allows the poller to be a global object, eliminating the
+ // need for multiple pollers even when thier are multiple apps on the page.
+ Sammy.HashLocationProxy = function(app, run_interval_every) {
+ // `run_interval_every` is the poll period in ms, forwarded to _startPolling()
+ this.app = app;
+ // set is native to false and start the poller immediately
+ this.is_native = false;
+ this._startPolling(run_interval_every);
+ };
+
+ Sammy.HashLocationProxy.prototype = {
+
+ // bind the proxy events to the current app.
+ bind: function() {
+ var proxy = this, app = this.app;
+ $(window).bind('hashchange.' + this.app.eventNamespace(), function(e, non_native) {
+ // if we receive a native hash change event, set the proxy accordingly
+ // and stop polling
+ if (proxy.is_native === false && !non_native) {
+ Sammy.log('native hash change exists, using');
+ proxy.is_native = true;
+ clearInterval(Sammy.HashLocationProxy._interval);
+ }
+ app.trigger('location-changed');
+ });
+ if (!Sammy.HashLocationProxy._bindings) {
+ Sammy.HashLocationProxy._bindings = 0;
+ }
+ Sammy.HashLocationProxy._bindings++;
+ },
+
+ // unbind the proxy events from the current app
+ unbind: function() {
+ $(window).unbind('hashchange.' + this.app.eventNamespace());
+ Sammy.HashLocationProxy._bindings--;
+ if (Sammy.HashLocationProxy._bindings <= 0) {
+ clearInterval(Sammy.HashLocationProxy._interval);
+ }
+ },
+
+ // get the current location from the hash.
+ getLocation: function() {
+ // Bypass the `window.location.hash` attribute. If a question mark
+ // appears in the hash IE6 will strip it and all of the following
+ // characters from `window.location.hash`.
+ var matches = window.location.toString().match(/^[^#]*(#.+)$/);
+ return matches ? matches[1] : '';
+ },
+
+ // set the current location to `new_location`
+ setLocation: function(new_location) {
+ return (window.location = new_location);
+ },
+
+ _startPolling: function(every) {
+ // set up interval
+ var proxy = this;
+ if (!Sammy.HashLocationProxy._interval) {
+ if (!every) { every = 10; }
+ var hashCheck = function() {
+ current_location = proxy.getLocation();
+ if (!Sammy.HashLocationProxy._last_location ||
+ current_location != Sammy.HashLocationProxy._last_location) {
+ setTimeout(function() {
+ $(window).trigger('hashchange', [true]);
+ }, 13);
+ }
+ Sammy.HashLocationProxy._last_location = current_location;
+ };
+ hashCheck();
+ Sammy.HashLocationProxy._interval = setInterval(hashCheck, every);
+ }
+ }
+ };
+
+
+ // Sammy.Application is the Base prototype for defining 'applications'.
+ // An 'application' is a collection of 'routes' and bound events that is
+ // attached to an element when `run()` is called.
+ // The only argument an 'app_function' is evaluated within the context of the application.
+ Sammy.Application = function(app_function) {
+ var app = this;
+ this.routes = {};
+ this.listeners = new Sammy.Object({});
+ this.arounds = [];
+ this.befores = [];
+ // generate a unique namespace
+ this.namespace = (new Date()).getTime() + '-' + parseInt(Math.random() * 1000, 10);
+ // each app gets its own EventContext subclass so helpers()/helper()
+ // can extend the context per-application without affecting other apps
+ this.context_prototype = function() { Sammy.EventContext.apply(this, arguments); };
+ this.context_prototype.prototype = new Sammy.EventContext();
+
+ if (_isFunction(app_function)) {
+ // the app function runs with `this` bound to the app (also passed as arg)
+ app_function.apply(this, [this]);
+ }
+ // set the location proxy if not defined to the default (HashLocationProxy)
+ if (!this._location_proxy) {
+ this.setLocationProxy(new Sammy.HashLocationProxy(this, this.run_interval_every));
+ }
+ if (this.debug) {
+ this.bindToAllEvents(function(e, data) {
+ app.log(app.toString(), e.cleaned_type, data || {});
+ });
+ }
+ };
+
+ Sammy.Application.prototype = $.extend({}, Sammy.Object.prototype, {
+
+ // the four route verbs
+ ROUTE_VERBS: ['get','post','put','delete'],
+
+ // An array of the default events triggered by the
+ // application during its lifecycle
+ APP_EVENTS: ['run','unload','lookup-route','run-route','route-found','event-context-before','event-context-after','changed','error','check-form-submission','redirect'],
+
+ // internal state, managed by run()/unload() and the route machinery
+ _last_route: null,
+ _location_proxy: null,
+ _running: false,
+
+ // Defines what element the application is bound to. Provide a selector
+ // (parseable by `jQuery()`) and this will be used by `$element()`
+ element_selector: 'body',
+
+ // When set to true, logs all of the default events using `log()`
+ debug: false,
+
+ // When set to true, and the error() handler is not overriden, will actually
+ // raise JS errors in routes (500) and when routes can't be found (404)
+ raise_errors: false,
+
+ // The time in milliseconds that the URL is queried for changes
+ run_interval_every: 50,
+
+ // The default template engine to use when using `partial()` in an
+ // `EventContext`. `template_engine` can either be a string that
+ // corresponds to the name of a method/helper on EventContext or it can be a function
+ // that takes two arguments, the content of the unrendered partial and an optional
+ // JS object that contains interpolation data. Template engine is only called/refered
+ // to if the extension of the partial is null or unknown. See `partial()`
+ // for more information
+ template_engine: null,
+
+ // //=> Sammy.Application: body
+ toString: function() {
+ return 'Sammy.Application:' + this.element_selector;
+ },
+
+ // returns a jQuery object of the Applications bound element.
+ $element: function() {
+ return $(this.element_selector);
+ },
+
+ // `use()` is the entry point for including Sammy plugins.
+ // The first argument to use should be a function() that is evaluated
+ // in the context of the current application, just like the `app_function`
+ // argument to the `Sammy.Application` constructor.
+ //
+ // Any additional arguments are passed to the app function sequentially.
+ //
+ // For much more detail about plugins, check out:
+ // http://code.quirkey.com/sammy/doc/plugins.html
+ //
+ // ### Example
+ //
+ // var MyPlugin = function(app, prepend) {
+ //
+ // this.helpers({
+ // myhelper: function(text) {
+ // alert(prepend + " " + text);
+ // }
+ // });
+ //
+ // };
+ //
+ // var app = $.sammy(function() {
+ //
+ // this.use(MyPlugin, 'This is my plugin');
+ //
+ // this.get('#/', function() {
+ // this.myhelper('and dont you forget it!');
+ // //=> Alerts: This is my plugin and dont you forget it!
+ // });
+ //
+ // });
+ //
+ // If plugin is passed as a string it assumes your are trying to load
+ // Sammy."Plugin". This is the prefered way of loading core Sammy plugins
+ // as it allows for better error-messaging.
+ //
+ // ### Example
+ //
+ // $.sammy(function() {
+ // this.use('Mustache'); //=> Sammy.Mustache
+ // this.use('Storage'); //=> Sammy.Storage
+ // });
+ //
+ use: function() {
+ // flatten the arguments
+ var args = _makeArray(arguments),
+ plugin = args.shift(),
+ plugin_name = plugin || '';
+ try {
+ // the plugin is invoked as plugin(app, ...extra use() arguments)
+ args.unshift(this);
+ if (typeof plugin == 'string') {
+ // string form looks the plugin up on the Sammy namespace
+ plugin_name = 'Sammy.' + plugin;
+ plugin = Sammy[plugin];
+ }
+ plugin.apply(this, args);
+ } catch(e) {
+ // distinguish "not defined" / "not a function" / plugin's own error
+ if (typeof plugin === 'undefined') {
+ this.error("Plugin Error: called use() but plugin (" + plugin_name.toString() + ") is not defined", e);
+ } else if (!_isFunction(plugin)) {
+ this.error("Plugin Error: called use() but '" + plugin_name.toString() + "' is not a function", e);
+ } else {
+ this.error("Plugin Error", e);
+ }
+ }
+ return this;
+ },
+
+ // Sets the location proxy for the current app. By default this is set to
+ // a new `Sammy.HashLocationProxy` on initialization. However, you can set
+ // the location_proxy inside you're app function to give your app a custom
+ // location mechanism. See `Sammy.HashLocationProxy` and `Sammy.DataLocationProxy`
+ // for examples.
+ //
+ // `setLocationProxy()` takes an initialized location proxy.
+ //
+ // ### Example
+ //
+ // // to bind to data instead of the default hash;
+ // var app = $.sammy(function() {
+ // this.setLocationProxy(new Sammy.DataLocationProxy(this));
+ // });
+ //
+ setLocationProxy: function(new_proxy) {
+ var original_proxy = this._location_proxy;
+ this._location_proxy = new_proxy;
+ // swap event bindings immediately only if the app is already running;
+ // otherwise run() will bind the proxy later
+ if (this.isRunning()) {
+ if (original_proxy) {
+ // if there is already a location proxy, unbind it.
+ original_proxy.unbind();
+ }
+ this._location_proxy.bind();
+ }
+ },
+
+ // `route()` is the main method for defining routes within an application.
+ // For great detail on routes, check out: http://code.quirkey.com/sammy/doc/routes.html
+ //
+ // This method also has aliases for each of the different verbs (eg. `get()`, `post()`, etc.)
+ //
+ // ### Arguments
+ //
+ // * `verb` A String in the set of ROUTE_VERBS or 'any'. 'any' will add routes for each
+ // of the ROUTE_VERBS. If only two arguments are passed,
+ // the first argument is the path, the second is the callback and the verb
+ // is assumed to be 'any'.
+ // * `path` A Regexp or a String representing the path to match to invoke this verb.
+ // * `callback` A Function that is called/evaluated whent the route is run see: `runRoute()`.
+ // It is also possible to pass a string as the callback, which is looked up as the name
+ // of a method on the application.
+ //
+ route: function(verb, path, callback) {
+ var app = this, param_names = [], add_route;
+
+ // if the method signature is just (path, callback)
+ // assume the verb is 'any'
+ if (!callback && _isFunction(path)) {
+ path = verb;
+ callback = path;
+ verb = 'any';
+ }
+
+ verb = verb.toLowerCase(); // ensure verb is lower case
+
+ // if path is a string turn it into a regex
+ if (path.constructor == String) {
+
+ // Needs to be explicitly set because IE will maintain the index unless NULL is returned,
+ // which means that with two consecutive routes that contain params, the second set of params will not be found and end up in splat instead of params
+ // https://developer.mozilla.org/en/Core_JavaScript_1.5_Reference/Global_Objects/RegExp/lastIndex
+ PATH_NAME_MATCHER.lastIndex = 0;
+
+ // find the names
+ while ((path_match = PATH_NAME_MATCHER.exec(path)) !== null) {
+ param_names.push(path_match[1]);
+ }
+ // replace with the path replacement
+ path = new RegExp("^" + path.replace(PATH_NAME_MATCHER, PATH_REPLACER) + "$");
+ }
+ // lookup callback
+ if (typeof callback == 'string') {
+ callback = app[callback];
+ }
+
+ add_route = function(with_verb) {
+ var r = {verb: with_verb, path: path, callback: callback, param_names: param_names};
+ // add route to routes array
+ app.routes[with_verb] = app.routes[with_verb] || [];
+ // place routes in order of definition
+ app.routes[with_verb].push(r);
+ };
+
+ if (verb === 'any') {
+ $.each(this.ROUTE_VERBS, function(i, v) { add_route(v); });
+ } else {
+ add_route(verb);
+ }
+
+ // return the app
+ return this;
+ },
+
+ // Alias for route('get', ...)
+ get: _routeWrapper('get'),
+
+ // Alias for route('post', ...)
+ post: _routeWrapper('post'),
+
+ // Alias for route('put', ...)
+ put: _routeWrapper('put'),
+
+ // Alias for route('delete', ...)
+ // (named del() because `delete` is a reserved word in JavaScript)
+ del: _routeWrapper('delete'),
+
+ // Alias for route('any', ...)
+ any: _routeWrapper('any'),
+
+ // `mapRoutes` takes an array of arrays, each array being passed to route()
+ // as arguments, this allows for mass definition of routes. Another benefit is
+ // this makes it possible/easier to load routes via remote JSON.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // this.mapRoutes([
+ // ['get', '#/', function() { this.log('index'); }],
+ // // strings in callbacks are looked up as methods on the app
+ // ['post', '#/create', 'addUser'],
+ // // No verb assumes 'any' as the verb
+ // [/dowhatever/, function() { this.log(this.verb, this.path)}];
+ // ]);
+ // })
+ //
+ mapRoutes: function(route_array) {
+ var app = this;
+ $.each(route_array, function(i, route_args) {
+ app.route.apply(app, route_args);
+ });
+ return this;
+ },
+
+ // A unique event namespace defined per application.
+ // All events bound with `bind()` are automatically bound within this space.
+ eventNamespace: function() {
+ return ['sammy-app', this.namespace].join('-');
+ },
+
+ // Works just like `jQuery.fn.bind()` with a couple noteable differences.
+ //
+ // * It binds all events to the application element
+ // * All events are bound within the `eventNamespace()`
+ // * Events are not actually bound until the application is started with `run()`
+ // * callbacks are evaluated within the context of a Sammy.EventContext
+ //
+ // See http://code.quirkey.com/sammy/docs/events.html for more info.
+ //
+ bind: function(name, data, callback) {
+ var app = this;
+ // build the callback
+ // if the arity is 2, callback is the second argument
+ if (typeof callback == 'undefined') { callback = data; }
+ // NOTE(review): when three arguments are passed, the `data` parameter is
+ // otherwise unused — the inner `data` below shadows it with the event's
+ // own payload. Confirm whether the 3-arg form was ever intended to work.
+ var listener_callback = function() {
+ // pull off the context from the arguments to the callback
+ var e, context, data;
+ e = arguments[0];
+ data = arguments[1];
+ if (data && data.context) {
+ // a context was passed along with the trigger; use (and consume) it
+ context = data.context;
+ delete data.context;
+ } else {
+ context = new app.context_prototype(app, 'bind', e.type, data, e.target);
+ }
+ // strip the app namespace so handlers see the bare event name
+ e.cleaned_type = e.type.replace(app.eventNamespace(), '');
+ callback.apply(context, [e, data]);
+ };
+
+ // it could be that the app element doesnt exist yet
+ // so attach to the listeners array and then run()
+ // will actually bind the event.
+ if (!this.listeners[name]) { this.listeners[name] = []; }
+ this.listeners[name].push(listener_callback);
+ if (this.isRunning()) {
+ // if the app is running
+ // *actually* bind the event to the app element
+ this._listen(name, listener_callback);
+ }
+ return this;
+ },
+
+ // Triggers custom events defined with `bind()`
+ //
+ // ### Arguments
+ //
+ // * `name` The name of the event. Automatically prefixed with the `eventNamespace()`
+ // * `data` An optional Object that can be passed to the bound callback.
+ // * `context` An optional context/Object in which to execute the bound callback.
+ // If no context is supplied a the context is a new `Sammy.EventContext`
+ //
+ trigger: function(name, data) {
+ // events fire on the app element, namespaced as '<name>.<eventNamespace()>'
+ this.$element().trigger([name, this.eventNamespace()].join('.'), [data]);
+ return this;
+ },
+
+ // Reruns the current route
+ refresh: function() {
+ // clearing last_location makes the 'location-changed' handler treat the
+ // current location as new (presumably via _checkLocation(), not visible
+ // in this chunk — confirm)
+ this.last_location = null;
+ this.trigger('location-changed');
+ return this;
+ },
+
+ // Takes a single callback that is pushed on to a stack.
+ // Before any route is run, the callbacks are evaluated in order within
+ // the current `Sammy.EventContext`
+ //
+ // If any of the callbacks explicitly return false, execution of any
+ // further callbacks and the route itself is halted.
+ //
+ // You can also provide a set of options that will define when to run this
+ // before based on the route it proceeds.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // // will run at #/route but not at #/
+ // this.before('#/route', function() {
+ // //...
+ // });
+ //
+ // // will run at #/ but not at #/route
+ // this.before({except: {path: '#/route'}}, function() {
+ // this.log('not before #/route');
+ // });
+ //
+ // this.get('#/', function() {});
+ //
+ // this.get('#/route', function() {});
+ //
+ // });
+ //
+ // See `contextMatchesOptions()` for a full list of supported options
+ //
+ before: function(options, callback) {
+ // before(callback) with no options matches every route
+ if (_isFunction(options)) {
+ callback = options;
+ options = {};
+ }
+ // stored as [options, callback] pairs, consumed in order by runRoute()
+ this.befores.push([options, callback]);
+ return this;
+ },
+
+ // A shortcut for binding a callback to be run after a route is executed.
+ // After callbacks have no guarunteed order.
+ after: function(callback) {
+ return this.bind('event-context-after', callback);
+ },
+
+
+ // Adds an around filter to the application. around filters are functions
+ // that take a single argument `callback` which is the entire route
+ // execution path wrapped up in a closure. This means you can decide whether
+ // or not to proceed with execution by not invoking `callback` or,
+ // more usefuly wrapping callback inside the result of an asynchronous execution.
+ //
+ // ### Example
+ //
+ // The most common use case for around() is calling a _possibly_ async function
+ // and executing the route within the functions callback:
+ //
+ // var app = $.sammy(function() {
+ //
+ // var current_user = false;
+ //
+ // function checkLoggedIn(callback) {
+ // // /session returns a JSON representation of the logged in user
+ // // or an empty object
+ // if (!current_user) {
+ // $.getJSON('/session', function(json) {
+ // if (json.login) {
+ // // show the user as logged in
+ // current_user = json;
+ // // execute the route path
+ // callback();
+ // } else {
+ // // show the user as not logged in
+ // current_user = false;
+ // // the context of aroundFilters is an EventContext
+ // this.redirect('#/login');
+ // }
+ // });
+ // } else {
+ // // execute the route path
+ // callback();
+ // }
+ // };
+ //
+ // this.around(checkLoggedIn);
+ //
+ // });
+ //
+ around: function(callback) {
+ // around filters are applied outermost-first at runRoute() time
+ // (runRoute reverses this array and wraps the route in each filter)
+ this.arounds.push(callback);
+ return this;
+ },
+
+ // Returns `true` if the current application is running.
+ isRunning: function() {
+ return this._running;
+ },
+
+ // Helpers extends the EventContext prototype specific to this app.
+ // This allows you to define app specific helper functions that can be used
+ // whenever you're inside of an event context (templates, routes, bind).
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // helpers({
+ // upcase: function(text) {
+ // return text.toString().toUpperCase();
+ // }
+ // });
+ //
+ // get('#/', function() { with(this) {
+ // // inside of this context I can use the helpers
+ // $('#main').html(upcase($('#main').text());
+ // }});
+ //
+ // });
+ //
+ //
+ // ### Arguments
+ //
+ // * `extensions` An object collection of functions to extend the context.
+ //
+ helpers: function(extensions) {
+ // merge the given functions onto this app's EventContext prototype
+ $.extend(this.context_prototype.prototype, extensions);
+ return this;
+ },
+
+ // Helper extends the event context just like `helpers()` but does it
+ // a single method at a time. This is especially useful for dynamically named
+ // helpers
+ //
+ // ### Example
+ //
+ // // Trivial example that adds 3 helper methods to the context dynamically
+ // var app = $.sammy(function(app) {
+ //
+ // $.each([1,2,3], function(i, num) {
+ // app.helper('helper' + num, function() {
+ // this.log("I'm helper number " + num);
+ // });
+ // });
+ //
+ // this.get('#/', function() {
+ // this.helper2(); //=> I'm helper number 2
+ // });
+ // });
+ //
+ // ### Arguments
+ //
+ // * `name` The name of the method
+ // * `method` The function to be added to the prototype at `name`
+ //
+ helper: function(name, method) {
+ // single-method variant of helpers(); useful for dynamic helper names
+ this.context_prototype.prototype[name] = method;
+ return this;
+ },
+
+ // Actually starts the application's lifecycle. `run()` should be invoked
+ // within a document.ready block to ensure the DOM exists before binding events, etc.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() { ... }); // your application
+ // $(function() { // document.ready
+ // app.run();
+ // });
+ //
+ // ### Arguments
+ //
+ // * `start_url` Optionally, a String can be passed which the App will redirect to
+ // after the events/routes have been bound.
+ run: function(start_url) {
+ // running twice is a no-op
+ if (this.isRunning()) { return false; }
+ var app = this;
+
+ // actually bind all the listeners
+ // (queued by bind() before the app was running)
+ $.each(this.listeners.toHash(), function(name, callbacks) {
+ $.each(callbacks, function(i, listener_callback) {
+ app._listen(name, listener_callback);
+ });
+ });
+
+ this.trigger('run', {start_url: start_url});
+ this._running = true;
+ // set last location
+ this.last_location = null;
+ // only redirect to start_url when there is no location already set
+ if (this.getLocation() == '' && typeof start_url != 'undefined') {
+ this.setLocation(start_url);
+ }
+ // check url
+ this._checkLocation();
+ this._location_proxy.bind();
+ this.bind('location-changed', function() {
+ app._checkLocation();
+ });
+
+ // bind to submit to capture post/put/delete routes
+ this.bind('submit', function(e) {
+ var returned = app._checkFormSubmission($(e.target).closest('form'));
+ return (returned === false) ? e.preventDefault() : false;
+ });
+
+ // bind unload to body unload
+ $(window).bind('beforeunload', function() {
+ app.unload();
+ });
+
+ // trigger html changed
+ return this.trigger('changed');
+ },
+
+ // The opposite of `run()`, un-binds all event listeners and intervals
+ // `run()` Automaticaly binds a `onunload` event to run this when
+ // the document is closed.
+ unload: function() {
+ // unloading a stopped app is a no-op
+ if (!this.isRunning()) { return false; }
+ var app = this;
+ this.trigger('unload');
+ // clear interval
+ this._location_proxy.unbind();
+ // unbind form submits
+ this.$element().unbind('submit').removeClass(app.eventNamespace());
+ // unbind all events
+ $.each(this.listeners.toHash() , function(name, listeners) {
+ $.each(listeners, function(i, listener_callback) {
+ app._unlisten(name, listener_callback);
+ });
+ });
+ this._running = false;
+ return this;
+ },
+
+ // Will bind a single callback function to every event that is already
+ // being listened to in the app. This includes all the `APP_EVENTS`
+ // as well as any custom events defined with `bind()`.
+ //
+ // Used internally for debug logging.
+ bindToAllEvents: function(callback) {
+ var app = this;
+ // bind to the APP_EVENTS first
+ $.each(this.APP_EVENTS, function(i, e) {
+ app.bind(e, callback);
+ });
+ // next, bind to listener names (only if they dont exist in APP_EVENTS)
+ // keys(true) returns only non-function (attribute) keys of listeners
+ $.each(this.listeners.keys(true), function(i, name) {
+ if (app.APP_EVENTS.indexOf(name) == -1) {
+ app.bind(name, callback);
+ }
+ });
+ return this;
+ },
+
+ // Returns a copy of the given path with any query string after the hash
+ // removed.
+ routablePath: function(path) {
+ // strip a trailing '?query' (after the hash) so it never affects matching
+ return path.replace(QUERY_STRING_MATCHER, '');
+ },
+
+ // Given a verb and a String path, will return either a route object or false
+ // if a matching route can be found within the current defined set.
+ lookupRoute: function(verb, path) {
+ var app = this, routed = false;
+ this.trigger('lookup-route', {verb: verb, path: path});
+ if (typeof this.routes[verb] != 'undefined') {
+ $.each(this.routes[verb], function(i, route) {
+ if (app.routablePath(path).match(route.path)) {
+ routed = route;
+ return false;
+ }
+ });
+ }
+ return routed;
+ },
+
+ // First, invokes `lookupRoute()` and if a route is found, parses the
+ // possible URL params and then invokes the route's callback within a new
+ // `Sammy.EventContext`. If the route can not be found, it calls
+ // `notFound()`. If `raise_errors` is set to `true` and
+ // the `error()` has not been overriden, it will throw an actual JS
+ // error.
+ //
+ // You probably will never have to call this directly.
+ //
+ // ### Arguments
+ //
+ // * `verb` A String for the verb.
+ // * `path` A String path to lookup.
+ // * `params` An Object of Params pulled from the URI or passed directly.
+ //
+ // ### Returns
+ //
+ // Either returns the value returned by the route callback or raises a 404 Not Found error.
+ //
+ runRoute: function(verb, path, params, target) {
+ var app = this,
+ route = this.lookupRoute(verb, path),
+ context,
+ wrapped_route,
+ arounds,
+ around,
+ befores,
+ before,
+ callback_args,
+ final_returned;
+
+ this.log('runRoute', [verb, path].join(' '));
+ this.trigger('run-route', {verb: verb, path: path, params: params});
+ if (typeof params == 'undefined') { params = {}; }
+
+ $.extend(params, this._parseQueryString(path));
+
+ if (route) {
+ this.trigger('route-found', {route: route});
+ // pull out the params from the path
+ if ((path_params = route.path.exec(this.routablePath(path))) !== null) {
+ // first match is the full path
+ path_params.shift();
+ // for each of the matches
+ $.each(path_params, function(i, param) {
+ // if theres a matching param name
+ if (route.param_names[i]) {
+ // set the name to the match
+ params[route.param_names[i]] = _decode(param);
+ } else {
+ // initialize 'splat'
+ if (!params.splat) { params.splat = []; }
+ params.splat.push(_decode(param));
+ }
+ });
+ }
+
+ // set event context
+ context = new this.context_prototype(this, verb, path, params, target);
+ // ensure arrays
+ arounds = this.arounds.slice(0);
+ befores = this.befores.slice(0);
+ // set the callback args to the context + contents of the splat
+ callback_args = [context].concat(params.splat);
+ // wrap the route up with the before filters
+ wrapped_route = function() {
+ var returned;
+ while (befores.length > 0) {
+ before = befores.shift();
+ // check the options
+ if (app.contextMatchesOptions(context, before[0])) {
+ returned = before[1].apply(context, [context]);
+ if (returned === false) { return false; }
+ }
+ }
+ app.last_route = route;
+ context.trigger('event-context-before', {context: context});
+ returned = route.callback.apply(context, callback_args);
+ context.trigger('event-context-after', {context: context});
+ return returned;
+ };
+ $.each(arounds.reverse(), function(i, around) {
+ var last_wrapped_route = wrapped_route;
+ wrapped_route = function() { return around.apply(context, [last_wrapped_route]); };
+ });
+ try {
+ final_returned = wrapped_route();
+ } catch(e) {
+ this.error(['500 Error', verb, path].join(' '), e);
+ }
+ return final_returned;
+ } else {
+ return this.notFound(verb, path);
+ }
+ },
+
+ // Matches an object of options against an `EventContext` like object that
+ // contains `path` and `verb` attributes. Internally Sammy uses this
+ // for matching `before()` filters against specific options. You can set the
+ // object to _only_ match certain paths or verbs, or match all paths or verbs _except_
+ // those that match the options.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(),
+ // context = {verb: 'get', path: '#/mypath'};
+ //
+ // // match against a path string
+ // app.contextMatchesOptions(context, '#/mypath'); //=> true
+ // app.contextMatchesOptions(context, '#/otherpath'); //=> false
+ // // equivilent to
+ // app.contextMatchesOptions(context, {only: {path:'#/mypath'}}); //=> true
+ // app.contextMatchesOptions(context, {only: {path:'#/otherpath'}}); //=> false
+ // // match against a path regexp
+ // app.contextMatchesOptions(context, /path/); //=> true
+ // app.contextMatchesOptions(context, /^path/); //=> false
+ // // match only a verb
+ // app.contextMatchesOptions(context, {only: {verb:'get'}}); //=> true
+ // app.contextMatchesOptions(context, {only: {verb:'post'}}); //=> false
+ // // match all except a verb
+ // app.contextMatchesOptions(context, {except: {verb:'post'}}); //=> true
+ // app.contextMatchesOptions(context, {except: {verb:'get'}}); //=> false
+ // // match all except a path
+ // app.contextMatchesOptions(context, {except: {path:'#/otherpath'}}); //=> true
+ // app.contextMatchesOptions(context, {except: {path:'#/mypath'}}); //=> false
+ //
+ contextMatchesOptions: function(context, match_options, positive) {
+ // empty options always match
+ var options = match_options;
+ if (typeof options === 'undefined' || options == {}) {
+ return true;
+ }
+ if (typeof positive === 'undefined') {
+ positive = true;
+ }
+ // normalize options
+ if (typeof options === 'string' || _isFunction(options.test)) {
+ options = {path: options};
+ }
+ if (options.only) {
+ return this.contextMatchesOptions(context, options.only, true);
+ } else if (options.except) {
+ return this.contextMatchesOptions(context, options.except, false);
+ }
+ var path_matched = true, verb_matched = true;
+ if (options.path) {
+ // wierd regexp test
+ if (_isFunction(options.path.test)) {
+ path_matched = options.path.test(context.path);
+ } else {
+ path_matched = (options.path.toString() === context.path);
+ }
+ }
+ if (options.verb) {
+ verb_matched = options.verb === context.verb;
+ }
+ return positive ? (verb_matched && path_matched) : !(verb_matched && path_matched);
+ },
+
+
+ // Delegates to the `location_proxy` to get the current location.
+ // See `Sammy.HashLocationProxy` for more info on location proxies.
+ getLocation: function() {
+ return this._location_proxy.getLocation();
+ },
+
+ // Delegates to the `location_proxy` to set the current location.
+ // See `Sammy.HashLocationProxy` for more info on location proxies.
+ //
+ // ### Arguments
+ //
+ // * `new_location` A new location string (e.g. '#/')
+ //
+ setLocation: function(new_location) {
+ return this._location_proxy.setLocation(new_location);
+ },
+
+ // Swaps the content of `$element()` with `content`
+ // You can override this method to provide an alternate swap behavior
+ // for `EventContext.partial()`.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // // implements a 'fade out'/'fade in'
+ // this.swap = function(content) {
+ // this.$element().hide('slow').html(content).show('slow');
+ // }
+ //
+ // get('#/', function() {
+ // this.partial('index.html.erb') // will fade out and in
+ // });
+ //
+ // });
+ //
+ swap: function(content) {
+ return this.$element().html(content);
+ },
+
+ // a simple global cache for templates. Uses the same semantics as
+ // `Sammy.Cache` and `Sammy.Storage` so can easily be replaced with
+    // a persistent storage that lasts beyond the current request.
+ templateCache: function(key, value) {
+ if (typeof value != 'undefined') {
+ return _template_cache[key] = value;
+ } else {
+ return _template_cache[key];
+ }
+ },
+
+    // This throws a '404 Not Found' error by invoking `error()`.
+ // Override this method or `error()` to provide custom
+ // 404 behavior (i.e redirecting to / or showing a warning)
+ notFound: function(verb, path) {
+ var ret = this.error(['404 Not Found', verb, path].join(' '));
+ return (verb === 'get') ? ret : true;
+ },
+
+ // The base error handler takes a string `message` and an `Error`
+ // object. If `raise_errors` is set to `true` on the app level,
+ // this will re-throw the error to the browser. Otherwise it will send the error
+ // to `log()`. Override this method to provide custom error handling
+ // e.g logging to a server side component or displaying some feedback to the
+ // user.
+    error: function(message, original_error) {
+      // wrap (or create) an Error, prefix its message, broadcast it,
+      // then either re-throw or just log depending on `raise_errors`
+      var err = original_error || new Error();
+      err.message = [message, err.message].join(' ');
+      this.trigger('error', {message: err.message, error: err});
+      if (!this.raise_errors) {
+        this.log(err.message, err);
+      } else {
+        throw(err);
+      }
+    },
+
+ _checkLocation: function() {
+ var location, returned;
+ // get current location
+ location = this.getLocation();
+ // compare to see if hash has changed
+ if (location != this.last_location) {
+ // reset last location
+ this.last_location = location;
+ // lookup route for current hash
+ returned = this.runRoute('get', location);
+ }
+ return returned;
+ },
+
+ _getFormVerb: function(form) {
+ var $form = $(form), verb;
+ $_method = $form.find('input[name="_method"]');
+ if ($_method.length > 0) { verb = $_method.val(); }
+ if (!verb) { verb = $form[0].getAttribute('method'); }
+ return $.trim(verb.toString().toLowerCase());
+ },
+
+ _checkFormSubmission: function(form) {
+ var $form, path, verb, params, returned;
+ this.trigger('check-form-submission', {form: form});
+ $form = $(form);
+ path = $form.attr('action');
+ verb = this._getFormVerb($form);
+ if (!verb || verb == '') { verb = 'get'; }
+ this.log('_checkFormSubmission', $form, path, verb);
+ if (verb === 'get') {
+ this.setLocation(path + '?' + $form.serialize());
+ returned = false;
+ } else {
+ params = $.extend({}, this._parseFormParams($form));
+ returned = this.runRoute(verb, path, params, form.get(0));
+ };
+ return (typeof returned == 'undefined') ? false : returned;
+ },
+
+ _parseFormParams: function($form) {
+ var params = {},
+ form_fields = $form.serializeArray(),
+ i;
+ for (i = 0; i < form_fields.length; i++) {
+ params = this._parseParamPair(params, form_fields[i].name, form_fields[i].value);
+ }
+ return params;
+ },
+
+ _parseQueryString: function(path) {
+ var params = {}, parts, pairs, pair, i;
+
+ parts = path.match(QUERY_STRING_MATCHER);
+ if (parts) {
+ pairs = parts[1].split('&');
+ for (i = 0; i < pairs.length; i++) {
+ pair = pairs[i].split('=');
+ params = this._parseParamPair(params, _decode(pair[0]), _decode(pair[1]));
+ }
+ }
+ return params;
+ },
+
+ _parseParamPair: function(params, key, value) {
+ if (params[key]) {
+ if (_isArray(params[key])) {
+ params[key].push(value);
+ } else {
+ params[key] = [params[key], value];
+ }
+ } else {
+ params[key] = value;
+ }
+ return params;
+ },
+
+ _listen: function(name, callback) {
+ return this.$element().bind([name, this.eventNamespace()].join('.'), callback);
+ },
+
+ _unlisten: function(name, callback) {
+ return this.$element().unbind([name, this.eventNamespace()].join('.'), callback);
+ }
+
+ });
+
+ // `Sammy.RenderContext` is an object that makes sequential template loading,
+  // rendering and interpolation seamless even when dealing with asynchronous
+ // operations.
+ //
+ // `RenderContext` objects are not usually created directly, rather they are
+  // instantiated from a `Sammy.EventContext` by using `render()`, `load()` or
+ // `partial()` which all return `RenderContext` objects.
+ //
+ // `RenderContext` methods always returns a modified `RenderContext`
+ // for chaining (like jQuery itself).
+ //
+ // The core magic is in the `then()` method which puts the callback passed as
+ // an argument into a queue to be executed once the previous callback is complete.
+ // All the methods of `RenderContext` are wrapped in `then()` which allows you
+  // to queue up methods by chaining, but maintaining a guaranteed execution order
+ // even with remote calls to fetch templates.
+ //
+ Sammy.RenderContext = function(event_context) {
+ this.event_context = event_context;
+ this.callbacks = [];
+ this.previous_content = null;
+ this.content = null;
+ this.next_engine = false;
+ this.waiting = false;
+ };
+
+ $.extend(Sammy.RenderContext.prototype, {
+
+ // The "core" of the `RenderContext` object, adds the `callback` to the
+ // queue. If the context is `waiting` (meaning an async operation is happening)
+ // then the callback will be executed in order, once the other operations are
+ // complete. If there is no currently executing operation, the `callback`
+ // is executed immediately.
+ //
+ // The value returned from the callback is stored in `content` for the
+    // subsequent operation. If you return `false`, the queue will pause, and
+ // the next callback in the queue will not be executed until `next()` is
+    // called. This allows for the guaranteed order of execution while working
+ // with async operations.
+ //
+ // ### Example
+ //
+ // this.get('#/', function() {
+ // // initialize the RenderContext
+ // // Even though `load()` executes async, the next `then()`
+ // // wont execute until the load finishes
+ // this.load('myfile.txt')
+ // .then(function(content) {
+ // // the first argument to then is the content of the
+ // // prev operation
+ // $('#main').html(content);
+ // });
+ // });
+ //
+ then: function(callback) {
+ if (_isFunction(callback)) {
+ var context = this;
+ if (this.waiting) {
+ this.callbacks.push(callback);
+ } else {
+ this.wait();
+ setTimeout(function() {
+ var returned = callback.apply(context, [context.content, context.previous_content]);
+ if (returned !== false) {
+ context.next(returned);
+ }
+ }, 13);
+ }
+ }
+ return this;
+ },
+
+ // Pause the `RenderContext` queue. Combined with `next()` allows for async
+ // operations.
+ //
+ // ### Example
+ //
+ // this.get('#/', function() {
+ // this.load('mytext.json')
+ // .then(function(content) {
+ // var context = this,
+ // data = JSON.parse(content);
+ // // pause execution
+ // context.wait();
+ // // post to a url
+ // $.post(data.url, {}, function(response) {
+ // context.next(JSON.parse(response));
+ // });
+ // })
+ // .then(function(data) {
+ // // data is json from the previous post
+ // $('#message').text(data.status);
+ // });
+ // });
+ wait: function() {
+ this.waiting = true;
+ },
+
+ // Resume the queue, setting `content` to be used in the next operation.
+ // See `wait()` for an example.
+ next: function(content) {
+ this.waiting = false;
+ if (typeof content !== 'undefined') {
+ this.previous_content = this.content;
+ this.content = content;
+ }
+ if (this.callbacks.length > 0) {
+ this.then(this.callbacks.shift());
+ }
+ },
+
+ // Load a template into the context.
+    // The `location` can either be a string specifying the remote path to the
+ // file, a jQuery object, or a DOM element.
+ //
+ // No interpolation happens by default, the content is stored in
+ // `content`.
+ //
+ // In the case of a path, unless the option `{cache: false}` is passed the
+ // data is stored in the app's `templateCache()`.
+ //
+ // If a jQuery or DOM object is passed the `innerHTML` of the node is pulled in.
+ // This is useful for nesting templates as part of the initial page load wrapped
+ // in invisible elements or `<script>` tags. With template paths, the template
+ // engine is looked up by the extension. For DOM/jQuery embedded templates,
+ // this isnt possible, so there are a couple of options:
+ //
+ // * pass an `{engine:}` option.
+ // * define the engine in the `data-engine` attribute of the passed node.
+ // * just store the raw template data and use `interpolate()` manually
+ //
+ // If a `callback` is passed it is executed after the template load.
+    load: function(location, options, callback) {
+      var context = this;
+      return this.then(function() {
+        var should_cache, cached;
+        // allow (location, callback) by shifting arguments
+        if (_isFunction(options)) {
+          callback = options;
+          options = {};
+        } else {
+          options = $.extend({}, options);
+        }
+        // we are executing inside a then() step, so `waiting` is set and
+        // this pushes `callback` onto the queue to run after this step
+        if (callback) { this.then(callback); }
+        if (typeof location === 'string') {
+          // location is a remote template path
+          should_cache = !(options.cache === false);
+          delete options.cache;
+          if (options.engine) {
+            // remember the engine for the following interpolate() step
+            context.next_engine = options.engine;
+            delete options.engine;
+          }
+          // cache hit: return it synchronously as this step's content
+          if (should_cache && (cached = this.event_context.app.templateCache(location))) {
+            return cached;
+          }
+          // pause the queue until the ajax success callback calls next()
+          this.wait();
+          $.ajax($.extend({
+            url: location,
+            data: {},
+            type: 'get',
+            success: function(data) {
+              if (should_cache) {
+                context.event_context.app.templateCache(location, data);
+              }
+              // resume the queue with the fetched template as content
+              context.next(data);
+            }
+          }, options));
+          // returning false keeps the queue paused (see then())
+          return false;
+        } else {
+          // location is a DOM node or jQuery object - pull its innerHTML
+          if (location.nodeType) {
+            return location.innerHTML;
+          }
+          if (location.selector) {
+            // jQuery object: the engine may be declared on the node
+            context.next_engine = location.attr('data-engine');
+            if (options.clone === false) {
+              // detach the node from the document before reading it
+              return location.remove()[0].innerHTML.toString();
+            } else {
+              return location[0].innerHTML.toString();
+            }
+          }
+        }
+      });
+    },
+
+ // `load()` a template and then `interpolate()` it with data.
+ //
+ // ### Example
+ //
+ // this.get('#/', function() {
+ // this.render('mytemplate.template', {name: 'test'});
+ // });
+ //
+ render: function(location, data, callback) {
+ if (_isFunction(location) && !data) {
+ return this.then(location);
+ } else {
+ return this.load(location).interpolate(data, location).then(callback);
+ }
+ },
+
+    // iterates over an array, applying the callback for each item. The
+ // callback takes the same style of arguments as `jQuery.each()` (index, item).
+ // The return value of each callback is collected as a single string and stored
+ // as `content` to be used in the next iteration of the `RenderContext`.
+ collect: function(array, callback) {
+ var context = this;
+ return this.then(function() {
+ var contents = "";
+ $.each(array, function(i, item) {
+ var returned = callback.apply(context, [i, item]);
+ contents += returned;
+ return returned;
+ });
+ return contents;
+ });
+ },
+
+ // loads a template, and then interpolates it for each item in the `data`
+ // array.
+ renderEach: function(location, name, data, callback) {
+ if (_isArray(name)) {
+ callback = data;
+ data = name;
+ name = null;
+ }
+ if (!data && _isArray(this.content)) {
+ data = this.content;
+ }
+ return this.load(location).collect(data, function(i, value) {
+ var idata = {};
+ name ? (idata[name] = value) : (idata = value);
+ return this.event_context.interpolate(this.content, idata, location);
+ });
+ },
+
+ // uses the previous loaded `content` and the `data` object to interpolate
+ // a template. `engine` defines the templating/interpolation method/engine
+ // that should be used. If `engine` is not passed, the `next_engine` is
+ // used. If `retain` is `true`, the final interpolated data is appended to
+ // the `previous_content` instead of just replacing it.
+ interpolate: function(data, engine, retain) {
+ var context = this;
+ return this.then(function(content, prev) {
+ if (this.next_engine) {
+ engine = this.next_engine;
+ this.next_engine = false;
+ }
+ var rendered = context.event_context.interpolate(content, data, engine);
+ return retain ? prev + rendered : rendered;
+ });
+ },
+
+ // executes `EventContext#swap()` with the `content`
+ swap: function() {
+ return this.then(function(content) {
+ this.event_context.swap(content);
+ }).trigger('changed', {});
+ },
+
+ // Same usage as `jQuery.fn.appendTo()` but uses `then()` to ensure order
+ appendTo: function(selector) {
+ return this.then(function(content) {
+ $(selector).append(content);
+ }).trigger('changed', {});
+ },
+
+ // Same usage as `jQuery.fn.prependTo()` but uses `then()` to ensure order
+ prependTo: function(selector) {
+ return this.then(function(content) {
+ $(selector).prepend(content);
+ }).trigger('changed', {});
+ },
+
+ // Replaces the `$(selector)` using `html()` with the previously loaded
+ // `content`
+ replace: function(selector) {
+ return this.then(function(content) {
+ $(selector).html(content);
+ }).trigger('changed', {});
+ },
+
+ // trigger the event in the order of the event context. Same semantics
+    // as `Sammy.EventContext#trigger()`. If data is omitted, `content`
+ // is sent as `{content: content}`
+ trigger: function(name, data) {
+ return this.then(function(content) {
+ if (typeof data == 'undefined') { data = {content: content}; }
+ this.event_context.trigger(name, data);
+ });
+ }
+
+ });
+
+ // `Sammy.EventContext` objects are created every time a route is run or a
+ // bound event is triggered. The callbacks for these events are evaluated within a `Sammy.EventContext`
+  // Within these callbacks the special methods of `EventContext` are available.
+ //
+ // ### Example
+ //
+ // $.sammy(function() {
+ // // The context here is this Sammy.Application
+ // this.get('#/:name', function() {
+ // // The context here is a new Sammy.EventContext
+ // if (this.params['name'] == 'sammy') {
+ // this.partial('name.html.erb', {name: 'Sammy'});
+ // } else {
+ // this.redirect('#/somewhere-else')
+ // }
+ // });
+ // });
+ //
+ // Initialize a new EventContext
+ //
+ // ### Arguments
+ //
+ // * `app` The `Sammy.Application` this event is called within.
+ // * `verb` The verb invoked to run this context/route.
+ // * `path` The string path invoked to run this context/route.
+ // * `params` An Object of optional params to pass to the context. Is converted
+ // to a `Sammy.Object`.
+ // * `target` a DOM element that the event that holds this context originates
+ // from. For post, put and del routes, this is the form element that triggered
+ // the route.
+ //
+ Sammy.EventContext = function(app, verb, path, params, target) {
+ this.app = app;
+ this.verb = verb;
+ this.path = path;
+ this.params = new Sammy.Object(params);
+ this.target = target;
+ };
+
+ Sammy.EventContext.prototype = $.extend({}, Sammy.Object.prototype, {
+
+ // A shortcut to the app's `$element()`
+ $element: function() {
+ return this.app.$element();
+ },
+
+ // Look up a templating engine within the current app and context.
+ // `engine` can be one of the following:
+ //
+    // * a function: should conform to `function(content, data) { return interpolated; }`
+ // * a template path: 'template.ejs', looks up the extension to match to
+ // the `ejs()` helper
+ // * a string referering to the helper: "mustache" => `mustache()`
+ //
+ // If no engine is found, use the app's default `template_engine`
+ //
+ engineFor: function(engine) {
+ var context = this, engine_match;
+ // if path is actually an engine function just return it
+ if (_isFunction(engine)) { return engine; }
+ // lookup engine name by path extension
+ engine = engine.toString();
+ if ((engine_match = engine.match(/\.([^\.]+)$/))) {
+ engine = engine_match[1];
+ }
+ // set the engine to the default template engine if no match is found
+ if (engine && _isFunction(context[engine])) {
+ return context[engine];
+ }
+ if (context.app.template_engine) {
+ return this.engineFor(context.app.template_engine);
+ }
+ return function(content, data) { return content; };
+ },
+
+ // using the template `engine` found with `engineFor()`, interpolate the
+ // `data` into `content`
+ interpolate: function(content, data, engine) {
+ return this.engineFor(engine).apply(this, [content, data]);
+ },
+
+ // Create and return a `Sammy.RenderContext` calling `render()` on it.
+    // Loads the template and interpolates the data, however does not actually
+ // place it in the DOM.
+ //
+ // ### Example
+ //
+ // // mytemplate.mustache <div class="name">{{name}}</div>
+ // render('mytemplate.mustache', {name: 'quirkey'});
+ // // sets the `content` to <div class="name">quirkey</div>
+ // render('mytemplate.mustache', {name: 'quirkey'})
+ // .appendTo('ul');
+ // // appends the rendered content to $('ul')
+ //
+ render: function(location, data, callback) {
+ return new Sammy.RenderContext(this).render(location, data, callback);
+ },
+
+ // create a new `Sammy.RenderContext` calling `load()` with `location` and
+ // `options`. Called without interpolation or placement, this allows for
+ // preloading/caching the templates.
+ load: function(location, options, callback) {
+ return new Sammy.RenderContext(this).load(location, options, callback);
+ },
+
+    // `render()` the `location` with `data` and then `swap()` the
+ // app's `$element` with the rendered content.
+ partial: function(location, data) {
+ return this.render(location, data).swap();
+ },
+
+ // Changes the location of the current window. If `to` begins with
+ // '#' it only changes the document's hash. If passed more than 1 argument
+ // redirect will join them together with forward slashes.
+ //
+ // ### Example
+ //
+ // redirect('#/other/route');
+ // // equivilent to
+ // redirect('#', 'other', 'route');
+ //
+ redirect: function() {
+ var to, args = _makeArray(arguments),
+ current_location = this.app.getLocation();
+ if (args.length > 1) {
+ args.unshift('/');
+ to = this.join.apply(this, args);
+ } else {
+ to = args[0];
+ }
+ this.trigger('redirect', {to: to});
+ this.app.last_location = this.path;
+ this.app.setLocation(to);
+ if (current_location == to) {
+ this.app.trigger('location-changed');
+ }
+ },
+
+ // Triggers events on `app` within the current context.
+ trigger: function(name, data) {
+ if (typeof data == 'undefined') { data = {}; }
+ if (!data.context) { data.context = this; }
+ return this.app.trigger(name, data);
+ },
+
+ // A shortcut to app's `eventNamespace()`
+ eventNamespace: function() {
+ return this.app.eventNamespace();
+ },
+
+ // A shortcut to app's `swap()`
+ swap: function(contents) {
+ return this.app.swap(contents);
+ },
+
+ // Raises a possible `notFound()` error for the current path.
+ notFound: function() {
+ return this.app.notFound(this.verb, this.path);
+ },
+
+ // //=> Sammy.EventContext: get #/ {}
+ toString: function() {
+ return "Sammy.EventContext: " + [this.verb, this.path, this.params].join(' ');
+ }
+
+ });
+
+ // An alias to Sammy
+ $.sammy = window.Sammy = Sammy;
+
+})(jQuery);
--- /dev/null
+// -- Sammy -- /sammy.js
+// http://code.quirkey.com/sammy
+// Version: 0.6.0
+// Built: Wed Sep 01 23:12:46 -0700 2010
+// NOTE(review): minified build artifact of the Sammy 0.6.0 source above.
+// Do not hand-edit; regenerate from the unminified source instead.
+// NOTE(review): the escapeHTML helper below looks mangled - its replace()
+// replacements are identical to their targets (e.g. /&/g -> "&"), which
+// suggests HTML entities ("&amp;" etc.) were decoded somewhere in transit;
+// verify against the upstream sammy-0.6.0.min.js. Some long lines also
+// appear hard-wrapped (including inside a string literal) by extraction.
+(function(g){var m,f="([^/]+)",i=/:([\w\d]+)/g,j=/\?([^#]*)$/,b=function(n){return Array.prototype.slice.call(n)},c=function(n){return Object.prototype.toString.call(n)==="[object Function]"},k=function(n){return Object.prototype.toString.call(n)==="[object Array]"},h=decodeURIComponent,e=function(n){return n.replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">")},l=function(n){return function(o,p){return this.route.apply(this,[n,o,p])}},a={},d=[];m=function(){var o=b(arguments),p,n;m.apps=m.apps||{};if(o.length===0||o[0]&&c(o[0])){return m.apply(m,["body"].concat(o))}else{if(typeof(n=o.shift())=="string"){p=m.apps[n]||new m.Application();p.element_selector=n;if(o.length>0){g.each(o,function(q,r){p.use(r)})}if(p.element_selector!=n){delete m.apps[n]}m.apps[p.element_selector]=p;return p}}};m.VERSION="0.6.0";m.addLogger=function(n){d.push(n)};m.log=function(){var n=b(arguments);n.unshift("["+Date()+"]");g.each(d,function(p,o){o.apply(m,n)})};if(typeof window.console!="undefined"){if(c(console.log.apply)){m.addLogger(function(){window.console.log.apply(console,arguments)})}else{m.addLogger(function(){window.console.log(arguments)})}}else{if(typeof console!="undefined"){m.addLogger(function(){console.log.apply(console,arguments)})}}g.extend(m,{makeArray:b,isFunction:c,isArray:k});m.Object=function(n){return g.extend(this,n||{})};g.extend(m.Object.prototype,{escapeHTML:e,h:e,toHash:function(){var n={};g.each(this,function(p,o){if(!c(o)){n[p]=o}});return n},toHTML:function(){var n="";g.each(this,function(p,o){if(!c(o)){n+="<strong>"+p+"</strong> "+o+"<br />"}});return n},keys:function(n){var o=[];for(var p in this){if(!c(this[p])||!n){o.push(p)}}return o},has:function(n){return this[n]&&g.trim(this[n].toString())!=""},join:function(){var o=b(arguments);var n=o.shift();return o.join(n)},log:function(){m.log.apply(m,arguments)},toString:function(n){var o=[];g.each(this,function(q,p){if(!c(p)||n){o.push('"'+q+'": '+p.toString())}});return"Sammy.Object: 
+{"+o.join(",")+"}"}});m.HashLocationProxy=function(o,n){this.app=o;this.is_native=false;this._startPolling(n)};m.HashLocationProxy.prototype={bind:function(){var n=this,o=this.app;g(window).bind("hashchange."+this.app.eventNamespace(),function(q,p){if(n.is_native===false&&!p){m.log("native hash change exists, using");n.is_native=true;clearInterval(m.HashLocationProxy._interval)}o.trigger("location-changed")});if(!m.HashLocationProxy._bindings){m.HashLocationProxy._bindings=0}m.HashLocationProxy._bindings++},unbind:function(){g(window).unbind("hashchange."+this.app.eventNamespace());m.HashLocationProxy._bindings--;if(m.HashLocationProxy._bindings<=0){clearInterval(m.HashLocationProxy._interval)}},getLocation:function(){var n=window.location.toString().match(/^[^#]*(#.+)$/);return n?n[1]:""},setLocation:function(n){return(window.location=n)},_startPolling:function(p){var o=this;if(!m.HashLocationProxy._interval){if(!p){p=10}var n=function(){current_location=o.getLocation();if(!m.HashLocationProxy._last_location||current_location!=m.HashLocationProxy._last_location){setTimeout(function(){g(window).trigger("hashchange",[true])},13)}m.HashLocationProxy._last_location=current_location};n();m.HashLocationProxy._interval=setInterval(n,p)}}};m.Application=function(n){var o=this;this.routes={};this.listeners=new m.Object({});this.arounds=[];this.befores=[];this.namespace=(new Date()).getTime()+"-"+parseInt(Math.random()*1000,10);this.context_prototype=function(){m.EventContext.apply(this,arguments)};this.context_prototype.prototype=new m.EventContext();if(c(n)){n.apply(this,[this])}if(!this._location_proxy){this.setLocationProxy(new 
+m.HashLocationProxy(this,this.run_interval_every))}if(this.debug){this.bindToAllEvents(function(q,p){o.log(o.toString(),q.cleaned_type,p||{})})}};m.Application.prototype=g.extend({},m.Object.prototype,{ROUTE_VERBS:["get","post","put","delete"],APP_EVENTS:["run","unload","lookup-route","run-route","route-found","event-context-before","event-context-after","changed","error","check-form-submission","redirect"],_last_route:null,_location_proxy:null,_running:false,element_selector:"body",debug:false,raise_errors:false,run_interval_every:50,template_engine:null,toString:function(){return"Sammy.Application:"+this.element_selector},$element:function(){return g(this.element_selector)},use:function(){var n=b(arguments),p=n.shift(),o=p||"";try{n.unshift(this);if(typeof p=="string"){o="Sammy."+p;p=m[p]}p.apply(this,n)}catch(q){if(typeof p==="undefined"){this.error("Plugin Error: called use() but plugin ("+o.toString()+") is not defined",q)}else{if(!c(p)){this.error("Plugin Error: called use() but '"+o.toString()+"' is not a function",q)}else{this.error("Plugin Error",q)}}}return this},setLocationProxy:function(n){var o=this._location_proxy;this._location_proxy=n;if(this.isRunning()){if(o){o.unbind()}this._location_proxy.bind()}},route:function(q,o,s){var p=this,r=[],n;if(!s&&c(o)){o=q;s=o;q="any"}q=q.toLowerCase();if(o.constructor==String){i.lastIndex=0;while((path_match=i.exec(o))!==null){r.push(path_match[1])}o=new RegExp("^"+o.replace(i,f)+"$")}if(typeof s=="string"){s=p[s]}n=function(t){var u={verb:t,path:o,callback:s,param_names:r};p.routes[t]=p.routes[t]||[];p.routes[t].push(u)};if(q==="any"){g.each(this.ROUTE_VERBS,function(u,t){n(t)})}else{n(q)}return this},get:l("get"),post:l("post"),put:l("put"),del:l("delete"),any:l("any"),mapRoutes:function(o){var n=this;g.each(o,function(p,q){n.route.apply(n,q)});return this},eventNamespace:function(){return["sammy-app",this.namespace].join("-")},bind:function(n,p,r){var q=this;if(typeof r=="undefined"){r=p}var o=function(){var 
+u,s,t;u=arguments[0];t=arguments[1];if(t&&t.context){s=t.context;delete t.context}else{s=new q.context_prototype(q,"bind",u.type,t,u.target)}u.cleaned_type=u.type.replace(q.eventNamespace(),"");r.apply(s,[u,t])};if(!this.listeners[n]){this.listeners[n]=[]}this.listeners[n].push(o);if(this.isRunning()){this._listen(n,o)}return this},trigger:function(n,o){this.$element().trigger([n,this.eventNamespace()].join("."),[o]);return this},refresh:function(){this.last_location=null;this.trigger("location-changed");return this},before:function(n,o){if(c(n)){o=n;n={}}this.befores.push([n,o]);return this},after:function(n){return this.bind("event-context-after",n)},around:function(n){this.arounds.push(n);return this},isRunning:function(){return this._running},helpers:function(n){g.extend(this.context_prototype.prototype,n);return this},helper:function(n,o){this.context_prototype.prototype[n]=o;return this},run:function(n){if(this.isRunning()){return false}var o=this;g.each(this.listeners.toHash(),function(p,q){g.each(q,function(s,r){o._listen(p,r)})});this.trigger("run",{start_url:n});this._running=true;this.last_location=null;if(this.getLocation()==""&&typeof n!="undefined"){this.setLocation(n)}this._checkLocation();this._location_proxy.bind();this.bind("location-changed",function(){o._checkLocation()});this.bind("submit",function(q){var p=o._checkFormSubmission(g(q.target).closest("form"));return(p===false)?q.preventDefault():false});g(window).bind("beforeunload",function(){o.unload()});return this.trigger("changed")},unload:function(){if(!this.isRunning()){return false}var n=this;this.trigger("unload");this._location_proxy.unbind();this.$element().unbind("submit").removeClass(n.eventNamespace());g.each(this.listeners.toHash(),function(o,p){g.each(p,function(r,q){n._unlisten(o,q)})});this._running=false;return this},bindToAllEvents:function(o){var 
+n=this;g.each(this.APP_EVENTS,function(p,q){n.bind(q,o)});g.each(this.listeners.keys(true),function(q,p){if(n.APP_EVENTS.indexOf(p)==-1){n.bind(p,o)}});return this},routablePath:function(n){return n.replace(j,"")},lookupRoute:function(q,o){var p=this,n=false;this.trigger("lookup-route",{verb:q,path:o});if(typeof this.routes[q]!="undefined"){g.each(this.routes[q],function(s,r){if(p.routablePath(o).match(r.path)){n=r;return false}})}return n},runRoute:function(p,B,r,u){var q=this,z=this.lookupRoute(p,B),o,x,s,w,A,y,v,n;this.log("runRoute",[p,B].join(" "));this.trigger("run-route",{verb:p,path:B,params:r});if(typeof r=="undefined"){r={}}g.extend(r,this._parseQueryString(B));if(z){this.trigger("route-found",{route:z});if((path_params=z.path.exec(this.routablePath(B)))!==null){path_params.shift();g.each(path_params,function(C,D){if(z.param_names[C]){r[z.param_names[C]]=h(D)}else{if(!r.splat){r.splat=[]}r.splat.push(h(D))}})}o=new this.context_prototype(this,p,B,r,u);s=this.arounds.slice(0);A=this.befores.slice(0);v=[o].concat(r.splat);x=function(){var C;while(A.length>0){y=A.shift();if(q.contextMatchesOptions(o,y[0])){C=y[1].apply(o,[o]);if(C===false){return false}}}q.last_route=z;o.trigger("event-context-before",{context:o});C=z.callback.apply(o,v);o.trigger("event-context-after",{context:o});return C};g.each(s.reverse(),function(C,D){var E=x;x=function(){return D.apply(o,[E])}});try{n=x()}catch(t){this.error(["500 Error",p,B].join(" "),t)}return n}else{return this.notFound(p,B)}},contextMatchesOptions:function(q,s,o){var p=s;if(typeof p==="undefined"||p=={}){return true}if(typeof o==="undefined"){o=true}if(typeof p==="string"||c(p.test)){p={path:p}}if(p.only){return this.contextMatchesOptions(q,p.only,true)}else{if(p.except){return this.contextMatchesOptions(q,p.except,false)}}var n=true,r=true;if(p.path){if(c(p.path.test)){n=p.path.test(q.path)}else{n=(p.path.toString()===q.path)}}if(p.verb){r=p.verb===q.verb}return o?(r&&n):!(r&&n)},getLocation:function(){return 
+this._location_proxy.getLocation()},setLocation:function(n){return this._location_proxy.setLocation(n)},swap:function(n){return this.$element().html(n)},templateCache:function(n,o){if(typeof o!="undefined"){return a[n]=o}else{return a[n]}},notFound:function(p,o){var n=this.error(["404 Not Found",p,o].join(" "));return(p==="get")?n:true},error:function(o,n){if(!n){n=new Error()}n.message=[o,n.message].join(" ");this.trigger("error",{message:n.message,error:n});if(this.raise_errors){throw (n)}else{this.log(n.message,n)}},_checkLocation:function(){var n,o;n=this.getLocation();if(n!=this.last_location){this.last_location=n;o=this.runRoute("get",n)}return o},_getFormVerb:function(o){var n=g(o),p;$_method=n.find('input[name="_method"]');if($_method.length>0){p=$_method.val()}if(!p){p=n[0].getAttribute("method")}return g.trim(p.toString().toLowerCase())},_checkFormSubmission:function(p){var n,q,s,r,o;this.trigger("check-form-submission",{form:p});n=g(p);q=n.attr("action");s=this._getFormVerb(n);if(!s||s==""){s="get"}this.log("_checkFormSubmission",n,q,s);if(s==="get"){this.setLocation(q+"?"+n.serialize());o=false}else{r=g.extend({},this._parseFormParams(n));o=this.runRoute(s,q,r,p.get(0))}return(typeof o=="undefined")?false:o},_parseFormParams:function(n){var q={},p=n.serializeArray(),o;for(o=0;o<p.length;o++){q=this._parseParamPair(q,p[o].name,p[o].value)}return q},_parseQueryString:function(q){var s={},p,o,r,n;p=q.match(j);if(p){o=p[1].split("&");for(n=0;n<o.length;n++){r=o[n].split("=");s=this._parseParamPair(s,h(r[0]),h(r[1]))}}return s},_parseParamPair:function(p,n,o){if(p[n]){if(k(p[n])){p[n].push(o)}else{p[n]=[p[n],o]}}else{p[n]=o}return p},_listen:function(n,o){return this.$element().bind([n,this.eventNamespace()].join("."),o)},_unlisten:function(n,o){return 
+this.$element().unbind([n,this.eventNamespace()].join("."),o)}});m.RenderContext=function(n){this.event_context=n;this.callbacks=[];this.previous_content=null;this.content=null;this.next_engine=false;this.waiting=false};g.extend(m.RenderContext.prototype,{then:function(o){if(c(o)){var n=this;if(this.waiting){this.callbacks.push(o)}else{this.wait();setTimeout(function(){var p=o.apply(n,[n.content,n.previous_content]);if(p!==false){n.next(p)}},13)}}return this},wait:function(){this.waiting=true},next:function(n){this.waiting=false;if(typeof n!=="undefined"){this.previous_content=this.content;this.content=n}if(this.callbacks.length>0){this.then(this.callbacks.shift())}},load:function(n,o,q){var p=this;return this.then(function(){var r,s;if(c(o)){q=o;o={}}else{o=g.extend({},o)}if(q){this.then(q)}if(typeof n==="string"){r=!(o.cache===false);delete o.cache;if(o.engine){p.next_engine=o.engine;delete o.engine}if(r&&(s=this.event_context.app.templateCache(n))){return s}this.wait();g.ajax(g.extend({url:n,data:{},type:"get",success:function(t){if(r){p.event_context.app.templateCache(n,t)}p.next(t)}},o));return false}else{if(n.nodeType){return n.innerHTML}if(n.selector){p.next_engine=n.attr("data-engine");if(o.clone===false){return n.remove()[0].innerHTML.toString()}else{return n[0].innerHTML.toString()}}}})},render:function(n,o,p){if(c(n)&&!o){return this.then(n)}else{return this.load(n).interpolate(o,n).then(p)}},collect:function(p,o){var n=this;return this.then(function(){var q="";g.each(p,function(r,t){var s=o.apply(n,[r,t]);q+=s;return s});return q})},renderEach:function(n,o,p,q){if(k(o)){q=p;p=o;o=null}if(!p&&k(this.content)){p=this.content}return this.load(n).collect(p,function(r,s){var t={};o?(t[o]=s):(t=s);return this.event_context.interpolate(this.content,t,n)})},interpolate:function(q,p,n){var o=this;return this.then(function(s,r){if(this.next_engine){p=this.next_engine;this.next_engine=false}var t=o.event_context.interpolate(s,q,p);return 
+n?r+t:t})},swap:function(){return this.then(function(n){this.event_context.swap(n)}).trigger("changed",{})},appendTo:function(n){return this.then(function(o){g(n).append(o)}).trigger("changed",{})},prependTo:function(n){return this.then(function(o){g(n).prepend(o)}).trigger("changed",{})},replace:function(n){return this.then(function(o){g(n).html(o)}).trigger("changed",{})},trigger:function(n,o){return this.then(function(p){if(typeof o=="undefined"){o={content:p}}this.event_context.trigger(n,o)})}});m.EventContext=function(r,q,o,p,n){this.app=r;this.verb=q;this.path=o;this.params=new m.Object(p);this.target=n};m.EventContext.prototype=g.extend({},m.Object.prototype,{$element:function(){return this.app.$element()},engineFor:function(p){var o=this,n;if(c(p)){return p}p=p.toString();if((n=p.match(/\.([^\.]+)$/))){p=n[1]}if(p&&c(o[p])){return o[p]}if(o.app.template_engine){return this.engineFor(o.app.template_engine)}return function(q,r){return q}},interpolate:function(o,p,n){return this.engineFor(n).apply(this,[o,p])},render:function(n,o,p){return new m.RenderContext(this).render(n,o,p)},load:function(n,o,p){return new m.RenderContext(this).load(n,o,p)},partial:function(n,o){return this.render(n,o).swap()},redirect:function(){var p,o=b(arguments),n=this.app.getLocation();if(o.length>1){o.unshift("/");p=this.join.apply(this,o)}else{p=o[0]}this.trigger("redirect",{to:p});this.app.last_location=this.path;this.app.setLocation(p);if(n==p){this.app.trigger("location-changed")}},trigger:function(n,o){if(typeof o=="undefined"){o={}}if(!o.context){o.context=this}return this.app.trigger(n,o)},eventNamespace:function(){return this.app.eventNamespace()},swap:function(n){return this.app.swap(n)},notFound:function(){return this.app.notFound(this.verb,this.path)},toString:function(){return"Sammy.EventContext: "+[this.verb,this.path,this.params].join(" ")}});g.sammy=window.Sammy=m})(jQuery);
--- /dev/null
+<h1>Not found</h1>
+
+<p>The object you clicked on was not found; it may have been deleted on the server.</p>
--- /dev/null
+<% if (mode == 'queue') { %>
+ <h3 style="padding-top: 20px;">Add binding to this queue</h3>
+<% } else { %>
+ <h3 style="padding-top: 20px;">Add binding from this exchange</h3>
+<% } %>
+ <form action="#/bindings" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(parent.vhost) %>"/>
+<% if (mode == 'queue') { %>
+ <input type="hidden" name="destination" value="<%= fmt_string(parent.name) %>"/>
+<% } else { %>
+ <input type="hidden" name="source" value="<%= fmt_string(parent.name) %>"/>
+<% } %>
+ <table class="form">
+<% if (mode == 'queue') { %>
+ <tr>
+ <th>
+ <label>From exchange:</label>
+ </th>
+ <td>
+ <input type="hidden" name="destination_type" value="q"/>
+ <input type="text" name="source" value=""/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+<% } else { %>
+ <tr>
+ <th>
+ <select name="destination_type" class="narrow">
+ <option value="e">To exchange</option>
+ <option value="q" selected="selected">To queue</option>
+ </select>:
+ </th>
+ <td>
+ <input type="text" name="destination" value=""/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+<% } %>
+ <tr>
+ <th><label>Routing key:</label></th>
+ <td><input type="text" name="routing_key" value=""/></td>
+ </tr>
+ <tr>
+ <th><label>Arguments:</label></th>
+ <td><div class="multifield" id="arguments"></div></td>
+ </tr>
+ </table>
+ <input type="submit" value="Bind"/>
+ </form>
--- /dev/null
+<%= maybe_truncate(bindings) %>
+<% if (bindings.length > 0) { %>
+ <table class="list updatable">
+ <thead>
+ <tr>
+<% if (mode == 'exchange_source') { %>
+ <th>To</th>
+<% } else { %>
+ <th>From</th>
+<% } %>
+ <th>Routing key</th>
+ <th>Arguments</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < bindings.length; i++) {
+ var binding = bindings[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+<% if (binding.source == '') { %>
+ <td colspan="4">
+ (Default exchange binding)
+ </td>
+<% } else { %>
+<% if (mode == 'queue' || mode == 'exchange_destination') { %>
+ <td>
+ <span class="exchange">
+ <%= link_exchange(binding.vhost, binding.source) %>
+ </span>
+ </td>
+<% } else if (binding.destination_type == 'exchange') { %>
+ <td>
+ <span class="exchange" title="Exchange">
+ <%= link_exchange(binding.vhost, binding.destination) %>
+ </span>
+ </td>
+<% } else { %>
+ <td>
+ <span class="queue" title="Queue">
+ <%= link_queue(binding.vhost, binding.destination) %>
+ </span>
+ </td>
+<% } %>
+ <td><%= fmt_string(binding.routing_key) %></td>
+ <td><%= fmt_table_short(binding.arguments) %></td>
+ <td class="c">
+ <form action="#/bindings" method="delete" class="confirm">
+ <input type="hidden" name="vhost" value="<%= fmt_string(binding.vhost) %>"/>
+ <input type="hidden" name="source" value="<%= fmt_exchange_url(binding.source) %>"/>
+ <input type="hidden" name="destination" value="<%= fmt_string(binding.destination) %>"/>
+ <input type="hidden" name="destination_type" value="<%= binding.destination_type.substring(0, 1) %>"/>
+ <input type="hidden" name="properties_key" value="<%= fmt_string(binding.properties_key) %>"/>
+ <input type="submit" value="Unbind"/>
+ </form>
+ </td>
+ <% } %>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+
+<% } else { %>
+ <p>... no bindings ...</p>
+<% } %>
--- /dev/null
+<h1>Channel: <b><%= fmt_escape_html(channel.name) %></b></h1>
+
+<div class="section">
+<h2>Overview</h2>
+<div class="hider">
+<% if (statistics_level == 'fine') { %>
+ <%= message_rates('msg-rates-ch', channel.message_stats) %>
+<% } %>
+
+<div class="updatable">
+<h3>Details</h3>
+<table class="facts">
+ <tr>
+ <th>Connection</th>
+ <td><%= link_conn(channel.connection_details.name) %></td>
+ </tr>
+<% if (nodes_interesting) { %>
+ <tr>
+ <th>Node</th>
+ <td><%= fmt_node(channel.node) %></td>
+ </tr>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th>Virtual host</th>
+ <td><%= fmt_string(channel.vhost) %></td>
+ </tr>
+<% } %>
+ <tr>
+ <th>Username</th>
+ <td><%= fmt_string(channel.user) %></td>
+ </tr>
+ <tr>
+ <th>Mode <span class="help" id="channel-mode"></span></th>
+ <td><%= fmt_channel_mode(channel) %></td>
+ </tr>
+</table>
+
+<table class="facts">
+ <tr>
+ <th>State</th>
+ <td><%= fmt_object_state(channel) %></td>
+ </tr>
+ <tr>
+ <th>Prefetch count</th>
+ <td><%= channel.prefetch_count %></td>
+ </tr>
+ <tr>
+ <th>Global prefetch count</th>
+ <td><%= channel.global_prefetch_count %></td>
+ </tr>
+</table>
+
+<table class="facts">
+ <tr>
+ <th>Messages unacknowledged</th>
+ <td><%= channel.messages_unacknowledged %></td>
+ </tr>
+ <tr>
+ <th>Messages unconfirmed</th>
+ <td><%= channel.messages_unconfirmed %></td>
+ </tr>
+ <tr>
+ <th>Messages uncommitted</th>
+ <td><%= channel.messages_uncommitted %></td>
+ </tr>
+ <tr>
+ <th>Acks uncommitted</th>
+ <td><%= channel.acks_uncommitted %></td>
+ </tr>
+</table>
+</div>
+
+</div>
+</div>
+
+<div class="section">
+ <h2>Consumers</h2>
+ <div class="hider updatable">
+<%= format('consumers', {'mode': 'channel', 'consumers': channel.consumer_details}) %>
+ </div>
+</div>
+
+<% if (statistics_level == 'fine') { %>
+<div class="section">
+<h2>Message rates breakdown</h2>
+<div class="hider updatable">
+<table class="two-col-layout">
+ <tr>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'channel',
+ 'object': channel.publishes,
+ 'label': 'Publishes'}) %>
+ </td>
+ <td>
+ <%= format('msg-detail-deliveries',
+ {'mode': 'channel',
+ 'object': channel.deliveries}) %>
+ </td>
+ </tr>
+</table>
+</div>
+</div>
+<% } %>
--- /dev/null
+<% if (channels.length > 0) { %>
+<%
+ var col_return_unroutable = !is_col_empty(channels, 'return_unroutable');
+ var col_redeliver = !is_col_empty(channels, 'redeliver');
+ var ratesWidth = col_return_unroutable ? 5 : 4;
+%>
+<table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'standalone') { %>
+ <th colspan="<% if (nodes_interesting) { %>2<% } else { %>1<% } %>"></th>
+ <th colspan="<% if (vhosts_interesting) { %>7<% } else { %>6<% } %>">Details</th>
+<% } else { %>
+ <th></th>
+ <th colspan="5">Details</th>
+<% } %>
+<% if (statistics_level == 'fine') { %>
+ <th colspan="<%= ratesWidth %>">Message rates</th>
+<% } %>
+ </tr>
+ <tr>
+<% if (mode == 'standalone') { %>
+ <th><%= fmt_sort('Channel', 'name') %></th>
+<% if (nodes_interesting) { %>
+ <th><%= fmt_sort('Node', 'node') %></th>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+ <th><%= fmt_sort('User name', 'user') %></th>
+ <th>Mode <span class="help" id="channel-mode"></span></th>
+ <th>Prefetch <span class="help" id="channel-prefetch"></span></th>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+ <th><%= fmt_sort('Unconfirmed', 'messages_unconfirmed') %></th>
+ <th><%= fmt_sort('State', 'state') %></th>
+<% if (statistics_level == 'fine') { %>
+ <th><%= fmt_sort('publish', 'message_stats.publish_details.rate') %></th>
+ <th><%= fmt_sort('confirm', 'message_stats.confirm_details.rate') %></th>
+ <th>
+ <%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %>
+ <% if (col_redeliver) { %>
+ <sub><%= fmt_sort('of which redelivered', 'message_stats.redeliver_details.rate') %></sub>
+ <% } %>
+ </th>
+ <th><%= fmt_sort('ack', 'message_stats.ack_details.rate') %></th>
+ <% if (col_return_unroutable) { %>
+ <th><%= fmt_sort('return (mandatory)', 'message_stats.return_unroutable_details.rate') %></th>
+ <% } %>
+<% } %>
+<% } else { %>
+<!-- TODO make sortable after bug 23401 -->
+ <th>Channel</th>
+ <th>Mode <span class="help" id="channel-mode"></span></th>
+ <th>Prefetch <span class="help" id="channel-prefetch"></span></th>
+ <th>Unacked</th>
+ <th>Unconfirmed</th>
+ <th>State</th>
+<% if (statistics_level == 'fine') { %>
+ <th>publish</th>
+ <th>confirm</th>
+ <th>
+ deliver / get
+ <% if (col_redeliver) { %>
+ <sub>of which redelivered</sub>
+ <% } %>
+ </th>
+ <th>ack</th>
+ <% if (col_return_unroutable) { %>
+ <th>return (mandatory)</th>
+ <% } %>
+<% } %>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < channels.length; i++) {
+ var channel = channels[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= link_channel(channel.name) %>
+ </td>
+<% if (mode == 'standalone' && nodes_interesting) { %>
+ <td><%= fmt_node(channel.node) %></td>
+<% } %>
+<% if (mode == 'standalone') { %>
+<% if (vhosts_interesting) { %>
+ <td class="c"><%= fmt_string(channel.vhost) %></td>
+<% } %>
+ <td class="c"><%= fmt_string(channel.user) %></td>
+<% } %>
+ <td class="l">
+ <%= fmt_channel_mode(channel) %>
+ <% if (channel.transactional) { %>
+ <small><acronym title="<%= channel.messages_uncommitted %> uncommitted messages"><%= channel.messages_uncommitted %>m</acronym>/<acronym title="<%= channel.acks_uncommitted %> uncommitted acks"><%= channel.acks_uncommitted %>a</acronym></small>
+ <% } %>
+ </td>
+ <td class="c">
+ <% if (channel.prefetch_count != 0) { %>
+ <%= channel.prefetch_count %><br/>
+ <% } %>
+ <% if (channel.global_prefetch_count != 0) { %>
+ <%= channel.global_prefetch_count %> (global)
+ <% } %>
+ </td>
+ <td class="c"><%= channel.messages_unacknowledged %></td>
+ <td class="c"><%= channel.messages_unconfirmed %></td>
+ <td class="c"><%= fmt_object_state(channel) %></td>
+<% if (statistics_level == 'fine') { %>
+ <td class="r"><%= fmt_rate(channel.message_stats, 'publish') %></td>
+ <td class="r"><%= fmt_rate(channel.message_stats, 'confirm') %></td>
+ <td class="r"><%= fmt_deliver_rate(channel.message_stats, col_redeliver) %></td>
+ <td class="r"><%= fmt_rate(channel.message_stats, 'ack') %></td>
+ <% if (col_return_unroutable) { %>
+ <td class="r"><%= fmt_rate(channel.message_stats, 'return_unroutable') %></td>
+ <% } %>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no channels ...</p>
+<% } %>
--- /dev/null
+<h1>Channels</h1>
+<%= filter_ui(channels) %>
+<div class="updatable">
+ <%= format('channels-list', {'channels': channels, 'mode': 'standalone'}) %>
+</div>
--- /dev/null
+<h1>Cluster name: <b><%= fmt_string(cluster_name.name) %></b></h1>
+
+<p>
+ The cluster name can be used by clients to identify clusters over
+ AMQP connections, and is used by the shovel and federation plugins
+ to identify which clusters a message has been routed through.
+</p>
+<p>
+ Note that the cluster name is announced to clients in the AMQP
+ server properties; i.e. before authentication has taken
+ place. Therefore it should not be considered secret.
+</p>
+<p>
+ The cluster name is generated by default from the name of the first
+ node in the cluster, but can be changed.
+</p>
+
+<div class="section-hidden">
+ <h2>Change name</h2>
+ <div class="hider">
+ <form action="#/cluster-name" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name" value="<%= fmt_string(cluster_name.name) %>"/><span class="mand">*</span></td>
+ </tr>
+ </table>
+ <input type="submit" value="Change name"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Connection <b><%= fmt_string(connection.name) %></b></h1>
+
+<div class="section">
+<h2>Overview</h2>
+<div class="hider">
+ <%= data_rates('data-rates-conn', connection, 'Data rates') %>
+
+<div class="updatable">
+<h3>Details</h3>
+<table class="facts">
+<% if (nodes_interesting) { %>
+<tr>
+ <th>Node</th>
+ <td><%= fmt_node(connection.node) %></td>
+</tr>
+<% } %>
+<% if (vhosts_interesting) { %>
+<tr>
+ <th>Virtual host</th>
+ <td><%= fmt_string(connection.vhost) %></td>
+</tr>
+<% } %>
+<tr>
+ <th>Username</th>
+ <td><%= fmt_string(connection.user) %></td>
+</tr>
+<tr>
+ <th>Protocol</th>
+ <td><%= connection.protocol %></td>
+</tr>
+
+<% if (connection.ssl) { %>
+<tr>
+ <th>SSL</th>
+ <td><%= fmt_boolean(connection.ssl) %></td>
+</tr>
+<% } %>
+
+<% if (connection.auth_mechanism) { %>
+<tr>
+ <th>Authentication</th>
+ <td><%= connection.auth_mechanism %></td>
+</tr>
+<% } %>
+</table>
+
+<% if (connection.state) { %>
+<table class="facts">
+<tr>
+ <th>State</th>
+ <td><%= fmt_object_state(connection) %></td>
+</tr>
+<tr>
+ <th>Timeout</th>
+ <td><%= fmt_time(connection.timeout, 's') %></td>
+</tr>
+<tr>
+ <th>Frame max</th>
+ <td><%= connection.frame_max %> bytes</td>
+</tr>
+<tr>
+ <th>Channel limit</th>
+ <td><%= connection.channel_max %> channels</td>
+</tr>
+</table>
+<% } %>
+</div>
+
+</div>
+</div>
+
+<div class="section">
+ <h2>Channels</h2>
+ <div class="hider updatable">
+ <%= format('channels-list', {'channels': channels, 'mode': 'connection'}) %>
+ </div>
+</div>
+
+<% if (connection.ssl) { %>
+<div class="section">
+<h2>SSL</h2>
+<div class="hider">
+
+<table class="facts">
+ <tr>
+ <th>Protocol Version</th>
+ <td><%= connection.ssl_protocol %></td>
+ </tr>
+ <tr>
+ <th>Key Exchange Algorithm</th>
+ <td><%= connection.ssl_key_exchange %></td>
+ </tr>
+ <tr>
+ <th>Cipher Algorithm</th>
+ <td><%= connection.ssl_cipher %></td>
+ </tr>
+ <tr>
+ <th>Hash Algorithm</th>
+ <td><%= connection.ssl_hash %></td>
+ </tr>
+</table>
+
+<% if (connection.peer_cert_issuer != '') { %>
+<table class="facts">
+ <tr>
+ <th>Peer Certificate Issuer</th>
+ <td><%= connection.peer_cert_issuer %></td>
+ </tr>
+ <tr>
+ <th>Peer Certificate Subject</th>
+ <td><%= connection.peer_cert_subject %></td>
+ </tr>
+ <tr>
+ <th>Peer Certificate Validity</th>
+ <td><%= connection.peer_cert_validity %></td>
+ </tr>
+</table>
+<% } %>
+</div>
+</div>
+<% } %>
+
+<% if (properties_size(connection.client_properties) > 0) { %>
+<div class="section-hidden">
+<h2>Client properties</h2>
+<div class="hider">
+<%= fmt_table_long(connection.client_properties) %>
+</div>
+</div>
+<% } %>
+
+<div class="section-hidden">
+ <h2>Close this connection</h2>
+ <div class="hider">
+ <form action="#/connections" method="delete" class="confirm">
+ <input type="hidden" name="name" value="<%= fmt_string(connection.name) %>"/>
+ <table class="form">
+ <tr>
+ <th><label>Reason:</label></th>
+ <td>
+ <input type="text" name="reason" value="Closed via management plugin" class="wide"/>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Force Close"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Connections</h1>
+<%= filter_ui(connections) %>
+<div class="updatable">
+<% if (connections.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th colspan="<% if (nodes_interesting) { %>7<% } else { %>6<% } %>">Network</th>
+ <th colspan="<% if (vhosts_interesting) { %>5<% } else { %>4<% } %>">Overview</th>
+ </tr>
+ <tr>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th><%= fmt_sort('Protocol', 'protocol') %></th>
+ <th><%= fmt_sort('Client', 'properties') %></th>
+<% if (nodes_interesting) { %>
+ <th><%= fmt_sort('Node', 'node') %></th>
+<% } %>
+ <th><%= fmt_sort('From client', 'recv_oct_details.rate') %></th>
+ <th><%= fmt_sort('To client', 'send_oct_details.rate') %></th>
+ <th><%= fmt_sort('Timeout', 'timeout') %></th>
+ <th><%= fmt_sort('Channels', 'channels') %></th>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+ <th><%= fmt_sort('User name', 'user') %></th>
+ <th><%= fmt_sort('State', 'state') %></th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < connections.length; i++) {
+ var connection = connections[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_conn(connection.name) %></td>
+ <td>
+ <%= connection.protocol %>
+ <% if (connection.ssl) { %>
+ <sub>SSL</sub>
+ <% } %>
+ </td>
+ <td><%= fmt_client_name(connection.client_properties) %></td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(connection.node) %></td>
+<% } %>
+ <td><%= fmt_rate_bytes(connection, 'recv_oct') %></td>
+ <td><%= fmt_rate_bytes(connection, 'send_oct') %></td>
+ <td><%= fmt_time(connection.timeout, 's') %></td>
+ <td><%= connection.channels %></td>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(connection.vhost) %></td>
+<% } %>
+ <td><%= fmt_string(connection.user) %></td>
+ <td><%= fmt_object_state(connection) %></td>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no connections ...</p>
+<% } %>
+</div>
--- /dev/null
+<% if (consumers.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'queue') { %>
+ <th>Channel</th>
+ <th>Consumer tag</th>
+<% } else { %>
+ <th>Consumer tag</th>
+ <th>Queue</th>
+<% } %>
+ <th>Ack required</th>
+ <th>Exclusive</th>
+ <th>Prefetch count</th>
+ <th>Arguments</th>
+ </tr>
+ </thead>
+<%
+ for (var i = 0; i < consumers.length; i++) {
+ var consumer = consumers[i];
+%>
+ <tr<%= alt_rows(i) %>>
+<% if (mode == 'queue') { %>
+ <td><%= link_channel(consumer.channel_details.name) %></td>
+ <td><%= fmt_string(consumer.consumer_tag) %></td>
+<% } else { %>
+ <td><%= fmt_string(consumer.consumer_tag) %></td>
+ <td><%= link_queue(consumer.queue.vhost, consumer.queue.name) %></td>
+<% } %>
+ <td class="c"><%= fmt_boolean(consumer.ack_required) %></td>
+ <td class="c"><%= fmt_boolean(consumer.exclusive) %></td>
+ <td class="c"><%= consumer.prefetch_count %></td>
+ <td class="c"><%= fmt_table_short(consumer.arguments) %></td>
+ </tr>
+<% } %>
+ </table>
+<% } else { %>
+ <p>... no consumers ...</p>
+<% } %>
--- /dev/null
+<div class="form-popup-<%= type %>">
+ <%= text %>
+ <br/>
+ <br/>
+ <span>Close</span>
+</div>
--- /dev/null
+<h1>Exchange: <b><%= fmt_exchange(exchange.name) %></b></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+<% if (statistics_level == 'fine') { %>
+ <%= message_rates('msg-rates-x', exchange.message_stats) %>
+<% } %>
+
+ <div class="updatable">
+ <h3>Details</h3>
+ <table class="facts">
+ <tr>
+ <th>Type</th>
+ <td class="l"><%= fmt_exchange_type(exchange.type) %></td>
+ </tr>
+ <tr>
+ <th>Parameters</th>
+ <td><%= fmt_parameters(exchange) %></td>
+ </tr>
+ <tr>
+ <th>Policy</th>
+ <td><%= fmt_string(exchange.policy, '') %></td>
+ </tr>
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th>Virtual host</th>
+ <td><%= fmt_string(exchange.vhost) %></td>
+ </tr>
+<% } %>
+ </table>
+ </div>
+ </div>
+</div>
+
+<% if (statistics_level == 'fine') { %>
+<div class="section-hidden">
+<h2>Message rates breakdown</h2>
+<div class="hider updatable">
+<table class="two-col-layout">
+ <tr>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'exchange-incoming',
+ 'object': exchange.incoming,
+ 'label': 'Incoming <span class="help" id="exchange-rates-incoming"></span>'}) %>
+ </td>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'exchange-outgoing',
+ 'object': exchange.outgoing,
+ 'label': 'Outgoing <span class="help" id="exchange-rates-outgoing"></span>'}) %>
+ </td>
+ </tr>
+</table>
+</div>
+</div>
+<% } %>
+
+
+<div class="section-hidden">
+ <h2>Bindings</h2>
+ <div class="hider">
+<% if (exchange.name == "") { %>
+ <h3>Default exchange</h3>
+ <p>
+ The default exchange is implicitly bound to every queue, with a
+ routing key equal to the queue name. It is not possible to
+ explicitly bind to, or unbind from the default exchange. It also
+ cannot be deleted.
+ </p>
+<% } else { %>
+<div class="bindings-wrapper">
+<% if (bindings_destination.length > 0) { %>
+ <%= format('bindings', {'mode': 'exchange_destination', 'bindings': bindings_destination}) %>
+ <p class="arrow">⇓</p>
+<% } %>
+ <p><span class="exchange">This exchange</span></p>
+ <p class="arrow">⇓</p>
+ <%= format('bindings', {'mode': 'exchange_source', 'bindings': bindings_source}) %>
+</div>
+ <%= format('add-binding', {'mode': 'exchange_source', 'parent': exchange}) %>
+<% } %>
+</div>
+</div>
+
+<% if (!exchange.internal) { %>
+<%= format('publish', {'mode': 'exchange', 'exchange': exchange}) %>
+<% } %>
+
+<% if (exchange.name != "") { %>
+<div class="section-hidden">
+ <h2>Delete this exchange</h2>
+ <div class="hider">
+ <form action="#/exchanges" method="delete" class="confirm">
+ <input type="hidden" name="vhost" value="<%= fmt_string(exchange.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_exchange_url(exchange.name) %>"/>
+ <input type="submit" value="Delete"/>
+ </form>
+ </div>
+</div>
+<% } %>
--- /dev/null
+<h1>Exchanges</h1>
+<div class="section">
+ <h2>All exchanges</h2>
+ <div class="hider">
+<%= filter_ui(exchanges) %>
+ <div class="updatable">
+<% if (exchanges.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th><%= fmt_sort('Type', 'type') %></th>
+ <th><%= fmt_sort('Policy', 'policy') %></th>
+ <th>Parameters</th>
+<% if (statistics_level == 'fine') { %>
+ <th><%= fmt_sort('Message rate in', 'message_stats.publish_in_details.rate') %></th>
+ <th><%= fmt_sort('Message rate out', 'message_stats.publish_out_details.rate') %></th>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < exchanges.length; i++) {
+ var exchange = exchanges[i];
+%>
+ <tr<%= alt_rows(i, exchange.arguments)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(exchange.vhost) %></td>
+<% } %>
+ <td><%= link_exchange(exchange.vhost, exchange.name, exchange.arguments) %></td>
+ <td class="c"><%= fmt_exchange_type(exchange.type) %></td>
+ <td class="c"><%= fmt_string(exchange.policy, '') %></td>
+ <td class="c"><%= fmt_parameters_short(exchange) %></td>
+<% if (statistics_level == 'fine') { %>
+ <td class="r"><%= fmt_rate(exchange.message_stats, 'publish_in') %></td>
+ <td class="r"><%= fmt_rate(exchange.message_stats, 'publish_out') %></td>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no exchanges ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new exchange</h2>
+ <div class="hider">
+ <form action="#/exchanges" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Type:</label></th>
+ <td>
+ <select name="type">
+ <% for (var i = 0; i < exchange_types.length; i++) {
+ var type = exchange_types[i];
+ if (type.internal_purpose == undefined) { %>
+ <option value="<%= fmt_string(type.name) %>"><%= fmt_string(type.name) %></option>
+ <% }
+ } %>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Durability:</label></th>
+ <td>
+ <select name="durable">
+ <option value="true">Durable</option>
+ <option value="false">Transient</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Auto delete: <span class="help" id="exchange-auto-delete"></span></label></th>
+ <td>
+ <select name="auto_delete">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Internal: <span class="help" id="exchange-internal"></span></label></th>
+ <td>
+ <select name="internal">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Alternate exchange: <span class="help" id="exchange-alternate"></span></label></th>
+ <td><input type="text" name="alternate-exchange"/></td>
+ </tr>
+ <tr>
+ <th><label>Arguments:</label></th>
+ <td>
+ <div class="multifield" id="arguments"></div>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add exchange"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Import succeeded</h1>
+<p>
+ Your definitions were imported successfully.
+</p>
--- /dev/null
+<div id="header">
+ <div id="login-version">
+ <div id="login-details"></div>
+ <form action="#/logout" method="put">
+ <input type="submit" value="Log out"/>
+ </form>
+ </div>
+ <div id="logo">
+ <a href="#/">
+ <img src="img/rabbitmqlogo.png" width="204" height="37"/>
+ </a>
+ </div>
+ <div id="menu">
+ <p id="vhost-form">
+ <label for="show-vhost">Virtual host: </label>
+ <select id="show-vhost">
+ <option value="">All</option>
+ </select>
+ </p>
+ <ul id="tabs">
+ </ul>
+ </div>
+</div>
+<div id="rhs"></div>
+<div id="main"></div>
+<div id="footer">
+ <div id="footer-nav">
+ <ul>
+ <li><a href="api/">HTTP API</a></li>
+ <li><a href="cli/">Command Line</a></li>
+ </ul>
+ </div>
+ <div id="update-form">
+ <label for="update-every">Update </label>
+ <select id="update-every">
+ <option value="5000">every 5 seconds</option>
+ <option value="30000">every 30 seconds</option>
+ <option value="300000">every 5 minutes</option>
+ <option value="">never</option>
+ </select>
+ </div>
+</div>
+<div id="status"></div>
--- /dev/null
+<div id="login">
+ <p><img src="img/rabbitmqlogo.png" width="204" height="37"/></p>
+
+ <form action="#/login" method="put">
+ <div id="login-status"></div>
+ <table class="form">
+ <tr>
+ <th><label>Username:</label></th>
+ <td><input type="text" name="username"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Password:</label></th>
+ <td><input type="password" name="password"/><span class="mand">*</span></td>
+ </tr>
+ </table>
+ <p><input type="submit" value="Login"/></p>
+ </form>
+</div>
--- /dev/null
+<%
+ var width = 800;
+ if (memory == "not_available") {
+%>
+<p class="warning">
+ Memory statistics not available.
+</p>
+<% } else { %>
+<div class="memory-bar">
+<%
+ var sections = {'connection_procs' : 'Connections',
+ 'queue_procs' : 'Queues',
+ 'plugins' : 'Plugins',
+ 'other_proc' : 'Other process memory',
+ 'mnesia' : 'Mnesia',
+ 'msg_index' : 'Message store index',
+ 'mgmt_db' : 'Management database',
+ 'other_ets' : 'Other ETS tables',
+ 'binary' : 'Binaries',
+ 'code' : 'Code',
+ 'atom' : 'Atoms',
+ 'other_system' : 'Other system'};
+ for (var section in sections) {
+ var section_width = Math.round(width * memory[section] / memory.total);
+%>
+ <div class="memory-section memory_<%= section %>"
+ style="width: <%= section_width %>px;"
+ title="<%= sections[section] %> <%= fmt_bytes(memory[section]) %>">
+ </div>
+<% } %>
+</div>
+<span class="clear"> </span>
+<div class="box">
+<table class="facts">
+ <tr>
+ <th>Connections</th>
+ <td><%= fmt_memory(memory, 'connection_procs') %></td>
+ </tr>
+ <tr>
+ <th>Queues</th>
+ <td><%= fmt_memory(memory, 'queue_procs') %></td>
+ </tr>
+ <tr>
+ <th>Plugins</th>
+ <td><%= fmt_memory(memory, 'plugins') %></td>
+ </tr>
+ <tr>
+ <th>Other process memory</th>
+ <td><%= fmt_memory(memory, 'other_proc') %></td>
+ </tr>
+</table>
+<table class="facts">
+ <tr>
+ <th>Mnesia</th>
+ <td><%= fmt_memory(memory, 'mnesia') %></td>
+ </tr>
+ <tr>
+ <th>Message store index</th>
+ <td><%= fmt_memory(memory, 'msg_index') %></td>
+ </tr>
+ <tr>
+ <th>Management database</th>
+ <td><%= fmt_memory(memory, 'mgmt_db') %></td>
+ </tr>
+ <tr>
+ <th>Other ETS tables</th>
+ <td><%= fmt_memory(memory, 'other_ets') %></td>
+ </tr>
+</table>
+<table class="facts">
+ <tr>
+ <th>Binaries</th>
+ <td><%= fmt_memory(memory, 'binary') %></td>
+ </tr>
+ <tr>
+ <th>Code</th>
+ <td><%= fmt_memory(memory, 'code') %></td>
+ </tr>
+ <tr>
+ <th>Atoms</th>
+ <td><%= fmt_memory(memory, 'atom') %></td>
+ </tr>
+ <tr>
+ <th>Other system</th>
+ <td><%= fmt_memory(memory, 'other_system') %></td>
+ </tr>
+</table>
+</div>
+
+<div class="memory-info">
+ Last updated: <b><%= fmt_date(new Date()) %></b>.<br/>
+ Total memory used at last update: <b><%= fmt_bytes(memory.total) %></b>
+ <span class="help" id="memory-use"></span>
+</div>
+
+<% } %>
--- /dev/null
+<%
+ for (var i = 0; i < msgs.length; i++) {
+ var msg = msgs[i];
+%>
+<div class="box">
+<h3>Message <%= i+1 %></h3>
+<p>The server reported <b><%= msg.message_count %></b> messages remaining.</p>
+<table class="facts">
+ <tr>
+ <th>Exchange</th>
+ <td><%= fmt_exchange(msg.exchange) %></td>
+ </tr>
+ <tr>
+ <th>Routing Key</th>
+ <td><%= fmt_string(msg.routing_key) %></td>
+ </tr>
+ <tr>
+ <th>Redelivered</th>
+ <td><%= fmt_boolean(msg.redelivered) %></td>
+ </tr>
+ <tr>
+ <th>Properties</th>
+ <td><%= fmt_table_short(msg.properties) %></td>
+ </tr>
+ <tr>
+ <th>
+ Payload
+ <sub><%= msg.payload_bytes %> bytes</sub>
+ <sub>Encoding: <%= msg.payload_encoding %></sub>
+ </th>
+ <td>
+ <pre class="msg-payload"><%= fmt_maybe_wrap(msg.payload, msg.payload_encoding) %></pre>
+ </td>
+ </tr>
+</table>
+</div>
+<%
+ }
+%>
--- /dev/null
+<!-- msg-detail-deliveries: renders a "Deliveries" rate table.
+     Context variables: 'object' is a list of delivery-stat entries;
+     'mode' == 'queue' links each row to the consuming channel, any other
+     mode links each row to the source queue. -->
+<h3>Deliveries</h3>
+<% if (object && object.length > 0) { %>
+<%
+ // Hide the "of which redelivered" sub-heading when no entry carries
+ // redeliver stats (is_col_empty checks o.stats across all rows).
+ var col_redeliver = !is_col_empty(object, 'redeliver', function(o) {return o.stats;});
+%>
+<table class="list">
+ <tr>
+<% if (mode == 'queue') { %>
+ <th>Channel</th>
+<% } else { %>
+ <th>Queue</th>
+<% } %>
+ <th>
+ deliver / get
+ <% if (col_redeliver) { %>
+ <sub>of which redelivered</sub>
+ <% } %>
+ </th>
+ <th>ack</th>
+ </tr>
+<%
+ for (var i = 0; i < object.length; i++) {
+ var del = object[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (mode == 'queue') { %>
+ <td><%= link_channel(del.channel_details.name) %></td>
+<% } else { %>
+ <td><%= link_queue(del.queue.vhost, del.queue.name) %></td>
+<% } %>
+ <td class="r"><%= fmt_deliver_rate(del.stats, col_redeliver) %></td>
+ <td class="r"><%= fmt_rate(del.stats, 'ack') %></td>
+ </tr>
+<% } %>
+</table>
+<% } else { %>
+<p> ... no deliveries ...</p>
+<% } %>
--- /dev/null
+<h3><%= label %></h3>
+<% if (object && object.length > 0) { %>
+<%
+ var col_return_unroutable = !is_col_empty(object, 'return_unroutable', function(o) {return o.stats;});
+ var col_confirm = mode != 'exchange-outgoing';
+%>
+<table class="list">
+ <tr>
+<% if (mode == 'channel') { %>
+ <th>Exchange</th>
+<% } else if (mode == 'exchange-incoming') { %>
+ <th>Channel</th>
+<% } else if (mode == 'exchange-outgoing') { %>
+ <th>Queue</th>
+<% } else { %>
+ <th>Exchange</th>
+<% } %>
+ <th>publish</th>
+<% if (col_confirm) { %>
+ <th>confirm</th>
+<% } %>
+<% if (col_return_unroutable) { %>
+ <th>return (mandatory)</th>
+<% } %>
+ </tr>
+<%
+ for (var i = 0; i < object.length; i++) {
+ var pub = object[i];
+%>
+ <tr<%= alt_rows(i)%>>
+
+<% if (mode == 'channel') { %>
+ <td><%= link_exchange(pub.exchange.vhost, pub.exchange.name) %></td>
+<% } else if (mode == 'exchange-incoming') { %>
+ <td><%= link_channel(pub.channel_details.name) %></td>
+<% } else if (mode == 'exchange-outgoing') { %>
+ <td><%= link_queue(pub.queue.vhost, pub.queue.name) %></td>
+<% } else { %>
+ <td><%= link_exchange(pub.exchange.vhost, pub.exchange.name) %></td>
+<% } %>
+ <td class="r"><%= fmt_rate(pub.stats, 'publish') %></td>
+<% if (col_confirm) { %>
+ <td class="r"><%= fmt_rate(pub.stats, 'confirm') %></td>
+<% } %>
+<% if (col_return_unroutable) { %>
+ <td class="r"><%= fmt_rate(pub.stats, 'return_unroutable') %></td>
+<% } %>
+ </tr>
+<% } %>
+</table>
+<% } else { %>
+<p> ... no publishes ...</p>
+<% } %>
--- /dev/null
+<h1>Node <b><%= node.name %></b></h1>
+
+<div class="section">
+<h2>Overview</h2>
+<div class="hider updatable">
+<% if (!node.running) { %>
+<p class="warning">Node not running</p>
+<% } else if (node.os_pid == undefined) { %>
+<p class="warning">Node statistics not available</p>
+<% } else { %>
+ <table class="facts">
+ <tr>
+ <th>
+ File descriptors <span class="help" id="file-descriptors"></span>
+ </th>
+ <td>
+<%= fmt_resource_bar_count(fmt_fd_used(node.fd_used, node.fd_total), node.fd_total, FD_THRESHOLDS) %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Socket descriptors <span class="help" id="socket-descriptors"></span>
+ </th>
+ <td>
+<%= fmt_resource_bar_count(node.sockets_used, node.sockets_total, FD_THRESHOLDS) %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Erlang processes
+ </th>
+ <td>
+<%= fmt_resource_bar_count(node.proc_used, node.proc_total, PROCESS_THRESHOLDS) %>
+ </td>
+ </tr>
+ </table>
+ <table class="facts">
+ <tr>
+ <th>
+ Memory
+ </th>
+ <td>
+<% if (node.mem_limit != 'memory_monitoring_disabled') { %>
+ <%= fmt_resource_bar(fmt_bytes(node.mem_used),
+ fmt_bytes(node.mem_limit) + ' high watermark',
+ node.mem_used / node.mem_limit,
+ node.mem_alarm ? 'red' : 'green',
+ node.mem_alarm ? 'memory-alarm' : null) %>
+<% } else { %>
+ <%= fmt_bytes(node.mem_used) %>
+<% } %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Disk space
+ </th>
+ <td>
+<% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
+ <%= fmt_resource_bar(fmt_bytes(node.disk_free),
+ fmt_bytes(node.disk_free_limit) + ' low watermark',
+ node.disk_free_limit / node.disk_free,
+ node.disk_free_alarm ? 'red' : 'green',
+ node.disk_free_alarm ? 'disk_free-alarm' : null) %>
+<% } else { %>
+ (not available)
+<% } %>
+ </td>
+ </tr>
+ </table>
+
+ <table class="facts">
+ <tr>
+ <th>Uptime</th>
+ <td><%= fmt_uptime(node.uptime) %></td>
+ </tr>
+<% if (rabbit_versions_interesting) { %>
+ <tr>
+ <th>RabbitMQ Version</th>
+ <td><%= fmt_rabbit_version(node.applications) %></td>
+ </tr>
+<% } %>
+ <tr>
+ <th>Type</th>
+ <td>
+ <% if (node.type == 'disc') { %>
+ <acronym title="Broker definitions are held on disc.">Disc</acronym>
+ <% } else { %>
+ <acronym title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</acronym>
+ <% } %>
+ </td>
+ </tr>
+ </table>
+<% } %>
+</div>
+</div>
+
+<div class="section">
+<h2>Memory details</h2>
+<div class="hider">
+ <div id="memory-details"></div>
+ <button class="update-manual memory-button" for="memory-details" query="memory">Update</button>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Applications</h2>
+<div class="hider updatable">
+<% if (!node.running) { %>
+<p class="warning">Node not running</p>
+<% } else if (node.os_pid == undefined) { %>
+<p class="warning">Node statistics not available</p>
+<% } else { %>
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <th>Version</th>
+ </tr>
+ <%
+ for (var j = 0; j < node.applications.length; j++) {
+ var application = node.applications[j];
+ %>
+ <tr<%= alt_rows(j)%>>
+ <td>
+ <%= application.name %>
+ <sub><%= application.description %></sub>
+ </td>
+ <td><%= application.version %></td>
+ </tr>
+ <% } %>
+</table>
+<% } %>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Registry</h2>
+<div class="hider updatable">
+<% if (!node.running) { %>
+<p class="warning">Node not running</p>
+<% } else if (node.os_pid == undefined) { %>
+<p class="warning">Node statistics not available</p>
+<% } else { %>
+<h3>Exchange types</h3>
+<%= format('registry', {'list': node.exchange_types, 'node': node, 'show_enabled': false} ) %>
+<h3>Authentication mechanisms</h3>
+<%= format('registry', {'list': node.auth_mechanisms, 'node': node, 'show_enabled': true} ) %>
+<% } %>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Advanced</h2>
+<div class="hider updatable">
+<% if (!node.running) { %>
+<p class="warning">Node not running</p>
+<% } else if (node.os_pid == undefined) { %>
+<p class="warning">Node statistics not available</p>
+<% } else { %>
+ <div class="box">
+ <h3>VM</h3>
+ <table class="facts">
+ <tr>
+ <th>OS pid</th>
+ <td><%= node.os_pid %></td>
+ </tr>
+ <tr>
+ <th>Statistics</th>
+ <td><%= node.statistics_level %></td>
+ </tr>
+ </table>
+
+ <table class="facts">
+ <tr>
+ <th>Run queue</th>
+ <td><%= node.run_queue %></td>
+ </tr>
+ <tr>
+ <th>Processors</th>
+ <td><%= node.processors %></td>
+ </tr>
+ </table>
+<% } %>
+</div>
+</div>
--- /dev/null
+<h1>Overview</h1>
+<% if (user_monitor) { %>
+<%= format('partition', {'nodes': nodes}) %>
+<% } %>
+<div class="section">
+<h2>Totals</h2>
+<div class="hider">
+<% if (overview.statistics_db_node != 'not_running') { %>
+ <%= queue_lengths('lengths-over', overview.queue_totals) %>
+<% if (statistics_level == 'fine') { %>
+ <%= message_rates('msg-rates-over', overview.message_stats) %>
+<% } %>
+<% } else { %>
+ Totals not available
+<% } %>
+
+<div class="updatable">
+ <h3>Global counts <span class="help" id="resource-counts"></span></h3>
+
+ <div class="box">
+ <div class="micro-highlight">
+ <a href="#/connections">
+ Connections: <strong><%= overview.object_totals.connections %></strong>
+ </a>
+ </div>
+ <div class="micro-highlight">
+ <a href="#/channels">
+ Channels: <strong><%= overview.object_totals.channels %></strong>
+ </a>
+ </div>
+ <div class="micro-highlight">
+ <a href="#/exchanges">
+ Exchanges: <strong><%= overview.object_totals.exchanges %></strong>
+ </a>
+ </div>
+ <div class="micro-highlight">
+ <a href="#/queues">
+ Queues: <strong><%= overview.object_totals.queues %></strong>
+ </a>
+ </div>
+<!-- Older brokers omit the consumers total; only render the count when present.
+     (Fixed: a stray <tr> was emitted here inside the div-based counts box.) -->
+<% if (overview.object_totals['consumers'] != undefined) { %>
+ <div class="micro-highlight">
+ Consumers: <strong><%= overview.object_totals.consumers %></strong>
+ </div>
+<% } %>
+ </div>
+ </div>
+
+</div>
+</div>
+
+<% if (user_monitor) { %>
+<div class="section">
+<h2>Nodes</h2>
+<div class="hider updatable">
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <th>
+ File descriptors <span class="help" id="file-descriptors"></span>
+ </th>
+ <th>
+ Socket descriptors <span class="help" id="socket-descriptors"></span>
+ </th>
+ <th>
+ Erlang processes
+ </th>
+ <th>
+ Memory
+ </th>
+ <th>
+ Disk space
+ </th>
+ <th>Uptime</th>
+ <th>Type</th>
+ </tr>
+<%
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= link_node(node.name) %>
+ <% if (rabbit_versions_interesting) { %>
+ <sub>RabbitMQ <%= fmt_rabbit_version(node.applications) %></sub>
+ <% } %>
+ </td>
+<% if (!node.running) { %>
+ <td colspan="6">
+ <div class="status-red">
+ Node not running
+ </div>
+ </td>
+<% } else if (node.os_pid == undefined) { %>
+ <td colspan="6">
+ <div class="status-yellow">
+ <acronym title="The rabbitmq_management_agent plugin should be enabled on this node. If it is not, various statistics will be inaccurate.">
+ Node statistics not available</acronym>
+ </div>
+ </td>
+<% } else { %>
+ <td>
+<%= fmt_resource_bar_count(fmt_fd_used(node.fd_used, node.fd_total), node.fd_total, FD_THRESHOLDS) %>
+ </td>
+ <td>
+<%= fmt_resource_bar_count(node.sockets_used, node.sockets_total, FD_THRESHOLDS) %>
+ </td>
+ <td>
+<%= fmt_resource_bar_count(node.proc_used, node.proc_total, PROCESS_THRESHOLDS) %>
+ </td>
+ <td>
+<% if (node.mem_limit != 'memory_monitoring_disabled') { %>
+ <%= fmt_resource_bar(fmt_bytes(node.mem_used),
+ fmt_bytes(node.mem_limit) + ' high watermark',
+ node.mem_used / node.mem_limit,
+ node.mem_alarm ? 'red' : 'green',
+ node.mem_alarm ? 'memory-alarm' : null) %>
+<% } else { %>
+ <%= fmt_bytes(node.mem_used) %>
+<% } %>
+ </td>
+ <td>
+<% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
+ <%- // Bar ratio is limit/free: it approaches 1 as free space falls toward the
+     // low watermark. Help id must match the node page ('disk_free-alarm');
+     // the previous 'disk-free-alarm' pointed at a non-existent help topic. %>
+ <%= fmt_resource_bar(fmt_bytes(node.disk_free),
+ fmt_bytes(node.disk_free_limit) + ' low watermark',
+ node.disk_free_limit / node.disk_free,
+ node.disk_free_alarm ? 'red' : 'green',
+ node.disk_free_alarm ? 'disk_free-alarm' : null) %>
+<% } else { %>
+ (not available)
+<% } %>
+ </td>
+ <td class="r">
+ <%= fmt_uptime(node.uptime) %>
+ </td>
+<% } %>
+ <td class="c">
+ <% if (node.type == 'disc') { %>
+ <acronym title="Broker definitions are held on disc.">Disc</acronym>
+ <% } else { %>
+ <acronym title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</acronym>
+ <% } %>
+ <% if (overview.statistics_db_node == node.name) { %>
+ <acronym title="This node contains the management statistics database">Stats</acronym>
+ <% } %>
+ <% if (overview.node == node.name) { %>
+ <acronym title="You are accessing the management UI from this node.">*</acronym>
+ <% } %>
+ </td>
+ </tr>
+<% } %>
+</table>
+
+<% if (overview.statistics_db_node == 'not_running') { %>
+ <p class="status-error">Statistics database could not be contacted. Message rates and queue lengths will not be shown.</p>
+<% } %>
+</div>
+</div>
+
+<div class="section">
+<h2>Ports and contexts</h2>
+<div class="hider updatable">
+<h3>Listening ports</h3>
+<table class="list">
+ <tr>
+ <th>Protocol</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+ <th>Bound to</th>
+ <th>Port</th>
+ </tr>
+ <%
+ for (var i = 0; i < overview.listeners.length; i++) {
+ var listener = overview.listeners[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= listener.protocol %></td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(listener.node) %></td>
+<% } %>
+ <td><%= listener.ip_address %></td>
+ <td><%= listener.port %></td>
+ </tr>
+ <% } %>
+</table>
+<h3>Web contexts</h3>
+<table class="list">
+ <tr>
+ <th>Context</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+ <th>Bound to</th>
+ <th>Port</th>
+ <th>SSL</th>
+ <th>Path</th>
+ </tr>
+ <%
+ for (var i = 0; i < overview.contexts.length; i++) {
+ var context = overview.contexts[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= context.description %></td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(context.node) %></td>
+<% } %>
+ <td><%= (context.ip != undefined) ? context.ip : "0.0.0.0" %></td>
+ <td><%= context.port %></td>
+ <td class="c"><%= fmt_boolean(context.ssl || false) %></td>
+ <td><%= context.path %></td>
+ </tr>
+ <% } %>
+</table>
+</div>
+</div>
+
+<div class="section-hidden administrator-only">
+<h2>Import / export definitions</h2>
+<div class="hider">
+ <form action="api/definitions" method="post" enctype="multipart/form-data">
+ <table class="two-col-layout">
+ <tr>
+ <td>
+ <h3>Export</h3>
+ <p>
+ <label for="download-filename">Filename for download:</label><br/>
+ <input type="text" id="download-filename" value="<%= fmt_download_filename(overview.node) %>" class="wide" />
+ </p>
+ </td>
+ <td>
+ <h3>Import</h3>
+ <p>
+ <label>Definitions file:</label><br/>
+ <input type="file" name="file"/>
+ </p>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <p>
+ <button id="download-definitions">Download broker definitions</button>
+ <span class="help" id="export-definitions"></span>
+ </p>
+ </td>
+ <td>
+ <p>
+ <input type="hidden" name="redirect" value="../#/import-succeeded"/>
+ <input type="submit" value="Upload broker definitions"/>
+ <span class="help" id="import-definitions"></span>
+ </p>
+ </td>
+ </tr>
+ </table>
+ </form>
+</div>
+</div>
+
+<% if (overview.statistics_level != 'fine') { %>
+<div class="section-hidden">
+<h2>Message Rates Disabled</h2>
+<div class="hider">
+<p>
+ The statistics level in this RabbitMQ server is currently set to
+ <code><%= overview.statistics_level %></code>. Message rates are therefore
+ disabled.
+</p>
+<p>
+ To re-enable message rates, edit your configuration file and either
+ set <code>collect_statistics</code> to <code>fine</code> in
+ the <code>rabbit</code> application, or
+ set <code>force_fine_statistics</code> to <code>true</code> in
+ the <code>rabbitmq_management_agent</code> application.
+</p>
+</div>
+</div>
+<% } %>
+<% } %>
--- /dev/null
+<!-- partition: warning banner shown when any cluster node reports a Mnesia
+     network partition. Context variable: 'nodes' (list of node objects). -->
+<div class="updatable">
+<%
+ // Collect, per node, the set of peers it was partitioned from.
+ // node.partitions may be absent (older brokers) or empty (healthy node).
+ var partitions = [];
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ if (node.partitions != undefined && node.partitions.length != 0) {
+ partitions.push({'node': node.name,
+ 'others': node.partitions});
+ }
+ }
+ // Render nothing at all when no node reports a partition.
+ if (partitions.length > 0) {
+%>
+<p class="status-error">
+ Network partition detected<br/><br/>
+ Mnesia reports that this RabbitMQ cluster has experienced a network partition. This is a dangerous situation. RabbitMQ clusters should not be installed on networks which can experience partitions.
+</p>
+<p>
+ The nature of the partition is as follows:
+</p>
+ <table class="list">
+ <tr>
+ <th>Node</th><th>Was partitioned from</th>
+ </tr>
+
+<%
+ for (var i = 0; i < partitions.length; i++) {
+ var partition = partitions[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td><%= partition.node %></td>
+ <td>
+<%
+ // One line per peer this node lost contact with.
+ for (var j = 0; j < partition.others.length; j++) {
+ var other = partition.others[j];
+%>
+ <%= other %><br/>
+<% } %>
+ </td>
+ </tr>
+<% } %>
+ </table>
+<p>
+ While running in this partitioned state, changes (such as queue or
+ exchange declaration and binding) which take place in one partition
+ will not be visible to other partition(s). Other behaviour is not
+ guaranteed.
+</p>
+<p>
+ <a target="_blank"
+ href="http://www.rabbitmq.com/partitions.html">More information on
+ network partitions.</a>
+</p>
+<% } %>
+</div>
--- /dev/null
+<div class="section">
+ <h2>Permissions</h2>
+ <div class="hider">
+ <h3>Current permissions</h3>
+ <% if (permissions.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'vhost') { %>
+ <th>User</th>
+<% } else { %>
+ <th>Virtual host</th>
+<% } %>
+ <th>Configure regexp</th>
+ <th>Write regexp</th>
+ <th>Read regexp</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+for (var i = 0; i < permissions.length; i++) {
+ var permission = permissions[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (mode == 'vhost') { %>
+ <td><%= link_user(permission.user) %></td>
+<% } else { %>
+ <td><%= link_vhost(permission.vhost) %></td>
+<% } %>
+ <td><%= fmt_string(permission.configure) %></td>
+ <td><%= fmt_string(permission.write) %></td>
+ <td><%= fmt_string(permission.read) %></td>
+ <td class="c">
+ <form action="#/permissions" method="delete" class="confirm">
+ <input type="hidden" name="username" value="<%= fmt_string(permission.user) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(permission.vhost) %>"/>
+ <input type="submit" value="Clear"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no permissions ...</p>
+ <% } %>
+
+ <h3>Set permission</h3>
+ <form action="#/permissions" method="put">
+ <table class="form">
+ <tr>
+<% if (mode == 'vhost') { %>
+ <th>User</th>
+ <td>
+ <input type="hidden" name="vhost" value="<%= fmt_string(parent.name) %>"/>
+ <select name="username">
+ <% for (var i = 0; i < users.length; i++) { %>
+ <option value="<%= fmt_string(users[i].name) %>"><%= fmt_string(users[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+<% } else { %>
+ <th><label>Virtual Host:</label></th>
+ <td>
+ <input type="hidden" name="username" value="<%= fmt_string(parent.name) %>"/>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+<% } %>
+ </tr>
+ <tr>
+ <th><label>Configure regexp:</label></th>
+ <td><input type="text" name="configure" value=".*"/></td>
+ </tr>
+ <tr>
+ <th><label>Write regexp:</label></th>
+ <td><input type="text" name="write" value=".*"/></td>
+ </tr>
+ <tr>
+ <th><label>Read regexp:</label></th>
+ <td><input type="text" name="read" value=".*"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Set permission"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Policies</h1>
+<div class="section">
+ <h2>All policies</h2>
+ <div class="hider">
+<%= filter_ui(policies) %>
+ <div class="updatable">
+<% if (policies.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th>Pattern</th>
+ <th>Apply to</th>
+ <th>Definition</th>
+ <th>Priority</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < policies.length; i++) {
+ var policy = policies[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(policy.vhost) %></td>
+<% } %>
+ <td><%= link_policy(policy.vhost, policy.name) %></td>
+ <td><%= fmt_string(policy.pattern) %></td>
+ <td><%= fmt_string(policy['apply-to']) %></td>
+ <td><%= fmt_table_short(policy.definition) %></td>
+ <td><%= fmt_string(policy.priority) %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no policies ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add / update a policy</h2>
+ <div class="hider">
+ <form action="#/policies" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Pattern:</label></th>
+ <td><input type="text" name="pattern"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Apply to:</label></th>
+ <td>
+ <select name="apply-to">
+ <option value="all">Exchanges and queues</option>
+ <option value="exchanges">Exchanges</option>
+ <option value="queues">Queues</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Definition: <span class="help" id="policy-definitions"></span></label></th>
+ <td><div class="multifield" id="definition"></div></td>
+ <td class="t"><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Priority:</label></th>
+ <td><input type="text" name="priority"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Add policy"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Policy: <b><%= fmt_string(policy.name) %></b></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <table class="facts">
+ <tr>
+ <th>Virtual Host</th>
+ <td><%= fmt_string(policy.vhost) %></td>
+ </tr>
+ <tr>
+ <th>Pattern</th>
+ <td><%= fmt_string(policy.pattern) %></td>
+ </tr>
+ <tr>
+ <th>Apply to</th>
+ <td><%= fmt_string(policy['apply-to']) %></td>
+ </tr>
+ <tr>
+ <th>Definition</th>
+ <td><%= fmt_table_short(policy.definition) %></td>
+ </tr>
+ <tr>
+ <th>Priority</th>
+ <td><%= fmt_string(policy.priority) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this policy</h2>
+ <div class="hider">
+ <form action="#/policies" method="delete" class="confirm">
+ <input type="hidden" name="component" value="policy"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(policy.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(policy.name) %>"/>
+ <input type="submit" value="Delete this policy"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<div class="section-hidden">
+ <h2>Publish message</h2>
+ <div class="hider">
+ <form action="#/exchanges/publish" method="post">
+<% if (mode == 'queue') { %>
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="amq.default"/>
+<% } else { %>
+ <input type="hidden" name="vhost" value="<%= fmt_string(exchange.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_exchange_url(exchange.name) %>"/>
+<% } %>
+ <input type="hidden" name="properties" value=""/>
+ <table class="form">
+<% if (mode == 'queue') { %>
+ <tr>
+ <td colspan="2"><input type="hidden" name="routing_key" value="<%= fmt_string(queue.name) %>"/> Message will be published to the default exchange with routing key <strong><%= fmt_string(queue.name) %></strong>, routing it to this queue.</td>
+ </tr>
+<% } else { %>
+ <tr>
+ <th><label>Routing key:</label></th>
+ <td><input type="text" name="routing_key" value=""/></td>
+ </tr>
+<% } %>
+ <tr>
+ <th><label>Delivery mode:</label></th>
+ <td>
+ <select name="delivery_mode">
+ <option value="1">1 - Non-persistent</option>
+ <option value="2">2 - Persistent</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Headers:
+ <span class="help" id="message-publish-headers"></span>
+ </label>
+ </th>
+ <td>
+ <div class="multifield" id="headers"></div>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Properties:
+ <span class="help" id="message-publish-properties"></span>
+ </label>
+ </th>
+ <td>
+ <div class="multifield string-only" id="props"></div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Payload:</label></th>
+ <td><textarea name="payload"></textarea></td>
+ </tr>
+ </table>
+ <input type="submit" value="Publish message" />
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Queue <b><%= fmt_string(queue.name) %></b></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <%= queue_lengths('lengths-q', queue) %>
+<% if (statistics_level == 'fine') { %>
+ <%= message_rates('msg-rates-q', queue.message_stats) %>
+<% } %>
+
+ <div class="updatable">
+ <h3>Details</h3>
+ <table class="facts">
+ <tr>
+ <th>Parameters</th>
+ <td><%= fmt_parameters(queue) %></td>
+ </tr>
+ <tr>
+ <th>Policy</th>
+ <td><%= fmt_string(queue.policy, '') %></td>
+ </tr>
+ <tr>
+ <th>Exclusive owner</th>
+ <td>
+ <% if (queue.owner_pid_details == undefined) { %>
+ None
+ <% } else { %>
+ <%= link_conn(queue.owner_pid_details.name) %>
+ <% } %>
+ </td>
+ </tr>
+ </table>
+
+ <table class="facts">
+ <tr>
+ <th>State</th>
+ <td><%= fmt_object_state(queue) %></td>
+ </tr>
+ <tr>
+ <th>Consumers</th>
+ <td><%= fmt_string(queue.consumers) %></td>
+ </tr>
+ <tr>
+ <!-- Fixed: the help <span> was previously left unclosed before </th>. -->
+ <th>Consumer utilisation <span class="help" id="queue-consumer-utilisation"></span></th>
+ <td><%= fmt_percent(queue.consumer_utilisation) %></td>
+ </tr>
+ <tr>
+ <th>Memory</th>
+ <td><%= fmt_bytes(queue.memory) %></td>
+ </tr>
+ </table>
+
+ <table class="facts">
+ <tr>
+ <th>
+ Paging <span class="help" id="queue-memory-resident"></span>
+ </th>
+ <td>
+ <% var messages_ram = queue.backing_queue_status.ram_msg_count + queue.backing_queue_status.ram_ack_count; %>
+ <% if (messages_ram == queue.messages) { %>
+ No paging
+ <% } else { %>
+ <%= fmt_num_thousands(messages_ram) %> /
+ <%= fmt_num_thousands(queue.messages) %> msg (in RAM / total)
+ <% } %>
+ <sub>
+ <% if (queue.backing_queue_status.target_ram_count == 'infinity') { %>
+ No limit
+ <% } else { %>
+ RAM target: <%= fmt_num_thousands(queue.backing_queue_status.target_ram_count) %> msg
+ <% } %>
+ </sub>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Persistent <span class="help" id="queue-persistent"></span>
+ </th>
+ <td>
+ <%= fmt_num_thousands(queue.backing_queue_status.persistent_count) %> msg
+ </td>
+ </tr>
+ </table>
+
+ <table class="facts">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th>Virtual host</th>
+ <td><%= fmt_string(queue.vhost) %></td>
+ </tr>
+<% } %>
+<% if (nodes_interesting) { %>
+ <tr>
+ <th>Node</th>
+ <td><%= fmt_node(queue.node) %></td>
+ </tr>
+ <tr>
+ <th>Slaves</th>
+ <td>
+ <%
+ var has_unsynced_node = false;
+ for (var i in queue.slave_nodes) {
+ var node = queue.slave_nodes[i];
+ %>
+ <%
+ if (jQuery.inArray(node, queue.synchronised_slave_nodes) == -1) {
+ has_unsynced_node = true;
+ %>
+ <%= fmt_node(node) %> <b>(unsynchronised)</b>
+ <% } else { %>
+ <%= fmt_node(node) %>
+ <% } %>
+ <br/>
+ <% } %>
+ <% if (queue.state == 'syncing') { %>
+ <table>
+ <tr>
+ <td>
+ <%= fmt_sync_state(queue) %>
+ </td>
+ <td>
+ <form action="#/queues/actions" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="action" value="cancel_sync"/>
+ <input type="submit" value="Cancel" id="action-button" />
+ </form>
+ </td>
+ </tr>
+ </table>
+ <% } else if (has_unsynced_node) { %>
+ <form action="#/queues/actions" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="action" value="sync"/>
+ <input type="submit" value="Synchronise" id="action-button" />
+ </form>
+ <% } %>
+ </td>
+ </tr>
+<% } %>
+ </table>
+ </div>
+ </div>
+</div>
+
+<% if (statistics_level == 'fine') { %>
+<div class="section-hidden">
+<h2>Message rates breakdown</h2>
+<div class="hider updatable">
+<table class="two-col-layout">
+ <tr>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'queue',
+ 'object': queue.incoming,
+ 'label': 'Incoming'}) %>
+
+ </td>
+ <td>
+ <%= format('msg-detail-deliveries',
+ {'mode': 'queue',
+ 'object': queue.deliveries}) %>
+ </td>
+ </tr>
+</table>
+</div>
+</div>
+<% } %>
+
+<div class="section-hidden">
+ <h2>Consumers</h2>
+ <div class="hider updatable">
+<%= format('consumers', {'mode': 'queue', 'consumers': queue.consumer_details}) %>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Bindings</h2>
+ <div class="hider">
+ <div class="bindings-wrapper">
+ <%= format('bindings', {'mode': 'queue', 'bindings': bindings}) %>
+ <p class="arrow">⇓</p>
+ <p><span class="queue">This queue</span></p>
+
+ <%= format('add-binding', {'mode': 'queue', 'parent': queue}) %>
+ </div>
+ </div>
+</div>
+
+<%= format('publish', {'mode': 'queue', 'queue': queue}) %>
+
+<div class="section-hidden">
+ <h2>Get messages</h2>
+ <div class="hider">
+ <p>
+ Warning: getting messages from a queue is a destructive action.
+ <span class="help" id="message-get-requeue"></span>
+ </p>
+ <form action="#/queues/get" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="truncate" value="50000"/>
+ <table class="form">
+ <tr>
+ <th><label>Requeue:</label></th>
+ <td>
+ <select name="requeue">
+ <option value="true">Yes</option>
+ <option value="false">No</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Encoding:</label></th>
+ <td>
+ <select name="encoding">
+ <option value="auto">Auto string / base64</option>
+ <option value="base64">base64</option>
+ </select>
+ <span class="help" id="string-base64"></span>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Messages:</label></th>
+ <td><input type="text" name="count" value="1"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Get Message(s)" />
+ </form>
+ <div id="msg-wrapper"></div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete / purge</h2>
+ <div class="hider">
+ <form action="#/queues" method="delete" class="confirm inline-form">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="mode" value="delete"/>
+ <input type="submit" value="Delete" />
+ </form>
+
+ <form action="#/queues" method="delete" class="inline-form-right">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="mode" value="purge"/>
+ <input type="submit" value="Purge" />
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Queues</h1>
+<div class="section">
+ <h2>All queues</h2>
+ <div class="hider">
+<%= filter_ui(queues) %>
+ <div class="updatable">
+<% if (queues.length > 0) { %>
+<%
+ var col_redeliver = !is_col_empty(queues, 'redeliver');
+%>
+<table class="list">
+ <thead>
+ <tr>
+ <th colspan="<% if (nodes_interesting && vhosts_interesting) { %>7<% } else if (nodes_interesting || vhosts_interesting) { %>6<% } else { %>5<% } %>">Overview</th>
+ <th colspan="3">Messages</th>
+<% if (statistics_level == 'fine') { %>
+ <th colspan="3">Message rates</th>
+<% } %>
+ </tr>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+ <th><%= fmt_sort('Name', 'name') %></th>
+<% if (nodes_interesting) { %>
+ <th><%= fmt_sort('Node', 'node') %></th>
+<% } %>
+ <th><%= fmt_sort('Exclusive', 'owner_pid_details.name') %></th>
+ <th>Parameters</th>
+ <th><%= fmt_sort('Policy', 'policy') %></th>
+ <th><%= fmt_sort('State', 'state') %></th>
+ <th><%= fmt_sort('Ready', 'messages_ready') %></th>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+ <th><%= fmt_sort('Total', 'messages') %></th>
+<% if (statistics_level == 'fine') { %>
+ <th><%= fmt_sort('incoming', 'message_stats.publish_details.rate') %></th>
+ <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %>
+ <% if (col_redeliver) { %>
+ <sub><%= fmt_sort('of which redelivered', 'message_stats.redeliver_details.rate') %></sub>
+ <% } %>
+</th>
+ <th><%= fmt_sort('ack', 'message_stats.ack_details.rate') %></th>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < queues.length; i++) {
+ var queue = queues[i];
+%>
+ <tr<%= alt_rows(i, queue.arguments) %>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(queue.vhost) %></td>
+<% } %>
+ <td><%= link_queue(queue.vhost, queue.name, queue.arguments) %></td>
+<% if (nodes_interesting) { %>
+ <td>
+ <%= fmt_node(queue.node) %>
+ <%= fmt_mirrors(queue) %>
+ <% if (queue.state == 'syncing') { %>
+ <%= fmt_sync_state(queue) %>
+ <% } %>
+ </td>
+<% } %>
+ <td class="c">
+ <% if (queue.owner_pid_details != undefined) { %>
+ <%= link_conn(queue.owner_pid_details.name, "Owner") %>
+ <% } %>
+ </td>
+ <td class="c"><%= fmt_parameters_short(queue) %></td>
+ <td class="c"><%= fmt_string(queue.policy, '') %></td>
+ <td class="c"><%= fmt_object_state(queue) %></td>
+ <td class="r"><%= fmt_num_thousands(queue.messages_ready) %></td>
+ <td class="r"><%= fmt_num_thousands(queue.messages_unacknowledged) %></td>
+ <td class="r"><%= fmt_num_thousands(queue.messages) %></td>
+<% if (statistics_level == 'fine') { %>
+ <td class="r"><%= fmt_rate(queue.message_stats, 'publish') %></td>
+ <td class="r"><%= fmt_deliver_rate(queue.message_stats, col_redeliver) %></td>
+ <td class="r"><%= fmt_rate(queue.message_stats, 'ack') %></td>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no queues ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new queue</h2>
+ <div class="hider">
+ <form action="#/queues" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Durability:</label></th>
+ <td>
+ <select name="durable">
+ <option value="true">Durable</option>
+ <option value="false">Transient</option>
+ </select>
+ </td>
+ </tr>
+<%
+ if (nodes_interesting) {
+ var nodes = JSON.parse(sync_get('/nodes'));
+%>
+ <tr>
+ <th><label>Node:</label></th>
+ <td>
+ <select name="node">
+ <% for (var i = 0; i < nodes.length; i++) { %>
+ <option value="<%= fmt_string(nodes[i].name) %>"><%= nodes[i].name %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } %>
+ <tr>
+ <th><label>Auto delete: <span class="help" id="queue-auto-delete"></span></label></th>
+ <td>
+ <select name="auto_delete">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Message TTL: <span class="help" id="queue-message-ttl"></span></label></th>
+ <td><input type="text" name="x-message-ttl"/> ms</td>
+ </tr>
+ <tr>
+ <th><label>Auto expire: <span class="help" id="queue-expires"></span></label></th>
+ <td><input type="text" name="x-expires"/> ms</td>
+ </tr>
+ <tr>
+ <th><label>Max length: <span class="help" id="queue-max-length"></span></label></th>
+ <td><input type="text" name="x-max-length"/></td>
+ </tr>
+ <tr>
+ <th><label>Dead letter exchange: <span class="help" id="queue-dead-letter-exchange"></span></label></th>
+ <td><input type="text" name="x-dead-letter-exchange"/></td>
+ </tr>
+ <tr>
+ <th><label>Dead letter routing key: <span class="help" id="queue-dead-letter-routing-key"></span></label></th>
+ <td><input type="text" name="x-dead-letter-routing-key"/></td>
+ </tr>
+ <tr>
+ <th><label>Arguments:</label></th>
+ <td><div class="multifield" id="arguments"></div></td>
+ </tr>
+ </table>
+ <input type="submit" value="Add queue"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<%
+ var id = span.attr('for');
+ var mode = get_pref('rate-mode-' + id);
+ var size = get_pref('chart-size-' + id);
+ var range = get_pref('chart-range-' + id);
+%>
+
+<form action="#/rate-options" method="put" class="auto-submit">
+ <input type="hidden" name="id" value="<%= id %>"/>
+ <table class="form" width="100%">
+ <tr>
+ <td colspan="2">
+ <h3>This time series</h3>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Display:</label></th>
+ <td>
+ <%= fmt_radio('mode', 'Chart', 'chart', mode) %>
+ <%= fmt_radio('mode', 'Current value', 'curr', mode) %>
+ <%= fmt_radio('mode', 'Moving average', 'avg', mode) %>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Chart size:</label></th>
+ <td>
+ <%= fmt_radio('size', 'Small', 'small', size) %>
+ <%= fmt_radio('size', 'Medium', 'medium', size) %>
+ <%= fmt_radio('size', 'Large', 'large', size) %>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Chart range:</label></th>
+ <td>
+<%
+ for (p in CHART_PERIODS) {
+%>
+ <%= fmt_radio('range', CHART_PERIODS[p], p, range) %>
+<%
+ }
+%>
+ </td>
+ </tr>
+ </table>
+</form>
--- /dev/null
+<% if (node.running) { %>
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <th>Description</th>
+<% if (show_enabled) { %>
+ <th>Enabled</th>
+<% } %>
+ </tr>
+ <%
+ for (var i = 0; i < list.length; i++) {
+ var item = list[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= fmt_string(item.name) %></td>
+ <td><%= fmt_string(item.description) %></td>
+<% if (show_enabled) { %>
+ <td class="c"><%= fmt_boolean(item.enabled) %></td>
+<% } %>
+ </tr>
+ <% } %>
+</table>
+<% } else {%>
+<p>...node not running...</p>
+<% } %>
--- /dev/null
+<p class="status-<%= fmt_string(status) %>"><%= text %></p>
--- /dev/null
+<h1>User: <b><%= fmt_string(user.name) %></b></h1>
+
+<% if (permissions.length == 0) { %>
+<p class="warning">
+ This user does not have permission to access any virtual hosts.<br/>
+ Use "Set Permission" below to grant permission to access virtual hosts.
+</p>
+<% } %>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+<table class="facts">
+ <tr>
+ <th>Tags</th>
+ <td><%= fmt_string(user.tags) %></td>
+ </tr>
+ <tr>
+ <th>Can log in with password</th>
+ <td><%= fmt_boolean(user.password_hash.length > 0) %></td>
+ </tr>
+</table>
+ </div>
+</div>
+
+<%= format('permissions', {'mode': 'user', 'permissions': permissions, 'vhosts': vhosts, 'parent': user}) %>
+
+<div class="section-hidden">
+ <h2>Update this user</h2>
+ <div class="hider">
+ <form action="#/users-modify" method="put">
+ <input type="hidden" name="username" value="<%= fmt_string(user.name) %>"/>
+ <table class="form">
+ <tr>
+ <th>
+ <label>
+ <select name="has-password" class="narrow controls-appearance">
+ <option value="password">Password:</option>
+ <option value="no-password">No password</option>
+ </select>
+ </label>
+ </th>
+ <td>
+ <div id="password-div">
+ <input type="password" name="password" />
+ <span class="mand">*</span><br/>
+ <input type="password" name="password_confirm" />
+ <span class="mand">*</span>
+ (confirm)
+ </div>
+ <div id="no-password-div" style="display: none;">
+ User cannot log in using password.
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Tags:</label></th>
+ <td>
+ <input type="text" name="tags" id="tags" value="<%= fmt_string(user.tags) %>" />
+ <span class="help" id="user-tags"/>
+ <sub>
+ [<span class="tag-link" tag="administrator">Admin</span>]
+ [<span class="tag-link" tag="monitoring">Monitoring</span>]
+ [<span class="tag-link" tag="policymaker">Policymaker</span>]
+ [<span class="tag-link" tag="management">Management</span>]
+ [<span class="tag-link" tag="">None</span>]
+ </sub>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Update user"/>
+ </form>
+ </div>
+</div>
+
+
+<div class="section-hidden">
+ <h2>Delete this user</h2>
+ <div class="hider">
+ <form action="#/users" method="delete" class="confirm">
+ <input type="hidden" name="username" value="<%= fmt_string(user.name) %>"/>
+ <input type="submit" value="Delete"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Users</h1>
+<div class="section">
+ <h2>All users</h2>
+ <div class="hider">
+<%= filter_ui(users) %>
+ <div class="updatable">
+<% if (users.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th><%= fmt_sort('Tags', 'tags') %></th>
+ <th>Can access virtual hosts</th>
+ <th>Has password</th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < users.length; i++) {
+ var user = users[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_user(user.name) %></td>
+ <td class="c"><%= fmt_string(user.tags) %></td>
+ <td class="c"><%= fmt_permissions(user, permissions, 'user', 'vhost',
+ '<p class="warning">No access</p>') %></td>
+ <td class="c"><%= fmt_boolean(user.password_hash.length > 0) %></td>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+  <p>... no users ...</p>
+<% } %>
+ <p><span class="help" id="internal-users-only"></span></p>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a user</h2>
+ <div class="hider">
+ <form action="#/users-add" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Username:</label></th>
+ <td>
+ <input type="text" name="username"/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ <select name="has-password" class="narrow controls-appearance">
+ <option value="password">Password:</option>
+ <option value="no-password">No password</option>
+ </select>
+ </label>
+ </th>
+ <td>
+ <div id="password-div">
+ <input type="password" name="password" />
+ <span class="mand">*</span><br/>
+ <input type="password" name="password_confirm" />
+ <span class="mand">*</span>
+ (confirm)
+ </div>
+ <div id="no-password-div" style="display: none;">
+ User cannot log in using password.
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Tags:</label></th>
+ <td>
+ <input type="text" name="tags" id="tags" />
+ <span class="help" id="user-tags"/>
+ <sub>
+ [<span class="tag-link" tag="administrator">Admin</span>]
+ [<span class="tag-link" tag="monitoring">Monitoring</span>]
+ [<span class="tag-link" tag="policymaker">Policymaker</span>]
+ [<span class="tag-link" tag="management">Management</span>]
+ [<span class="tag-link" tag="">None</span>]
+ </sub>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add user"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Virtual Host: <b><%= fmt_string(vhost.name) %></b></h1>
+
+<% if (permissions.length == 0) { %>
+<p class="warning">
+ No users have permission to access this virtual host.<br/>
+ Use "Set Permission" below to grant users permission to access this virtual host.
+</p>
+<% } %>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <%= queue_lengths('lengths-vhost', vhost) %>
+<% if (statistics_level == 'fine') { %>
+ <%= message_rates('msg-rates-vhost', vhost.message_stats) %>
+<% } %>
+ <%= data_rates('data-rates-vhost', vhost, 'Data rates') %>
+ <div class="updatable">
+ <h3>Details</h3>
+ <table class="facts">
+ <tr>
+ <th>Tracing enabled:</th>
+ <td><%= fmt_boolean(vhost.tracing) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+</div>
+
+<%= format('permissions', {'mode': 'vhost', 'permissions': permissions, 'users': users, 'parent': vhost}) %>
+
+<div class="section-hidden">
+<h2>Delete this vhost</h2>
+<div class="hider">
+<form action="#/vhosts" method="delete" class="confirm">
+<input type="hidden" name="name" value="<%= fmt_string(vhost.name) %>"/>
+<input type="submit" value="Delete this virtual host"/>
+</form>
+</div>
+</div>
--- /dev/null
+<h1>Virtual Hosts</h1>
+
+<div class="section">
+ <h2>All virtual hosts</h2>
+ <div class="hider">
+<%= filter_ui(vhosts) %>
+ <div class="updatable">
+<% if (vhosts.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th colspan="2">Overview</th>
+ <th colspan="3">Messages</th>
+ <th colspan="2">Data rates</th>
+<% if (statistics_level == 'fine') { %>
+ <th colspan="2">Message rates</th>
+<% } %>
+ </tr>
+ <tr>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th>Users <span class="help" id="internal-users-only"></span></th>
+ <th><%= fmt_sort('Ready', 'messages_ready') %></th>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+ <th><%= fmt_sort('Total', 'messages') %></th>
+ <th><%= fmt_sort('From clients', 'recv_oct_details.rate') %></th>
+ <th><%= fmt_sort('To clients', 'send_oct_details.rate') %></th>
+<% if (statistics_level == 'fine') { %>
+ <th><%= fmt_sort('publish', 'message_stats.publish_details.rate') %></th>
+      <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %></th>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < vhosts.length; i++) {
+ var vhost = vhosts[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_vhost(vhost.name) %></td>
+ <td class="c"><%= fmt_permissions(vhost, permissions, 'vhost', 'user',
+ '<p class="warning">No users</p>') %></td>
+ <td class="r"><%= fmt_string(vhost.messages_ready) %></td>
+ <td class="r"><%= fmt_string(vhost.messages_unacknowledged) %></td>
+ <td class="r"><%= fmt_string(vhost.messages) %></td>
+ <td class="r"><%= fmt_rate_bytes(vhost, 'recv_oct') %></td>
+ <td class="r"><%= fmt_rate_bytes(vhost, 'send_oct') %></td>
+<% if (statistics_level == 'fine') { %>
+ <td class="r"><%= fmt_rate(vhost.message_stats, 'publish') %></td>
+ <td class="r"><%= fmt_deliver_rate(vhost.message_stats, false) %></td>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no vhosts ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new virtual host</h2>
+ <div class="hider">
+ <form action="#/vhosts" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ </table>
+ <input type="submit" value="Add virtual host"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(CONTEXT, rabbit_mgmt).
+-define(STATIC_PATH, "priv/www").
+
+%% application callback: read the configured listener, set up
+%% webmachine access logging, register the HTTP context with
+%% rabbit_web_dispatch, log the port, then start our supervision tree.
+start(_Type, _StartArgs) ->
+    {ok, Listener} = application:get_env(rabbitmq_management, listener),
+    setup_wm_logging(),
+    register_context(Listener),
+    log_startup(Listener),
+    rabbit_mgmt_sup_sup:start_link().
+
+%% application callback: tear down the HTTP context registered by
+%% start/2.
+stop(_State) ->
+    unregister_context(),
+    ok.
+
+%% Register our request-handling loop with rabbit_web_dispatch under
+%% the root path ("") of the configured listener.
+register_context(Listener) ->
+    rabbit_web_dispatch:register_context_handler(
+      ?CONTEXT, Listener, "", make_loop(), "RabbitMQ Management").
+
+%% Remove the context registered by register_context/1.
+unregister_context() ->
+    rabbit_web_dispatch:unregister_context(?CONTEXT).
+
+%% Build the request-handling fun: webmachine serves the API via the
+%% dispatch table, while static assets are looked up in the priv/www
+%% directory of every module contributing to the dispatcher.
+make_loop() ->
+    Dispatch = rabbit_mgmt_dispatcher:build_dispatcher(),
+    WMLoop = rabbit_webmachine:makeloop(Dispatch),
+    LocalPaths = [filename:join(module_path(M), ?STATIC_PATH) ||
+                     M <- rabbit_mgmt_dispatcher:modules()],
+    fun(Req) -> respond(Req, LocalPaths, WMLoop) end.
+
+%% Locate the application directory of a loaded module by stripping
+%% the trailing "ebin/<mod>.beam" components from its beam path.
+%% Assumes the module was loaded from a .beam file on disk (i.e. not
+%% preloaded) — crashes otherwise, which is fine for plugin modules.
+module_path(Mod) ->
+    {file, BeamFile} = code:is_loaded(Mod),
+    EbinDir = filename:dirname(BeamFile),
+    filename:dirname(EbinDir).
+
+%% Top-level request router. "/api/<something>" goes to webmachine;
+%% the empty path and the legacy /mgmt locations 301-redirect to "/";
+%% any other "/"-prefixed path is served as a static file.
+respond(Req, LocalPaths, WMLoop) ->
+    Path = Req:get(path),
+    Redirect = fun(L) -> {301, [{"Location", L}], ""} end,
+    case Path of
+        %% note: "/api/" with nothing after it falls through to the
+        %% static file branch
+        "/api/" ++ Rest when length(Rest) > 0 ->
+            WMLoop(Req);
+        "" ->
+            Req:respond(Redirect("/"));
+        "/mgmt/" ->
+            Req:respond(Redirect("/"));
+        "/mgmt" ->
+            Req:respond(Redirect("/"));
+        "/" ++ Stripped ->
+            serve_file(Req, Stripped, LocalPaths, Redirect)
+    end.
+
+%% Try each local document root in turn. The last root is used
+%% unconditionally (so a miss yields that backend's 404 handling);
+%% an earlier root is used only if the file exists there, or it is a
+%% directory containing an index.html.
+serve_file(Req, Path, [LocalPath], _Redirect) ->
+    Req:serve_file(Path, LocalPath);
+serve_file(Req, Path, [LocalPath | Others], Redirect) ->
+    Path1 = filename:join([LocalPath, Path]),
+    case filelib:is_regular(Path1) of
+        true  -> Req:serve_file(Path, LocalPath);
+        false -> case filelib:is_regular(Path1 ++ "/index.html") of
+                     true  -> index(Req, Path, LocalPath, Redirect);
+                     false -> serve_file(Req, Path, Others, Redirect)
+                 end
+    end.
+
+%% Serve a directory's index.html. We match on the *reversed* path:
+%% "" is the root, a leading "/" in the reversal means the path ends
+%% with a slash (append index.html), and anything else is redirected
+%% to the trailing-slash form so relative links resolve correctly.
+index(Req, Path, LocalPath, Redirect) ->
+    case lists:reverse(Path) of
+        ""       -> Req:serve_file("index.html", LocalPath);
+        "/" ++ _ -> Req:serve_file(Path ++ "index.html", LocalPath);
+        _        -> Req:respond(Redirect(Path ++ "/"))
+    end.
+
+%% Initialise webmachine and, if an HTTP access-log directory is
+%% configured (anything other than 'none'), attach webmachine's
+%% access-log handler pointing at it.
+setup_wm_logging() ->
+    rabbit_webmachine:setup(),
+    {ok, LogDir} = application:get_env(rabbitmq_management, http_log_dir),
+    case LogDir of
+        none -> ok;
+        _    -> webmachine_log:add_handler(webmachine_log_handler, [LogDir])
+    end.
+
+%% Announce the listener port in the broker log at startup.
+log_startup(Listener) ->
+    rabbit_log:info("Management plugin started. Port: ~w~n", [port(Listener)]).
+
+%% Extract the TCP port from the listener proplist; yields
+%% 'undefined' when no port entry is present (same as the
+%% two-argument proplists:get_value/2 default).
+port(Listener) ->
+    proplists:get_value(port, Listener, undefined).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_db).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/0]).
+
+-export([augment_exchanges/3, augment_queues/3,
+ augment_nodes/1, augment_vhosts/2,
+ get_channel/2, get_connection/2,
+ get_all_channels/1, get_all_connections/1,
+ get_overview/2, get_overview/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1, prioritise_cast/3,
+ format_message_queue/2]).
+
+%% For testing
+-export([override_lookups/1, reset_lookups/0]).
+
+-import(rabbit_misc, [pget/3, pset/3]).
+
+%% The management database listens to events broadcast via the
+%% rabbit_event mechanism, and responds to queries from the various
+%% rabbit_mgmt_wm_* modules. It handles several kinds of events, and
+%% slices and dices them in various ways.
+%%
+%% There are three types of events coming in: created (when an object
+%% is created, containing immutable facts about it), stats (emitted on
+%% a timer, with mutable facts about the object), and deleted (just
+%% containing the object's ID). In this context "objects" means
+%% connections, channels, exchanges, queues, consumers, vhosts and
+%% nodes. Note that we do not care about users, permissions, bindings,
+%% parameters or policies.
+%%
+%% Connections and channels are identified by pids. Queues and
+%% exchanges are identified by names (which are #resource{}s). VHosts
+%% and nodes are identified by names which are binaries. And consumers
+%% are identified by {ChPid, QName, CTag}.
+%%
+%% The management database records the "created" events for
+%% connections, channels and consumers, and can thus be authoritative
+%% about those objects. For queues, exchanges and nodes we go to
+%% Mnesia to find out the immutable details of the objects.
+%%
+%% For everything other than consumers, the database can then augment
+%% these immutable details with stats, as the object changes. (We
+%% never emit anything very interesting about consumers).
+%%
+%% Stats on the inbound side are referred to as coarse- and
+%% fine-grained. Fine grained statistics are the message rates
+%% maintained by channels and associated with tuples: {publishing
+%% channel, exchange}, {publishing channel, exchange, queue} and
+%% {queue, consuming channel}. Coarse grained stats are everything
+%% else and are associated with only one object, not a tuple.
+%%
+%% Within the management database though we rearrange things a bit: we
+%% refer to basic stats, simple stats and detail stats.
+%%
+%% Basic stats are those coarse grained stats for which we do not
+%% retain a history and do not perform any calculations -
+%% e.g. connection.state or channel.prefetch_count.
+%%
+%% Simple stats are those for which we do history / calculations which
+%% are associated with one object *after aggregation* - so these might
+%% originate with coarse grained stats - e.g. connection.send_oct or
+%% queue.messages_ready. But they might also originate from fine
+%% grained stats which have been aggregated - e.g. the message rates
+%% for a vhost or queue.
+%%
+%% Finally, detailed stats are those for which we do history /
+%% calculations which are associated with two objects. These
+%% have to have originated as fine grained stats, but can still have
+%% been aggregated.
+%%
+%% Created events and basic stats are stored in ETS tables by object,
+%% looked up in an orddict in #state.tables. Simple and detailed stats
+%% (which only differ depending on how they're keyed) are stored in
+%% #state.aggregated_stats.
+%%
+%% For detailed stats we also store an index for each object referencing
+%% all the other objects that form a detailed stats key with it. This is
+%% so that we can always avoid table scanning while deleting stats and
+%% thus make sure that handling deleted events is O(n)-ish.
+%%
+%% For each key for simple and detailed stats we maintain a #stats{}
+%% record, essentially a base counter for everything that happened
+%% before the samples we have kept, and a gb_tree of {timestamp,
+%% sample} values.
+%%
+%% We also have #state.old_stats to let us calculate instantaneous
+%% rates, in order to apportion simple / detailed stats into time
+%% slices as they come in. These instantaneous rates are not returned
+%% in response to any query, the rates shown in the API are calculated
+%% at query time. old_stats contains both coarse and fine
+%% entries. Coarse entries are pruned when the corresponding object is
+%% deleted, and fine entries are pruned when the emitting channel is
+%% closed, and whenever we receive new fine stats from a channel. So
+%% it's quite close to being a cache of "the previous stats we
+%% received".
+%%
+%% We also keep a timer going, in order to prune old samples from
+%% #state.aggregated_stats.
+%%
+%% Overall the object is to do all the aggregation when events come
+%% in, and make queries be simple lookups as much as possible. One
+%% area where this does not happen is the global overview - which is
+%% aggregated from vhost stats at query time since we do not want to
+%% reveal anything about other vhosts to unprivileged users.
+
+-record(state, {
+ %% "stats" for which no calculations are required
+ tables,
+ %% database of aggregated samples
+ aggregated_stats,
+ %% index for detailed aggregated_stats that have 2-tuple keys
+ aggregated_stats_index,
+ %% What the previous info item was for any given
+ %% {queue/channel/connection}
+ old_stats,
+ gc_timer,
+ gc_next_key,
+ lookups,
+ interval,
+ event_refresh_ref}).
+
+-define(FINE_STATS_TYPES, [channel_queue_stats, channel_exchange_stats,
+ channel_queue_exchange_stats]).
+-define(TABLES, [queue_stats, connection_stats, channel_stats,
+ consumers_by_queue, consumers_by_channel,
+ node_stats]).
+
+-define(DELIVER_GET, [deliver, deliver_no_ack, get, get_no_ack]).
+-define(FINE_STATS, [publish, publish_in, publish_out,
+ ack, deliver_get, confirm, return_unroutable, redeliver] ++
+ ?DELIVER_GET).
+
+-define(COARSE_QUEUE_STATS,
+ [messages, messages_ready, messages_unacknowledged]).
+
+-define(COARSE_CONN_STATS, [recv_oct, send_oct]).
+
+-define(GC_INTERVAL, 5000).
+-define(GC_MIN_ROWS, 100).
+-define(GC_MIN_RATIO, 0.01).
+
+-define(DROP_LENGTH, 1000).
+
+%% Load shedding: once the mailbox exceeds ?DROP_LENGTH, drop
+%% channel/queue stats events that carry no idle_since marker (these
+%% come from busy objects and will be re-emitted soon anyway). All
+%% other messages keep normal priority.
+prioritise_cast({event, #event{type  = Type,
+                               props = Props}}, Len, _State)
+  when (Type =:= channel_stats orelse
+        Type =:= queue_stats) andalso Len > ?DROP_LENGTH ->
+    case pget(idle_since, Props) of
+        unknown -> drop;
+        _       -> 0
+    end;
+prioritise_cast(_Msg, _Len, _State) ->
+    0.
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+%% Start the globally-registered stats database, then ask the broker
+%% to re-emit 'created' events tagged with our fresh ref so that a
+%% restarted database can rebuild its tables (see handle_cast/2).
+start_link() ->
+    Ref = make_ref(),
+    case gen_server2:start_link({global, ?MODULE}, ?MODULE, [Ref], []) of
+        {ok, Pid} -> register(?MODULE, Pid), %% [1]
+                     rabbit:force_event_refresh(Ref),
+                     {ok, Pid};
+        Else      -> Else
+    end.
+%% [1] For debugging it's helpful to locally register the name too
+%% since that shows up in places global names don't.
+
+%% Query API. Every call goes through safe_call/2,3, so if the stats
+%% database is down the caller gets a harmless default (the
+%% unaugmented input, not_found, or []) instead of crashing.
+%% R = Ranges, M = Mode
+augment_exchanges(Xs, R, M) -> safe_call({augment_exchanges, Xs, R, M}, Xs).
+augment_queues(Qs, R, M)    -> safe_call({augment_queues, Qs, R, M}, Qs).
+augment_vhosts(VHosts, R)   -> safe_call({augment_vhosts, VHosts, R}, VHosts).
+augment_nodes(Nodes)        -> safe_call({augment_nodes, Nodes}, Nodes).
+
+get_channel(Name, R)    -> safe_call({get_channel, Name, R}, not_found).
+get_connection(Name, R) -> safe_call({get_connection, Name, R}, not_found).
+
+get_all_channels(R)    -> safe_call({get_all_channels, R}).
+get_all_connections(R) -> safe_call({get_all_connections, R}).
+
+get_overview(User, R) -> safe_call({get_overview, User, R}).
+get_overview(R)       -> safe_call({get_overview, all, R}).
+
+%% Test hooks (see "For testing" export group above).
+override_lookups(Lookups) -> safe_call({override_lookups, Lookups}).
+reset_lookups()           -> safe_call(reset_lookups).
+
+%% Call into the database, returning Default if it is not running.
+%% On noproc we ask the supervisor to (re)start it and retry once.
+safe_call(Term)          -> safe_call(Term, []).
+safe_call(Term, Default) -> safe_call(Term, Default, 1).
+
+%% See rabbit_mgmt_sup_sup for a discussion of the retry logic.
+safe_call(Term, Default, Retries) ->
+    try
+        gen_server2:call({global, ?MODULE}, Term, infinity)
+    catch exit:{noproc, _} ->
+            case Retries of
+                0 -> Default;
+                _ -> rabbit_mgmt_sup_sup:start_child(),
+                     safe_call(Term, Default, Retries - 1)
+            end
+    end.
+
+%%----------------------------------------------------------------------------
+%% Internal, gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+%% gen_server2 init: create the per-type ETS tables, subscribe to
+%% node up/down notifications, and schedule the first GC pass. The
+%% sample interval is the broker's collect_statistics_interval. Ref
+%% is the force-refresh tag created in start_link/0.
+init([Ref]) ->
+    %% When Rabbit is overloaded, it's usually especially important
+    %% that the management plugin work.
+    process_flag(priority, high),
+    {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+    rabbit_node_monitor:subscribe(self()),
+    rabbit_log:info("Statistics database started.~n"),
+    Table = fun () -> ets:new(rabbit_mgmt_db, [ordered_set]) end,
+    Tables = orddict:from_list([{Key, Table()} || Key <- ?TABLES]),
+    {ok, set_gc_timer(
+           reset_lookups(
+             #state{interval               = Interval,
+                    tables                 = Tables,
+                    old_stats              = Table(),
+                    aggregated_stats       = Table(),
+                    aggregated_stats_index = Table(),
+                    event_refresh_ref      = Ref})), hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Query side: results for the rabbit_mgmt_wm_* resources are
+%% computed here. 'basic' mode augments a list cheaply; 'full' adds
+%% detail/history stats.
+handle_call({augment_exchanges, Xs, Ranges, basic}, _From, State) ->
+    reply(list_exchange_stats(Ranges, Xs, State), State);
+
+handle_call({augment_exchanges, Xs, Ranges, full}, _From, State) ->
+    reply(detail_exchange_stats(Ranges, Xs, State), State);
+
+handle_call({augment_queues, Qs, Ranges, basic}, _From, State) ->
+    reply(list_queue_stats(Ranges, Qs, State), State);
+
+handle_call({augment_queues, Qs, Ranges, full}, _From, State) ->
+    reply(detail_queue_stats(Ranges, Qs, State), State);
+
+handle_call({augment_vhosts, VHosts, Ranges}, _From, State) ->
+    reply(vhost_stats(Ranges, VHosts, State), State);
+
+handle_call({augment_nodes, Nodes}, _From, State) ->
+    {reply, node_stats(Nodes, State), State};
+
+%% Channels and connections are authoritative here: we look the
+%% object up in our own 'created' table, not in Mnesia.
+handle_call({get_channel, Name, Ranges}, _From,
+            State = #state{tables = Tables}) ->
+    case created_event(Name, channel_stats, Tables) of
+        not_found -> reply(not_found, State);
+        Ch        -> [Result] = detail_channel_stats(Ranges, [Ch], State),
+                     reply(Result, State)
+    end;
+
+handle_call({get_connection, Name, Ranges}, _From,
+            State = #state{tables = Tables}) ->
+    case created_event(Name, connection_stats, Tables) of
+        not_found -> reply(not_found, State);
+        Conn      -> [Result] = connection_stats(Ranges, [Conn], State),
+                     reply(Result, State)
+    end;
+
+handle_call({get_all_channels, Ranges}, _From,
+            State = #state{tables = Tables}) ->
+    Chans = created_events(channel_stats, Tables),
+    reply(list_channel_stats(Ranges, Chans, State), State);
+
+handle_call({get_all_connections, Ranges}, _From,
+            State = #state{tables = Tables}) ->
+    Conns = created_events(connection_stats, Tables),
+    reply(connection_stats(Ranges, Conns, State), State);
+
+%% The overview is aggregated from per-vhost stats at query time so
+%% that a restricted user only sees totals for vhosts visible to
+%% them (see the module comment above).
+handle_call({get_overview, User, Ranges}, _From,
+            State = #state{tables = Tables}) ->
+    VHosts = case User of
+                 all -> rabbit_vhost:list();
+                 _   -> rabbit_mgmt_util:list_visible_vhosts(User)
+             end,
+    %% TODO: there's no reason we can't do an overview of send_oct and
+    %% recv_oct now!
+    VStats = [read_simple_stats(vhost_stats, VHost, State) ||
+                 VHost <- VHosts],
+    MessageStats = [overview_sum(Type, VStats) || Type <- ?FINE_STATS],
+    QueueStats = [overview_sum(Type, VStats) || Type <- ?COARSE_QUEUE_STATS],
+    F = case User of
+            all -> fun (L) -> length(L) end;
+            _   -> fun (L) -> length(rabbit_mgmt_util:filter_user(L, User)) end
+        end,
+    %% Filtering out the user's consumers would be rather expensive so let's
+    %% just not show it
+    Consumers = case User of
+                    all -> Table = orddict:fetch(consumers_by_queue, Tables),
+                           [{consumers, ets:info(Table, size)}];
+                    _   -> []
+                end,
+    ObjectTotals = Consumers ++
+        [{queues,      length([Q || V <- VHosts,
+                                    Q <- rabbit_amqqueue:list(V)])},
+         {exchanges,   length([X || V <- VHosts,
+                                    X <- rabbit_exchange:list(V)])},
+         {connections, F(created_events(connection_stats, Tables))},
+         {channels,    F(created_events(channel_stats, Tables))}],
+    reply([{message_stats, format_samples(Ranges, MessageStats, State)},
+           {queue_totals,  format_samples(Ranges, QueueStats, State)},
+           {object_totals, ObjectTotals}], State);
+
+%% Test hooks: swap out / restore the Mnesia lookup funs.
+handle_call({override_lookups, Lookups}, _From, State) ->
+    reply(ok, State#state{lookups = Lookups});
+
+handle_call(reset_lookups, _From, State) ->
+    reply(ok, reset_lookups(State));
+
+handle_call(_Request, _From, State) ->
+    reply(not_understood, State).
+
+%% Only handle events that are real, or pertain to a force-refresh
+%% that we instigated.
+handle_cast({event, Event = #event{reference = none}}, State) ->
+    handle_event(Event, State),
+    noreply(State);
+
+handle_cast({event, Event = #event{reference = Ref}},
+            State = #state{event_refresh_ref = Ref}) ->
+    handle_event(Event, State),
+    noreply(State);
+
+%% Anything else — including events tagged with a stale refresh ref —
+%% is dropped.
+handle_cast(_Request, State) ->
+    noreply(State).
+
+%% Periodic GC tick (scheduled by set_gc_timer/1): prune one batch of
+%% old samples, then re-arm the timer.
+handle_info(gc, State) ->
+    noreply(set_gc_timer(gc_batch(State)));
+
+%% A cluster node went down: synthesise closed events for every
+%% connection and channel that lived on it, since the node itself can
+%% no longer emit them.
+handle_info({node_down, Node}, State = #state{tables = Tables}) ->
+    Conns = created_events(connection_stats, Tables),
+    Chs = created_events(channel_stats, Tables),
+    delete_all_from_node(connection_closed, Node, Conns, State),
+    delete_all_from_node(channel_closed, Node, Chs, State),
+    noreply(State);
+
+handle_info(_Info, State) ->
+    noreply(State).
+
+%% Nothing to clean up explicitly: the ETS tables die with the
+%% process.
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% All callback returns request hibernation — the server is idle
+%% between stats emission intervals, so we reclaim memory eagerly.
+reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
+noreply(NewState)      -> {noreply, NewState, hibernate}.
+
+%% Schedule the next GC pass in ?GC_INTERVAL ms; the message arrives
+%% as 'gc' in handle_info/2.
+set_gc_timer(State) ->
+    TRef = erlang:send_after(?GC_INTERVAL, self(), gc),
+    State#state{gc_timer = TRef}.
+
+%% Install the default Mnesia-backed lookup funs (tests can replace
+%% them via override_lookups/1).
+reset_lookups(State) ->
+    State#state{lookups = [{exchange, fun rabbit_exchange:lookup/1},
+                           {queue,    fun rabbit_amqqueue:lookup/1}]}.
+
+%% gen_server2 hook, invoked just before this server hibernates.
+handle_pre_hibernate(State) ->
+    %% rabbit_event can end up holding on to some memory after a busy
+    %% workout, but it's not a gen_server so we can't make it
+    %% hibernate. The best we can do is forcibly GC it here (if
+    %% rabbit_mgmt_db is hibernating the odds are rabbit_event is
+    %% quiescing in some way too).
+    rpc:multicall(
+      rabbit_mnesia:cluster_nodes(running), rabbit_mgmt_db_handler, gc, []),
+    {hibernate, State}.
+
+%% Used by gen_server2 to render our (potentially huge) mailbox
+%% compactly in sys/crash reports.
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%% Synthesise an event of the given Type (connection_closed /
+%% channel_closed) for every item whose pid lives on Node — used from
+%% handle_info({node_down, ...}) when the node cannot emit them.
+delete_all_from_node(Type, Node, Items, State) ->
+    [case node(Pid) of
+         Node -> handle_event(#event{type = Type, props = [{pid, Pid}]}, State);
+         _    -> ok
+     end || Item <- Items, Pid <- [pget(pid, Item)]].
+
+%%----------------------------------------------------------------------------
+%% Internal, utilities
+%%----------------------------------------------------------------------------
+
+%% Proplist get defaulting to 'unknown' rather than 'undefined'.
+pget(Key, List) -> pget(Key, List, unknown).
+
+%% id_name() and id() are for use when handling events, id_lookup()
+%% for when augmenting. The difference is that when handling events a
+%% queue name will be a resource, but when augmenting we will be
+%% passed a queue proplist that will already have been formatted -
+%% i.e. it will have name and vhost keys.
+id_name(node_stats) -> name;
+id_name(vhost_stats) -> name;
+id_name(queue_stats) -> name;
+id_name(exchange_stats) -> name;
+id_name(channel_stats) -> pid;
+id_name(connection_stats) -> pid.
+
+id(Type, List) -> pget(id_name(Type), List).
+
+id_lookup(queue_stats, List) ->
+ rabbit_misc:r(pget(vhost, List), queue, pget(name, List));
+id_lookup(exchange_stats, List) ->
+ rabbit_misc:r(pget(vhost, List), exchange, pget(name, List));
+id_lookup(Type, List) ->
+ id(Type, List).
+
+%% ets:lookup_element/3 that returns [] instead of raising badarg when
+%% the key (or table) is absent.
+lookup_element(Table, Key) -> lookup_element(Table, Key, 2).
+
+lookup_element(Table, Key, Pos) ->
+ try ets:lookup_element(Table, Key, Pos)
+ catch error:badarg -> []
+ end.
+
+%% Key for fine stats: {Ch, Q, X} when the peer is a {queue, exchange}
+%% pair, otherwise {Ch, QueueOrExchange}.
+fine_stats_id(ChPid, {Q, X}) -> {ChPid, Q, X};
+fine_stats_id(ChPid, QorX) -> {ChPid, QorX}.
+
+%% Round a timestamp down/up to the sampling interval boundary.
+floor(TS, #state{interval = Interval}) ->
+ rabbit_mgmt_util:floor(rabbit_mgmt_format:timestamp_ms(TS), Interval).
+ceil(TS, #state{interval = Interval}) ->
+ rabbit_mgmt_util:ceil (rabbit_mgmt_format:timestamp_ms(TS), Interval).
+
+%% e.g. publish -> publish_details.
+details_key(Key) -> list_to_atom(atom_to_list(Key) ++ "_details").
+
+%%----------------------------------------------------------------------------
+%% Internal, event-receiving side
+%%----------------------------------------------------------------------------
+
+%% Per-queue stats event: store formatted stats and record coarse
+%% message-count samples.
+handle_event(#event{type = queue_stats, props = Stats, timestamp = Timestamp},
+ State) ->
+ handle_stats(queue_stats, Stats, Timestamp,
+ [{fun rabbit_mgmt_format:properties/1,[backing_queue_status]},
+ {fun rabbit_mgmt_format:timestamp/1, [idle_since]},
+ {fun rabbit_mgmt_format:queue_state/1, [state]}],
+ [messages, messages_ready, messages_unacknowledged], State);
+
+handle_event(Event = #event{type = queue_deleted,
+ props = [{name, Name}],
+ timestamp = Timestamp},
+ State = #state{old_stats = OldTable}) ->
+ delete_consumers(Name, consumers_by_queue, consumers_by_channel, State),
+ %% This is fiddly. Unlike for connections and channels, we need to
+ %% decrease any amalgamated coarse stats for [messages,
+ %% messages_ready, messages_unacknowledged] for this queue - since
+ %% the queue's deletion means we have really got rid of messages!
+ Id = {coarse, {queue_stats, Name}},
+ %% This ceil must correspond to the ceil in append_samples/5
+ TS = ceil(Timestamp, State),
+ OldStats = lookup_element(OldTable, Id),
+ [record_sample(Id, {Key, -pget(Key, OldStats, 0), TS, State}, State)
+ || Key <- ?COARSE_QUEUE_STATS],
+ delete_samples(channel_queue_stats, {'_', Name}, State),
+ delete_samples(queue_exchange_stats, {Name, '_'}, State),
+ delete_samples(queue_stats, Name, State),
+ handle_deleted(queue_stats, Event, State);
+
+%% Drop every sample series that references the deleted exchange.
+handle_event(Event = #event{type = exchange_deleted,
+ props = [{name, Name}]}, State) ->
+ delete_samples(channel_exchange_stats, {'_', Name}, State),
+ delete_samples(queue_exchange_stats, {'_', Name}, State),
+ delete_samples(exchange_stats, Name, State),
+ handle_deleted(exchange_stats, Event, State);
+
+handle_event(#event{type = vhost_deleted,
+ props = [{name, Name}]}, State) ->
+ delete_samples(vhost_stats, Name, State),
+ {ok, State};
+
+handle_event(#event{type = connection_created, props = Stats}, State) ->
+ handle_created(
+ connection_stats, Stats,
+ [{fun rabbit_mgmt_format:addr/1, [host, peer_host]},
+ {fun rabbit_mgmt_format:port/1, [port, peer_port]},
+ {fun rabbit_mgmt_format:protocol/1, [protocol]},
+ {fun rabbit_mgmt_format:amqp_table/1, [client_properties]}], State);
+
+handle_event(#event{type = connection_stats, props = Stats,
+ timestamp = Timestamp},
+ State) ->
+ handle_stats(connection_stats, Stats, Timestamp, [], ?COARSE_CONN_STATS,
+ State);
+
+handle_event(Event = #event{type = connection_closed,
+ props = [{pid, Pid}]}, State) ->
+ delete_samples(connection_stats, Pid, State),
+ handle_deleted(connection_stats, Event, State);
+
+handle_event(#event{type = channel_created, props = Stats}, State) ->
+ handle_created(channel_stats, Stats, [], State);
+
+%% Channel stats carry the channel's own stats plus per-queue and
+%% per-exchange fine stats; the latter are diffed against the previous
+%% readings kept in old_stats, which are replaced wholesale here.
+handle_event(#event{type = channel_stats, props = Stats, timestamp = Timestamp},
+ State = #state{old_stats = OldTable}) ->
+ handle_stats(channel_stats, Stats, Timestamp,
+ [{fun rabbit_mgmt_format:timestamp/1, [idle_since]}],
+ [], State),
+ ChPid = id(channel_stats, Stats),
+ AllStats = [old_fine_stats(Type, Stats, State)
+ || Type <- ?FINE_STATS_TYPES],
+ ets:match_delete(OldTable, {{fine, {ChPid, '_'}}, '_'}),
+ ets:match_delete(OldTable, {{fine, {ChPid, '_', '_'}}, '_'}),
+ [handle_fine_stats(Timestamp, AllStatsElem, State)
+ || AllStatsElem <- AllStats],
+ {ok, State};
+
+%% Remove the channel's consumers, samples, rows and old fine stats.
+handle_event(Event = #event{type = channel_closed,
+ props = [{pid, Pid}]},
+ State = #state{old_stats = Old}) ->
+ delete_consumers(Pid, consumers_by_channel, consumers_by_queue, State),
+ delete_samples(channel_queue_stats, {Pid, '_'}, State),
+ delete_samples(channel_exchange_stats, {Pid, '_'}, State),
+ delete_samples(channel_stats, Pid, State),
+ handle_deleted(channel_stats, Event, State),
+ ets:match_delete(Old, {{fine, {Pid, '_'}}, '_'}),
+ ets:match_delete(Old, {{fine, {Pid, '_', '_'}}, '_'});
+
+handle_event(#event{type = consumer_created, props = Props}, State) ->
+ Fmt = [{fun rabbit_mgmt_format:amqp_table/1, [arguments]}],
+ handle_consumer(fun(Table, Id, P0) ->
+ P = rabbit_mgmt_format:format(P0, Fmt),
+ ets:insert(Table, {Id, P})
+ end,
+ Props, State);
+
+handle_event(#event{type = consumer_deleted, props = Props}, State) ->
+ handle_consumer(fun(Table, Id, _P) -> ets:delete(Table, Id) end,
+ Props, State);
+
+%% TODO: we don't clear up after dead nodes here - this is a very tiny
+%% leak every time a node is permanently removed from the cluster. Do
+%% we care?
+handle_event(#event{type = node_stats, props = Stats, timestamp = Timestamp},
+ State = #state{tables = Tables}) ->
+ Table = orddict:fetch(node_stats, Tables),
+ ets:insert(Table, {{pget(name, Stats), stats},
+ proplists:delete(name, Stats), Timestamp}),
+ {ok, State};
+
+%% Ignore any event type we do not track.
+handle_event(_Event, State) ->
+ {ok, State}.
+
+%% Store the static 'create' row for an object; the third column holds
+%% the name so created_event/3 can search by it.
+handle_created(TName, Stats, Funs, State = #state{tables = Tables}) ->
+ Formatted = rabbit_mgmt_format:format(Stats, Funs),
+ ets:insert(orddict:fetch(TName, Tables), {{id(TName, Stats), create},
+ Formatted,
+ pget(name, Stats)}),
+ {ok, State}.
+
+%% Record rate samples for RatesKeys, then store the remaining
+%% (formatted) stats as the object's current 'stats' row.
+handle_stats(TName, Stats, Timestamp, Funs, RatesKeys,
+ State = #state{tables = Tables, old_stats = OldTable}) ->
+ Id = id(TName, Stats),
+ IdSamples = {coarse, {TName, Id}},
+ OldStats = lookup_element(OldTable, IdSamples),
+ append_samples(Stats, Timestamp, OldStats, IdSamples, RatesKeys, State),
+ StripKeys = [id_name(TName)] ++ RatesKeys ++ ?FINE_STATS_TYPES,
+ Stats1 = [{K, V} || {K, V} <- Stats, not lists:member(K, StripKeys)],
+ Stats2 = rabbit_mgmt_format:format(Stats1, Funs),
+ ets:insert(orddict:fetch(TName, Tables), {{Id, stats}, Stats2, Timestamp}),
+ {ok, State}.
+
+%% Remove an object's 'create' and 'stats' rows plus its old-stats entry.
+handle_deleted(TName, #event{props = Props}, State = #state{tables = Tables,
+ old_stats = Old}) ->
+ Id = id(TName, Props),
+ case orddict:find(TName, Tables) of
+ {ok, Table} -> ets:delete(Table, {Id, create}),
+ ets:delete(Table, {Id, stats});
+ error -> ok
+ end,
+ ets:delete(Old, {coarse, {TName, Id}}),
+ {ok, State}.
+
+%% Apply Fun (insert or delete) to both consumer indices so the
+%% by-queue and by-channel tables stay in step.
+handle_consumer(Fun, Props, State = #state{tables = Tables}) ->
+ P = rabbit_mgmt_format:format(Props, []),
+ CTag = pget(consumer_tag, P),
+ Q = pget(queue, P),
+ Ch = pget(channel, P),
+ QTable = orddict:fetch(consumers_by_queue, Tables),
+ ChTable = orddict:fetch(consumers_by_channel, Tables),
+ Fun(QTable, {Q, Ch, CTag}, P),
+ Fun(ChTable, {Ch, Q, CTag}, P),
+ {ok, State}.
+
+%% The consumer_deleted event is emitted by queues themselves -
+%% therefore in the event that a queue dies suddenly we may not get
+%% it. The best way to handle this is to make sure we also clean up
+%% consumers when we hear about any queue going down.
+%%
+%% Deletes all rows keyed by PrimId from the primary index, and the
+%% mirror rows (with the key elements swapped) from the secondary one.
+delete_consumers(PrimId, PrimTableName, SecTableName,
+ #state{tables = Tables}) ->
+ Table1 = orddict:fetch(PrimTableName, Tables),
+ Table2 = orddict:fetch(SecTableName, Tables),
+ SecIdCTags = ets:match(Table1, {{PrimId, '$1', '$2'}, '_'}),
+ ets:match_delete(Table1, {{PrimId, '_', '_'}, '_'}),
+ [ets:delete(Table2, {SecId, PrimId, CTag}) || [SecId, CTag] <- SecIdCTags].
+
+%% Pair each fine-stats entry of the given Type in Props with its
+%% previous reading from old_stats; 'ignore' when the event carried no
+%% stats of this type.
+old_fine_stats(Type, Props, #state{old_stats = Old}) ->
+ case pget(Type, Props) of
+ unknown -> ignore;
+ AllFineStats0 -> ChPid = id(channel_stats, Props),
+ [begin
+ Id = fine_stats_id(ChPid, Ids),
+ {Id, Stats, lookup_element(Old, {fine, Id})}
+ end || {Ids, Stats} <- AllFineStats0]
+ end.
+
+handle_fine_stats(_Timestamp, ignore, _State) ->
+ ok;
+
+handle_fine_stats(Timestamp, AllStats, State) ->
+ [handle_fine_stat(Id, Stats, Timestamp, OldStats, State) ||
+ {Id, Stats, OldStats} <- AllStats].
+
+%% Add the synthetic deliver_get total (sum of the ?DELIVER_GET keys)
+%% when non-zero, then diff against OldStats and record samples.
+handle_fine_stat(Id, Stats, Timestamp, OldStats, State) ->
+ Total = lists:sum([V || {K, V} <- Stats, lists:member(K, ?DELIVER_GET)]),
+ Stats1 = case Total of
+ 0 -> Stats;
+ _ -> [{deliver_get, Total}|Stats]
+ end,
+ append_samples(Stats1, Timestamp, OldStats, {fine, Id}, all, State).
+
+%% Delete aggregated samples for Type/Id. A '_' in a pair key means
+%% "any peer": the index table is consulted to enumerate the peers.
+delete_samples(Type, {Id, '_'}, State) ->
+ delete_samples_with_index(Type, Id, fun forward/2, State);
+delete_samples(Type, {'_', Id}, State) ->
+ delete_samples_with_index(Type, Id, fun reverse/2, State);
+delete_samples(Type, Id, #state{aggregated_stats = ETS}) ->
+ ets:match_delete(ETS, delete_match(Type, Id)).
+
+%% Look up Id's peers in the index, delete each pair's samples (Order
+%% rebuilds the pair key in the right element order) and both index rows.
+delete_samples_with_index(Type, Id, Order,
+ #state{aggregated_stats = ETS,
+ aggregated_stats_index = ETSi}) ->
+ Ids2 = lists:append(ets:match(ETSi, {{Type, Id, '$1'}})),
+ ets:match_delete(ETSi, {{Type, Id, '_'}}),
+ [begin
+ ets:match_delete(ETS, delete_match(Type, Order(Id, Id2))),
+ ets:match_delete(ETSi, {{Type, Id2, Id}})
+ end || Id2 <- Ids2].
+
+%% Pair-key constructors used when rebuilding a key from the index.
+forward(A, B) -> {A, B}.
+reverse(A, B) -> {B, A}.
+
+delete_match(Type, Id) -> {{{Type, Id}, '_'}, '_'}.
+
+%% Diff Stats against OldStats and record one sample per key (Keys may
+%% be the atom 'all'), then remember Stats as the new previous reading.
+append_samples(Stats, TS, OldStats, Id, Keys,
+ State = #state{old_stats = OldTable}) ->
+ case ignore_coarse_sample(Id, State) of
+ false ->
+ %% This ceil must correspond to the ceil in handle_event
+ %% queue_deleted
+ NewMS = ceil(TS, State),
+ case Keys of
+ all -> [append_sample(Key, Value, NewMS, OldStats, Id, State)
+ || {Key, Value} <- Stats];
+ _ -> [append_sample(
+ Key, pget(Key, Stats), NewMS, OldStats, Id, State)
+ || Key <- Keys]
+ end,
+ ets:insert(OldTable, {Id, Stats});
+ true ->
+ ok
+ end.
+
+%% Record the delta against the previous reading; non-numeric values
+%% (including missing keys, which pget returns as 'unknown') are skipped.
+append_sample(Key, Value, NewMS, OldStats, Id, State) when is_number(Value) ->
+ record_sample(
+ Id, {Key, Value - pget(Key, OldStats, 0), NewMS, State}, State);
+
+append_sample(_Key, _Value, _NewMS, _OldStats, _Id, _State) ->
+ ok.
+
+%% Drop coarse queue samples for queues that no longer exist (see the
+%% long comment above object_exists/2 for why).
+ignore_coarse_sample({coarse, {queue_stats, Q}}, State) ->
+ not object_exists(Q, State);
+ignore_coarse_sample(_, _) ->
+ false.
+
+%% Coarse sample: attribute it to the object itself and to its vhost.
+record_sample({coarse, Id}, Args, State) ->
+ record_sample0(Id, Args),
+ record_sample0({vhost_stats, vhost(Id, State)}, Args);
+
+%% Deliveries / acks (Q -> Ch)
+record_sample({fine, {Ch, Q = #resource{kind = queue}}}, Args, State) ->
+ case object_exists(Q, State) of
+ true -> record_sample0({channel_queue_stats, {Ch, Q}}, Args),
+ record_sample0({queue_stats, Q}, Args);
+ false -> ok
+ end,
+ record_sample0({channel_stats, Ch}, Args),
+ record_sample0({vhost_stats, vhost(Q)}, Args);
+
+%% Publishes / confirms (Ch -> X)
+record_sample({fine, {Ch, X = #resource{kind = exchange}}}, Args, State) ->
+ case object_exists(X, State) of
+ true -> record_sample0({channel_exchange_stats, {Ch, X}}, Args),
+ record_sampleX(publish_in, X, Args);
+ false -> ok
+ end,
+ record_sample0({channel_stats, Ch}, Args),
+ record_sample0({vhost_stats, vhost(X)}, Args);
+
+%% Publishes (but not confirms) (Ch -> X -> Q)
+record_sample({fine, {_Ch,
+ Q = #resource{kind = queue},
+ X = #resource{kind = exchange}}}, Args, State) ->
+ %% TODO This one logically feels like it should be here. It would
+ %% correspond to "publishing channel message rates to queue" -
+ %% which would be nice to handle - except we don't. And just
+ %% uncommenting this means it gets merged in with "consuming
+ %% channel delivery from queue" - which is not very helpful.
+ %% record_sample0({channel_queue_stats, {Ch, Q}}, Args),
+ QExists = object_exists(Q, State),
+ XExists = object_exists(X, State),
+ case QExists of
+ true -> record_sample0({queue_stats, Q}, Args);
+ false -> ok
+ end,
+ case QExists andalso XExists of
+ true -> record_sample0({queue_exchange_stats, {Q, X}}, Args);
+ false -> ok
+ end,
+ case XExists of
+ true -> record_sampleX(publish_out, X, Args);
+ false -> ok
+ end.
+
+%% We have to check the queue and exchange objects still exist since
+%% their deleted event could be overtaken by a channel stats event
+%% which contains fine stats referencing them. That's also why we
+%% don't need to check the channels exist - their deleted event can't
+%% be overtaken by their own last stats event.
+%%
+%% Also, sometimes the queue_deleted event is not emitted by the queue
+%% (in the nodedown case) - so it can overtake the final queue_stats
+%% event (which is not *guaranteed* to be lost). So we make a similar
+%% check for coarse queue stats.
+%%
+%% We can be sure that mnesia will be up to date by the time we receive
+%% the event (even though we dirty read) since the deletions are
+%% synchronous and we do not emit the deleted event until after the
+%% deletion has occurred.
+object_exists(Name = #resource{kind = Kind}, #state{lookups = Lookups}) ->
+ case (pget(Kind, Lookups))(Name) of
+ {ok, _} -> true;
+ _ -> false
+ end.
+
+%% vhost of a resource record.
+vhost(#resource{virtual_host = VHost}) -> VHost.
+
+%% vhost of a coarse-stats id: direct for queues (the id is a
+%% resource); via the 'create' row for pid-keyed objects.
+vhost({queue_stats, #resource{virtual_host = VHost}}, _State) ->
+ VHost;
+vhost({TName, Pid}, #state{tables = Tables}) ->
+ Table = orddict:fetch(TName, Tables),
+ pget(vhost, lookup_element(Table, {Pid, create})).
+
+%% exchanges have two sets of "publish" stats, so rearrange things a touch
+record_sampleX(RenamePublishTo, X, {publish, Diff, TS, State}) ->
+ record_sample0({exchange_stats, X}, {RenamePublishTo, Diff, TS, State});
+record_sampleX(_RenamePublishTo, X, {Type, Diff, TS, State}) ->
+ record_sample0({exchange_stats, X}, {Type, Diff, TS, State}).
+
+%% Fold Diff into the stats tree stored under {Id0, Key}. On first
+%% sight of a pair id, register it (both ways round) in the index so
+%% delete_samples_with_index/4 can find it later.
+record_sample0(Id0, {Key, Diff, TS, #state{aggregated_stats = ETS,
+ aggregated_stats_index = ETSi}}) ->
+ Id = {Id0, Key},
+ Old = case lookup_element(ETS, Id) of
+ [] -> case Id0 of
+ {Type, {Id1, Id2}} ->
+ ets:insert(ETSi, {{Type, Id2, Id1}}),
+ ets:insert(ETSi, {{Type, Id1, Id2}});
+ _ ->
+ ok
+ end,
+ rabbit_mgmt_stats:blank();
+ E -> E
+ end,
+ ets:insert(ETS, {Id, rabbit_mgmt_stats:record(TS, Diff, Old)}).
+
+%%----------------------------------------------------------------------------
+%% Internal, querying side
+%%----------------------------------------------------------------------------
+
+%% Specs mapping an object type to the fine-stats tables providing its
+%% per-peer detail rows, and whether the object's id sits first or
+%% second in each table's pair key.
+-define(QUEUE_DETAILS,
+ {queue_stats, [{incoming, queue_exchange_stats, fun first/1},
+ {deliveries, channel_queue_stats, fun second/1}]}).
+
+-define(EXCHANGE_DETAILS,
+ {exchange_stats, [{incoming, channel_exchange_stats, fun second/1},
+ {outgoing, queue_exchange_stats, fun second/1}]}).
+
+-define(CHANNEL_DETAILS,
+ {channel_stats, [{publishes, channel_exchange_stats, fun first/1},
+ {deliveries, channel_queue_stats, fun first/1}]}).
+
+%% Build ets match patterns placing Id first or second in the pair key.
+first(Id) -> {Id, '$1'}.
+second(Id) -> {'$1', Id}.
+
+%% Query-side entry points: each merges augmentation funs over the
+%% caller-supplied object proplists.
+list_queue_stats(Ranges, Objs, State) ->
+ adjust_hibernated_memory_use(
+ merge_stats(Objs, queue_funs(Ranges, State))).
+
+detail_queue_stats(Ranges, Objs, State) ->
+ adjust_hibernated_memory_use(
+ merge_stats(Objs, [consumer_details_fun(
+ fun (Props) -> id_lookup(queue_stats, Props) end,
+ consumers_by_queue, State),
+ detail_stats_fun(Ranges, ?QUEUE_DETAILS, State)
+ | queue_funs(Ranges, State)])).
+
+%% Augmentation funs shared by the list and detail queue views.
+queue_funs(Ranges, State) ->
+ [basic_stats_fun(queue_stats, State),
+ simple_stats_fun(Ranges, queue_stats, State),
+ augment_msg_stats_fun(State)].
+
+list_exchange_stats(Ranges, Objs, State) ->
+ merge_stats(Objs, [simple_stats_fun(Ranges, exchange_stats, State),
+ augment_msg_stats_fun(State)]).
+
+detail_exchange_stats(Ranges, Objs, State) ->
+ merge_stats(Objs, [simple_stats_fun(Ranges, exchange_stats, State),
+ detail_stats_fun(Ranges, ?EXCHANGE_DETAILS, State),
+ augment_msg_stats_fun(State)]).
+
+connection_stats(Ranges, Objs, State) ->
+ merge_stats(Objs, [basic_stats_fun(connection_stats, State),
+ simple_stats_fun(Ranges, connection_stats, State),
+ augment_msg_stats_fun(State)]).
+
+list_channel_stats(Ranges, Objs, State) ->
+ merge_stats(Objs, [basic_stats_fun(channel_stats, State),
+ simple_stats_fun(Ranges, channel_stats, State),
+ augment_msg_stats_fun(State)]).
+
+detail_channel_stats(Ranges, Objs, State) ->
+ merge_stats(Objs, [basic_stats_fun(channel_stats, State),
+ simple_stats_fun(Ranges, channel_stats, State),
+ consumer_details_fun(
+ fun (Props) -> pget(pid, Props) end,
+ consumers_by_channel, State),
+ detail_stats_fun(Ranges, ?CHANNEL_DETAILS, State),
+ augment_msg_stats_fun(State)]).
+
+vhost_stats(Ranges, Objs, State) ->
+ merge_stats(Objs, [simple_stats_fun(Ranges, vhost_stats, State)]).
+
+node_stats(Objs, State) ->
+ merge_stats(Objs, [basic_stats_fun(node_stats, State)]).
+
+%% Fold every augmentation fun over each object, prepending whatever
+%% extra properties it produces.
+merge_stats(Objs, Funs) ->
+ [lists:foldl(fun (Fun, Props) -> Fun(Props) ++ Props end, Obj, Funs)
+ || Obj <- Objs].
+
+%% Each *_stats_fun below returns a closure that merge_stats/2 runs
+%% over one object's proplist to produce extra properties.
+
+%% i.e. the non-calculated stats
+basic_stats_fun(Type, #state{tables = Tables}) ->
+ Table = orddict:fetch(Type, Tables),
+ fun (Props) ->
+ Id = id_lookup(Type, Props),
+ lookup_element(Table, {Id, stats})
+ end.
+
+%% i.e. coarse stats, and fine stats aggregated up to a single number per thing
+simple_stats_fun(Ranges, Type, State) ->
+ fun (Props) ->
+ Id = id_lookup(Type, Props),
+ extract_msg_stats(
+ format_samples(Ranges, read_simple_stats(Type, Id, State), State))
+ end.
+
+%% i.e. fine stats that are broken out per sub-thing
+detail_stats_fun(Ranges, {IdType, FineSpecs}, State) ->
+ fun (Props) ->
+ Id = id_lookup(IdType, Props),
+ [detail_stats(Ranges, Name, AggregatedStatsType, IdFun(Id), State)
+ || {Name, AggregatedStatsType, IdFun} <- FineSpecs]
+ end.
+
+%% All {Key, StatsTree} pairs stored for one object.
+read_simple_stats(Type, Id, #state{aggregated_stats = ETS}) ->
+ FromETS = ets:match(ETS, {{{Type, Id}, '$1'}, '$2'}),
+ [{K, V} || [K, V] <- FromETS].
+
+read_detail_stats(Type, Id, #state{aggregated_stats = ETS}) ->
+ %% Id must contain '$1'
+ FromETS = ets:match(ETS, {{{Type, Id}, '$2'}, '$3'}),
+ %% [[G, K, V]] -> [{G, [{K, V}]}] where G is Q/X/Ch, K is from
+ %% ?FINE_STATS and V is a stats tree
+ %% TODO does this need to be optimised?
+ lists:foldl(
+ fun ([G, K, V], L) ->
+ case lists:keyfind(G, 1, L) of
+ false -> [{G, [{K, V}]} | L];
+ {G, KVs} -> lists:keyreplace(G, 1, L, {G, [{K, V} | KVs]})
+ end
+ end, [], FromETS).
+
+%% Pull the fine-stats keys (and their _details twins) out into a
+%% nested message_stats proplist, as the HTTP API presents them.
+extract_msg_stats(Stats) ->
+ FineStats = lists:append([[K, details_key(K)] || K <- ?FINE_STATS]),
+ {MsgStats, Other} =
+ lists:partition(fun({K, _}) -> lists:member(K, FineStats) end, Stats),
+ case MsgStats of
+ [] -> Other;
+ _ -> [{message_stats, MsgStats} | Other]
+ end.
+
+%% One named group of per-peer rows, each row formatted samples plus
+%% the peer's identity.
+detail_stats(Ranges, Name, AggregatedStatsType, Id, State) ->
+ {Name,
+ [[{stats, format_samples(Ranges, KVs, State)} | format_detail_id(G, State)]
+ || {G, KVs} <- read_detail_stats(AggregatedStatsType, Id, State)]}.
+
+%% A detail-group id is either a channel pid or a queue/exchange resource.
+format_detail_id(ChPid, State) when is_pid(ChPid) ->
+ augment_msg_stats([{channel, ChPid}], State);
+format_detail_id(#resource{name = Name, virtual_host = Vhost, kind = Kind},
+ _State) ->
+ [{Kind, [{name, Name}, {vhost, Vhost}]}].
+
+%% Render each stats tree as a counter plus a <key>_details entry;
+%% blank trees are omitted except for the coarse queue-length keys.
+format_samples(Ranges, ManyStats, #state{interval = Interval}) ->
+ lists:append(
+ [case rabbit_mgmt_stats:is_blank(Stats) andalso
+ not lists:member(K, ?COARSE_QUEUE_STATS) of
+ true -> [];
+ false -> {Details, Counter} = rabbit_mgmt_stats:format(
+ pick_range(K, Ranges),
+ Stats, Interval),
+ [{K, Counter},
+ {details_key(K), Details}]
+ end || {K, Stats} <- ManyStats]).
+
+%% Choose the requested range matching the key's stats family
+%% (coarse queue / fine message / coarse connection stats).
+pick_range(K, {RangeL, RangeM, RangeD}) ->
+ case {lists:member(K, ?COARSE_QUEUE_STATS),
+ lists:member(K, ?FINE_STATS),
+ lists:member(K, ?COARSE_CONN_STATS)} of
+ {true, false, false} -> RangeL;
+ {false, true, false} -> RangeM;
+ {false, false, true} -> RangeD
+ end.
+
+%% We do this when retrieving the queue record rather than when
+%% storing it since the memory use will drop *after* we find out about
+%% hibernation, so to do it when we receive a queue stats event would
+%% be fiddly and racy. This should be quite cheap though.
+adjust_hibernated_memory_use(Qs) ->
+ Pids = [pget(pid, Q) ||
+ Q <- Qs, pget(idle_since, Q, not_idle) =/= not_idle],
+ %% We use delegate here not for ordering reasons but because we
+ %% want to get the right amount of parallelism and minimise
+ %% cross-cluster communication.
+ {Mem, _BadNodes} = delegate:invoke(Pids, {erlang, process_info, [memory]}),
+ MemDict = dict:from_list([{P, M} || {P, M = {memory, _}} <- Mem]),
+ [case dict:find(pget(pid, Q), MemDict) of
+ error -> Q;
+ {ok, Memory} -> [Memory|proplists:delete(memory, Q)]
+ end || Q <- Qs].
+
+%% Find one object's 'create' row by name (stored in the row's third
+%% column by handle_created/4).
+created_event(Name, Type, Tables) ->
+ Table = orddict:fetch(Type, Tables),
+ case ets:match(Table, {{'$1', create}, '_', Name}) of
+ [] -> not_found;
+ [[Id]] -> lookup_element(Table, {Id, create})
+ end.
+
+%% All 'create' rows of a table, as their formatted proplists.
+created_events(Type, Tables) ->
+ [Facts || {{_, create}, Facts, _Name}
+ <- ets:tab2list(orddict:fetch(Type, Tables))].
+
+%% Build a fun producing one object's consumer_details rows, found in
+%% the given consumer index table via the key prefix KeyFun extracts.
+consumer_details_fun(KeyFun, TableName, State = #state{tables = Tables}) ->
+ Table = orddict:fetch(TableName, Tables),
+ fun ([]) -> [];
+ (Props) -> Pattern = {KeyFun(Props), '_', '_'},
+ [{consumer_details,
+ [augment_msg_stats(augment_consumer(Obj), State)
+ || Obj <- lists:append(
+ ets:match(Table, {Pattern, '$1'}))]}]
+ end.
+
+%% Replace the raw queue resource with its formatted representation.
+augment_consumer(Obj) ->
+ [{queue, rabbit_mgmt_format:resource(pget(queue, Obj))} |
+ proplists:delete(queue, Obj)].
+
+%%----------------------------------------------------------------------------
+%% Internal, query-time summing for overview
+%%----------------------------------------------------------------------------
+
+%% Sum one stats type across all vhosts for the overview page; vhosts
+%% without that type contribute a blank tree.
+overview_sum(Type, VHostStats) ->
+ Stats = [pget(Type, VHost, rabbit_mgmt_stats:blank())
+ || VHost <- VHostStats],
+ {Type, rabbit_mgmt_stats:sum(Stats)}.
+
+%%----------------------------------------------------------------------------
+%% Internal, query-time augmentation
+%%----------------------------------------------------------------------------
+
+%% Augment Props with connection/channel details, then strip raw pids.
+augment_msg_stats(Props, State) ->
+ rabbit_mgmt_format:strip_pids(
+ (augment_msg_stats_fun(State))(Props) ++ Props).
+
+%% Fun that augments connection/channel/owner pids with their details.
+augment_msg_stats_fun(State) ->
+ Funs = [{connection, fun augment_connection_pid/2},
+ {channel, fun augment_channel_pid/2},
+ {owner_pid, fun augment_connection_pid/2}],
+ fun (Props) -> augment(Props, Funs, State) end.
+
+augment(Items, Funs, State) ->
+ Augmented = [augment(K, Items, Fun, State) || {K, Fun} <- Funs],
+ [{K, V} || {K, V} <- Augmented, V =/= unknown].
+
+%% Apply Fun to the value of key K (when present) and report the
+%% result under the corresponding _details key.
+augment(K, Items, Fun, State) ->
+ Key = details_key(K),
+ case pget(K, Items) of
+ none -> {Key, unknown};
+ unknown -> {Key, unknown};
+ Id -> {Key, Fun(Id, State)}
+ end.
+
+%% Summarise a channel pid from its 'create' row, plus its connection.
+augment_channel_pid(Pid, #state{tables = Tables}) ->
+ Ch = lookup_element(orddict:fetch(channel_stats, Tables),
+ {Pid, create}),
+ Conn = lookup_element(orddict:fetch(connection_stats, Tables),
+ {pget(connection, Ch), create}),
+ [{name, pget(name, Ch)},
+ {number, pget(number, Ch)},
+ {connection_name, pget(name, Conn)},
+ {peer_port, pget(peer_port, Conn)},
+ {peer_host, pget(peer_host, Conn)}].
+
+%% Summarise a connection pid from its 'create' row.
+augment_connection_pid(Pid, #state{tables = Tables}) ->
+ Conn = lookup_element(orddict:fetch(connection_stats, Tables),
+ {Pid, create}),
+ [{name, pget(name, Conn)},
+ {peer_port, pget(peer_port, Conn)},
+ {peer_host, pget(peer_host, Conn)}].
+
+%%----------------------------------------------------------------------------
+%% Internal, event-GCing
+%%----------------------------------------------------------------------------
+
+%% GC at least ?GC_MIN_ROWS keys, or ?GC_MIN_RATIO of the table,
+%% whichever is larger, using the configured retention policies.
+gc_batch(State = #state{aggregated_stats = ETS}) ->
+ {ok, Policies} = application:get_env(
+ rabbitmq_management, sample_retention_policies),
+ Rows = erlang:max(?GC_MIN_ROWS,
+ round(?GC_MIN_RATIO * ets:info(ETS, size))),
+ gc_batch(Rows, Policies, State).
+
+%% Walk Rows keys onward from the cursor saved in gc_next_key, GCing
+%% each; the cursor resets to the table start after '$end_of_table'.
+%% NOTE(review): erlang:now/0 is deprecated in modern OTP releases -
+%% presumably acceptable for this code's vintage; confirm on upgrade.
+gc_batch(0, _Policies, State) ->
+ State;
+gc_batch(Rows, Policies, State = #state{aggregated_stats = ETS,
+ gc_next_key = Key0}) ->
+ Key = case Key0 of
+ undefined -> ets:first(ETS);
+ _ -> ets:next(ETS, Key0)
+ end,
+ Key1 = case Key of
+ '$end_of_table' -> undefined;
+ _ -> Now = floor(erlang:now(), State),
+ Stats = ets:lookup_element(ETS, Key, 2),
+ gc(Key, Stats, Policies, Now, ETS),
+ Key
+ end,
+ gc_batch(Rows - 1, Policies, State#state{gc_next_key = Key1}).
+
+%% Apply the type's retention policy; write back only if it changed.
+gc({{Type, Id}, Key}, Stats, Policies, Now, ETS) ->
+ Policy = pget(retention_policy(Type), Policies),
+ case rabbit_mgmt_stats:gc({Policy, Now}, Stats) of
+ Stats -> ok;
+ Stats2 -> ets:insert(ETS, {{{Type, Id}, Key}, Stats2})
+ end.
+
+%% Map each stats type to its sample_retention_policies bucket.
+retention_policy(vhost_stats) -> global;
+retention_policy(queue_stats) -> basic;
+retention_policy(exchange_stats) -> basic;
+retention_policy(connection_stats) -> basic;
+retention_policy(channel_stats) -> basic;
+retention_policy(queue_exchange_stats) -> detailed;
+retention_policy(channel_exchange_stats) -> detailed;
+retention_policy(channel_queue_stats) -> detailed.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_dispatcher).
+
+-export([modules/0, build_dispatcher/0]).
+
+-behaviour(rabbit_mgmt_extension).
+-export([dispatcher/0, web_ui/0]).
+
+%% Collect every extension's dispatcher table and mount it under "api".
+build_dispatcher() ->
+ [{["api" | Path], Mod, Args} ||
+ {Path, Mod, Args} <-
+ lists:append([Module:dispatcher() || Module <- modules()])].
+
+%% All loaded modules that declare the rabbit_mgmt_extension behaviour
+%% (this module included - it implements the core routes below).
+modules() ->
+ [Module || {Module, Behaviours} <-
+ rabbit_misc:all_module_attributes(behaviour),
+ lists:member(rabbit_mgmt_extension, Behaviours)].
+
+%%----------------------------------------------------------------------------
+
+web_ui() -> [{javascript, <<"dispatcher.js">>}].
+
+%% The core management API routes; atoms in a path are webmachine path
+%% bindings, strings are literal segments.
+dispatcher() ->
+ [{["overview"], rabbit_mgmt_wm_overview, []},
+ {["cluster-name"], rabbit_mgmt_wm_cluster_name, []},
+ {["nodes"], rabbit_mgmt_wm_nodes, []},
+ {["nodes", node], rabbit_mgmt_wm_node, []},
+ {["extensions"], rabbit_mgmt_wm_extensions, []},
+ {["all-configuration"], rabbit_mgmt_wm_definitions, []}, %% This was the old name, let's not break things gratuitously.
+ {["definitions"], rabbit_mgmt_wm_definitions, []},
+ {["parameters"], rabbit_mgmt_wm_parameters, []},
+ {["parameters", component], rabbit_mgmt_wm_parameters, []},
+ {["parameters", component, vhost], rabbit_mgmt_wm_parameters, []},
+ {["parameters", component, vhost, name], rabbit_mgmt_wm_parameter, []},
+ {["policies"], rabbit_mgmt_wm_policies, []},
+ {["policies", vhost], rabbit_mgmt_wm_policies, []},
+ {["policies", vhost, name], rabbit_mgmt_wm_policy, []},
+ {["connections"], rabbit_mgmt_wm_connections, []},
+ {["connections", connection], rabbit_mgmt_wm_connection, []},
+ {["connections", connection, "channels"], rabbit_mgmt_wm_connection_channels, []},
+ {["channels"], rabbit_mgmt_wm_channels, []},
+ {["channels", channel], rabbit_mgmt_wm_channel, []},
+ {["exchanges"], rabbit_mgmt_wm_exchanges, []},
+ {["exchanges", vhost], rabbit_mgmt_wm_exchanges, []},
+ {["exchanges", vhost, exchange], rabbit_mgmt_wm_exchange, []},
+ {["exchanges", vhost, exchange, "publish"], rabbit_mgmt_wm_exchange_publish, []},
+ {["exchanges", vhost, exchange, "bindings", "source"], rabbit_mgmt_wm_bindings, [exchange_source]},
+ {["exchanges", vhost, exchange, "bindings", "destination"], rabbit_mgmt_wm_bindings, [exchange_destination]},
+ {["queues"], rabbit_mgmt_wm_queues, []},
+ {["queues", vhost], rabbit_mgmt_wm_queues, []},
+ {["queues", vhost, queue], rabbit_mgmt_wm_queue, []},
+ {["queues", vhost, destination, "bindings"], rabbit_mgmt_wm_bindings, [queue]},
+ {["queues", vhost, queue, "contents"], rabbit_mgmt_wm_queue_purge, []},
+ {["queues", vhost, queue, "get"], rabbit_mgmt_wm_queue_get, []},
+ {["queues", vhost, queue, "actions"], rabbit_mgmt_wm_queue_actions, []},
+ {["bindings"], rabbit_mgmt_wm_bindings, [all]},
+ {["bindings", vhost], rabbit_mgmt_wm_bindings, [all]},
+ {["bindings", vhost, "e", source, dtype, destination], rabbit_mgmt_wm_bindings, [source_destination]},
+ {["bindings", vhost, "e", source, dtype, destination, props], rabbit_mgmt_wm_binding, []},
+ {["vhosts"], rabbit_mgmt_wm_vhosts, []},
+ {["vhosts", vhost], rabbit_mgmt_wm_vhost, []},
+ {["vhosts", vhost, "permissions"], rabbit_mgmt_wm_permissions_vhost, []},
+ {["users"], rabbit_mgmt_wm_users, []},
+ {["users", user], rabbit_mgmt_wm_user, []},
+ {["users", user, "permissions"], rabbit_mgmt_wm_permissions_user, []},
+ {["whoami"], rabbit_mgmt_wm_whoami, []},
+ {["permissions"], rabbit_mgmt_wm_permissions, []},
+ {["permissions", vhost, user], rabbit_mgmt_wm_permission, []},
+ {["aliveness-test", vhost], rabbit_mgmt_wm_aliveness_test, []}
+ ].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_extension).
+
+-export([behaviour_info/1]).
+
+%% Old-style behaviour definition (predates -callback attributes).
+behaviour_info(callbacks) ->
+ [
+ %% Return a webmachine dispatcher table to integrate
+ {dispatcher, 0},
+
+ %% Return a proplist of information for the web UI to integrate
+ %% this extension. Currently the proplist should have one key,
+ %% 'javascript', the name of a javascript file to load and run.
+ {web_ui, 0}
+ ];
+behaviour_info(_Other) ->
+ undefined.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_format).
+
+-export([format/2, print/2, remove/1, ip/1, ipb/1, amqp_table/1, tuple/1]).
+-export([parameter/1, timestamp/1, timestamp_ms/1, strip_pids/1]).
+-export([node_from_pid/1, protocol/1, resource/1, queue/1, queue_state/1]).
+-export([exchange/1, user/1, internal_user/1, binding/1, url/2]).
+-export([pack_binding_props/2, tokenise/1]).
+-export([to_amqp_table/1, listener/1, properties/1, basic_properties/1]).
+-export([record/2, to_basic_properties/1]).
+-export([addr/1, port/1]).
+
+-import(rabbit_misc, [pget/2, pset/3]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-define(PIDS_TO_STRIP, [connection, owner_pid, channel,
+ exclusive_consumer_pid]).
+
+%%--------------------------------------------------------------------
+
+%% Apply the formatter specs Fs to each {Name, Value} stat, dropping
+%% stats whose value is 'unknown'. Each formatter may expand one stat
+%% into several proplist entries, so the per-item results are
+%% concatenated at the end. lists:append/1 is the precise
+%% "concatenate a list of lists" operation; lists:concat/1 is meant
+%% for mixed atoms/numbers/strings and only worked here by accident.
+format(Stats, Fs) ->
+    lists:append([format_item(Stat, Fs) || {_Name, Value} = Stat <- Stats,
+                                           Value =/= unknown]).
+
+%% Find the first formatter spec {Fun, Names} whose Names mention this
+%% stat's Name and apply Fun to the value. A Fun returning a list
+%% replaces the item outright; any other result is re-wrapped as
+%% [{Name, Result}]. With no matching spec the item passes unchanged.
+format_item(Stat, []) ->
+    [Stat];
+format_item({Name, Value}, [{Fun, Names} | Fs]) ->
+    case lists:member(Name, Names) of
+        true -> case Fun(Value) of
+                    List when is_list(List) -> List;
+                    Formatted -> [{Name, Formatted}]
+                end;
+        false -> format_item({Name, Value}, Fs)
+    end.
+
+%% Render Val with format string Fmt and return the result as a flat
+%% binary. A non-list Val is wrapped in a singleton argument list.
+print(Fmt, Val) when is_list(Val) ->
+    Rendered = io_lib:format(Fmt, Val),
+    list_to_binary(lists:flatten(Rendered));
+print(Fmt, Val) ->
+    print(Fmt, [Val]).
+
+%% TODO - can we remove all these "unknown" cases? Coverage never hits them.
+
+%% Formatter that discards an item entirely (used to strip pids etc.).
+remove(_) -> [].
+
+%% Expand a pid into the node it lives on; each of the "no pid"
+%% placeholders ('', unknown, none) expands to nothing.
+node_from_pid(Pid) when is_pid(Pid) -> [{node, node(Pid)}];
+node_from_pid('') -> [];
+node_from_pid(unknown) -> [];
+node_from_pid(none) -> [].
+
+%% Build a formatter fun that maps a list of pids to their nodes under
+%% the key Name; the '' placeholder maps to nothing.
+nodes_from_pids(Name) ->
+    fun ('')  -> [];
+        (Pids) -> [{Name, [node(P) || P <- Pids]}]
+    end.
+
+%% Format an IP tuple as a binary address string via rabbit_misc:ntoa;
+%% 'unknown' passes through untouched.
+ip(unknown) -> unknown;
+ip(IP) -> list_to_binary(rabbit_misc:ntoa(IP)).
+
+%% As ip/1 but uses ntoab (bracketed form for IPv6 addresses).
+ipb(unknown) -> unknown;
+ipb(IP) -> list_to_binary(rabbit_misc:ntoab(IP)).
+
+%% Format a network address: strings/atoms/binaries are printed as-is,
+%% tuples are treated as IP addresses.
+addr(S) when is_list(S); is_atom(S); is_binary(S) -> print("~s", S);
+addr(Addr) when is_tuple(Addr) -> ip(Addr).
+
+%% Numeric ports pass through; anything else is rendered to a binary.
+port(Port) when is_number(Port) -> Port;
+port(Port) -> print("~w", Port).
+
+%% Wrap a {Name, Value} table as a mochijson2 struct, converting any
+%% tuple values to lists via tuple/1.
+properties(unknown) -> unknown;
+properties(Table) -> {struct, [{Name, tuple(Value)} ||
+ {Name, Value} <- Table]}.
+
+%% Convert an AMQP field table ({Name, Type, Value} triples) into a
+%% mochijson2 struct; 'undefined' is treated as an empty table.
+amqp_table(unknown) -> unknown;
+amqp_table(undefined) -> amqp_table([]);
+amqp_table(Table) -> {struct, [{Name, amqp_value(Type, Value)} ||
+ {Name, Type, Value} <- Table]}.
+
+%% Convert a single AMQP field value: arrays and nested tables recurse;
+%% binaries are sanitised so invalid UTF-8 cannot break JSON encoding.
+amqp_value(array, Vs) -> [amqp_value(T, V) || {T, V} <- Vs];
+amqp_value(table, V) -> amqp_table(V);
+amqp_value(_Type, V) when is_binary(V) -> utf8_safe(V);
+amqp_value(_Type, V) -> V.
+
+%% Return V unchanged if it is valid UTF-8; otherwise return a
+%% placeholder binary carrying the base64 of the raw bytes, so the
+%% value can still be serialised to JSON. xmerl_ucs:from_utf8/1 exits
+%% with {ucs, _} on invalid input, which is what we trap here.
+utf8_safe(V) ->
+ try
+ xmerl_ucs:from_utf8(V),
+ V
+ catch exit:{ucs, _} ->
+ Enc = base64:encode(V),
+ <<"Invalid UTF-8, base64 is: ", Enc/binary>>
+ end.
+
+%% Replace a runtime parameter's 'value' entry with its JSON-ready form.
+parameter(P) -> pset(value, rabbit_misc:term_to_json(pget(value, P)), P).
+
+%% Recursively convert tuples to lists (JSON has no tuple type);
+%% non-tuples pass through.
+tuple(unknown) -> unknown;
+tuple(Tuple) when is_tuple(Tuple) -> [tuple(E) || E <- tuple_to_list(Tuple)];
+tuple(Term) -> Term.
+
+%% Render a protocol as "Family Version". A bare 3-tuple version is
+%% assumed to be AMQP.
+protocol(unknown) ->
+ unknown;
+protocol(Version = {_Major, _Minor, _Revision}) ->
+ protocol({'AMQP', Version});
+protocol({Family, Version}) ->
+ print("~s ~s", [Family, protocol_version(Version)]).
+
+%% Render a version as "Major-Minor[-Revision]". A zero revision is
+%% suppressed; a pre-rendered string passes through. The io_lib results
+%% are iolists, flattened by print/2 in protocol/1 above.
+protocol_version(Arbitrary)
+ when is_list(Arbitrary) -> Arbitrary;
+protocol_version({Major, Minor}) -> io_lib:format("~B-~B", [Major, Minor]);
+protocol_version({Major, Minor, 0}) -> protocol_version({Major, Minor});
+protocol_version({Major, Minor, Revision}) -> io_lib:format("~B-~B-~B",
+ [Major, Minor, Revision]).
+
+%% Convert an erlang timestamp ({MegaSecs, Secs, MicroSecs}) to
+%% milliseconds since the epoch (diff against the zero timestamp).
+timestamp_ms(unknown) ->
+ unknown;
+timestamp_ms(Timestamp) ->
+ timer:now_diff(Timestamp, {0,0,0}) div 1000.
+
+%% Render an erlang timestamp as "YYYY-MM-DD HH:MM:SS" in local time.
+timestamp(unknown) ->
+ unknown;
+timestamp(Timestamp) ->
+ {{Y, M, D}, {H, Min, S}} = calendar:now_to_local_time(Timestamp),
+ print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w", [Y, M, D, H, Min, S]).
+
+%% Expand a #resource{} into name/vhost proplist entries, naming the
+%% name field 'name' by default.
+resource(unknown) -> unknown;
+resource(Res) -> resource(name, Res).
+
+%% As resource/1 but the caller chooses the key for the name field
+%% (e.g. 'source' for binding sources).
+resource(_, unknown) ->
+ unknown;
+resource(NameAs, #resource{name = Name, virtual_host = VHost}) ->
+ [{NameAs, Name}, {vhost, VHost}].
+
+%% Formatter for the 'policy' field: the '' placeholder expands to
+%% nothing, a real policy name is kept.
+policy('') -> [];
+policy(Policy) -> [{policy, Policy}].
+
+%% Render an #internal_user{} record for the API: the password hash is
+%% base64-encoded, tags become a comma-separated binary.
+internal_user(User) ->
+ [{name, User#internal_user.username},
+ {password_hash, base64:encode(User#internal_user.password_hash)},
+ {tags, tags(User#internal_user.tags)}].
+
+%% Render a #user{} (authenticated user) including its auth backend.
+user(User) ->
+ [{name, User#user.username},
+ {tags, tags(User#user.tags)},
+ {auth_backend, User#user.auth_backend}].
+
+%% Join a list of tag atoms into a single comma-separated binary.
+tags(Tags) ->
+ list_to_binary(string:join([atom_to_list(T) || T <- Tags], ",")).
+
+%% Render a #listener{} record; the IP address is formatted via ip/1.
+listener(#listener{node = Node, protocol = Protocol,
+ ip_address = IPAddress, port = Port}) ->
+ [{node, Node},
+ {protocol, Protocol},
+ {ip_address, ip(IPAddress)},
+ {port, Port}].
+
+%% Encode a routing key (plus a hash of the binding arguments, if any)
+%% into the "props" path segment used in binding URLs. "~" separates
+%% the two parts, so literal "~" in either part is %-escaped first; an
+%% empty key with no arguments is represented as a bare "~".
+pack_binding_props(<<"">>, []) ->
+    <<"~">>;
+pack_binding_props(Key, []) ->
+    list_to_binary(quote_binding(Key));
+pack_binding_props(Key, Args) ->
+    ArgsEnc = rabbit_mgmt_wm_binding:args_hash(Args),
+    list_to_binary(quote_binding(Key) ++ "~" ++ quote_binding(ArgsEnc)).
+
+%% URL-quote Name and escape "~" (our separator). {return, list} makes
+%% re:replace/4 return a flat string; without it the default iodata
+%% result is a deep list, which the "++" concatenation above then
+%% nests further and only list_to_binary/1 untangles. The bytes
+%% produced are identical either way; this just makes the type honest.
+quote_binding(Name) ->
+    re:replace(mochiweb_util:quote_plus(Name), "~", "%7E",
+               [global, {return, list}]).
+
+%% Unfortunately string:tokens("foo~~bar", "~"). -> ["foo","bar"], we lose
+%% the fact that there's a double ~.
+%% Split Str on "~" while preserving empty tokens: string:tokens/2
+%% would collapse "foo~~bar" into ["foo","bar"], losing the fact that
+%% there was a double separator.
+tokenise("") ->
+    [];
+tokenise(Str) ->
+    case string:cspan(Str, "~") of
+        Len when Len =:= length(Str) ->
+            [Str];
+        Len ->
+            Head = string:sub_string(Str, 1, Len),
+            Tail = string:sub_string(Str, Len + 2),
+            [Head | tokenise(Tail)]
+    end.
+
+%% Convert a decoded JSON object ({struct, Props} or a bare proplist)
+%% into an AMQP field table of {Key, Type, Value} triples.
+to_amqp_table({struct, T}) ->
+    to_amqp_table(T);
+to_amqp_table(T) ->
+    [to_amqp_table_row(K, V) || {K, V} <- T].
+
+%% One table row: infer the AMQP type from the JSON value.
+to_amqp_table_row(K, V) ->
+    {Type, Value} = type_val(V),
+    {K, Type, Value}.
+
+%% JSON arrays become AMQP arrays of typed values.
+to_amqp_array(L) ->
+    [type_val(I) || I <- L].
+
+%% Map a JSON value to its AMQP {Type, Value}. The integer clause must
+%% precede the general number clause so integers become 'long' rather
+%% than 'double'. JSON null has no AMQP representation and is rejected.
+type_val({struct, M})          -> {table, to_amqp_table(M)};
+type_val(L) when is_list(L)    -> {array, to_amqp_array(L)};
+type_val(X) when is_binary(X)  -> {longstr, X};
+type_val(X) when is_integer(X) -> {long, X};
+type_val(X) when is_number(X)  -> {double, X};
+type_val(true)                 -> {bool, true};
+type_val(false)                -> {bool, false};
+type_val(null)                 -> throw({error, null_not_allowed});
+type_val(X)                    -> throw({error, {unhandled_type, X}}).
+
+%% Build a URL binary from Fmt, URL-quoting each substitution value.
+url(Fmt, Vals) ->
+ print(Fmt, [mochiweb_util:quote_plus(V) || V <- Vals]).
+
+%% Format an exchange info proplist for the API.
+exchange(X) ->
+ format(X, [{fun resource/1, [name]},
+ {fun amqp_table/1, [arguments]},
+ {fun policy/1, [policy]}]).
+
+%% We get queues using rabbit_amqqueue:list/1 rather than :info_all/1 since
+%% the latter wakes up each queue. Therefore we have a record rather than a
+%% proplist to deal with.
+queue(#amqqueue{name = Name,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ exclusive_owner = ExclusiveOwner,
+ arguments = Arguments,
+ pid = Pid}) ->
+ %% NOTE(review): the policy formatter is listed but the proplist
+ %% built here contains no 'policy' key, so it appears inert —
+ %% confirm against callers that merge in extra keys.
+ format(
+ [{name, Name},
+ {durable, Durable},
+ {auto_delete, AutoDelete},
+ {owner_pid, ExclusiveOwner},
+ {arguments, Arguments},
+ {pid, Pid}],
+ [{fun resource/1, [name]},
+ {fun amqp_table/1, [arguments]},
+ {fun policy/1, [policy]}]).
+
+%% Render a queue's state; a syncing queue also reports how many
+%% messages remain to sync.
+queue_state({syncing, Msgs}) -> [{state, syncing},
+ {sync_messages, Msgs}];
+queue_state(Status) -> [{state, Status}].
+
+%% We get bindings using rabbit_binding:list_*/1 rather than :info_all/1 since
+%% there are no per-exchange / queue / etc variants for the latter. Therefore
+%% we have a record rather than a proplist to deal with.
+%% Format a #binding{} record for the API: the source resource is
+%% expanded under the key 'source', the destination is flattened to
+%% name + type, and a URL-safe properties_key is derived from the
+%% routing key and arguments.
+binding(#binding{source = S,
+ key = Key,
+ destination = D,
+ args = Args}) ->
+ format(
+ [{source, S},
+ {destination, D#resource.name},
+ {destination_type, D#resource.kind},
+ {routing_key, Key},
+ {arguments, Args},
+ {properties_key, pack_binding_props(Key, Args)}],
+ [{fun (Res) -> resource(source, Res) end, [source]},
+ {fun amqp_table/1, [arguments]}]).
+
+%% Format an AMQP #'P_basic'{} record, converting the headers table.
+basic_properties(Props = #'P_basic'{}) ->
+ Res = record(Props, record_info(fields, 'P_basic')),
+ format(Res, [{fun amqp_table/1, [headers]}]).
+
+%% Zip the record's field names with its element values (element 1 is
+%% the record tag, so values start at position 2), omitting fields
+%% whose value is 'undefined'. Note the result is in *reverse* field
+%% order, as the accumulator is built by prepending and not reversed.
+record(Record, Fields) ->
+    Indexed = lists:zip(Fields, lists:seq(2, length(Fields) + 1)),
+    lists:foldl(fun ({K, Ix}, Acc) ->
+                        case element(Ix, Record) of
+                            undefined -> Acc;
+                            V         -> [{K, V} | Acc]
+                        end
+                end, [], Indexed).
+
+%% Build a #'P_basic'{} record from a decoded JSON object. Known fields
+%% are validated/converted by Fmt; absent fields keep the record
+%% defaults. Bad field types throw {error, {Problem, Field}}.
+to_basic_properties({struct, P}) ->
+ to_basic_properties(P);
+
+to_basic_properties(Props) ->
+ E = fun (A, B) -> throw({error, {A, B}}) end,
+ %% Per-field conversion: headers become an AMQP table; the integer
+ %% fields must arrive as integers; everything else must be a binary
+ %% string.
+ Fmt = fun (headers, H) -> to_amqp_table(H);
+ (delivery_mode, V) when is_integer(V) -> V;
+ (delivery_mode, _V) -> E(not_int,delivery_mode);
+ (priority, V) when is_integer(V) -> V;
+ (priority, _V) -> E(not_int, priority);
+ (timestamp, V) when is_integer(V) -> V;
+ (timestamp, _V) -> E(not_int, timestamp);
+ (_, V) when is_binary(V) -> V;
+ (K, _V) -> E(not_string, K)
+ end,
+ %% Walk the record fields (positions start at 2, after the tag),
+ %% setting each one present in Props; JSON keys are binaries so the
+ %% atom field name is converted for lookup.
+ {Res, _Ix} = lists:foldl(
+ fun (K, {P, Ix}) ->
+ {case proplists:get_value(a2b(K), Props) of
+ undefined -> P;
+ V -> setelement(Ix, P, Fmt(K, V))
+ end, Ix + 1}
+ end, {#'P_basic'{}, 2},
+ record_info(fields, 'P_basic')),
+ Res.
+
+%% Atom -> binary (for matching atom field names against JSON keys).
+a2b(A) ->
+ list_to_binary(atom_to_list(A)).
+
+%% Items can be connections, channels, consumers or queues, hence remove takes
+%% various items.
+%% Replace raw pids in an info item with node names: the main 'pid'
+%% becomes a 'node' entry, other per-item pids (?PIDS_TO_STRIP) are
+%% dropped, and slave-pid lists become lists of node names. The first
+%% clause handles a single proplist item; the second maps over a list
+%% of items (an item is itself a list whose head is a tuple, which is
+%% how the two are distinguished).
+strip_pids(Item = [T | _]) when is_tuple(T) ->
+ format(Item,
+ [{fun node_from_pid/1, [pid]},
+ {fun remove/1, ?PIDS_TO_STRIP},
+ {nodes_from_pids(slave_nodes), [slave_pids]},
+ {nodes_from_pids(synchronised_slave_nodes),
+ [synchronised_slave_pids]}
+ ]);
+
+strip_pids(Items) -> [strip_pids(I) || I <- Items].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_load_definitions).
+
+-export([maybe_load_definitions/0]).
+
+%% We want to A) make sure we apply defintions before being open for
+%% business (hence why we don't do this in the mgmt app startup) and
+%% B) in fact do it before empty_db_check (so the defaults will not
+%% get created if we don't need 'em).
+
+-rabbit_boot_step({load_definitions,
+ [{description, "configured definitions"},
+ {mfa, {rabbit_mgmt_load_definitions,
+ maybe_load_definitions,
+ []}},
+ {requires, recovery},
+ {enables, empty_db_check}]}).
+
+%% Load definitions from the configured file at boot. Returns 'ok'
+%% when there is nothing to do, or {error, ...} if the file cannot be
+%% read (surfaced by the boot-step machinery).
+maybe_load_definitions() ->
+    %% Be defensive about the app env: an unset key previously crashed
+    %% with badmatch; treat "not set" the same as the explicit 'none'.
+    case application:get_env(rabbitmq_management, load_definitions) of
+        undefined  -> ok;
+        {ok, none} -> ok;
+        {ok, File} -> load_definitions_file(File)
+    end.
+
+%% Read a definitions file and apply its contents, logging first.
+load_definitions_file(File) ->
+    case file:read_file(File) of
+        {ok, Body} ->
+            rabbit_log:info("Applying definitions from: ~s~n", [File]),
+            load_definitions(Body);
+        {error, E} ->
+            {error, {could_not_read_defs, {File, E}}}
+    end.
+
+%% Hand the raw JSON body to the definitions resource; success maps to
+%% 'ok', failure to {error, E}.
+load_definitions(Body) ->
+    rabbit_mgmt_wm_definitions:apply_defs(
+      Body, fun () -> ok end, fun (E) -> {error, E} end).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2012 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_stats).
+
+-include("rabbit_mgmt.hrl").
+
+-export([blank/0, is_blank/1, record/3, format/3, sum/1, gc/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+%%----------------------------------------------------------------------------
+
+%% A fresh, empty stats structure: no per-timestamp diffs, zero base.
+blank() -> #stats{diffs = gb_trees:empty(), base = 0}.
+
+%% A stats structure is blank iff its base is 0 and it has no diffs.
+is_blank(#stats{diffs = Diffs, base = 0}) -> gb_trees:is_empty(Diffs);
+is_blank(#stats{}) -> false.
+
+%%----------------------------------------------------------------------------
+%% Event-time
+%%----------------------------------------------------------------------------
+
+%% Record a sample delta Diff at timestamp TS, summing with any delta
+%% already recorded for that exact timestamp.
+record(TS, Diff, Stats = #stats{diffs = Diffs}) ->
+ Diffs2 = case gb_trees:lookup(TS, Diffs) of
+ {value, Total} -> gb_trees:update(TS, Diff + Total, Diffs);
+ none -> gb_trees:insert(TS, Diff, Diffs)
+ end,
+ Stats#stats{diffs = Diffs2}.
+
+%%----------------------------------------------------------------------------
+%% Query-time
+%%----------------------------------------------------------------------------
+
+%% Format stats without a requested range: report the current rate and
+%% the total count (base plus all diffs).
+%% NOTE(review): erlang:now/0 is deprecated in modern OTP, but
+%% timestamp_ms/1 expects its {Mega, Sec, Micro} shape, so both would
+%% have to change together.
+format(no_range, #stats{diffs = Diffs, base = Base}, Interval) ->
+ Now = rabbit_mgmt_format:timestamp_ms(erlang:now()),
+ RangePoint = ((Now div Interval) * Interval) - Interval,
+ Count = sum_entire_tree(gb_trees:iterator(Diffs), Base),
+ {[{rate, format_rate(
+ Diffs, RangePoint, Interval, Interval)}], Count};
+
+%% Format stats over an explicit #range{}: produce the instantaneous
+%% rate, the absolute samples covering the range, and (when there are
+%% at least two samples) the average rate/value across them. Samples
+%% come back newest-first from extract_samples, hence hd/1 is the
+%% newest and lists:last/1 the oldest.
+format(Range, #stats{diffs = Diffs, base = Base}, Interval) ->
+ RangePoint = Range#range.last - Interval,
+ {Samples, Count} = extract_samples(
+ Range, Base, gb_trees:iterator(Diffs), []),
+ Part1 = [{rate, format_rate(
+ Diffs, RangePoint, Range#range.incr, Interval)},
+ {samples, Samples}],
+ Length = length(Samples),
+ Part2 = case Length > 1 of
+ true -> [{sample, S2}, {timestamp, T2}] = hd(Samples),
+ [{sample, S1}, {timestamp, T1}] = lists:last(Samples),
+ Total = lists:sum([pget(sample, I) || I <- Samples]),
+ [{avg_rate, (S2 - S1) * 1000 / (T2 - T1)},
+ {avg, Total / Length}];
+ false -> []
+ end,
+ {Part1 ++ Part2, Count}.
+
+%% The instantaneous rate, derived from the second-to-last data point
+%% (see note [0] below); 0.0 when that point is missing or stale.
+format_rate(Diffs, RangePoint, Incr, Interval) ->
+ case nth_largest(Diffs, 2) of
+ false -> 0.0;
+ {TS, S} -> case TS - RangePoint of %% [0]
+ D when D =< Incr andalso D >= 0 -> S * 1000 / Interval;
+ _ -> 0.0
+ end
+ end.
+
+%% [0] Only display the rate if it's live - i.e. ((the end of the
+%% range) - interval) corresponds to the second to last data point we
+%% have. If the end of the range is earlier we have gone silent, if
+%% it's later we have been asked for a range back in time (in which
+%% case showing the correct instantaneous rate would be quite a faff,
+%% and probably unwanted). Why the second to last? Because data is
+%% still arriving for the last...
+%% Return the {Key, Value} with the Nth-largest key in a gb_tree, or
+%% 'false' if the tree holds fewer than N entries.
+nth_largest(Tree, N) ->
+    case gb_trees:is_empty(Tree) of
+        true ->
+            false;
+        false when N =:= 1 ->
+            gb_trees:largest(Tree);
+        false ->
+            {_K, _V, Rest} = gb_trees:take_largest(Tree),
+            nth_largest(Rest, N - 1)
+    end.
+
+%% Fold every value reachable from a gb_trees iterator into Acc by
+%% addition; returns Acc itself for an exhausted iterator.
+sum_entire_tree(Iter, Acc) ->
+    case gb_trees:next(Iter) of
+        none            -> Acc;
+        {_Key, V, Rest} -> sum_entire_tree(Rest, Acc + V)
+    end.
+
+%% What we want to do here is: given the #range{}, provide a set of
+%% samples such that we definitely provide a set of samples which
+%% covers the exact range requested, despite the fact that we might
+%% not have it. We need to spin up over the entire range of the
+%% samples we *do* have since they are diff-based (and we convert to
+%% absolute values here).
+%% Walk the diff iterator, synthesising a zero diff at the range start
+%% when the tree is exhausted so the requested range is always covered.
+extract_samples(Range = #range{first = Next}, Base, It, Samples) ->
+ case gb_trees:next(It) of
+ {TS, S, It2} -> extract_samples1(Range, Base, TS, S, It2, Samples);
+ none -> extract_samples1(Range, Base, Next, 0, It, Samples)
+ end.
+
+%% Core of sample extraction: Next is the next timestamp we owe the
+%% caller, {TS, S} the current diff in hand. Diffs are accumulated
+%% into Base to produce absolute values; samples are prepended, so the
+%% returned list is newest-first. Returns {Samples, FinalCount}.
+extract_samples1(Range = #range{first = Next, last = Last, incr = Incr},
+ Base, TS, S, It, Samples) ->
+ if
+ %% We've gone over the range. Terminate.
+ Next > Last ->
+ {Samples, Base};
+ %% We've hit bang on a sample. Record it and move to the next.
+ Next =:= TS ->
+ extract_samples(Range#range{first = Next + Incr}, Base + S, It,
+ append(Base + S, Next, Samples));
+ %% We haven't yet hit the beginning of our range.
+ Next > TS ->
+ extract_samples(Range, Base + S, It, Samples);
+ %% We have a valid sample, but we haven't used it up
+ %% yet. Append it and loop around.
+ Next < TS ->
+ extract_samples1(Range#range{first = Next + Incr}, Base, TS, S, It,
+ append(Base, Next, Samples))
+ end.
+
+%% Prepend one {sample, timestamp} pair (hence newest-first ordering).
+append(S, TS, Samples) -> [[{sample, S}, {timestamp, TS}] | Samples].
+
+%% Sum a list of stats structures: diffs trees are merged key-wise and
+%% bases added. An empty list sums to blank().
+sum([]) -> blank();
+
+sum([Stats | StatsN]) ->
+ lists:foldl(
+ fun (#stats{diffs = D1, base = B1}, #stats{diffs = D2, base = B2}) ->
+ #stats{diffs = add_trees(D1, gb_trees:iterator(D2)),
+ base = B1 + B2}
+ end, Stats, StatsN).
+
+%% Merge every {K, V} yielded by iterator It into Tree, summing values
+%% when a key is present in both.
+add_trees(Tree, It) ->
+    case gb_trees:next(It) of
+        none ->
+            Tree;
+        {K, V, It2} ->
+            Merged = case gb_trees:lookup(K, Tree) of
+                         {value, Old} -> gb_trees:update(K, V + Old, Tree);
+                         none         -> gb_trees:insert(K, V, Tree)
+                     end,
+            add_trees(Merged, It2)
+    end.
+
+%%----------------------------------------------------------------------------
+%% Event-GCing
+%%----------------------------------------------------------------------------
+
+%% Garbage-collect a stats structure against the retention Cutoff,
+%% walking samples newest-first (hence the reverse of to_list).
+gc(Cutoff, #stats{diffs = Diffs, base = Base}) ->
+ List = lists:reverse(gb_trees:to_list(Diffs)),
+ gc(Cutoff, List, [], Base).
+
+%% Go through the list, amalgamating all too-old samples with the next
+%% newest keepable one [0] (we move samples forward in time since the
+%% semantics of a sample is "we had this many x by this time"). If the
+%% sample is too old, but would not be too old if moved to a rounder
+%% timestamp which does not exist then invent one and move it there
+%% [1]. But if it's just outright too old, move it to the base [2].
+%% Keep is accumulated oldest-last while walking newest-first, so it
+%% ends up in ascending key order, as gb_trees:from_orddict/1 requires.
+gc(_Cutoff, [], Keep, Base) ->
+ #stats{diffs = gb_trees:from_orddict(Keep), base = Base};
+gc(Cutoff, [H = {TS, S} | T], Keep, Base) ->
+ {NewKeep, NewBase} =
+ case keep(Cutoff, TS) of
+ keep -> {[H | Keep], Base};
+ drop -> {Keep, S + Base}; %% [2]
+ {move, D} when Keep =:= [] -> {[{TS + D, S}], Base}; %% [1]
+ {move, _} -> [{KTS, KS} | KT] = Keep,
+ {[{KTS, KS + S} | KT], Base} %% [0]
+ end,
+ gc(Cutoff, T, NewKeep, NewBase).
+
+%% Decide the fate of a sample at TS under the retention policy (a list
+%% of {AgeSec, DivisorSec} bands): for each band whose age window still
+%% covers TS, the sample is kept if it falls on the band's granularity
+%% boundary, or asked to move forward to the next boundary otherwise;
+%% outside all windows it is dropped. Decisions from all bands are
+%% combined with prefer_action/2 (keep > smallest move > drop).
+keep({Policy, Now}, TS) ->
+ lists:foldl(fun ({AgeSec, DivisorSec}, Action) ->
+ prefer_action(
+ Action,
+ case (Now - TS) =< (AgeSec * 1000) of
+ true -> DivisorMillis = DivisorSec * 1000,
+ case TS rem DivisorMillis of
+ 0 -> keep;
+ Rem -> {move, DivisorMillis - Rem}
+ end;
+ false -> drop
+ end)
+ end, drop, Policy).
+
+%% Combine two GC decisions, favouring retention: keep beats
+%% everything, the smaller move distance beats the larger, and drop
+%% only wins against another drop.
+prefer_action(keep, _)              -> keep;
+prefer_action(_, keep)              -> keep;
+prefer_action({move, A}, {move, B}) -> {move, min(A, B)};
+prefer_action({move, A}, drop)      -> {move, A};
+prefer_action(drop, {move, B})      -> {move, B};
+prefer_action(drop, drop)           -> drop.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_sup).
+
+-behaviour(mirrored_supervisor).
+
+-export([init/1]).
+-export([start_link/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%% Supervise the single management DB worker; one_for_one with up to
+%% 10 restarts in 10 seconds.
+init([]) ->
+ DB = {rabbit_mgmt_db, {rabbit_mgmt_db, start_link, []},
+ permanent, ?MAX_WAIT, worker, [rabbit_mgmt_db]},
+ {ok, {{one_for_one, 10, 10}, [DB]}}.
+
+%% Start as a mirrored supervisor so exactly one DB runs per cluster;
+%% shared state updates go through an mnesia transaction.
+start_link() ->
+ mirrored_supervisor:start_link(
+ {local, ?MODULE}, ?MODULE, fun rabbit_misc:execute_mnesia_transaction/1,
+ ?MODULE, []).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_sup_sup).
+
+%% We want there to be one management database in the cluster, with a
+%% globally registered name. So we use mirrored_supervisor for
+%% failover (in rabbit_mgmt_sup) and register a global name for the
+%% database.
+%%
+%% Unfortunately it's more complicated than using these things
+%% naively. The first problem is that on failover the mirrored
+%% supervisor might move the DB to a new node before the global name
+%% database notices and removes the old record. In that case starting
+%% the new database will fail.
+%%
+%% The second problem is that after a network partition things get
+%% worse. Since mirrored_supervisor uses Mnesia for global shared
+%% state, we have effectively two (or more) mirrored_supervisors. But
+%% the global name database does not do this, so at least one of them
+%% cannot start the management database; so the mirrored supervisor
+%% has to die. But what if the admin restarts the partition which
+%% contains the management DB? In that case we need to start a new
+%% management DB in the winning partition.
+%%
+%% Rather than try to get mirrored_supervisor to handle this
+%% post-partition state we go for a simpler approach: allow the whole
+%% mirrored_supervisor to die in the two edge cases above, and
+%% whenever we want to call into the mgmt DB we will start it up if it
+%% appears not to be there. See rabbit_mgmt_db:safe_call/3 for the
+%% code which restarts the DB if necessary.
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_child/0]).
+-export([init/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%% Start the locally registered top-level supervisor.
+start_link() -> supervisor2:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% Restart the (temporary) rabbit_mgmt_sup child after it has been
+%% allowed to die; called lazily from the DB access path (see the
+%% module comment above).
+start_child() -> supervisor2:start_child(?MODULE, sup()).
+
+%%----------------------------------------------------------------------------
+
+%% Zero restart intensity: if rabbit_mgmt_sup dies we deliberately let
+%% it stay down until start_child/0 revives it (see module comment).
+init([]) ->
+ {ok, {{one_for_one, 0, 1}, [sup()]}}.
+
+%% Child spec for the mirrored rabbit_mgmt_sup; 'temporary' so a death
+%% is not auto-restarted.
+sup() ->
+ {rabbit_mgmt_sup, {rabbit_mgmt_sup, start_link, []},
+ temporary, ?MAX_WAIT, supervisor, [rabbit_mgmt_sup]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_util).
+
+%% TODO sort all this out; maybe there's scope for rabbit_mgmt_request?
+
+-export([is_authorized/2, is_authorized_admin/2, is_authorized_admin/4,
+ vhost/1]).
+-export([is_authorized_vhost/2, is_authorized_user/3,
+ is_authorized_monitor/2, is_authorized_policies/2]).
+-export([bad_request/3, bad_request_exception/4, id/2, parse_bool/1,
+ parse_int/1]).
+-export([with_decode/4, not_found/3, amqp_request/4]).
+-export([with_channel/4, with_channel/5]).
+-export([props_to_method/2, props_to_method/4]).
+-export([all_or_one_vhost/2, http_to_amqp/5, reply/3, filter_vhost/3]).
+-export([filter_conn_ch_list/3, filter_user/2, list_login_vhosts/1]).
+-export([with_decode/5, decode/1, decode/2, redirect/2, args/1]).
+-export([reply_list/3, reply_list/4, sort_list/2, destination_type/1]).
+-export([post_respond/1, columns/1, is_monitor/1]).
+-export([list_visible_vhosts/1, b64decode_or_throw/1, no_range/0, range/1,
+ range_ceil/1, floor/2, ceil/2]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(FRAMING, rabbit_framing_amqp_0_9_1).
+
+%%--------------------------------------------------------------------
+
+%% Any authenticated management user may access the resource.
+is_authorized(ReqData, Context) ->
+ is_authorized(ReqData, Context, '', fun(_) -> true end).
+
+%% Only users carrying the administrator tag.
+is_authorized_admin(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"Not administrator user">>,
+ fun(#user{tags = Tags}) -> is_admin(Tags) end).
+
+%% As is_authorized_admin/2 but with explicit credentials (bypassing
+%% the Authorization header).
+is_authorized_admin(ReqData, Context, Username, Password) ->
+ is_authorized(ReqData, Context, Username, Password,
+ <<"Not administrator user">>,
+ fun(#user{tags = Tags}) -> is_admin(Tags) end).
+
+%% Only users carrying the monitoring (or stronger) tag.
+is_authorized_monitor(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"Not monitor user">>,
+ fun(#user{tags = Tags}) -> is_monitor(Tags) end).
+
+%% The user must be able to log in to the vhost named in the request.
+is_authorized_vhost(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access virtual host">>,
+ fun(User) ->
+ user_matches_vhost(ReqData, User)
+ end).
+
+%% True when the request names no vhost, or names one the user can log
+%% in to. A non-existent vhost also yields true here: the resource is
+%% expected to 404 on it separately.
+user_matches_vhost(ReqData, User) ->
+ case vhost(ReqData) of
+ not_found -> true;
+ none -> true;
+ V -> lists:member(V, list_login_vhosts(User))
+ end.
+
+%% Used for connections / channels. A normal user can only see / delete
+%% their own stuff. Monitors can see other users' and delete their
+%% own. Admins can do it all.
+is_authorized_user(ReqData, Context, Item) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access object">>,
+ fun(#user{username = Username, tags = Tags}) ->
+ case wrq:method(ReqData) of
+ 'DELETE' -> is_admin(Tags);
+ _ -> is_monitor(Tags)
+ end orelse Username == pget(user, Item)
+ end).
+
+%% For policies / parameters. Like is_authorized_vhost but you have to
+%% be a policymaker.
+is_authorized_policies(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access object">>,
+ fun(User = #user{tags = Tags}) ->
+ is_policymaker(Tags) andalso
+ user_matches_vhost(ReqData, User)
+ end).
+
+%% Pull credentials out of the Authorization header; with none present
+%% respond with the auth realm (triggering a browser login prompt).
+is_authorized(ReqData, Context, ErrorMsg, Fun) ->
+ case rabbit_web_dispatch_util:parse_auth_header(
+ wrq:get_req_header("authorization", ReqData)) of
+ [Username, Password] ->
+ is_authorized(ReqData, Context, Username, Password, ErrorMsg, Fun);
+ _ ->
+ {?AUTH_REALM, ReqData, Context}
+ end.
+
+%% Full authorization pipeline: check the credentials, then the
+%% loopback-only restriction for this user, then that the user is a
+%% management user at all, then the caller-supplied predicate Fun.
+%% Success stashes user and password in the webmachine context;
+%% every failure path logs and returns 401.
+is_authorized(ReqData, Context, Username, Password, ErrorMsg, Fun) ->
+ ErrFun = fun (Msg) ->
+ rabbit_log:warning("HTTP access denied: user '~s' - ~s~n",
+ [Username, Msg]),
+ not_authorised(Msg, ReqData, Context)
+ end,
+ case rabbit_access_control:check_user_pass_login(Username, Password) of
+ {ok, User = #user{tags = Tags}} ->
+ IPStr = wrq:peer(ReqData),
+ %% inet_parse:address/1 is an undocumented function but
+ %% exists in old versions of Erlang. inet:parse_address/1
+ %% is a documented wrapper round it but introduced in R16B.
+ {ok, IP} = inet_parse:address(IPStr),
+ case rabbit_access_control:check_user_loopback(Username, IP) of
+ ok ->
+ case is_mgmt_user(Tags) of
+ true ->
+ case Fun(User) of
+ true -> {true, ReqData,
+ Context#context{user = User,
+ password = Password}};
+ false -> ErrFun(ErrorMsg)
+ end;
+ false ->
+ ErrFun(<<"Not management user">>)
+ end;
+ not_allowed ->
+ ErrFun(<<"User can only log in via localhost">>)
+ end;
+ {refused, Msg, Args} ->
+ rabbit_log:warning("HTTP access denied: ~s~n",
+ [rabbit_misc:format(Msg, Args)]),
+ not_authorised(<<"Login failed">>, ReqData, Context)
+ end.
+
+%% The vhost named in the request path: 'none' when absent,
+%% 'not_found' when named but non-existent.
+vhost(ReqData) ->
+ case id(vhost, ReqData) of
+ none -> none;
+ VHost -> case rabbit_vhost:exists(VHost) of
+ true -> VHost;
+ false -> not_found
+ end
+ end.
+
+%% Decode the destination-type path segment ("e"/"q"); any other value
+%% crashes with case_clause, surfacing as a server error.
+destination_type(ReqData) ->
+ case id(dtype, ReqData) of
+ <<"e">> -> exchange;
+ <<"q">> -> queue
+ end.
+
+%% Respond with Facts as JSON, first narrowed to any requested columns.
+reply(Facts, ReqData, Context) ->
+ reply0(extract_columns(Facts, ReqData), ReqData, Context).
+
+%% Encode Facts and return them uncached; a mochijson2 encoding
+%% failure becomes a 500 carrying both the error and the offending
+%% term.
+reply0(Facts, ReqData, Context) ->
+ ReqData1 = wrq:set_resp_header("Cache-Control", "no-cache", ReqData),
+ try
+ {mochijson2:encode(Facts), ReqData1, Context}
+ catch exit:{json_encode, E} ->
+ Error = iolist_to_binary(
+ io_lib:format("JSON encode error: ~p", [E])),
+ Reason = iolist_to_binary(
+ io_lib:format("While encoding:~n~p", [Facts])),
+ internal_server_error(Error, Reason, ReqData1, Context)
+ end.
+
+%% Respond with a list of items, default-sorted by vhost then name.
+reply_list(Facts, ReqData, Context) ->
+ reply_list(Facts, ["vhost", "name"], ReqData, Context).
+
+%% Respond with a list of items, honouring the "sort"/"sort_reverse"
+%% query parameters on top of the given default sort keys.
+reply_list(Facts, DefaultSorts, ReqData, Context) ->
+ reply(sort_list(
+ extract_columns_list(Facts, ReqData),
+ DefaultSorts,
+ wrq:get_qs_value("sort", ReqData),
+ wrq:get_qs_value("sort_reverse", ReqData)),
+ ReqData, Context).
+
+%% Sort with no user-requested sort key and no reversal.
+sort_list(Facts, Sorts) -> sort_list(Facts, Sorts, undefined, false).
+
+%% Sort Facts (proplists) by a user-supplied key (if any) followed by
+%% the default keys; Reverse is the literal query-string value, so only
+%% the string "true" reverses.
+sort_list(Facts, DefaultSorts, Sort, Reverse) ->
+ SortList = case Sort of
+ undefined -> DefaultSorts;
+ Extra -> [Extra | DefaultSorts]
+ end,
+ %% lists:sort/2 is much more expensive than lists:sort/1
+ Sorted = [V || {_K, V} <- lists:sort(
+ [{sort_key(F, SortList), F} || F <- Facts])],
+ case Reverse of
+ "true" -> lists:reverse(Sorted);
+ _ -> Sorted
+ end.
+
+%% Build the composite sort key: the item's value for each sort field,
+%% in order, so term comparison of keys gives lexicographic sorting.
+sort_key(_Item, []) ->
+ [];
+sort_key(Item, [Sort | Sorts]) ->
+ [get_dotted_value(Sort, Item) | sort_key(Item, Sorts)].
+
+%% Look up a "a.b.c"-style dotted key in nested proplists.
+get_dotted_value(Key, Item) ->
+ Keys = string:tokens(Key, "."),
+ get_dotted_value0(Keys, Item).
+
+get_dotted_value0([Key], Item) ->
+ %% Put "nothing" before everything else, in number terms it usually
+ %% means 0.
+ pget_bin(list_to_binary(Key), Item, 0);
+get_dotted_value0([Key | Keys], Item) ->
+ get_dotted_value0(Keys, pget_bin(list_to_binary(Key), Item, [])).
+
+%% Proplist lookup where stored keys may be atoms or binaries; both are
+%% compared as binaries.
+%% NOTE(review): the single-match pattern means a list with duplicate
+%% keys hits case_clause — appears to assume unique keys; confirm.
+pget_bin(Key, List, Default) ->
+ case lists:partition(fun ({K, _V}) -> a2b(K) =:= Key end, List) of
+ {[{_K, V}], _} -> V;
+ {[], _} -> Default
+ end.
+
+%% Narrow one item to the columns requested in the query string.
+extract_columns(Item, ReqData) ->
+ extract_column_items(Item, columns(ReqData)).
+
+%% As extract_columns/2 for a list of items, parsing the filter once.
+extract_columns_list(Items, ReqData) ->
+ Cols = columns(ReqData),
+ [extract_column_items(Item, Cols) || Item <- Items].
+
+%% Parse the "columns" query parameter: comma-separated dotted paths
+%% become lists of binary path segments; absent means 'all'.
+columns(ReqData) ->
+ case wrq:get_qs_value("columns", ReqData) of
+ undefined -> all;
+ Str -> [[list_to_binary(T) || T <- string:tokens(C, ".")]
+ || C <- string:tokens(Str, ",")]
+ end.
+
+%% Recursively filter a JSON-ish structure by the column paths:
+%% proplists keep only wanted keys (descending the filter per key),
+%% plain lists are mapped over, scalars pass through.
+extract_column_items(Item, all) ->
+ Item;
+extract_column_items({struct, L}, Cols) ->
+ extract_column_items(L, Cols);
+extract_column_items(Item = [T | _], Cols) when is_tuple(T) ->
+ [{K, extract_column_items(V, descend_columns(a2b(K), Cols))} ||
+ {K, V} <- Item, want_column(a2b(K), Cols)];
+extract_column_items(L, Cols) when is_list(L) ->
+ [extract_column_items(I, Cols) || I <- L];
+extract_column_items(O, _Cols) ->
+ O.
+
+%% Does the column filter admit top-level column Col?
+want_column(_Col, all) -> true;
+want_column(Col, Cols) -> lists:any(fun ([C | _]) -> C =:= Col end, Cols).
+
+%% Narrow a column filter to the sub-paths beneath key K. A path that
+%% terminates exactly at K admits everything below it ('all').
+descend_columns(_K, [])                    -> [];
+descend_columns(K,  [[K] | _Rest])         -> all;
+descend_columns(K,  [[K | K2] | Rest])     -> [K2 | descend_columns(K, Rest)];
+descend_columns(K,  [[_Other | _] | Rest]) -> descend_columns(K, Rest).
+
+%% Coerce an atom key to a binary; binaries pass straight through.
+a2b(A) when is_atom(A) -> list_to_binary(atom_to_list(A));
+a2b(B)                 -> B.
+
+%% 400 with a JSON error body.
+bad_request(Reason, ReqData, Context) ->
+ halt_response(400, bad_request, Reason, ReqData, Context).
+
+%% 401 with a JSON error body.
+not_authorised(Reason, ReqData, Context) ->
+ halt_response(401, not_authorised, Reason, ReqData, Context).
+
+%% 404 with a JSON error body.
+not_found(Reason, ReqData, Context) ->
+ halt_response(404, not_found, Reason, ReqData, Context).
+
+%% 500: logged server-side as well as reported to the client.
+internal_server_error(Error, Reason, ReqData, Context) ->
+ rabbit_log:error("~s~n~s~n", [Error, Reason]),
+ halt_response(500, Error, Reason, ReqData, Context).
+
+%% Terminate the webmachine flow with Code and an {error, reason} JSON
+%% body; tuples in Reason are made JSON-safe first.
+halt_response(Code, Type, Reason, ReqData, Context) ->
+ Json = {struct, [{error, Type},
+ {reason, rabbit_mgmt_format:tuple(Reason)}]},
+ ReqData1 = wrq:append_to_response_body(mochijson2:encode(Json), ReqData),
+ {{halt, Code}, ReqData1, Context}.
+
+%% Fetch a path segment by key. Exchange-ish keys map the pseudo-name
+%% "amq.default" onto the real default exchange's empty name.
+id(Key, ReqData) when Key =:= exchange;
+ Key =:= source;
+ Key =:= destination ->
+ case id0(Key, ReqData) of
+ <<"amq.default">> -> <<"">>;
+ Name -> Name
+ end;
+id(Key, ReqData) ->
+ id0(Key, ReqData).
+
+%% Raw path-segment lookup: URL-unquoted and binarised, or 'none'.
+id0(Key, ReqData) ->
+ case orddict:find(Key, wrq:path_info(ReqData)) of
+ {ok, Id} -> list_to_binary(mochiweb_util:unquote(Id));
+ error -> none
+ end.
+
+%% Decode the request body and pass the values for Keys to Fun; a
+%% decode failure or a thrown {error, E} inside Fun becomes a 400.
+with_decode(Keys, ReqData, Context, Fun) ->
+ with_decode(Keys, wrq:req_body(ReqData), ReqData, Context, Fun).
+
+with_decode(Keys, Body, ReqData, Context, Fun) ->
+ case decode(Keys, Body) of
+ {error, Reason} -> bad_request(Reason, ReqData, Context);
+ {ok, Values, JSON} -> try
+ Fun(Values, JSON)
+ catch {error, Error} ->
+ bad_request(Error, ReqData, Context)
+ end
+ end.
+
+%% Decode a JSON body and extract the values for Keys; missing keys are
+%% collected and reported together as {error, [{key_missing, K}, ...]}.
+%% NOTE(review): list_to_atom/1 is applied to client-supplied JSON
+%% keys here — atoms are never GC'd, so hostile input can exhaust the
+%% atom table. Callers rely on atom keys, so this can't be changed in
+%% isolation; flagging for a coordinated fix.
+decode(Keys, Body) ->
+ case decode(Body) of
+ {ok, J0} -> J = [{list_to_atom(binary_to_list(K)), V} || {K, V} <- J0],
+ Results = [get_or_missing(K, J) || K <- Keys],
+ case [E || E = {key_missing, _} <- Results] of
+ [] -> {ok, Results, J};
+ Errors -> {error, Errors}
+ end;
+ Else -> Else
+ end.
+
+%% Decode a JSON object body; the empty body counts as an empty object.
+decode(<<"">>) ->
+ {ok, []};
+
+decode(Body) ->
+ try
+ {struct, J} = mochijson2:decode(Body),
+ {ok, J}
+ catch error:_ -> {error, not_json}
+ end.
+
+%% Proplist lookup that tags absence rather than defaulting.
+get_or_missing(K, L) ->
+ case pget(K, L) of
+ undefined -> {key_missing, K};
+ V -> V
+ end.
+
%% Turn an HTTP request into an AMQP method call: decode the JSON
%% body, build the method record MethodName (after running
%% Transformers and merging Extra; see props_to_method/4) and execute
%% it over a channel on the requested node. Decode and build errors
%% become 400s; an unknown vhost becomes a 404.
http_to_amqp(MethodName, ReqData, Context, Transformers, Extra) ->
    case vhost(ReqData) of
        not_found ->
            not_found(vhost_not_found, ReqData, Context);
        VHost ->
            case decode(wrq:req_body(ReqData)) of
                {ok, Props} ->
                    try
                        %% An explicit "node" key in the body selects
                        %% the target node; default is this node.
                        Node = case pget(<<"node">>, Props) of
                                   undefined -> node();
                                   N -> rabbit_nodes:make(
                                          binary_to_list(N))
                               end,
                        amqp_request(VHost, ReqData, Context, Node,
                                     props_to_method(
                                       MethodName, Props, Transformers, Extra))
                    catch {error, Error} ->
                            bad_request(Error, ReqData, Context)
                    end;
                {error, Reason} ->
                    bad_request(Reason, ReqData, Context)
            end
    end.
+
%% Build an AMQP method record from decoded JSON Props: binary keys
%% become atoms, Extra is appended, and Transformers run via
%% rabbit_mgmt_format:format/2 before the record is filled in.
%% NOTE(review): list_to_atom/1 on request-supplied keys can grow the
%% atom table without bound -- flagged for review, not changed here.
props_to_method(MethodName, Props, Transformers, Extra) ->
    Props1 = [{list_to_atom(binary_to_list(K)), V} || {K, V} <- Props],
    props_to_method(
      MethodName, rabbit_mgmt_format:format(Props1 ++ Extra, Transformers)).
+
%% Fill in the method record for MethodName from a proplist: walk the
%% record's field names in order, setting each field that is present
%% in Props. The tuple index starts at 2 because element 1 of a
%% record is its tag. 'arguments' values are first converted to an
%% AMQP table via args/1.
props_to_method(MethodName, Props) ->
    Props1 = rabbit_mgmt_format:format(
               Props,
               [{fun (Args) -> [{arguments, args(Args)}] end, [arguments]}]),
    FieldNames = ?FRAMING:method_fieldnames(MethodName),
    {Res, _Idx} = lists:foldl(
                    fun (K, {R, Idx}) ->
                            NewR = case pget(K, Props1) of
                                       undefined -> R;
                                       V -> setelement(Idx, R, V)
                                   end,
                            {NewR, Idx + 1}
                    end, {?FRAMING:method_record(MethodName), 2},
                    FieldNames),
    Res.
+
%% Coerce a JSON-ish boolean: the binaries "true"/"false" and real
%% booleans are accepted, 'undefined' passes through, and anything
%% else throws {error, {not_boolean, V}} (mapped to a 400 upstream).
parse_bool(<<"true">>)           -> true;
parse_bool(<<"false">>)          -> false;
parse_bool(B) when is_boolean(B) -> B;
parse_bool(undefined)            -> undefined;
parse_bool(V)                    -> throw({error, {not_boolean, V}}).
+
%% Coerce a JSON-ish integer: integers pass through, other numbers
%% are truncated, binaries are parsed; anything else throws
%% {error, {not_integer, V}} (mapped to a 400 upstream).
parse_int(I) when is_integer(I) -> I;
parse_int(F) when is_number(F)  -> trunc(F);
parse_int(V)                    -> try
                                       binary_to_integer(V)
                                   catch error:badarg ->
                                           throw({error, {not_integer, V}})
                                   end.
+
%% As amqp_request/5, targeting the local node.
amqp_request(VHost, ReqData, Context, Method) ->
    amqp_request(VHost, ReqData, Context, node(), Method).
+
%% Execute Method on a fresh channel to Node, returning the
%% webmachine 'true' result on success.
amqp_request(VHost, ReqData, Context, Node, Method) ->
    with_channel(VHost, ReqData, Context, Node,
                 fun (Ch) ->
                         amqp_channel:call(Ch, Method),
                         {true, ReqData, Context}
                 end).
+
%% As with_channel/5, on the local node.
with_channel(VHost, ReqData, Context, Fun) ->
    with_channel(VHost, ReqData, Context, node(), Fun).
+
%% Open a direct AMQP connection and channel to Node as the
%% requesting user, run Fun(Channel), and translate server-initiated
%% shutdowns into HTTP responses:
%%   404 for server_initiated_close with ?NOT_FOUND,
%%   401 for server_initiated_close with ?ACCESS_REFUSED,
%%   400 for any other server-initiated (hard) close, for auth
%%       failures on connect, and for an unreachable node.
%% The channel and connection are always torn down afterwards.
with_channel(VHost, ReqData,
             Context = #context{user = #user {username = Username},
                                password = Password},
             Node, Fun) ->
    Params = #amqp_params_direct{username = Username,
                                 password = Password,
                                 node = Node,
                                 virtual_host = VHost},
    case amqp_connection:start(Params) of
        {ok, Conn} ->
            {ok, Ch} = amqp_connection:open_channel(Conn),
            try
                Fun(Ch)
            catch
                exit:{{shutdown,
                       {server_initiated_close, ?NOT_FOUND, Reason}}, _} ->
                    not_found(Reason, ReqData, Context);
                exit:{{shutdown,
                       {server_initiated_close, ?ACCESS_REFUSED, Reason}}, _} ->
                    not_authorised(Reason, ReqData, Context);
                exit:{{shutdown, {ServerClose, Code, Reason}}, _}
                  when ServerClose =:= server_initiated_close;
                       ServerClose =:= server_initiated_hard_close ->
                    bad_request_exception(Code, Reason, ReqData, Context);
                exit:{{shutdown, {connection_closing,
                                  {ServerClose, Code, Reason}}}, _}
                  when ServerClose =:= server_initiated_close;
                       ServerClose =:= server_initiated_hard_close ->
                    bad_request_exception(Code, Reason, ReqData, Context)
            after
                %% Best-effort cleanup: either may already be down,
                %% hence the bare 'catch'.
                catch amqp_channel:close(Ch),
                catch amqp_connection:close(Conn)
            end;
        {error, {auth_failure, Msg}} ->
            not_authorised(Msg, ReqData, Context);
        {error, {nodedown, N}} ->
            bad_request(
              list_to_binary(
                io_lib:format("Node ~s could not be contacted", [N])),
              ReqData, Context)
    end.
+
%% Render an AMQP error code plus explanation as a 400 response.
bad_request_exception(Code, Reason, ReqData, Context) ->
    Msg = io_lib:format("~p ~s", [Code, Reason]),
    bad_request(list_to_binary(Msg), ReqData, Context).
+
%% Apply Fun to the vhost named in the request, or to every vhost
%% (concatenating the results) when none is named.
all_or_one_vhost(ReqData, Fun) ->
    case rabbit_mgmt_util:vhost(ReqData) of
        none      -> lists:flatmap(Fun, rabbit_vhost:list());
        not_found -> vhost_not_found;
        VHost     -> Fun(VHost)
    end.
+
%% Restrict List to items living in a vhost the user can log in to.
filter_vhost(List, _ReqData, Context) ->
    Visible = list_login_vhosts(Context#context.user),
    lists:filter(fun (Item) -> lists:member(pget(vhost, Item), Visible) end,
                 List).
+
%% Restrict List to items the requesting user may see.
filter_user(List, _ReqData, #context{user = User}) ->
    filter_user(List, User).
+
%% Monitors see everything; everyone else only sees their own items.
filter_user(List, #user{username = Username, tags = Tags}) ->
    case is_monitor(Tags) of
        false -> [I || I <- List, pget(user, I) == Username];
        true  -> List
    end.
+
%% Filter a connection/channel listing down to the requested vhost
%% (if any) and the requesting user, then strip internal pids.
filter_conn_ch_list(List, ReqData, Context) ->
    ByVHost = case vhost(ReqData) of
                  none  -> List;
                  VHost -> [I || I <- List, pget(vhost, I) =:= VHost]
              end,
    rabbit_mgmt_format:strip_pids(filter_user(ByVHost, ReqData, Context)).
+
%% Issue an HTTP redirect to Location (a binary URL).
redirect(Location, ReqData) ->
    ReqData1 = wrq:set_resp_header("Location",
                                   binary_to_list(Location), ReqData),
    wrq:do_redirect(true, ReqData1).
+
%% Convert a JSON arguments object into an AMQP table.
args({struct, Fields}) -> args(Fields);
args(Fields)           -> rabbit_mgmt_format:to_amqp_table(Fields).
+
%% Make replying to a POST look like anything else: plain results and
%% halts pass straight through; a JSON body becomes a JSON response.
post_respond({true, ReqData, Context}) ->
    {true, ReqData, Context};
post_respond({{halt, Code}, ReqData, Context}) ->
    {{halt, Code}, ReqData, Context};
post_respond({JSON, ReqData, Context}) ->
    ReqData1 = wrq:append_to_response_body(JSON, ReqData),
    {true,
     wrq:set_resp_header("content-type", "application/json", ReqData1),
     Context}.
+
+is_admin(T) -> intersects(T, [administrator]).
+is_policymaker(T) -> intersects(T, [administrator, policymaker]).
+is_monitor(T) -> intersects(T, [administrator, monitoring]).
%% Any of the tags accepted above, or plain 'management'.
is_mgmt_user(T)   -> intersects(T, [administrator, monitoring, policymaker,
                                    management]).
+
+intersects(A, B) -> lists:any(fun(I) -> lists:member(I, B) end, A).
+
%% The distinction between list_visible_vhosts and list_login_vhosts
%% exists to ensure that admins / monitors can always learn of the
%% existence of all vhosts, and can always see their contribution to
%% global stats. However, if an admin / monitor has no permissions
%% for a vhost, it is probably less confusing to prevent them from
%% seeing "into" that vhost than to let them see resources they
%% cannot touch.
+
%% Monitors may see every vhost; everyone else sees only the vhosts
%% they can log in to.
list_visible_vhosts(User = #user{tags = Tags}) ->
    case is_monitor(Tags) of
        false -> list_login_vhosts(User);
        true  -> rabbit_vhost:list()
    end.
+
%% The vhosts this user holds access permissions on. Rewritten from
%% old-style 'catch Expr' (which conflates throws, errors and exits
%% and loses the stacktrace) to try/of/catch with identical
%% semantics: any non-ok result or any exception means "no access".
list_login_vhosts(User) ->
    [V || V <- rabbit_vhost:list(),
          try rabbit_access_control:check_vhost_access(User, V) of
              ok -> true;
              _  -> false
          catch
              _:_ -> false
          end].
+
%% base64:decode/1 raises a variety of errors on malformed input;
%% normalise them all into a single throw that upstream turns into a
%% 400 Bad Request.
b64decode_or_throw(B64) ->
    try base64:decode(B64)
    catch
        error:_ -> throw({error, {not_base64, B64}})
    end.
+
+no_range() -> {no_range, no_range, no_range}.
+
%% Take the floor on queries so we only return samples for which we
%% have finished receiving events. Fixes the "drop at the end"
%% problem.
range(ReqData) ->
    Round = fun floor/2,
    {range("lengths",    Round, ReqData),
     range("msg_rates",  Round, ReqData),
     range("data_rates", Round, ReqData)}.
+
%% ...but if we know only one event could have contributed towards
%% what we are interested in, take the ceiling instead and get
%% slightly fresher data. msg_rates still uses floor/2 because, in
%% the places this is called from (connections and queues), the
%% msg_rates are still aggregated even though the lengths and data
%% rates are not.
range_ceil(ReqData) ->
    {range("lengths",    fun ceil/2,  ReqData),
     range("msg_rates",  fun floor/2, ReqData),
     range("data_rates", fun ceil/2,  ReqData)}.
+
%% Build a #range{} from the "<Prefix>_age" and "<Prefix>_incr" query
%% parameters (both in seconds), rounding the endpoint with Round.
%% Returns 'no_range' when either parameter is absent or malformed.
range(Prefix, Round, ReqData) ->
    Age0 = int(Prefix ++ "_age", ReqData),
    Incr0 = int(Prefix ++ "_incr", ReqData),
    if
        is_integer(Age0) andalso is_integer(Incr0) ->
            Age = Age0 * 1000,
            Incr = Incr0 * 1000,
            %% os:timestamp/0 rather than erlang:now/0: same
            %% {Mega, Sec, Micro} format, but without now/0's global
            %% uniqueness guarantee (a serialisation point, and the
            %% reason now/0 was later deprecated).
            Now = rabbit_mgmt_format:timestamp_ms(os:timestamp()),
            Last = Round(Now, Incr),
            #range{first = (Last - Age),
                   last  = Last,
                   incr  = Incr};
        true ->
            no_range
    end.
+
+floor(TS, Interval) -> (TS div Interval) * Interval.
+
%% Round TS up to a multiple of Interval.
ceil(TS, Interval) ->
    case (TS div Interval) * Interval of
        TS      -> TS;
        Rounded -> Rounded + Interval
    end.
+
%% Parse query-string parameter Name as an integer; 'undefined' when
%% it is absent or not a valid integer. Rewritten from old-style
%% 'catch' (which swallows every exception class) to a try that only
%% absorbs the error:badarg raised by list_to_integer/1.
int(Name, ReqData) ->
    case wrq:get_qs_value(Name, ReqData) of
        undefined -> undefined;
        Str       -> try
                         list_to_integer(Str)
                     catch error:badarg ->
                             undefined
                     end
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_aliveness_test).

-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
-export([resource_exists/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").

%% Name of the test queue declared in the pinged vhost.
-define(QUEUE, <<"aliveness-test">>).

%%--------------------------------------------------------------------

%% Webmachine resource for GET /api/aliveness-test/<vhost>: declares
%% a queue, publishes a message, and reads it back to prove the
%% broker is responsive within that vhost.

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

%% 404 unless the vhost in the URL exists.
resource_exists(ReqData, Context) ->
    {case rabbit_mgmt_util:vhost(ReqData) of
         not_found -> false;
         _         -> true
     end, ReqData, Context}.

%% Declare / publish / get on a throwaway channel. The publish goes
%% to the default exchange (no 'exchange' field set), routed by queue
%% name; if basic.get returns no message the {#'basic.get_ok'{}, _}
%% match fails and the request errors rather than replying ok.
to_json(ReqData, Context) ->
    rabbit_mgmt_util:with_channel(
      rabbit_mgmt_util:vhost(ReqData), ReqData, Context,
      fun(Ch) ->
              amqp_channel:call(Ch, #'queue.declare'{queue = ?QUEUE}),
              amqp_channel:call(Ch, #'basic.publish'{routing_key = ?QUEUE},
                                #amqp_msg{payload = <<"test_message">>}),
              {#'basic.get_ok'{}, _} =
                  amqp_channel:call(Ch, #'basic.get'{queue = ?QUEUE,
                                                     no_ack = true}),
              %% Don't delete the queue. If this is pinged every few
              %% seconds we don't want to create a mnesia transaction
              %% each time.
              rabbit_mgmt_util:reply([{status, ok}], ReqData, Context)
      end).

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_binding).

-export([init/1, resource_exists/2, to_json/2,
         content_types_provided/2, content_types_accepted/2,
         is_authorized/2, allowed_methods/2, delete_resource/2,
         args_hash/1]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource for a single binding, addressed by vhost,
%% source exchange, destination (queue or exchange) and a "props"
%% path component that packs the routing key and an arguments hash
%% (see unpack/3 and args_hash/1 below).

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.

allowed_methods(ReqData, Context) ->
    {['HEAD', 'GET', 'DELETE'], ReqData, Context}.

%% The resource exists iff the URL parses to a binding that is
%% currently present in the broker.
resource_exists(ReqData, Context) ->
    Binding = binding(ReqData),
    {case Binding of
         not_found        -> false;
         {bad_request, _} -> false;
         _                -> case rabbit_binding:exists(Binding) of
                                 true -> true;
                                 _    -> false
                             end
     end, ReqData, Context}.

to_json(ReqData, Context) ->
    with_binding(ReqData, Context,
                 fun(Binding) ->
                         rabbit_mgmt_util:reply(
                           rabbit_mgmt_format:binding(Binding),
                           ReqData, Context)
                 end).

%% Unbind via the AMQP method matching the destination type.
delete_resource(ReqData, Context) ->
    MethodName = case rabbit_mgmt_util:destination_type(ReqData) of
                     exchange -> 'exchange.unbind';
                     queue    -> 'queue.unbind'
                 end,
    sync_resource(MethodName, ReqData, Context).

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).

%%--------------------------------------------------------------------

%% Reconstruct the #binding{} described by the request URL. Returns
%% 'not_found' for an unknown vhost, or {bad_request, _} when the
%% props component does not parse or match an existing binding.
binding(ReqData) ->
    case rabbit_mgmt_util:vhost(ReqData) of
        not_found -> not_found;
        VHost     -> Source = rabbit_mgmt_util:id(source, ReqData),
                     Dest = rabbit_mgmt_util:id(destination, ReqData),
                     DestType = rabbit_mgmt_util:destination_type(ReqData),
                     Props = rabbit_mgmt_util:id(props, ReqData),
                     SName = rabbit_misc:r(VHost, exchange, Source),
                     DName = rabbit_misc:r(VHost, DestType, Dest),
                     case unpack(SName, DName, Props) of
                         {bad_request, Str} ->
                             {bad_request, Str};
                         {Key, Args} ->
                             #binding{ source      = SName,
                                       destination = DName,
                                       key         = Key,
                                       args        = Args }
                     end
    end.

%% Split the props path component: a lone "~" is an empty routing key
%% with no arguments; a second token is an arguments hash that must
%% be resolved against the bindings between Src and Dst.
unpack(Src, Dst, Props) ->
    case rabbit_mgmt_format:tokenise(binary_to_list(Props)) of
        ["~"]          -> {<<>>, []};
        [Key]          -> {unquote(Key), []};
        ["~", ArgsEnc] -> lookup(<<>>, ArgsEnc, Src, Dst);
        [Key, ArgsEnc] -> lookup(unquote(Key), ArgsEnc, Src, Dst);
        _              -> {bad_request, {too_many_tokens, Props}}
    end.

lookup(RoutingKey, ArgsEnc, Src, Dst) ->
    lookup(RoutingKey, unquote(ArgsEnc),
           rabbit_binding:list_for_source_and_destination(Src, Dst)).

%% Scan the candidate bindings for the one whose arguments produce
%% Hash; {bad_request, _} when none does.
lookup(_RoutingKey, _Hash, []) ->
    {bad_request, "binding not found"};
lookup(RoutingKey, Hash, [#binding{args = Args} | Rest]) ->
    case args_hash(Args) =:= Hash of
        true  -> {RoutingKey, Args};
        false -> lookup(RoutingKey, Hash, Rest)
    end.

%% URL-safe digest of a binding's arguments, used as the second token
%% of the props path component.
args_hash(Args) ->
    list_to_binary(rabbit_misc:base64url(erlang:md5(term_to_binary(Args)))).

unquote(Name) ->
    list_to_binary(mochiweb_util:unquote(Name)).

with_binding(ReqData, Context, Fun) ->
    case binding(ReqData) of
        {bad_request, Reason} ->
            rabbit_mgmt_util:bad_request(Reason, ReqData, Context);
        Binding ->
            Fun(Binding)
    end.

%% Run an (un)bind method over AMQP. queue.unbind and exchange.unbind
%% name their endpoints differently, so source/destination are also
%% aliased as exchange/queue in the proplist.
sync_resource(MethodName, ReqData, Context) ->
    with_binding(
      ReqData, Context,
      fun(Binding) ->
              Props0 = rabbit_mgmt_format:binding(Binding),
              Props = Props0 ++
                  [{exchange, proplists:get_value(source, Props0)},
                   {queue, proplists:get_value(destination, Props0)}],
              rabbit_mgmt_util:amqp_request(
                rabbit_mgmt_util:vhost(ReqData), ReqData, Context,
                rabbit_mgmt_util:props_to_method(MethodName, Props))
      end).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_bindings).

-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
-export([allowed_methods/2, post_is_create/2, create_path/2]).
-export([content_types_accepted/2, accept_content/2, resource_exists/2]).
-export([basic/1, augmented/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource for binding collections. Mode (supplied by the
%% dispatcher) selects the collection: all bindings, those of one
%% exchange or queue, or those between a particular source and
%% destination -- the only mode that accepts POST to create one.

init([Mode]) ->
    {ok, {Mode, #context{}}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

resource_exists(ReqData, {Mode, Context}) ->
    {case list_bindings(Mode, ReqData) of
         vhost_not_found -> false;
         _               -> true
     end, ReqData, {Mode, Context}}.

content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.

%% Only the source/destination collection supports POST.
allowed_methods(ReqData, {Mode, Context}) ->
    {case Mode of
         source_destination -> ['HEAD', 'GET', 'POST'];
         _                  -> ['HEAD', 'GET']
     end, ReqData, {Mode, Context}}.

post_is_create(ReqData, Context) ->
    {true, ReqData, Context}.

to_json(ReqData, {Mode, Context}) ->
    Bs = [rabbit_mgmt_format:binding(B) || B <- list_bindings(Mode, ReqData)],
    rabbit_mgmt_util:reply_list(
      rabbit_mgmt_util:filter_vhost(Bs, ReqData, Context),
      ["vhost", "source", "type", "destination",
       "routing_key", "properties_key"],
      ReqData, {Mode, Context}).

%% The real Location is only known after the bind method has run (it
%% embeds the args hash), so return a placeholder here and let
%% accept_content set the header itself.
create_path(ReqData, Context) ->
    {"dummy", ReqData, Context}.

accept_content(ReqData, {_Mode, Context}) ->
    Source = rabbit_mgmt_util:id(source, ReqData),
    Dest = rabbit_mgmt_util:id(destination, ReqData),
    DestType = rabbit_mgmt_util:id(dtype, ReqData),
    VHost = rabbit_mgmt_util:vhost(ReqData),
    {ok, Props} = rabbit_mgmt_util:decode(wrq:req_body(ReqData)),
    {Method, Key, Args} = method_key_args(DestType, Source, Dest, Props),
    Response = rabbit_mgmt_util:amqp_request(VHost, ReqData, Context, Method),
    case Response of
        {{halt, _}, _, _} = Res ->
            Res;
        %% NOTE(review): this clause re-matches the already-bound
        %% ReqData/Context, relying on amqp_request returning them
        %% unchanged on success -- which it does (it replies with the
        %% same terms it was given).
        {true, ReqData, Context2} ->
            Loc = rabbit_web_dispatch_util:relativise(
                    wrq:path(ReqData),
                    binary_to_list(
                      rabbit_mgmt_format:url(
                        "/api/bindings/~s/e/~s/~s/~s/~s",
                        [VHost, Source, DestType, Dest,
                         rabbit_mgmt_format:pack_binding_props(Key, Args)]))),
            ReqData2 = wrq:set_resp_header("Location", Loc, ReqData),
            {true, ReqData2, Context2}
    end.

is_authorized(ReqData, {Mode, Context}) ->
    {Res, RD2, C2} = rabbit_mgmt_util:is_authorized_vhost(ReqData, Context),
    {Res, RD2, {Mode, C2}}.

%%--------------------------------------------------------------------

%% All bindings, formatted but not vhost-filtered.
basic(ReqData) ->
    [rabbit_mgmt_format:binding(B) ||
        B <- list_bindings(all, ReqData)].

%% All bindings visible to the requesting user.
augmented(ReqData, Context) ->
    rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context).

%% Build the bind method for a queue ("q") or exchange ("e")
%% destination, returning the method plus its routing key and
%% arguments (both needed to build the Location header).
method_key_args(<<"q">>, Source, Dest, Props) ->
    M = #'queue.bind'{routing_key = K, arguments = A} =
        rabbit_mgmt_util:props_to_method(
          'queue.bind', Props,
          [], [{exchange, Source}, {queue, Dest}]),
    {M, K, A};

method_key_args(<<"e">>, Source, Dest, Props) ->
    M = #'exchange.bind'{routing_key = K, arguments = A} =
        rabbit_mgmt_util:props_to_method(
          'exchange.bind', Props,
          [], [{source, Source}, {destination, Dest}]),
    {M, K, A}.

%%--------------------------------------------------------------------

%% Enumerate the bindings belonging to each collection mode.
list_bindings(all, ReqData) ->
    rabbit_mgmt_util:all_or_one_vhost(ReqData,
                                      fun (VHost) ->
                                              rabbit_binding:list(VHost)
                                      end);
list_bindings(exchange_source, ReqData) ->
    rabbit_binding:list_for_source(r(exchange, exchange, ReqData));
list_bindings(exchange_destination, ReqData) ->
    rabbit_binding:list_for_destination(r(exchange, exchange, ReqData));
list_bindings(queue, ReqData) ->
    rabbit_binding:list_for_destination(r(queue, destination, ReqData));
list_bindings(source_destination, ReqData) ->
    DestType = rabbit_mgmt_util:destination_type(ReqData),
    rabbit_binding:list_for_source_and_destination(
      r(exchange, source, ReqData),
      r(DestType, destination, ReqData)).

%% Resource record for a path component in the request's vhost.
r(Type, Name, ReqData) ->
    rabbit_misc:r(rabbit_mgmt_util:vhost(ReqData), Type,
                  rabbit_mgmt_util:id(Name, ReqData)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_channel).

-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
-export([resource_exists/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource exposing a single channel's details.

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

%% 404 when the stats DB does not know this channel.
resource_exists(ReqData, Context) ->
    {channel(ReqData) =/= not_found, ReqData, Context}.

to_json(ReqData, Context) ->
    Stripped = rabbit_mgmt_format:strip_pids(channel(ReqData)),
    rabbit_mgmt_util:reply({struct, Stripped}, ReqData, Context).

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized_user(ReqData, Context, channel(ReqData)).

%%--------------------------------------------------------------------

%% Stats-DB record for the channel named in the URL, or 'not_found'.
channel(ReqData) ->
    Id = rabbit_mgmt_util:id(channel, ReqData),
    rabbit_mgmt_db:get_channel(Id, rabbit_mgmt_util:range(ReqData)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_channels).

-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
         augmented/2]).

-import(rabbit_misc, [pget/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource listing every channel visible to the user.

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

to_json(ReqData, Context) ->
    rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context).

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized(ReqData, Context).

%% All channels in the requested sample range, filtered down to what
%% this user / vhost may see.
augmented(ReqData, Context) ->
    Range = rabbit_mgmt_util:range(ReqData),
    Chs = rabbit_mgmt_db:get_all_channels(Range),
    rabbit_mgmt_util:filter_conn_ch_list(Chs, ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_cluster_name).

-export([init/1, resource_exists/2, to_json/2,
         content_types_provided/2, content_types_accepted/2,
         is_authorized/2, allowed_methods/2, accept_content/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource for reading (GET) and setting (PUT) the
%% cluster name.

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.

allowed_methods(ReqData, Context) ->
    {['HEAD', 'GET', 'PUT'], ReqData, Context}.

%% The cluster name always exists.
resource_exists(ReqData, Context) ->
    {true, ReqData, Context}.

to_json(ReqData, Context) ->
    rabbit_mgmt_util:reply([{name, rabbit_nodes:cluster_name()}],
                           ReqData, Context).

accept_content(ReqData, Context) ->
    rabbit_mgmt_util:with_decode(
      [name], ReqData, Context,
      fun ([NewName], _Doc) ->
              rabbit_nodes:set_cluster_name(NewName),
              {true, ReqData, Context}
      end).

%% Anyone with management access may read; only admins may write.
is_authorized(ReqData, Context) ->
    case wrq:method(ReqData) of
        'PUT' -> rabbit_mgmt_util:is_authorized_admin(ReqData, Context);
        _     -> rabbit_mgmt_util:is_authorized(ReqData, Context)
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_connection).

-export([init/1, resource_exists/2, to_json/2, content_types_provided/2,
         is_authorized/2, allowed_methods/2, delete_resource/2, conn/1]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource for a single connection: inspect it (GET) or
%% force-close it (DELETE).

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

allowed_methods(ReqData, Context) ->
    {['HEAD', 'GET', 'DELETE'], ReqData, Context}.

resource_exists(ReqData, Context) ->
    case conn(ReqData) of
        not_found -> {false, ReqData, Context};
        _Conn     -> {true, ReqData, Context}
    end.

to_json(ReqData, Context) ->
    rabbit_mgmt_util:reply(
      {struct, rabbit_mgmt_format:strip_pids(conn(ReqData))},
      ReqData, Context).

%% Close the connection, passing on a client-supplied X-Reason header
%% (or a default) as the close reason. Direct and network connections
%% are closed through their respective APIs.
delete_resource(ReqData, Context) ->
    Conn = conn(ReqData),
    Pid = proplists:get_value(pid, Conn),
    %% Header names are strings everywhere else in this plugin
    %% ("Location", "content-type", ...); the binary key
    %% <<"X-Reason">> was inconsistent and risked never matching, so
    %% the caller's reason would be silently dropped.
    Reason = case wrq:get_req_header("X-Reason", ReqData) of
                 undefined -> "Closed via management plugin";
                 V         -> V
             end,
    case proplists:get_value(type, Conn) of
        direct  -> amqp_direct_connection:server_close(Pid, 320, Reason);
        network -> rabbit_networking:close_connection(Pid, Reason)
    end,
    {true, ReqData, Context}.

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized_user(ReqData, Context, conn(ReqData)).

%%--------------------------------------------------------------------

%% Stats-DB record for the connection named in the URL, or
%% 'not_found'; uses ceiling-rounded sample ranges (see
%% rabbit_mgmt_util:range_ceil/1).
conn(ReqData) ->
    rabbit_mgmt_db:get_connection(rabbit_mgmt_util:id(connection, ReqData),
                                  rabbit_mgmt_util:range_ceil(ReqData)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_connection_channels).

-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
-export([resource_exists/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource listing the channels of one connection.

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

%% conn/1 reports a missing connection as 'not_found' (see
%% rabbit_mgmt_wm_connection:resource_exists/2). This clause used to
%% match 'error' instead, so an unknown connection slipped through as
%% "exists" and then crashed in to_json (500) instead of returning a
%% 404.
resource_exists(ReqData, Context) ->
    case rabbit_mgmt_wm_connection:conn(ReqData) of
        not_found -> {false, ReqData, Context};
        _Conn     -> {true, ReqData, Context}
    end.

to_json(ReqData, Context) ->
    Name = proplists:get_value(name, rabbit_mgmt_wm_connection:conn(ReqData)),
    Chs = rabbit_mgmt_db:get_all_channels(rabbit_mgmt_util:range(ReqData)),
    rabbit_mgmt_util:reply_list(
      [Ch || Ch <- rabbit_mgmt_util:filter_conn_ch_list(Chs, ReqData, Context),
             conn_name(Ch) =:= Name],
      ReqData, Context).

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized_user(
      ReqData, Context, rabbit_mgmt_wm_connection:conn(ReqData)).

%%--------------------------------------------------------------------

%% Name of the connection a channel proplist belongs to.
conn_name(Ch) ->
    proplists:get_value(name, proplists:get_value(connection_details, Ch)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
-module(rabbit_mgmt_wm_connections).

-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
         augmented/2]).

-import(rabbit_misc, [pget/2]).

-include("rabbit_mgmt.hrl").
-include_lib("webmachine/include/webmachine.hrl").
-include_lib("rabbit_common/include/rabbit.hrl").

%%--------------------------------------------------------------------

%% Webmachine resource listing every connection visible to the user.

init(_Config) -> {ok, #context{}}.

content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.

to_json(ReqData, Context) ->
    rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context).

is_authorized(ReqData, Context) ->
    rabbit_mgmt_util:is_authorized(ReqData, Context).

%% All connections in the (ceiling-rounded) sample range, filtered
%% down to what this user / vhost may see.
augmented(ReqData, Context) ->
    Range = rabbit_mgmt_util:range_ceil(ReqData),
    Conns = rabbit_mgmt_db:get_all_connections(Range),
    rabbit_mgmt_util:filter_conn_ch_list(Conns, ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_definitions).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([content_types_accepted/2, allowed_methods/2, accept_json/2]).
+-export([post_is_create/2, create_path/2, accept_multipart/2]).
+
+-export([apply_defs/3]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
+
%% Accept definitions either as raw JSON or as a browser form upload.
content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_json},
      {"multipart/form-data", accept_multipart}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'POST'], ReqData, Context}.
+
+post_is_create(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+create_path(ReqData, Context) ->
+ {"dummy", ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Xs = [X || X <- rabbit_mgmt_wm_exchanges:basic(ReqData),
+ export_exchange(X)],
+ Qs = [Q || Q <- rabbit_mgmt_wm_queues:basic(ReqData),
+ export_queue(Q)],
+ QNames = [{pget(name, Q), pget(vhost, Q)} || Q <- Qs],
+ Bs = [B || B <- rabbit_mgmt_wm_bindings:basic(ReqData),
+ export_binding(B, QNames)],
+ {ok, Vsn} = application:get_key(rabbit, vsn),
+ rabbit_mgmt_util:reply(
+ [{rabbit_version, list_to_binary(Vsn)}] ++
+ filter(
+ [{users, rabbit_mgmt_wm_users:users()},
+ {vhosts, rabbit_mgmt_wm_vhosts:basic()},
+ {permissions, rabbit_mgmt_wm_permissions:permissions()},
+ {parameters, rabbit_mgmt_wm_parameters:basic(ReqData)},
+ {policies, rabbit_mgmt_wm_policies:basic(ReqData)},
+ {queues, Qs},
+ {exchanges, Xs},
+ {bindings, Bs}]),
+ case wrq:get_qs_value("download", ReqData) of
+ undefined -> ReqData;
+ Filename -> wrq:set_resp_header(
+ "Content-Disposition",
+ "attachment; filename=" ++
+ mochiweb_util:unquote(Filename), ReqData)
+ end,
+ Context).
+
+accept_json(ReqData, Context) ->
+ accept(wrq:req_body(ReqData), ReqData, Context).
+
+accept_multipart(ReqData, Context) ->
+ Parts = webmachine_multipart:get_all_parts(
+ wrq:req_body(ReqData),
+ webmachine_multipart:find_boundary(ReqData)),
+ Redirect = get_part("redirect", Parts),
+ Json = get_part("file", Parts),
+ Resp = {Res, _, _} = accept(Json, ReqData, Context),
+ case Res of
+ true ->
+ ReqData1 =
+ case Redirect of
+ unknown -> ReqData;
+ _ -> rabbit_mgmt_util:redirect(Redirect, ReqData)
+ end,
+ {true, ReqData1, Context};
+ _ ->
+ Resp
+ end.
+
+is_authorized(ReqData, Context) ->
+ case wrq:get_qs_value("auth", ReqData) of
+ undefined -> rabbit_mgmt_util:is_authorized_admin(ReqData, Context);
+ Auth -> is_authorized_qs(ReqData, Context, Auth)
+ end.
+
+%% Support for the web UI - it can't add a normal "authorization"
+%% header for a file download.
+is_authorized_qs(ReqData, Context, Auth) ->
+ case rabbit_web_dispatch_util:parse_auth_header("Basic " ++ Auth) of
+ [Username, Password] -> rabbit_mgmt_util:is_authorized_admin(
+ ReqData, Context, Username, Password);
+ _ -> {?AUTH_REALM, ReqData, Context}
+ end.
+
+%%--------------------------------------------------------------------
+
+accept(Body, ReqData, Context) ->
+ apply_defs(Body, fun() -> {true, ReqData, Context} end,
+ fun(E) -> rabbit_mgmt_util:bad_request(E, ReqData, Context) end).
+
+apply_defs(Body, SuccessFun, ErrorFun) ->
+ case rabbit_mgmt_util:decode([], Body) of
+ {error, E} ->
+ ErrorFun(E);
+ {ok, _, All} ->
+ try
+ for_all(users, All, fun add_user/1),
+ for_all(vhosts, All, fun add_vhost/1),
+ for_all(permissions, All, fun add_permission/1),
+ for_all(parameters, All, fun add_parameter/1),
+ for_all(policies, All, fun add_policy/1),
+ for_all(queues, All, fun add_queue/1),
+ for_all(exchanges, All, fun add_exchange/1),
+ for_all(bindings, All, fun add_binding/1),
+ SuccessFun()
+ catch {error, E} -> ErrorFun(format(E));
+ exit:E -> ErrorFun(format(E))
+ end
+ end.
+
+format(#amqp_error{name = Name, explanation = Explanation}) ->
+ list_to_binary(rabbit_misc:format("~s: ~s", [Name, Explanation]));
+format(E) ->
+ list_to_binary(rabbit_misc:format("~p", [E])).
+
+get_part(Name, Parts) ->
+ %% TODO any reason not to use lists:keyfind instead?
+ Filtered = [Value || {N, _Meta, Value} <- Parts, N == Name],
+ case Filtered of
+ [] -> unknown;
+ [F] -> F
+ end.
+
+export_queue(Queue) ->
+ pget(owner_pid, Queue) == none.
+
+export_binding(Binding, Qs) ->
+ Src = pget(source, Binding),
+ Dest = pget(destination, Binding),
+ DestType = pget(destination_type, Binding),
+ VHost = pget(vhost, Binding),
+ Src =/= <<"">>
+ andalso
+ ( (DestType =:= queue andalso lists:member({Dest, VHost}, Qs))
+ orelse (DestType =:= exchange andalso Dest =/= <<"">>) ).
+
+export_exchange(Exchange) ->
+ export_name(pget(name, Exchange)).
+
+export_name(<<>>) -> false;
+export_name(<<"amq.", _/binary>>) -> false;
+export_name(_Name) -> true.
+
+%%--------------------------------------------------------------------
+
+rw_state() ->
+ [{users, [name, password_hash, tags]},
+ {vhosts, [name]},
+ {permissions, [user, vhost, configure, write, read]},
+ {parameters, [vhost, component, name, value]},
+ {policies, [vhost, name, pattern, definition, priority, 'apply-to']},
+ {queues, [name, vhost, durable, auto_delete, arguments]},
+ {exchanges, [name, vhost, type, durable, auto_delete, internal,
+ arguments]},
+ {bindings, [source, vhost, destination, destination_type, routing_key,
+ arguments]}].
+
+filter(Items) ->
+ [filter_items(N, V, proplists:get_value(N, rw_state())) || {N, V} <- Items].
+
+filter_items(Name, List, Allowed) ->
+ {Name, [filter_item(I, Allowed) || I <- List]}.
+
+filter_item(Item, Allowed) ->
+ [{K, Fact} || {K, Fact} <- Item, lists:member(K, Allowed)].
+
+%%--------------------------------------------------------------------
+
+for_all(Name, All, Fun) ->
+ case pget(Name, All) of
+ undefined -> ok;
+ List -> [Fun([{atomise_name(K), V} || {K, V} <- I]) ||
+ {struct, I} <- List]
+ end.
+
+atomise_name(N) -> list_to_atom(binary_to_list(N)).
+
+%%--------------------------------------------------------------------
+
+add_parameter(Param) ->
+ VHost = pget(vhost, Param),
+ Comp = pget(component, Param),
+ Key = pget(name, Param),
+ Term = rabbit_misc:json_to_term(pget(value, Param)),
+ case rabbit_runtime_parameters:set(VHost, Comp, Key, Term, none) of
+ ok -> ok;
+ {error_string, E} -> S = rabbit_misc:format(" (~s/~s/~s)",
+ [VHost, Comp, Key]),
+ exit(list_to_binary(E ++ S))
+ end.
+
+add_policy(Param) ->
+ VHost = pget(vhost, Param),
+ Key = pget(name, Param),
+ case rabbit_policy:set(
+ VHost, Key, pget(pattern, Param),
+ rabbit_misc:json_to_term(pget(definition, Param)),
+ pget(priority, Param),
+ pget('apply-to', Param, <<"all">>)) of
+ ok -> ok;
+ {error_string, E} -> S = rabbit_misc:format(" (~s/~s)", [VHost, Key]),
+ exit(list_to_binary(E ++ S))
+ end.
+
+add_user(User) ->
+ rabbit_mgmt_wm_user:put_user(User).
+
+add_vhost(VHost) ->
+ VHostName = pget(name, VHost),
+ VHostTrace = pget(tracing, VHost),
+ rabbit_mgmt_wm_vhost:put_vhost(VHostName, VHostTrace).
+
+add_permission(Permission) ->
+ rabbit_auth_backend_internal:set_permissions(pget(user, Permission),
+ pget(vhost, Permission),
+ pget(configure, Permission),
+ pget(write, Permission),
+ pget(read, Permission)).
+
+add_queue(Queue) ->
+ rabbit_amqqueue:declare(r(queue, Queue),
+ pget(durable, Queue),
+ pget(auto_delete, Queue),
+ rabbit_mgmt_util:args(pget(arguments, Queue)),
+ none).
+
+add_exchange(Exchange) ->
+ Internal = case pget(internal, Exchange) of
+ undefined -> false; %% =< 2.2.0
+ I -> I
+ end,
+ rabbit_exchange:declare(r(exchange, Exchange),
+ rabbit_exchange:check_type(pget(type, Exchange)),
+ pget(durable, Exchange),
+ pget(auto_delete, Exchange),
+ Internal,
+ rabbit_mgmt_util:args(pget(arguments, Exchange))).
+
+add_binding(Binding) ->
+ DestType = list_to_atom(binary_to_list(pget(destination_type, Binding))),
+ rabbit_binding:add(
+ #binding{source = r(exchange, source, Binding),
+ destination = r(DestType, destination, Binding),
+ key = pget(routing_key, Binding),
+ args = rabbit_mgmt_util:args(pget(arguments, Binding))}).
+
+r(Type, Props) -> r(Type, name, Props).
+
+r(Type, Name, Props) ->
+ rabbit_misc:r(pget(vhost, Props), Type, pget(Name, Props)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for one exchange:
+%% HEAD/GET/PUT/DELETE /api/exchanges/<vhost>/<name>.
+-module(rabbit_mgmt_wm_exchange).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, exchange/1, exchange/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+%% The resource exists when both the vhost and the exchange do;
+%% exchange/1 folds both lookups into a single not_found.
+resource_exists(ReqData, Context) ->
+ {case exchange(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+%% GET: return the exchange augmented with full statistics for the
+%% requested sample range. The [X] match relies on resource_exists/2
+%% having already established that the exchange is there.
+to_json(ReqData, Context) ->
+ [X] = rabbit_mgmt_db:augment_exchanges(
+ [exchange(ReqData)], rabbit_mgmt_util:range(ReqData), full),
+ rabbit_mgmt_util:reply(X, ReqData, Context).
+
+%% PUT: translate the JSON body into an exchange.declare AMQP method,
+%% parsing the boolean fields explicitly.
+accept_content(ReqData, Context) ->
+ rabbit_mgmt_util:http_to_amqp(
+ 'exchange.declare', ReqData, Context,
+ [{fun rabbit_mgmt_util:parse_bool/1, [durable, auto_delete, internal]}],
+ [{exchange, rabbit_mgmt_util:id(exchange, ReqData)}]).
+
+%% DELETE: issue exchange.delete over an AMQP channel on the vhost.
+delete_resource(ReqData, Context) ->
+ rabbit_mgmt_util:amqp_request(
+ rabbit_mgmt_util:vhost(ReqData), ReqData, Context,
+ #'exchange.delete'{ exchange = id(ReqData) }).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Look up the exchange named in the request; returns not_found when
+%% either the vhost or the exchange is missing.
+exchange(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> exchange(VHost, id(ReqData))
+ end.
+
+%% Look up exchange XName in VHost and format it for the API, or return
+%% not_found. Also called by sibling resources (e.g. the publish one).
+exchange(VHost, XName) ->
+ Name = rabbit_misc:r(VHost, exchange, XName),
+ case rabbit_exchange:lookup(Name) of
+ {ok, X} -> rabbit_mgmt_format:exchange(
+ rabbit_exchange:info(X));
+ {error, not_found} -> not_found
+ end.
+
+%% The exchange name extracted from the request path.
+id(ReqData) ->
+ rabbit_mgmt_util:id(exchange, ReqData).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource implementing
+%% POST /api/exchanges/<vhost>/<name>/publish: publishes one message
+%% over a temporary channel and reports whether the broker routed it.
+-module(rabbit_mgmt_wm_exchange_publish).
+
+-export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
+         allowed_methods/2, process_post/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
+allowed_methods(ReqData, Context) ->
+    {['POST'], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+    {case rabbit_mgmt_wm_exchange:exchange(ReqData) of
+         not_found -> false;
+         _         -> true
+     end, ReqData, Context}.
+
+post_is_create(ReqData, Context) ->
+    {false, ReqData, Context}.
+
+process_post(ReqData, Context) ->
+    rabbit_mgmt_util:post_respond(do_it(ReqData, Context)).
+
+%% Publish the decoded message as mandatory with publisher confirms
+%% enabled, then wait for the outcome:
+%%   - basic.return followed by basic.ack: the broker could not route
+%%     the message (a confirm always follows a return, hence the inner
+%%     receive);
+%%   - basic.ack alone: routed to at least one queue;
+%%   - 'DOWN': the monitored channel died, e.g. on a channel error.
+do_it(ReqData, Context) ->
+    VHost = rabbit_mgmt_util:vhost(ReqData),
+    X = rabbit_mgmt_util:id(exchange, ReqData),
+    rabbit_mgmt_util:with_decode(
+      [routing_key, properties, payload, payload_encoding], ReqData, Context,
+      fun ([RoutingKey, Props0, Payload0, Enc], _) when is_binary(Payload0) ->
+              rabbit_mgmt_util:with_channel(
+                VHost, ReqData, Context,
+                fun (Ch) ->
+                        MRef = erlang:monitor(process, Ch),
+                        amqp_channel:register_confirm_handler(Ch, self()),
+                        amqp_channel:register_return_handler(Ch, self()),
+                        amqp_channel:call(Ch, #'confirm.select'{}),
+                        Props = rabbit_mgmt_format:to_basic_properties(Props0),
+                        Payload = decode(Payload0, Enc),
+                        amqp_channel:cast(Ch, #'basic.publish'{
+                                            exchange    = X,
+                                            routing_key = RoutingKey,
+                                            mandatory   = true},
+                                          #amqp_msg{props   = Props,
+                                                    payload = Payload}),
+                        receive
+                            {#'basic.return'{}, _} ->
+                                receive
+                                    #'basic.ack'{} -> ok
+                                end,
+                                good(MRef, false, ReqData, Context);
+                            #'basic.ack'{} ->
+                                good(MRef, true, ReqData, Context);
+                            {'DOWN', _, _, _, Err} ->
+                                bad(Err, ReqData, Context)
+                        end
+                end);
+          ([_RoutingKey, _Props, _Payload, _Enc], _) ->
+              throw({error, payload_not_string})
+      end).
+
+good(MRef, Routed, ReqData, Context) ->
+    %% Use [flush] so that a 'DOWN' message which raced into the mailbox
+    %% just before demonitoring is removed; a plain demonitor/1 left it
+    %% behind where it could confuse a later selective receive in this
+    %% process.
+    erlang:demonitor(MRef, [flush]),
+    rabbit_mgmt_util:reply([{routed, Routed}], ReqData, Context).
+
+%% Translate a channel/connection shutdown into an HTTP 400 carrying the
+%% broker's error code and text. Any other exit reason falls through and
+%% crashes the request process (function_clause), surfacing as a 500.
+bad({shutdown, {connection_closing,
+                {server_initiated_close, Code, Reason}}}, ReqData, Context) ->
+    rabbit_mgmt_util:bad_request_exception(Code, Reason, ReqData, Context);
+
+bad({shutdown, {server_initiated_close, Code, Reason}}, ReqData, Context) ->
+    rabbit_mgmt_util:bad_request_exception(Code, Reason, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% The payload arrives either verbatim ("string") or base64-encoded.
+decode(Payload, <<"string">>) -> Payload;
+decode(Payload, <<"base64">>) -> rabbit_mgmt_util:b64decode_or_throw(Payload);
+decode(_Payload, Enc)         -> throw({error, {unsupported_encoding, Enc}}).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing exchanges: GET /api/exchanges and
+%% GET /api/exchanges/<vhost>.
+-module(rabbit_mgmt_wm_exchanges).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1, augmented/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+%% Exists unless the path names a vhost that does not exist.
+resource_exists(ReqData, Context) ->
+ {case exchanges0(ReqData) of
+ vhost_not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Restrict the exchange list to vhosts visible to the requesting user,
+%% then attach basic statistics for the requested sample range.
+augmented(ReqData, Context) ->
+ rabbit_mgmt_db:augment_exchanges(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ rabbit_mgmt_util:range(ReqData), basic).
+
+%% Unaugmented, API-formatted exchange list; also used by the
+%% definitions export.
+basic(ReqData) ->
+ [rabbit_mgmt_format:exchange(X) || X <- exchanges0(ReqData)].
+
+%% Raw exchange info, for one vhost or all of them.
+exchanges0(ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_exchange:info_all/1).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing the web UI extensions contributed by
+%% enabled management plugins (GET /api/extensions).
+-module(rabbit_mgmt_wm_extensions).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    Provided = [{"application/json", to_json}],
+    {Provided, ReqData, Context}.
+
+%% Ask every dispatcher module for its web_ui() descriptor and reply
+%% with the collected list.
+to_json(ReqData, Context) ->
+    Descriptors = lists:map(fun(Mod) -> Mod:web_ui() end,
+                            rabbit_mgmt_dispatcher:modules()),
+    rabbit_mgmt_util:reply(Descriptors, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized(ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for a single cluster node (GET /api/nodes/<name>),
+%% optionally augmented with a memory breakdown when ?memory=true.
+-module(rabbit_mgmt_wm_node).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+    {case node0(ReqData) of
+         not_found -> false;
+         _         -> true
+     end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+    rabbit_mgmt_util:reply(node0(ReqData), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Look the requested node up among the cluster nodes. Known node names
+%% already exist as atoms, so list_to_existing_atom/1 suffices; the
+%% previous list_to_atom/1 let every probe of this URL intern a fresh
+%% atom (atoms are never garbage-collected), an easy way to exhaust the
+%% atom table. A name that is not an existing atom cannot be a cluster
+%% node, so it simply yields not_found.
+node0(ReqData) ->
+    Name = try
+               list_to_existing_atom(
+                 binary_to_list(rabbit_mgmt_util:id(node, ReqData)))
+           catch error:badarg -> no_such_node
+           end,
+    case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(),
+               proplists:get_value(name, N) == Name] of
+        []     -> not_found;
+        [Node] -> augment(ReqData, Name, Node)
+    end.
+
+%% With ?memory=true, fetch a memory breakdown from the target node via
+%% rpc; a failed rpc yields not_available rather than an error.
+augment(ReqData, Name, Node) ->
+    case wrq:get_qs_value("memory", ReqData) of
+        "true" -> Mem = case rpc:call(Name, rabbit_vm, memory, [], infinity) of
+                            {badrpc, _} -> not_available;
+                            Memory      -> Memory
+                        end,
+                  [{memory, Mem} | Node];
+        _      -> Node
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing all cluster nodes (GET /api/nodes).
+-module(rabbit_mgmt_wm_nodes).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([all_nodes/0]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(all_nodes(), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Build one proplist per cluster node, tagged with its cluster type and
+%% whether it is currently running, then let the stats DB augment each
+%% entry. The 'nodes' entry of rabbit_mnesia:status() is a proplist of
+%% {Type, [Node]} pairs, so the node types are exactly its keys.
+all_nodes() ->
+ S = rabbit_mnesia:status(),
+ Nodes = proplists:get_value(nodes, S),
+ Types = proplists:get_keys(Nodes),
+ Running = proplists:get_value(running_nodes, S),
+ rabbit_mgmt_db:augment_nodes(
+ [[{name, Node}, {type, Type}, {running, lists:member(Node, Running)}] ||
+ Type <- Types, Node <- proplists:get_value(Type, Nodes)]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for GET /api/overview: version information,
+%% message/queue totals and (for monitor users) cluster-wide details.
+-module(rabbit_mgmt_wm_overview).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+%% Build the overview document. Monitor users get the global picture
+%% plus node/listener/context details; other users get an overview
+%% scoped to what they may see, with no node-level information.
+to_json(ReqData, Context = #context{user = User = #user{tags = Tags}}) ->
+ {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+ %% NB: this duplicates what's in /nodes but we want a global idea
+ %% of this. And /nodes is not accessible to non-monitor users.
+ ExchangeTypes = rabbit_mgmt_external_stats:list_registry_plugins(exchange),
+ Overview0 = [{management_version, version(rabbitmq_management)},
+ {statistics_level, StatsLevel},
+ {exchange_types, ExchangeTypes},
+ {rabbitmq_version, version(rabbit)},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {erlang_version, erl_version(otp_release)},
+ {erlang_full_version, erl_version(system_version)}],
+ Range = rabbit_mgmt_util:range(ReqData),
+ Overview =
+ case rabbit_mgmt_util:is_monitor(Tags) of
+ true ->
+ Overview0 ++
+ [{K, {struct, V}} ||
+ {K, V} <- rabbit_mgmt_db:get_overview(Range)] ++
+ [{node, node()},
+ {statistics_db_node, stats_db_node()},
+ {listeners, listeners()},
+ {contexts, rabbit_web_dispatch_contexts()}];
+ _ ->
+ Overview0 ++
+ [{K, {struct, V}} ||
+ {K, V} <- rabbit_mgmt_db:get_overview(User, Range)]
+ end,
+ rabbit_mgmt_util:reply(Overview, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% The node currently hosting the globally-registered stats DB, or
+%% not_running if none is registered.
+stats_db_node() ->
+ case global:whereis_name(rabbit_mgmt_db) of
+ undefined -> not_running;
+ Pid -> node(Pid)
+ end.
+
+%% Version string of a loaded application, as a binary.
+version(App) ->
+ {ok, V} = application:get_key(App, vsn),
+ list_to_binary(V).
+
+%% All active protocol listeners, formatted and sorted for the UI.
+listeners() ->
+ rabbit_mgmt_util:sort_list(
+ [rabbit_mgmt_format:listener(L)
+ || L <- rabbit_networking:active_listeners()],
+ ["protocol", "port", "node"] ).
+
+%%--------------------------------------------------------------------
+
+%% HTTP(ish) contexts registered with rabbit_web_dispatch on every
+%% cluster node, each tagged with the node that hosts it.
+rabbit_web_dispatch_contexts() ->
+ rabbit_mgmt_util:sort_list(
+ lists:append(
+ [rabbit_web_dispatch_contexts(N) || N <- rabbit_mgmt_wm_nodes:all_nodes()]),
+ ["description", "port", "node"]).
+
+rabbit_web_dispatch_contexts(N) ->
+ [[{node, pget(name, N)} | C] || C <- pget(contexts, N, [])].
+
+%% An erlang:system_info/1 value with trailing newlines stripped, as a
+%% binary.
+erl_version(K) ->
+ list_to_binary(string:strip(erlang:system_info(K), both, $\n)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for a single runtime parameter:
+%% GET/PUT/DELETE /api/parameters/<component>/<vhost>/<name>.
+-module(rabbit_mgmt_wm_parameter).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case parameter(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(parameter(ReqData)),
+ ReqData, Context).
+
+%% PUT: set the parameter from the "value" field of the JSON body. The
+%% acting user is passed through so parameter validators can apply
+%% per-user checks; a validation failure becomes an HTTP 400.
+accept_content(ReqData, Context = #context{user = User}) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found ->
+ rabbit_mgmt_util:not_found(vhost_not_found, ReqData, Context);
+ VHost ->
+ rabbit_mgmt_util:with_decode(
+ [value], ReqData, Context,
+ fun([Value], _) ->
+ case rabbit_runtime_parameters:set(
+ VHost, component(ReqData), name(ReqData),
+ rabbit_misc:json_to_term(Value), User) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(Reason), ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context) ->
+ ok = rabbit_runtime_parameters:clear(
+ rabbit_mgmt_util:vhost(ReqData), component(ReqData), name(ReqData)),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Look up the parameter addressed by the request path; not_found when
+%% it (or its vhost) does not exist.
+parameter(ReqData) ->
+ rabbit_runtime_parameters:lookup(
+ rabbit_mgmt_util:vhost(ReqData), component(ReqData), name(ReqData)).
+
+component(ReqData) -> rabbit_mgmt_util:id(component, ReqData).
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing runtime parameters: all of them, those of
+%% one component, or those of one component in one vhost, depending on
+%% which path segments are present.
+-module(rabbit_mgmt_wm_parameters).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case basic(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% List the parameters selected by the path, formatted for the API.
+%% 'none' from id/vhost means the path segment was absent (list more
+%% broadly); 'not_found' means a vhost was named but does not exist.
+%% Also used by the definitions export.
+basic(ReqData) ->
+ Raw = case rabbit_mgmt_util:id(component, ReqData) of
+ none -> rabbit_runtime_parameters:list();
+ Name -> case rabbit_mgmt_util:vhost(ReqData) of
+ none -> rabbit_runtime_parameters:list_component(
+ Name);
+ not_found -> not_found;
+ VHost -> rabbit_runtime_parameters:list(
+ VHost, Name)
+ end
+ end,
+ case Raw of
+ not_found -> not_found;
+ _ -> [rabbit_mgmt_format:parameter(P) || P <- Raw]
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for one user's permissions on one vhost:
+%% GET/PUT/DELETE /api/permissions/<vhost>/<user>.
+-module(rabbit_mgmt_wm_permission).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+%% 'none' means user and vhost exist but no permission is set;
+%% 'not_found' means the user or the vhost itself is missing. Either
+%% way the resource does not exist for GET purposes.
+resource_exists(ReqData, Context) ->
+ {case perms(ReqData) of
+ none -> false;
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(perms(ReqData), ReqData, Context).
+
+%% PUT: set the configure/write/read regexps from the JSON body, but
+%% only once perms/1 has confirmed that both user and vhost exist.
+accept_content(ReqData, Context) ->
+ case perms(ReqData) of
+ not_found ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found,
+ ReqData, Context);
+ _ ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ rabbit_mgmt_util:with_decode(
+ [configure, write, read], ReqData, Context,
+ fun([Conf, Write, Read], _) ->
+ rabbit_auth_backend_internal:set_permissions(
+ User, VHost, Conf, Write, Read),
+ {true, ReqData, Context}
+ end)
+ end.
+
+delete_resource(ReqData, Context) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ rabbit_auth_backend_internal:clear_permissions(User, VHost),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Fetch the permission set for the user/vhost pair in the path.
+%% Returns the permission proplist, 'none' when the pair exists but has
+%% no permissions, or 'not_found' when the user or vhost is unknown.
+perms(ReqData) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ case rabbit_auth_backend_internal:lookup_user(User) of
+ {ok, _} ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found ->
+ not_found;
+ VHost ->
+ Perms =
+ rabbit_auth_backend_internal:list_user_vhost_permissions(
+ User, VHost),
+ case Perms of
+ [Rest] -> [{user, User},
+ {vhost, VHost} | Rest];
+ [] -> none
+ end
+ end;
+ {error, _} ->
+ not_found
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing every permission in the broker.
+%% Read-only; administrator access required.
+-module(rabbit_mgmt_wm_permissions).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([permissions/0]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+%% Each request starts with a fresh, empty context record.
+init(_Config) ->
+    {ok, #context{}}.
+
+%% JSON is the only representation served.
+content_types_provided(ReqData, Context) ->
+    Provided = [{"application/json", to_json}],
+    {Provided, ReqData, Context}.
+
+%% Render the full permission list, sortable on vhost and user.
+to_json(ReqData, Context) ->
+    Perms = permissions(),
+    rabbit_mgmt_util:reply_list(Perms, ["vhost", "user"],
+                                ReqData, Context).
+
+%% Only administrators may inspect the permission list.
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% All permissions known to the internal auth backend.
+permissions() ->
+    rabbit_auth_backend_internal:list_permissions().
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing one user's permissions across all
+%% vhosts.  Read-only; administrator access required.
+-module(rabbit_mgmt_wm_permissions_user).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) ->
+    {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+%% Prefix each permission row with the user named in the URL, then
+%% reply with the list (sortable on vhost and user).
+to_json(ReqData, Context) ->
+    Username = rabbit_mgmt_util:id(user, ReqData),
+    Rows = [[{user, Username} | P] ||
+               P <- rabbit_auth_backend_internal:list_user_permissions(
+                      Username)],
+    rabbit_mgmt_util:reply_list(Rows, ["vhost", "user"],
+                                ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing all permissions granted on one vhost.
+%% Read-only; administrator access required.
+-module(rabbit_mgmt_wm_permissions_vhost).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) ->
+    {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+%% Prefix each permission row with the vhost named in the URL, then
+%% reply with the list (sortable on vhost and user).
+to_json(ReqData, Context) ->
+    VHostName = rabbit_mgmt_util:id(vhost, ReqData),
+    Rows = [[{vhost, VHostName} | P] ||
+               P <- rabbit_auth_backend_internal:list_vhost_permissions(
+                      VHostName)],
+    rabbit_mgmt_util:reply_list(Rows, ["vhost", "user"],
+                                ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing policies, either broker-wide or for a
+%% single vhost when one appears in the URL.
+-module(rabbit_mgmt_wm_policies).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) ->
+    {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+%% 404 only when a vhost was named in the URL and does not exist.
+resource_exists(ReqData, Context) ->
+    Exists = basic(ReqData) =/= not_found,
+    {Exists, ReqData, Context}.
+
+%% Reply with the policies the requester may see, sortable on priority.
+to_json(ReqData, Context) ->
+    Visible = rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+    rabbit_mgmt_util:reply_list(Visible, ["priority"], ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Policies for the URL's vhost ('none' in the URL means all vhosts);
+%% 'not_found' when the named vhost does not exist.
+basic(ReqData) ->
+    policies_for(rabbit_mgmt_util:vhost(ReqData)).
+
+policies_for(not_found) -> not_found;
+policies_for(none)      -> rabbit_policy:list();
+policies_for(VHost)     -> rabbit_policy:list(VHost).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for a single named policy on a vhost.
+%% Supports HEAD/GET/PUT/DELETE.
+-module(rabbit_mgmt_wm_policy).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+
+%% NOTE(review): pget/2 appears unused in this module - confirm
+%% before removing the import.
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+%% Webmachine init: every request starts with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+%% Exists iff rabbit_policy:lookup/2 finds the vhost/name pair.
+resource_exists(ReqData, Context) ->
+ {case policy(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(policy(ReqData), ReqData, Context).
+
+%% PUT: 'pattern' and 'definition' are mandatory in the body;
+%% 'priority' and 'apply-to' are optional (undefined when absent).
+%% The definition arrives as JSON text and is converted to an Erlang
+%% term before being stored.  Validation failures from
+%% rabbit_policy:set/6 come back as {error_string, Reason} and are
+%% surfaced to the client as a 400.
+accept_content(ReqData, Context) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found ->
+ rabbit_mgmt_util:not_found(vhost_not_found, ReqData, Context);
+ VHost ->
+ rabbit_mgmt_util:with_decode(
+ [pattern, definition], ReqData, Context,
+ fun([Pattern, Definition], Body) ->
+ case rabbit_policy:set(
+ VHost, name(ReqData), Pattern,
+ rabbit_misc:json_to_term(Definition),
+ proplists:get_value(priority, Body),
+ proplists:get_value('apply-to', Body)) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(Reason), ReqData, Context)
+ end
+ end)
+ end.
+
+%% DELETE: the 'ok' match asserts deletion succeeded (crash -> 500).
+delete_resource(ReqData, Context) ->
+ ok = rabbit_policy:delete(
+ rabbit_mgmt_util:vhost(ReqData), name(ReqData)),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% The stored policy for the URL's vhost/name, or 'not_found'.
+policy(ReqData) ->
+ rabbit_policy:lookup(
+ rabbit_mgmt_util:vhost(ReqData), name(ReqData)).
+
+%% The policy name from the URL.
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for a single queue.  Supports HEAD/GET/PUT/
+%% DELETE; queue/1 and queue/2 are exported for sibling resources
+%% (e.g. purge and actions) that need the same lookup.
+-module(rabbit_mgmt_wm_queue).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, queue/1, queue/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+%% Webmachine init: every request starts with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case queue(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+%% GET: augment the single queue with full stats for the requested
+%% range, then strip internal pids before replying.  The [Q] match is
+%% safe because exactly one queue is passed to augment_queues/3.
+to_json(ReqData, Context) ->
+ [Q] = rabbit_mgmt_db:augment_queues(
+ [queue(ReqData)], rabbit_mgmt_util:range_ceil(ReqData), full),
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:strip_pids(Q), ReqData, Context).
+
+%% PUT: translate the JSON body into a queue.declare, parsing the
+%% durable/auto_delete fields as booleans and forcing the queue name
+%% to the one in the URL.
+accept_content(ReqData, Context) ->
+ rabbit_mgmt_util:http_to_amqp(
+ 'queue.declare', ReqData, Context,
+ [{fun rabbit_mgmt_util:parse_bool/1, [durable, auto_delete]}],
+ [{queue, rabbit_mgmt_util:id(queue, ReqData)}]).
+
+%% DELETE: issue queue.delete over AMQP on the queue's vhost.
+delete_resource(ReqData, Context) ->
+ rabbit_mgmt_util:amqp_request(
+ rabbit_mgmt_util:vhost(ReqData),
+ ReqData, Context,
+ #'queue.delete'{ queue = rabbit_mgmt_util:id(queue, ReqData) }).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Formatted queue record for the URL's vhost/queue, or 'not_found'
+%% when either the vhost or the queue does not exist.
+queue(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> queue(VHost, rabbit_mgmt_util:id(queue, ReqData))
+ end.
+
+
+%% Look up QName in VHost and format it for the API.
+queue(VHost, QName) ->
+ Name = rabbit_misc:r(VHost, queue, QName),
+ case rabbit_amqqueue:lookup(Name) of
+ {ok, Q} -> rabbit_mgmt_format:queue(Q);
+ {error, not_found} -> not_found
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2012 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for POSTed actions on a queue (currently
+%% mirror-sync control): body must carry an 'action' field.
+-module(rabbit_mgmt_wm_queue_actions).
+
+-export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
+ allowed_methods/2, process_post/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+%% Webmachine init: every request starts with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+allowed_methods(ReqData, Context) ->
+ {['POST'], ReqData, Context}.
+
+%% 404 unless the target queue exists (reuses the single-queue lookup).
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_queue:queue(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+%% The POST performs an action on an existing resource; it creates
+%% nothing.
+post_is_create(ReqData, Context) ->
+ {false, ReqData, Context}.
+
+process_post(ReqData, Context) ->
+ rabbit_mgmt_util:post_respond(do_it(ReqData, Context)).
+
+%% Decode the mandatory 'action' field and dispatch it against the
+%% live queue record (rabbit_amqqueue:with/2 handles the lookup).
+do_it(ReqData, Context) ->
+ VHost = rabbit_mgmt_util:vhost(ReqData),
+ QName = rabbit_mgmt_util:id(queue, ReqData),
+ rabbit_mgmt_util:with_decode(
+ [action], ReqData, Context,
+ fun([Action], _Body) ->
+ rabbit_amqqueue:with(
+ rabbit_misc:r(VHost, queue, QName),
+ fun(Q) -> action(Action, Q, ReqData, Context) end)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% "sync" is spawned off unlinked so the HTTP request returns at once
+%% rather than blocking for the duration of the mirror sync; any
+%% crash in the sync is therefore not reported to the client.
+action(<<"sync">>, #amqqueue{pid = QPid}, ReqData, Context) ->
+ spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end),
+ {true, ReqData, Context};
+
+%% "cancel_sync" is quick, so it runs synchronously.
+action(<<"cancel_sync">>, #amqqueue{pid = QPid}, ReqData, Context) ->
+ rabbit_amqqueue:cancel_sync_mirrors(QPid),
+ {true, ReqData, Context};
+
+%% Anything else is a client error.
+action(Else, _Q, ReqData, Context) ->
+ rabbit_mgmt_util:bad_request({unknown, Else}, ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource implementing "get messages" on a queue via
+%% POST.  A diagnostic convenience built on basic.get; not meant for
+%% high-volume consumption.
+-module(rabbit_mgmt_wm_queue_get).
+
+-export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
+         allowed_methods/2, process_post/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+%% Webmachine init: every request starts with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+allowed_methods(ReqData, Context) ->
+    {['POST'], ReqData, Context}.
+
+%% 404 unless the target queue exists in the target vhost.
+resource_exists(ReqData, Context) ->
+    {case rabbit_mgmt_wm_queue:queue(ReqData) of
+         not_found -> false;
+         _         -> true
+     end, ReqData, Context}.
+
+%% The POST performs an action on an existing resource; it creates
+%% nothing.
+post_is_create(ReqData, Context) ->
+    {false, ReqData, Context}.
+
+process_post(ReqData, Context) ->
+    rabbit_mgmt_util:post_respond(do_it(ReqData, Context)).
+
+%% Decode the body (requeue, count and encoding are mandatory;
+%% truncate is optional), open a channel on the vhost and reply with
+%% up to Count messages fetched via basic.get.
+do_it(ReqData, Context) ->
+    VHost = rabbit_mgmt_util:vhost(ReqData),
+    Q = rabbit_mgmt_util:id(queue, ReqData),
+    rabbit_mgmt_util:with_decode(
+      [requeue, count, encoding], ReqData, Context,
+      fun([RequeueBin, CountBin, EncBin], Body) ->
+              rabbit_mgmt_util:with_channel(
+                VHost, ReqData, Context,
+                fun (Ch) ->
+                        %% requeue=true maps to no_ack=false and
+                        %% vice versa.
+                        NoAck = not rabbit_mgmt_util:parse_bool(RequeueBin),
+                        Count = rabbit_mgmt_util:parse_int(CountBin),
+                        Enc = case EncBin of
+                                  <<"auto">>   -> auto;
+                                  <<"base64">> -> base64;
+                                  _            -> throw({error,
+                                                         {bad_encoding,
+                                                          EncBin}})
+                              end,
+                        Trunc = case proplists:get_value(truncate, Body) of
+                                    undefined -> none;
+                                    TruncBin  -> rabbit_mgmt_util:parse_int(
+                                                   TruncBin)
+                                end,
+                        rabbit_mgmt_util:reply(
+                          basic_gets(Count, Ch, Q, NoAck, Enc, Trunc),
+                          ReqData, Context)
+                end)
+      end).
+
+%% Fetch up to Count messages, stopping early once the queue is empty.
+basic_gets(0, _, _, _, _, _) ->
+    [];
+basic_gets(Count, Ch, Q, NoAck, Enc, Trunc) ->
+    case basic_get(Ch, Q, NoAck, Enc, Trunc) of
+        none -> [];
+        M    -> [M | basic_gets(Count - 1, Ch, Q, NoAck, Enc, Trunc)]
+    end.
+
+%% One basic.get round trip.  Returns 'none' on basic.get-empty,
+%% otherwise a proplist describing the message.  payload_bytes is the
+%% size before any truncation or encoding.
+basic_get(Ch, Q, NoAck, Enc, Trunc) ->
+    case amqp_channel:call(Ch, #'basic.get'{queue  = Q,
+                                            no_ack = NoAck}) of
+        {#'basic.get_ok'{redelivered   = Redelivered,
+                         exchange      = Exchange,
+                         routing_key   = RoutingKey,
+                         message_count = MessageCount},
+         #amqp_msg{props = Props, payload = Payload}} ->
+            %% byte_size/1 rather than the ambiguous size/1: the
+            %% payload is always a binary.
+            [{payload_bytes, byte_size(Payload)},
+             {redelivered,   Redelivered},
+             {exchange,      Exchange},
+             {routing_key,   RoutingKey},
+             {message_count, MessageCount},
+             {properties, rabbit_mgmt_format:basic_properties(Props)}] ++
+                payload_part(maybe_truncate(Payload, Trunc), Enc);
+        #'basic.get_empty'{} ->
+            none
+    end.
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Cut the payload down to at most Len bytes ('none' disables
+%% truncation).  The =< guard returns the payload unchanged when it
+%% already fits, so no needless sub-binary is built when
+%% byte_size(Payload) == Len.
+maybe_truncate(Payload, none) -> Payload;
+maybe_truncate(Payload, Len) when byte_size(Payload) =< Len -> Payload;
+maybe_truncate(Payload, Len) ->
+    <<Start:Len/binary, _Rest/binary>> = Payload,
+    Start.
+
+%% Encode the (possibly truncated) payload for the JSON reply.  With
+%% 'auto' the raw payload is returned tagged as a string when it is
+%% valid UTF-8, falling back to base64 otherwise; any other request
+%% yields base64 unconditionally.
+payload_part(Payload, Enc) ->
+    {PL, E} = case Enc of
+                  auto -> try
+                              %% TODO mochijson does this but is it safe?
+                              xmerl_ucs:from_utf8(Payload),
+                              {Payload, string}
+                          catch exit:{ucs, _} ->
+                                  {base64:encode(Payload), base64}
+                          end;
+                  _    -> {base64:encode(Payload), base64}
+              end,
+    [{payload, PL}, {payload_encoding, E}].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource purging a queue's contents via DELETE.
+-module(rabbit_mgmt_wm_queue_purge).
+
+-export([init/1, resource_exists/2, is_authorized/2, allowed_methods/2,
+ delete_resource/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) ->
+    {ok, #context{}}.
+
+%% Purging is only meaningful as a DELETE.
+allowed_methods(ReqData, Context) ->
+    {['DELETE'], ReqData, Context}.
+
+%% 404 unless the target queue exists (reuses the single-queue lookup).
+resource_exists(ReqData, Context) ->
+    Exists = rabbit_mgmt_wm_queue:queue(ReqData) =/= not_found,
+    {Exists, ReqData, Context}.
+
+%% Issue queue.purge over AMQP on the queue's vhost.
+delete_resource(ReqData, Context) ->
+    QName = rabbit_mgmt_util:id(queue, ReqData),
+    rabbit_mgmt_util:amqp_request(
+      rabbit_mgmt_util:vhost(ReqData), ReqData, Context,
+      #'queue.purge'{queue = QName}).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_queues).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1, augmented/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case queues0(ReqData) of
+ vhost_not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_format:strip_pids(
+ rabbit_mgmt_db:augment_queues(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ rabbit_mgmt_util:range_ceil(ReqData), basic)).
+
+basic(ReqData) ->
+ [rabbit_mgmt_format:queue(Q) || Q <- queues0(ReqData)].
+
+queues0(ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list/1).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for a single user.  Supports HEAD/GET/PUT/
+%% DELETE; put_user/1 is exported for reuse by other modules.
+-module(rabbit_mgmt_wm_user).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, put_user/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+%% Webmachine init: every request starts with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+%% Exists iff the internal auth backend knows the user.
+resource_exists(ReqData, Context) ->
+ {case user(ReqData) of
+ {ok, _} -> true;
+ {error, _} -> false
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ {ok, User} = user(ReqData),
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:internal_user(User),
+ ReqData, Context).
+
+%% PUT: create or update the user named in the URL from the decoded
+%% JSON body.  No fields are mandatory at decode time; put_user/1
+%% validates what it needs.
+accept_content(ReqData, Context) ->
+ Username = rabbit_mgmt_util:id(user, ReqData),
+ rabbit_mgmt_util:with_decode(
+ [], ReqData, Context,
+ fun(_, User) ->
+ put_user([{name, Username} | User]),
+ {true, ReqData, Context}
+ end).
+
+delete_resource(ReqData, Context) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ rabbit_auth_backend_internal:delete_user(User),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+user(ReqData) ->
+ rabbit_auth_backend_internal:lookup_user(rabbit_mgmt_util:id(user, ReqData)).
+
+%% Create or update a user from a proplist with at least a 'name'.
+%% Password precedence: a plain 'password' wins over 'password_hash'
+%% (base64-encoded); with neither, the hash is set to <<>> -
+%% presumably disabling password login for that user (confirm against
+%% the auth backend).
+put_user(User) ->
+ CP = fun rabbit_auth_backend_internal:change_password/2,
+ CPH = fun rabbit_auth_backend_internal:change_password_hash/2,
+ case {proplists:is_defined(password, User),
+ proplists:is_defined(password_hash, User)} of
+ {true, _} -> put_user(User, pget(password, User), CP);
+ {_, true} -> Hash = rabbit_mgmt_util:b64decode_or_throw(
+ pget(password_hash, User)),
+ put_user(User, Hash, CPH);
+ _ -> put_user(User, <<>>, CPH)
+ end.
+
+%% Second stage: resolve tags, ensure the user record exists, then
+%% apply the password (via PWFun) and the tags.  Tags come from the
+%% comma-separated 'tags' field or, failing that, the boolean
+%% 'administrator' field; at least one must be present.
+put_user(User, PWArg, PWFun) ->
+ Username = pget(name, User),
+ Tags = case {pget(tags, User), pget(administrator, User)} of
+ {undefined, undefined} ->
+ throw({error, tags_not_present});
+ {undefined, AdminS} ->
+ case rabbit_mgmt_util:parse_bool(AdminS) of
+ true -> [administrator];
+ false -> []
+ end;
+ {TagsS, _} ->
+ %% NOTE(review): list_to_atom/1 on request-supplied
+ %% tags grows the atom table; callers are admin-only
+ %% here, but confirm this is acceptable.
+ [list_to_atom(string:strip(T)) ||
+ T <- string:tokens(binary_to_list(TagsS), ",")]
+ end,
+ %% New users are created with a random throwaway password; the
+ %% real password/hash is applied by PWFun just below.
+ case rabbit_auth_backend_internal:lookup_user(Username) of
+ {error, not_found} ->
+ rabbit_auth_backend_internal:add_user(
+ Username, rabbit_guid:binary(rabbit_guid:gen_secure(), "tmp"));
+ _ ->
+ ok
+ end,
+ PWFun(Username, PWArg),
+ ok = rabbit_auth_backend_internal:set_tags(Username, Tags).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing all users.  Read-only; administrator
+%% access required.
+-module(rabbit_mgmt_wm_users).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([users/0]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) ->
+    {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+    rabbit_mgmt_util:reply_list(users(), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% One formatted entry per user known to the internal auth backend.
+users() ->
+    [format_user(U) || U <- rabbit_auth_backend_internal:list_users()].
+
+%% Look the user up again to obtain the full internal record, then
+%% render it for the API.  The {ok, _} match asserts the user still
+%% exists.
+format_user(U) ->
+    {ok, User} = rabbit_auth_backend_internal:lookup_user(pget(user, U)),
+    rabbit_mgmt_format:internal_user(User).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource for a single vhost.  Supports HEAD/GET/PUT/
+%% DELETE; put_vhost/2 is exported for reuse by other modules.
+-module(rabbit_mgmt_wm_vhost).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, put_vhost/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+%% Webmachine init: every request starts with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {rabbit_vhost:exists(id(ReqData)), ReqData, Context}.
+
+%% GET: vhost info augmented with stats for the requested range.
+%% hd/1 is safe: exactly one vhost is passed to augment_vhosts/2.
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(
+ hd(rabbit_mgmt_db:augment_vhosts(
+ [rabbit_vhost:info(id(ReqData))], rabbit_mgmt_util:range(ReqData))),
+ ReqData, Context).
+
+%% PUT: create the vhost if missing and toggle tracing according to
+%% the optional 'tracing' boolean in the body.
+accept_content(ReqData, Context) ->
+ Name = id(ReqData),
+ rabbit_mgmt_util:with_decode(
+ [], ReqData, Context,
+ fun(_, VHost) ->
+ put_vhost(Name, rabbit_mgmt_util:parse_bool(
+ pget(tracing, VHost))),
+ {true, ReqData, Context}
+ end).
+
+delete_resource(ReqData, Context) ->
+ VHost = id(ReqData),
+ rabbit_vhost:delete(VHost),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% The vhost name from the URL.
+id(ReqData) ->
+ rabbit_mgmt_util:id(vhost, ReqData).
+
+%% Idempotently ensure the vhost exists, then act on the tracing flag:
+%% true starts tracing, false stops it, undefined leaves it alone.
+put_vhost(Name, Trace) ->
+ case rabbit_vhost:exists(Name) of
+ true -> ok;
+ false -> rabbit_vhost:add(Name)
+ end,
+ case Trace of
+ true -> rabbit_trace:start(Name);
+ false -> rabbit_trace:stop(Name);
+ undefined -> ok
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing the vhosts visible to the requesting
+%% user.
+-module(rabbit_mgmt_wm_vhosts).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([basic/0, augmented/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) ->
+    {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+    rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context).
+
+%% Any authenticated user may list vhosts; visibility is filtered per
+%% user in augmented/2.
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Info for each vhost the context's user can see, augmented with
+%% stats for the requested time range.
+augmented(ReqData, #context{user = User}) ->
+    Visible = rabbit_mgmt_util:list_visible_vhosts(User),
+    Infos = [rabbit_vhost:info(V) || V <- Visible],
+    rabbit_mgmt_db:augment_vhosts(Infos, rabbit_mgmt_util:range(ReqData)).
+
+%% Bare name-only listing for callers that need no stats.
+basic() ->
+    rabbit_vhost:info_all([name]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Plugin.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource returning, as JSON, the authenticated user
+%% making the request (the /whoami endpoint).
+-module(rabbit_mgmt_wm_whoami).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+%% Webmachine init callback: start with an empty #context{}.
+init(_Config) -> {ok, #context{}}.
+
+%% This resource only serves JSON.
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+%% The user to report is the one stashed in the context by the
+%% authorization step below.
+to_json(ReqData, Context = #context{user = User}) ->
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:user(User), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
--- /dev/null
+%% OTP application resource file for the RabbitMQ management plugin.
+%% "%%VSN%%" is substituted with the real version at build time.
+{application, rabbitmq_management,
+ [{description, "RabbitMQ Management Console"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_mgmt_app, []}},
+ %% listener: HTTP API/UI listener config; 15672 is the default port.
+ %% http_log_dir/load_definitions default to 'none' (disabled).
+ {env, [{listener, [{port, 15672}]},
+ {http_log_dir, none},
+ {load_definitions, none},
+ {sample_retention_policies,
+ %% List of {MaxAgeSecs, IfTimestampDivisibleBySecs}
+ [{global, [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
+ {basic, [{605, 5}, {3600, 60}]},
+ {detailed, [{10, 5}]}]}
+ ]},
+ {applications, [kernel, stdlib, rabbit, xmerl, rabbitmq_web_dispatch,
+ amqp_client, rabbitmq_management_agent]}]}.
--- /dev/null
+# rabbitmqadmin.conf.example START
+
+[non_default]
+hostname = localhost
+port = 25672
+username = guest
+password = guest
+declare_vhost = / # Used as default for declare / delete only
+vhost = / # Used as default for declare / delete / list
+
+[bad_host]
+hostname = rabbit.acme.com
+port = 15672
+username = guest
+password = guest
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developers of the Original Code are Rabbit Technologies Ltd.
+%%
+%% Copyright (C) 2010 Rabbit Technologies Ltd.
+%%
+%% All Rights Reserved.
+%%
+%% Contributor(s): ______________________________________.
+%%
+%% Tests exercising the management HTTP API against a two-node cluster.
+-module(rabbit_mgmt_test_clustering).
+
+-compile(export_all).
+-include("rabbit_mgmt_test.hrl").
+
+-import(rabbit_mgmt_test_http, [http_get/1, http_put/3, http_delete/2]).
+-import(rabbit_misc, [pget/2]).
+
+%%----------------------------------------------------------------------------
+
+%% Both nodes of the cluster should show up in /nodes.
+cluster_nodes_with() -> cluster_ab.
+cluster_nodes([_A, _B]) ->
+ ?assertEqual(2, length(http_get("/nodes"))),
+ ok.
+
+%% Mirror a queue across the cluster with an ha-mode=all policy, then
+%% restart the queue's home node and check the master fails over.
+ha_with() -> cluster_ab.
+ha([RabbitCfg, HareCfg]) ->
+ Rabbit = pget(nodename, RabbitCfg),
+ Hare = pget(nodename, HareCfg),
+ Policy = [{pattern, <<".*">>},
+ {definition, [{'ha-mode', <<"all">>}]}],
+ http_put("/policies/%2f/HA", Policy, ?NO_CONTENT),
+ QArgs = [{node, list_to_binary(atom_to_list(Hare))}],
+ http_put("/queues/%2f/ha-queue", QArgs, ?NO_CONTENT),
+ Q = wait_for("/queues/%2f/ha-queue"),
+ %% Queue was declared on Hare; Rabbit should be its (synchronised) slave.
+ assert_node(Hare, pget(node, Q)),
+ assert_single_node(Rabbit, pget(slave_nodes, Q)),
+ assert_single_node(Rabbit, pget(synchronised_slave_nodes, Q)),
+ _HareCfg2 = rabbit_test_configs:restart_node(HareCfg),
+
+ %% After restarting Hare the roles should have swapped.
+ Q2 = wait_for("/queues/%2f/ha-queue"),
+ assert_node(Rabbit, pget(node, Q2)),
+ assert_single_node(Hare, pget(slave_nodes, Q2)),
+ assert_single_node(Hare, pget(synchronised_slave_nodes, Q2)),
+ http_delete("/queues/%2f/ha-queue", ?NO_CONTENT),
+ http_delete("/policies/%2f/HA", ?NO_CONTENT),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% Poll Path until the given keys are all present (non-empty and
+%% defined) in the response, retrying up to 1000 times at 10ms
+%% intervals; exits with {timeout, ...} on exhaustion.
+wait_for(Path) ->
+ wait_for(Path, [slave_nodes, synchronised_slave_nodes]).
+
+wait_for(Path, Keys) ->
+ wait_for(Path, Keys, 1000).
+
+wait_for(Path, Keys, 0) ->
+ exit({timeout, {Path, Keys}});
+
+wait_for(Path, Keys, Count) ->
+ Res = http_get(Path),
+ case present(Keys, Res) of
+ false -> timer:sleep(10),
+ wait_for(Path, Keys, Count - 1);
+ true -> Res
+ end.
+
+%% True iff every key maps to a value that is neither [] nor undefined.
+present(Keys, Res) ->
+ lists:all(fun (Key) ->
+ X = pget(Key, Res),
+ X =/= [] andalso X =/= undefined
+ end, Keys).
+
+assert_single_node(Exp, Act) ->
+ ?assertEqual(1, length(Act)),
+ assert_node(Exp, hd(Act)).
+
+%% Order-insensitive node-list comparison.
+%% NOTE(review): ?debugVal looks like leftover debug output — consider
+%% removing.
+assert_nodes(Exp, Act0) ->
+ Act = [read_node(A) || A <- Act0],
+ ?debugVal({Exp, Act}),
+ ?assertEqual(length(Exp), length(Act)),
+ [?assert(lists:member(E, Act)) || E <- Exp].
+
+assert_node(Exp, Act) ->
+ ?assertEqual(Exp, read_node(Act)).
+
+%% <<"name@host">> -> 'name' (short node name as an atom).
+read_node(N) ->
+ list_to_atom(hd(string:tokens(binary_to_list(N), "@"))).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_db).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2]).
+
+%% Like eunit's ?debugVal/1 but prints the value with ~p; evaluates E
+%% once, prints "Expr = Value" and returns the value.
+-define(debugVal2(E),
+ ((fun (__V) ->
+ ?debugFmt(<<"~s = ~p">>, [(??E), __V]),
+ __V
+ end)(E))).
+
+%%----------------------------------------------------------------------------
+%% Tests
+%%----------------------------------------------------------------------------
+
+%% Coarse (per-queue message count) stats: create two queues, feed in
+%% stats events, and check per-queue, per-vhost and overview totals,
+%% including totals shrinking as queues are deleted.
+queue_coarse_test() ->
+ rabbit_mgmt_db:override_lookups([{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}]),
+ create_q(test, 0),
+ create_q(test2, 0),
+ stats_q(test, 0, 10),
+ stats_q(test2, 0, 1),
+ R = range(0, 1, 1),
+ Exp = fun(N) -> simple_details(messages, N, R) end,
+ assert_item(Exp(10), get_q(test, R)),
+ assert_item(Exp(11), get_vhost(R)),
+ assert_item(Exp(11), get_overview_q(R)),
+ delete_q(test, 0),
+ assert_item(Exp(1), get_vhost(R)),
+ assert_item(Exp(1), get_overview_q(R)),
+ delete_q(test2, 0),
+ assert_item(Exp(0), get_vhost(R)),
+ assert_item(Exp(0), get_overview_q(R)),
+ rabbit_mgmt_db:reset_lookups(),
+ ok.
+
+%% Coarse connection stats (recv_oct): two connections get independent
+%% stats, and closing both empties the connection list.
+connection_coarse_test() ->
+ create_conn(test, 0),
+ create_conn(test2, 0),
+ stats_conn(test, 0, 10),
+ stats_conn(test2, 0, 1),
+ R = range(0, 1, 1),
+ Exp = fun(N) -> simple_details(recv_oct, N, R) end,
+ assert_item(Exp(10), get_conn(test, R)),
+ assert_item(Exp(1), get_conn(test2, R)),
+ delete_conn(test, 1),
+ delete_conn(test2, 1),
+ assert_list([], rabbit_mgmt_db:get_all_connections(R)),
+ ok.
+
+%% Fine-grained (per channel/exchange/queue) stats aggregation: two
+%% channels publish through exchange x into queues q1/q2; check the
+%% aggregated and detailed views both before and after q2 is deleted.
+fine_stats_aggregation_test() ->
+ rabbit_mgmt_db:override_lookups([{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}]),
+ create_ch(ch1, 0),
+ create_ch(ch2, 0),
+ stats_ch(ch1, 0, [{x, 100}], [{q1, x, 100},
+ {q2, x, 10}], [{q1, 2},
+ {q2, 1}]),
+ stats_ch(ch2, 0, [{x, 10}], [{q1, x, 50},
+ {q2, x, 5}], []),
+ fine_stats_aggregation_test0(true),
+ delete_q(q2, 0),
+ fine_stats_aggregation_test0(false),
+ delete_ch(ch1, 1),
+ delete_ch(ch2, 1),
+ rabbit_mgmt_db:reset_lookups(),
+ ok.
+
+%% Shared assertions for the scenario above; Q2Exists selects whether
+%% q2's contributions should still be visible.
+fine_stats_aggregation_test0(Q2Exists) ->
+ R = range(0, 1, 1),
+ Ch1 = get_ch(ch1, R),
+ Ch2 = get_ch(ch2, R),
+ X = get_x(x, R),
+ Q1 = get_q(q1, R),
+ V = get_vhost(R),
+ O = get_overview(R),
+ %% Aggregated message_stats per object.
+ assert_fine_stats(m, publish, 100, Ch1, R),
+ assert_fine_stats(m, publish, 10, Ch2, R),
+ assert_fine_stats(m, publish_in, 110, X, R),
+ assert_fine_stats(m, publish_out, 165, X, R),
+ assert_fine_stats(m, publish, 150, Q1, R),
+ assert_fine_stats(m, deliver_get, 2, Q1, R),
+ assert_fine_stats(m, deliver_get, 3, Ch1, R),
+ assert_fine_stats(m, publish, 110, V, R),
+ assert_fine_stats(m, deliver_get, 3, V, R),
+ assert_fine_stats(m, publish, 110, O, R),
+ assert_fine_stats(m, deliver_get, 3, O, R),
+ %% Detailed per-peer breakdowns (incoming/outgoing/deliveries/publishes).
+ assert_fine_stats({pub, x}, publish, 100, Ch1, R),
+ assert_fine_stats({pub, x}, publish, 10, Ch2, R),
+ assert_fine_stats({in, ch1}, publish, 100, X, R),
+ assert_fine_stats({in, ch2}, publish, 10, X, R),
+ assert_fine_stats({out, q1}, publish, 150, X, R),
+ assert_fine_stats({in, x}, publish, 150, Q1, R),
+ assert_fine_stats({del, ch1}, deliver_get, 2, Q1, R),
+ assert_fine_stats({del, q1}, deliver_get, 2, Ch1, R),
+ case Q2Exists of
+ true -> Q2 = get_q(q2, R),
+ assert_fine_stats(m, publish, 15, Q2, R),
+ assert_fine_stats(m, deliver_get, 1, Q2, R),
+ assert_fine_stats({out, q2}, publish, 15, X, R),
+ assert_fine_stats({in, x}, publish, 15, Q2, R),
+ assert_fine_stats({del, ch1}, deliver_get, 1, Q2, R),
+ assert_fine_stats({del, q2}, deliver_get, 1, Ch1, R);
+ false -> assert_fine_stats_neg({out, q2}, X),
+ assert_fine_stats_neg({del, q2}, Ch1)
+ end,
+ ok.
+
+%% Fine stats are sampled per timestamp: stats at t=0 and t=5 must be
+%% retrievable independently via the matching ranges.
+fine_stats_aggregation_time_test() ->
+ rabbit_mgmt_db:override_lookups([{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}]),
+ create_ch(ch, 0),
+ stats_ch(ch, 0, [{x, 100}], [{q, x, 50}], [{q, 20}]),
+ stats_ch(ch, 5, [{x, 110}], [{q, x, 55}], [{q, 22}]),
+
+ R1 = range(0, 1, 1),
+ assert_fine_stats(m, publish, 100, get_ch(ch, R1), R1),
+ assert_fine_stats(m, publish, 50, get_q(q, R1), R1),
+ assert_fine_stats(m, deliver_get, 20, get_q(q, R1), R1),
+
+ R2 = range(5, 6, 1),
+ assert_fine_stats(m, publish, 110, get_ch(ch, R2), R2),
+ assert_fine_stats(m, publish, 55, get_q(q, R2), R2),
+ assert_fine_stats(m, deliver_get, 22, get_q(q, R2), R2),
+
+ delete_q(q, 0),
+ delete_ch(ch, 1),
+ rabbit_mgmt_db:reset_lookups(),
+ ok.
+
+%% m => check the aggregated message_stats proplist of Obj;
+%% {T2, Name} => check the detailed per-peer stats for Name under the
+%% expanded key of T2 (in/out/del/pub).
+assert_fine_stats(m, Type, N, Obj, R) ->
+ Act = pget(message_stats, Obj),
+ assert_item(simple_details(Type, N, R), Act);
+assert_fine_stats({T2, Name}, Type, N, Obj, R) ->
+ Act = find_detailed_stats(Name, pget(expand(T2), Obj)),
+ assert_item(simple_details(Type, N, R), Act).
+
+%% Assert that no detailed stats entry for Name exists on Obj.
+assert_fine_stats_neg({T2, Name}, Obj) ->
+ detailed_stats_absent(Name, pget(expand(T2), Obj)).
+
+%%----------------------------------------------------------------------------
+%% Events in
+%%----------------------------------------------------------------------------
+
+%% Helpers that synthesise rabbit_event-style events and cast them at
+%% the management DB, simulating a live broker.
+
+create_q(Name, Timestamp) ->
+ %% Technically we do not need this, the DB ignores it, but let's
+ %% be symmetrical...
+ event(queue_created, [{name, q(Name)}], Timestamp).
+
+create_conn(Name, Timestamp) ->
+ event(connection_created, [{pid, pid(Name)},
+ {name, a2b(Name)}], Timestamp).
+
+create_ch(Name, Timestamp) ->
+ event(channel_created, [{pid, pid(Name)},
+ {name, a2b(Name)}], Timestamp).
+
+stats_q(Name, Timestamp, Msgs) ->
+ event(queue_stats, [{name, q(Name)},
+ {messages, Msgs}], Timestamp).
+
+stats_conn(Name, Timestamp, Oct) ->
+ event(connection_stats, [{pid , pid(Name)},
+ {recv_oct, Oct}], Timestamp).
+
+%% Channel stats take per-exchange publishes (XStats), per-{queue,
+%% exchange} publishes (QXStats) and per-queue deliveries (QStats).
+stats_ch(Name, Timestamp, XStats, QXStats, QStats) ->
+ XStats1 = [{x(XName), [{publish, N}]} || {XName, N} <- XStats],
+ QXStats1 = [{{q(QName), x(XName)}, [{publish, N}]}
+ || {QName, XName, N} <- QXStats],
+ QStats1 = [{q(QName), [{deliver_no_ack, N}]} || {QName, N} <- QStats],
+ event(channel_stats,
+ [{pid, pid(Name)},
+ {channel_exchange_stats, XStats1},
+ {channel_queue_exchange_stats, QXStats1},
+ {channel_queue_stats, QStats1}], Timestamp).
+
+delete_q(Name, Timestamp) ->
+ event(queue_deleted, [{name, q(Name)}], Timestamp).
+
+delete_conn(Name, Timestamp) ->
+ event(connection_closed, [{pid, pid_del(Name)}], Timestamp).
+
+delete_ch(Name, Timestamp) ->
+ event(channel_closed, [{pid, pid_del(Name)}], Timestamp).
+
+%% Cast an #event{} at the globally-registered management DB.
+event(Type, Stats, Timestamp) ->
+ gen_server:cast({global, rabbit_mgmt_db},
+ {event, #event{type = Type,
+ props = Stats,
+ reference = none,
+ timestamp = sec_to_triple(Timestamp)}}).
+
+%% Seconds -> {MegaSecs, Secs, MicroSecs} timestamp triple.
+sec_to_triple(Sec) -> {Sec div 1000000, Sec rem 1000000, 0}.
+
+%%----------------------------------------------------------------------------
+%% Events out
+%%----------------------------------------------------------------------------
+
+%% Build identical #range{}s (converted to millis) for the three range
+%% slots the DB API expects.
+range(F, L, I) ->
+ R = #range{first = F * 1000, last = L * 1000, incr = I * 1000},
+ {R, R, R}.
+
+get_x(Name, Range) ->
+ [X] = rabbit_mgmt_db:augment_exchanges([x2(Name)], Range, full),
+ X.
+
+get_q(Name, Range) ->
+ [Q] = rabbit_mgmt_db:augment_queues([q2(Name)], Range, full),
+ Q.
+
+get_vhost(Range) ->
+ [VHost] = rabbit_mgmt_db:augment_vhosts([[{name, <<"/">>}]], Range),
+ VHost.
+
+get_conn(Name, Range) -> rabbit_mgmt_db:get_connection(a2b(Name), Range).
+get_ch(Name, Range) -> rabbit_mgmt_db:get_channel(a2b(Name), Range).
+
+get_overview(Range) -> rabbit_mgmt_db:get_overview(Range).
+get_overview_q(Range) -> pget(queue_totals, get_overview(Range)).
+
+%% Expected *_details proplist shape for a rate/samples entry.
+details0(R, AR, A, L) ->
+ [{rate, R},
+ {samples, [[{sample, S}, {timestamp, T}] || {T, S} <- L]},
+ {avg_rate, AR},
+ {avg, A}].
+
+%% Expected stats entry for a constant value N over the range: zero
+%% rates, average N, and a sample at each end of the range.
+simple_details(Thing, N, {#range{first = First, last = Last}, _, _}) ->
+ [{Thing, N},
+ {atom_suffix(Thing, "_details"),
+ details0(0.0, 0.0, N * 1.0, [{Last, N}, {First, N}])}].
+
+atom_suffix(Atom, Suffix) ->
+ list_to_atom(atom_to_list(Atom) ++ Suffix).
+
+%% Find exactly one detailed-stats entry for Name, or crash.
+find_detailed_stats(Name, List) ->
+ [S] = filter_detailed_stats(Name, List),
+ S.
+
+detailed_stats_absent(Name, List) ->
+ [] = filter_detailed_stats(Name, List).
+
+filter_detailed_stats(Name, List) ->
+ [Stats || [{stats, Stats}, {_, Details}] <- List,
+ pget(name, Details) =:= a2b(Name)].
+
+%% Map the short direction tags used in tests to the proplist keys the
+%% DB emits.
+expand(in) -> incoming;
+expand(out) -> outgoing;
+expand(del) -> deliveries;
+expand(pub) -> publishes.
+
+%%----------------------------------------------------------------------------
+%% Util
+%%----------------------------------------------------------------------------
+
+%% Resource name constructors for the default vhost <<"/">>.
+x(Name) -> rabbit_misc:r(<<"/">>, exchange, a2b(Name)).
+x2(Name) -> q2(Name).
+q(Name) -> rabbit_misc:r(<<"/">>, queue, a2b(Name)).
+q2(Name) -> [{name, a2b(Name)},
+ {vhost, <<"/">>}].
+
+%% One stable (dead) pid per name, memoised in the process dictionary,
+%% so repeated events for the same entity carry the same pid.
+pid(Name) ->
+ case get({pid, Name}) of
+ undefined -> P = spawn(fun() -> ok end),
+ put({pid, Name}, P),
+ P;
+ Pid -> Pid
+ end.
+
+%% Return Name's pid and forget it, so a future pid(Name) is fresh.
+pid_del(Name) ->
+ Pid = pid(Name),
+ erase({pid, Name}),
+ Pid.
+
+%% atom -> binary.
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+%% Lookup override that always succeeds; see override_lookups above.
+dummy_lookup(_Thing) -> {ok, ignore_this}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2012 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_db_unit).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%% rabbit_mgmt_stats:gc/2 with the cutoff below: old samples are folded
+%% into the base, nearby old samples are amalgamated to rounder
+%% timestamps, and fresh samples are left alone.
+gc_test() ->
+ T = fun (Before, After) ->
+ ?assertEqual(After, unstats(
+ rabbit_mgmt_stats:gc(
+ cutoff(), stats(Before))))
+ end,
+ %% Cut off old sample, move to base
+ T({[{8999, 123}, {9000, 456}], 0},
+ {[{9000, 456}], 123}),
+ %% Amalgamate old samples to rounder one
+ T({[{9001, 100}, {9010, 020}, {10000, 003}], 0},
+ {[{10000, 123}], 0}),
+ %% The same, but a bit less
+ T({[{9000, 100}, {9901, 020}, {9910, 003}], 0},
+ {[{9000, 100}, {9910, 023}], 0}),
+ %% Nothing needs to be done
+ T({[{9000, 100}, {9990, 020}, {9991, 003}], 0},
+ {[{9000, 100}, {9990, 020}, {9991, 003}], 0}),
+ %% Invent a newer sample that's acceptable
+ T({[{9001, 10}, {9010, 02}], 0},
+ {[{9100, 12}], 0}),
+ %% ...but don't if it's too old
+ T({[{8001, 10}, {8010, 02}], 0},
+ {[], 12}),
+ ok.
+
+%% rabbit_mgmt_stats:format/3 over an explicit #range{}: samples are
+%% emitted newest-first, skipped/invented per the range increment, and
+%% rate/avg_rate/avg/count computed accordingly.
+format_test() ->
+ Interval = 10,
+ T = fun ({First, Last, Incr}, Stats, Results) ->
+ ?assertEqual(format(Results),
+ rabbit_mgmt_stats:format(
+ #range{first = First * 1000,
+ last = Last * 1000,
+ incr = Incr * 1000},
+ stats(Stats),
+ Interval * 1000))
+ end,
+
+ %% Just three samples, all of which we format. Note the
+ %% instantaneous rate is taken from the penultimate sample.
+ T({10, 30, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
+ {[{30, 61}, {20, 31}, {10, 11}], 2.0, 2.5, 103/3, 61}),
+
+ %% Skip over the second (and ditto).
+ T({10, 30, 20}, {[{10, 10}, {20, 20}, {30, 30}], 1},
+ {[{30, 61}, {10, 11}], 2.0, 2.5, 36.0, 61}),
+
+ %% Skip over some and invent some. Note that the instantaneous
+ %% rate drops to 0 since the last event is now in the past.
+ T({0, 40, 20}, {[{10, 10}, {20, 20}, {30, 30}], 1},
+ {[{40, 61}, {20, 31}, {0, 1}], 0.0, 1.5, 31.0, 61}),
+
+ %% And a case where the range starts after the samples
+ T({20, 40, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
+ {[{40, 61}, {30, 61}, {20, 31}], 0.0, 1.5, 51.0, 61}),
+
+ %% A single sample - which should lead to some bits not getting generated
+ T({10, 10, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
+ {[{10, 11}], 0.0, 11}),
+
+ %% No samples - which should also lead to some bits not getting generated
+ T({10, 0, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
+ {[], 0.0, 1}),
+
+ %% TODO more?
+ ok.
+
+%% format/3 with no_range: only a rate and a total count come back.
+format_no_range_test() ->
+ Interval = 10,
+ T = fun (Stats, Results) ->
+ ?assertEqual(format(Results),
+ rabbit_mgmt_stats:format(
+ no_range, stats(Stats), Interval * 1000))
+ end,
+
+ %% Just three samples
+ T({[{10, 10}, {20, 20}, {30, 30}], 1},
+ {0.0, 61}),
+ ok.
+
+
+%%--------------------------------------------------------------------
+
+%% GC cutoff used by gc_test: retention policies plus an age threshold.
+%% NOTE(review): the second element is labelled Millis but its unit
+%% cannot be confirmed from here — verify against rabbit_mgmt_stats:gc/2.
+cutoff() ->
+ {[{10, 1}, {100, 10}, {1000, 100}], %% Sec
+ 10000000}. %% Millis
+
+%% Build/unpack a #stats{} from {Samples, Base}, converting sample
+%% timestamps between seconds (tests) and millis (DB representation).
+stats({Diffs, Base}) ->
+ #stats{diffs = gb_trees:from_orddict(secs_to_millis(Diffs)), base = Base}.
+
+unstats(#stats{diffs = Diffs, base = Base}) ->
+ {millis_to_secs(gb_trees:to_list(Diffs)), Base}.
+
+secs_to_millis(L) -> [{TS * 1000, S} || {TS, S} <- L].
+millis_to_secs(L) -> [{TS div 1000, S} || {TS, S} <- L].
+
+%% Build the expected return shape of rabbit_mgmt_stats:format/3 from
+%% the compact tuples used in the tests above (three arities of detail).
+format({Rate, Count}) ->
+ {[{rate, Rate}],
+ Count};
+
+format({Samples, Rate, Count}) ->
+ {[{rate, Rate},
+ {samples, format_samples(Samples)}],
+ Count};
+
+format({Samples, Rate, AvgRate, Avg, Count}) ->
+ {[{rate, Rate},
+ {samples, format_samples(Samples)},
+ {avg_rate, AvgRate},
+ {avg, Avg}],
+ Count}.
+
+format_samples(Samples) ->
+ [[{sample, S}, {timestamp, TS * 1000}] || {TS, S} <- Samples].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_http).
+
+-include("rabbit_mgmt_test.hrl").
+
+-export([http_get/1, http_put/3, http_delete/2]).
+
+-import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2]).
+-import(rabbit_misc, [pget/2]).
+
+%% GET /overview responds and lists at least one listener, both for the
+%% default user and for a freshly-created "management" user.
+overview_test() ->
+ %% Rather crude, but this req doesn't say much and at least this means it
+ %% didn't blow up.
+ true = 0 < length(pget(listeners, http_get("/overview"))),
+ http_put("/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_get("/overview", "myuser", "myuser", ?OK),
+ http_delete("/users/myuser", ?NO_CONTENT),
+ %% TODO uncomment when priv works in test
+ %%http_get(""),
+ ok.
+
+%% Only an administrator may set the cluster name; any authenticated
+%% user may read it.
+%% NOTE(review): {name, "foo"} uses a plain string where most payloads
+%% here use binaries — confirm the JSON helpers encode/decode this
+%% symmetrically.
+cluster_name_test() ->
+ http_put("/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/cluster-name", [{name, "foo"}], "myuser", "myuser", ?NOT_AUTHORISED),
+ http_put("/cluster-name", [{name, "foo"}], ?NO_CONTENT),
+ [{name, "foo"}] = http_get("/cluster-name", "myuser", "myuser", ?OK),
+ http_delete("/users/myuser", ?NO_CONTENT),
+ ok.
+
+%% /nodes requires at least "monitoring"; plain "management" users are
+%% rejected, both for the collection and for a single node.
+nodes_test() ->
+ http_put("/users/user", [{password, <<"user">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/users/monitor", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], ?NO_CONTENT),
+ DiscNode = [{type, <<"disc">>}, {running, true}],
+ assert_list([DiscNode], http_get("/nodes")),
+ assert_list([DiscNode], http_get("/nodes", "monitor", "monitor", ?OK)),
+ http_get("/nodes", "user", "user", ?NOT_AUTHORISED),
+ [Node] = http_get("/nodes"),
+ Path = "/nodes/" ++ binary_to_list(pget(name, Node)),
+ assert_item(DiscNode, http_get(Path, ?OK)),
+ assert_item(DiscNode, http_get(Path, "monitor", "monitor", ?OK)),
+ http_get(Path, "user", "user", ?NOT_AUTHORISED),
+ http_delete("/users/user", ?NO_CONTENT),
+ http_delete("/users/monitor", ?NO_CONTENT),
+ ok.
+
+%% HTTP auth: no credentials, untagged user, and a wrong password
+%% ("gust" is deliberate) are all rejected; guest/guest succeeds.
+auth_test() ->
+ http_put("/users/user", [{password, <<"user">>},
+ {tags, <<"">>}], ?NO_CONTENT),
+ test_auth(?NOT_AUTHORISED, []),
+ test_auth(?NOT_AUTHORISED, [auth_header("user", "user")]),
+ test_auth(?NOT_AUTHORISED, [auth_header("guest", "gust")]),
+ test_auth(?OK, [auth_header("guest", "guest")]),
+ http_delete("/users/user", ?NO_CONTENT),
+ ok.
+
+%% This test is rather over-verbose as we're trying to test understanding of
+%% Webmachine
+vhosts_test() ->
+ assert_list([[{name, <<"/">>}]], http_get("/vhosts")),
+ %% Create a new one
+ http_put("/vhosts/myvhost", none, ?NO_CONTENT),
+ %% PUT should be idempotent
+ http_put("/vhosts/myvhost", none, ?NO_CONTENT),
+ %% Check it's there
+ assert_list([[{name, <<"/">>}], [{name, <<"myvhost">>}]],
+ http_get("/vhosts")),
+ %% Check individually
+ assert_item([{name, <<"/">>}], http_get("/vhosts/%2f", ?OK)),
+ assert_item([{name, <<"myvhost">>}],http_get("/vhosts/myvhost")),
+ %% Delete it
+ http_delete("/vhosts/myvhost", ?NO_CONTENT),
+ %% It's not there
+ http_get("/vhosts/myvhost", ?NOT_FOUND),
+ http_delete("/vhosts/myvhost", ?NOT_FOUND).
+
+%% Tracing can be enabled/disabled per vhost via PUT, idempotently, and
+%% is reflected in the vhost's JSON.
+vhosts_trace_test() ->
+ http_put("/vhosts/myvhost", none, ?NO_CONTENT),
+ Disabled = [{name, <<"myvhost">>}, {tracing, false}],
+ Enabled = [{name, <<"myvhost">>}, {tracing, true}],
+ Disabled = http_get("/vhosts/myvhost"),
+ http_put("/vhosts/myvhost", [{tracing, true}], ?NO_CONTENT),
+ Enabled = http_get("/vhosts/myvhost"),
+ http_put("/vhosts/myvhost", [{tracing, true}], ?NO_CONTENT),
+ Enabled = http_get("/vhosts/myvhost"),
+ http_put("/vhosts/myvhost", [{tracing, false}], ?NO_CONTENT),
+ Disabled = http_get("/vhosts/myvhost"),
+ http_delete("/vhosts/myvhost", ?NO_CONTENT).
+
+%% User CRUD: bad payloads are rejected; users can be created with a
+%% password hash or a password; tags round-trip (whitespace stripped);
+%% deleted users can no longer authenticate.
+users_test() ->
+ assert_item([{name, <<"guest">>}, {tags, <<"administrator">>}],
+ http_get("/whoami")),
+ http_get("/users/myuser", ?NOT_FOUND),
+ http_put_raw("/users/myuser", "Something not JSON", ?BAD_REQUEST),
+ http_put("/users/myuser", [{flim, <<"flam">>}], ?BAD_REQUEST),
+ http_put("/users/myuser", [{tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/users/myuser", [{password_hash, <<"not_hash">>}], ?BAD_REQUEST),
+ http_put("/users/myuser", [{password_hash,
+ <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/users/myuser", [{password, <<"password">>},
+ {tags, <<"administrator, foo">>}], ?NO_CONTENT),
+ assert_item([{name, <<"myuser">>}, {tags, <<"administrator,foo">>}],
+ http_get("/users/myuser")),
+ assert_list([[{name, <<"myuser">>}, {tags, <<"administrator,foo">>}],
+ [{name, <<"guest">>}, {tags, <<"administrator">>}]],
+ http_get("/users")),
+ test_auth(?OK, [auth_header("myuser", "password")]),
+ http_delete("/users/myuser", ?NO_CONTENT),
+ test_auth(?NOT_AUTHORISED, [auth_header("myuser", "password")]),
+ http_get("/users/myuser", ?NOT_FOUND),
+ ok.
+
+%% The legacy boolean "administrator" flag is translated into tags.
+users_legacy_administrator_test() ->
+ http_put("/users/myuser1", [{administrator, <<"true">>}], ?NO_CONTENT),
+ http_put("/users/myuser2", [{administrator, <<"false">>}], ?NO_CONTENT),
+ assert_item([{name, <<"myuser1">>}, {tags, <<"administrator">>}],
+ http_get("/users/myuser1")),
+ assert_item([{name, <<"myuser2">>}, {tags, <<"">>}],
+ http_get("/users/myuser2")),
+ http_delete("/users/myuser1", ?NO_CONTENT),
+ http_delete("/users/myuser2", ?NO_CONTENT),
+ ok.
+
+%% Permission PUTs are validated: unknown vhost/user and malformed
+%% regexes ("[") are rejected; a valid triple is accepted.
+permissions_validation_test() ->
+ Good = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put("/permissions/wrong/guest", Good, ?BAD_REQUEST),
+ http_put("/permissions/%2f/wrong", Good, ?BAD_REQUEST),
+ http_put("/permissions/%2f/guest",
+ [{configure, <<"[">>}, {write, <<".*">>}, {read, <<".*">>}],
+ ?BAD_REQUEST),
+ http_put("/permissions/%2f/guest", Good, ?NO_CONTENT),
+ ok.
+
+%% /permissions lists all grants; /users/U/permissions filters by user.
+permissions_list_test() ->
+ [[{user,<<"guest">>},
+ {vhost,<<"/">>},
+ {configure,<<".*">>},
+ {write,<<".*">>},
+ {read,<<".*">>}]] =
+ http_get("/permissions"),
+
+ http_put("/users/myuser1", [{password, <<"">>}, {tags, <<"administrator">>}],
+ ?NO_CONTENT),
+ http_put("/users/myuser2", [{password, <<"">>}, {tags, <<"administrator">>}],
+ ?NO_CONTENT),
+ http_put("/vhosts/myvhost1", none, ?NO_CONTENT),
+ http_put("/vhosts/myvhost2", none, ?NO_CONTENT),
+
+ Perms = [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
+ http_put("/permissions/myvhost1/myuser1", Perms, ?NO_CONTENT),
+ http_put("/permissions/myvhost2/myuser1", Perms, ?NO_CONTENT),
+ http_put("/permissions/myvhost1/myuser2", Perms, ?NO_CONTENT),
+
+ %% 3 new grants + guest's default grant on /.
+ 4 = length(http_get("/permissions")),
+ 2 = length(http_get("/users/myuser1/permissions")),
+ 1 = length(http_get("/users/myuser2/permissions")),
+
+ http_delete("/users/myuser1", ?NO_CONTENT),
+ http_delete("/users/myuser2", ?NO_CONTENT),
+ http_delete("/vhosts/myvhost1", ?NO_CONTENT),
+ http_delete("/vhosts/myvhost2", ?NO_CONTENT),
+ ok.
+
+%% Single-grant round trip: PUT, GET (single and in listings), DELETE,
+%% then 404 on re-GET.
+permissions_test() ->
+ http_put("/users/myuser", [{password, <<"myuser">>}, {tags, <<"administrator">>}],
+ ?NO_CONTENT),
+ http_put("/vhosts/myvhost", none, ?NO_CONTENT),
+
+ http_put("/permissions/myvhost/myuser",
+ [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
+ ?NO_CONTENT),
+
+ Permission = [{user,<<"myuser">>},
+ {vhost,<<"myvhost">>},
+ {configure,<<"foo">>},
+ {write,<<"foo">>},
+ {read,<<"foo">>}],
+ Default = [{user,<<"guest">>},
+ {vhost,<<"/">>},
+ {configure,<<".*">>},
+ {write,<<".*">>},
+ {read,<<".*">>}],
+ Permission = http_get("/permissions/myvhost/myuser"),
+ assert_list([Permission, Default], http_get("/permissions")),
+ assert_list([Permission], http_get("/users/myuser/permissions")),
+ http_delete("/permissions/myvhost/myuser", ?NO_CONTENT),
+ http_get("/permissions/myvhost/myuser", ?NOT_FOUND),
+
+ http_delete("/users/myuser", ?NO_CONTENT),
+ http_delete("/vhosts/myvhost", ?NO_CONTENT),
+ ok.
+
+%% An open AMQP connection appears under /connections (named by its
+%% local/remote address pair) and DELETE closes it.
+connections_test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ LocalPort = local_port(Conn),
+ Path = binary_to_list(
+ rabbit_mgmt_format:print(
+ "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672",
+ [LocalPort])),
+ http_get(Path, ?OK),
+ http_delete(Path, ?NO_CONTENT),
+ %% TODO rabbit_reader:shutdown/2 returns before the connection is
+ %% closed. It may not be worth fixing.
+ timer:sleep(200),
+ http_get(Path, ?NOT_FOUND).
+
+%% GET /overview with the given headers and assert the response code.
+test_auth(Code, Headers) ->
+ {ok, {{_, Code, _}, _, _}} = req(get, "/overview", Headers).
+
+%% Exchange CRUD: permissions are enforced, PUT is idempotent, invalid
+%% types/booleans and conflicting redeclarations are rejected.
+exchanges_test() ->
+ %% Can pass booleans or strings
+ Good = [{type, <<"direct">>}, {durable, <<"true">>}],
+ http_put("/vhosts/myvhost", none, ?NO_CONTENT),
+ http_get("/exchanges/myvhost/foo", ?NOT_AUTHORISED),
+ http_put("/exchanges/myvhost/foo", Good, ?NOT_AUTHORISED),
+ http_put("/permissions/myvhost/guest",
+ [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ ?NO_CONTENT),
+ http_get("/exchanges/myvhost/foo", ?NOT_FOUND),
+ http_put("/exchanges/myvhost/foo", Good, ?NO_CONTENT),
+ http_put("/exchanges/myvhost/foo", Good, ?NO_CONTENT),
+ %% foo only exists in myvhost, not in /.
+ http_get("/exchanges/%2f/foo", ?NOT_FOUND),
+ assert_item([{name,<<"foo">>},
+ {vhost,<<"myvhost">>},
+ {type,<<"direct">>},
+ {durable,true},
+ {auto_delete,false},
+ {internal,false},
+ {arguments,[]}],
+ http_get("/exchanges/myvhost/foo")),
+
+ http_put("/exchanges/badvhost/bar", Good, ?NOT_FOUND),
+ http_put("/exchanges/myvhost/bar", [{type, <<"bad_exchange_type">>}],
+ ?BAD_REQUEST),
+ http_put("/exchanges/myvhost/bar", [{type, <<"direct">>},
+ {durable, <<"troo">>}],
+ ?BAD_REQUEST),
+ %% Redeclaring foo with different properties must fail.
+ http_put("/exchanges/myvhost/foo", [{type, <<"direct">>}],
+ ?BAD_REQUEST),
+
+ http_delete("/exchanges/myvhost/foo", ?NO_CONTENT),
+ http_delete("/exchanges/myvhost/foo", ?NOT_FOUND),
+
+ http_delete("/vhosts/myvhost", ?NO_CONTENT),
+ http_get("/exchanges/badvhost", ?NOT_FOUND),
+ ok.
+
+%% Queue CRUD: idempotent PUT, rejection of bad/conflicting properties,
+%% and listing/single-item GETs agree on the declared properties.
+queues_test() ->
+ Good = [{durable, true}],
+ http_get("/queues/%2f/foo", ?NOT_FOUND),
+ http_put("/queues/%2f/foo", Good, ?NO_CONTENT),
+ http_put("/queues/%2f/foo", Good, ?NO_CONTENT),
+ http_get("/queues/%2f/foo", ?OK),
+
+ http_put("/queues/badvhost/bar", Good, ?NOT_FOUND),
+ http_put("/queues/%2f/bar",
+ [{durable, <<"troo">>}],
+ ?BAD_REQUEST),
+ %% Redeclaring foo with different durability must fail.
+ http_put("/queues/%2f/foo",
+ [{durable, false}],
+ ?BAD_REQUEST),
+
+ http_put("/queues/%2f/baz", Good, ?NO_CONTENT),
+
+ Queues = http_get("/queues/%2f"),
+ Queue = http_get("/queues/%2f/foo"),
+ assert_list([[{name, <<"foo">>},
+ {vhost, <<"/">>},
+ {durable, true},
+ {auto_delete, false},
+ {arguments, []}],
+ [{name, <<"baz">>},
+ {vhost, <<"/">>},
+ {durable, true},
+ {auto_delete, false},
+ {arguments, []}]], Queues),
+ assert_item([{name, <<"foo">>},
+ {vhost, <<"/">>},
+ {durable, true},
+ {auto_delete, false},
+ {arguments, []}], Queue),
+
+ http_delete("/queues/%2f/foo", ?NO_CONTENT),
+ http_delete("/queues/%2f/baz", ?NO_CONTENT),
+ http_delete("/queues/%2f/foo", ?NOT_FOUND),
+ http_get("/queues/badvhost", ?NOT_FOUND),
+ ok.
+
+%% Exercise exchange->queue binding CRUD over the HTTP API: create a
+%% binding, read it back through several list views, then delete it and
+%% verify the expected 404s (including for an unknown vhost).
+bindings_test() ->
+ XArgs = [{type, <<"direct">>}],
+ QArgs = [],
+ http_put("/exchanges/%2f/myexchange", XArgs, ?NO_CONTENT),
+ http_put("/queues/%2f/myqueue", QArgs, ?NO_CONTENT),
+ BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
+ http_post("/bindings/%2f/e/myexchange/q/myqueue", BArgs, ?CREATED),
+ http_get("/bindings/%2f/e/myexchange/q/myqueue/routing", ?OK),
+ %% "rooting" is a deliberate misspelling: a properties key that does
+ %% not exist must yield 404.
+ http_get("/bindings/%2f/e/myexchange/q/myqueue/rooting", ?NOT_FOUND),
+ Binding =
+ [{source,<<"myexchange">>},
+ {vhost,<<"/">>},
+ {destination,<<"myqueue">>},
+ {destination_type,<<"queue">>},
+ {routing_key,<<"routing">>},
+ {arguments,[]},
+ {properties_key,<<"routing">>}],
+ %% DBinding is the implicit default-exchange binding (source <<"">>)
+ %% that every queue gets, routed by its own name.
+ DBinding =
+ [{source,<<"">>},
+ {vhost,<<"/">>},
+ {destination,<<"myqueue">>},
+ {destination_type,<<"queue">>},
+ {routing_key,<<"myqueue">>},
+ {arguments,[]},
+ {properties_key,<<"myqueue">>}],
+ Binding = http_get("/bindings/%2f/e/myexchange/q/myqueue/routing"),
+ assert_list([Binding],
+ http_get("/bindings/%2f/e/myexchange/q/myqueue")),
+ assert_list([Binding, DBinding],
+ http_get("/queues/%2f/myqueue/bindings")),
+ assert_list([Binding],
+ http_get("/exchanges/%2f/myexchange/bindings/source")),
+ http_delete("/bindings/%2f/e/myexchange/q/myqueue/routing", ?NO_CONTENT),
+ %% Deleting twice must 404 the second time.
+ http_delete("/bindings/%2f/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
+ http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ http_get("/bindings/badvhost", ?NOT_FOUND),
+ http_get("/bindings/badvhost/myqueue/myexchange/routing", ?NOT_FOUND),
+ http_get("/bindings/%2f/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
+ ok.
+
+%% Verify POSTing bindings: 404 for missing endpoints, the Location
+%% header returned on creation, and that the server-generated properties
+%% key (routing key + argument hash) round-trips through GET/DELETE.
+bindings_post_test() ->
+ XArgs = [{type, <<"direct">>}],
+ QArgs = [],
+ BArgs = [{routing_key, <<"routing">>}, {arguments, [{foo, <<"bar">>}]}],
+ http_put("/exchanges/%2f/myexchange", XArgs, ?NO_CONTENT),
+ http_put("/queues/%2f/myqueue", QArgs, ?NO_CONTENT),
+ http_post("/bindings/%2f/e/myexchange/q/badqueue", BArgs, ?NOT_FOUND),
+ http_post("/bindings/%2f/e/badexchange/q/myqueue", BArgs, ?NOT_FOUND),
+ %% Empty body: binding with empty routing key, properties key "~".
+ Headers1 = http_post("/bindings/%2f/e/myexchange/q/myqueue", [], ?CREATED),
+ "../../../../%2F/e/myexchange/q/myqueue/~" = pget("location", Headers1),
+ Headers2 = http_post("/bindings/%2f/e/myexchange/q/myqueue", BArgs, ?CREATED),
+ %% Expected properties key: "routing~<hash of the arguments table>".
+ PropertiesKey = "routing~V4mGFgnPNrdtRmluZIxTDA",
+ PropertiesKeyBin = list_to_binary(PropertiesKey),
+ "../../../../%2F/e/myexchange/q/myqueue/" ++ PropertiesKey =
+ pget("location", Headers2),
+ URI = "/bindings/%2F/e/myexchange/q/myqueue/" ++ PropertiesKey,
+ [{source,<<"myexchange">>},
+ {vhost,<<"/">>},
+ {destination,<<"myqueue">>},
+ {destination_type,<<"queue">>},
+ {routing_key,<<"routing">>},
+ {arguments,[{foo,<<"bar">>}]},
+ {properties_key,PropertiesKeyBin}] = http_get(URI, ?OK),
+ http_get(URI ++ "x", ?NOT_FOUND),
+ http_delete(URI, ?NO_CONTENT),
+ http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ ok.
+
+%% Same CRUD flow as bindings_test/0 but for exchange-to-exchange
+%% bindings, using the built-in amq.* exchanges as endpoints.
+bindings_e2e_test() ->
+ BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
+ http_post("/bindings/%2f/e/amq.direct/e/badexchange", BArgs, ?NOT_FOUND),
+ http_post("/bindings/%2f/e/badexchange/e/amq.fanout", BArgs, ?NOT_FOUND),
+ Headers = http_post("/bindings/%2f/e/amq.direct/e/amq.fanout", BArgs, ?CREATED),
+ "../../../../%2F/e/amq.direct/e/amq.fanout/routing" =
+ pget("location", Headers),
+ [{source,<<"amq.direct">>},
+ {vhost,<<"/">>},
+ {destination,<<"amq.fanout">>},
+ {destination_type,<<"exchange">>},
+ {routing_key,<<"routing">>},
+ {arguments,[]},
+ {properties_key,<<"routing">>}] =
+ http_get("/bindings/%2f/e/amq.direct/e/amq.fanout/routing", ?OK),
+ http_delete("/bindings/%2f/e/amq.direct/e/amq.fanout/routing", ?NO_CONTENT),
+ http_post("/bindings/%2f/e/amq.direct/e/amq.headers", BArgs, ?CREATED),
+ Binding =
+ [{source,<<"amq.direct">>},
+ {vhost,<<"/">>},
+ {destination,<<"amq.headers">>},
+ {destination_type,<<"exchange">>},
+ {routing_key,<<"routing">>},
+ {arguments,[]},
+ {properties_key,<<"routing">>}],
+ Binding = http_get("/bindings/%2f/e/amq.direct/e/amq.headers/routing"),
+ assert_list([Binding],
+ http_get("/bindings/%2f/e/amq.direct/e/amq.headers")),
+ assert_list([Binding],
+ http_get("/exchanges/%2f/amq.direct/bindings/source")),
+ assert_list([Binding],
+ http_get("/exchanges/%2f/amq.headers/bindings/destination")),
+ http_delete("/bindings/%2f/e/amq.direct/e/amq.headers/routing", ?NO_CONTENT),
+ %% Deliberate misspelling ("rooting") must 404.
+ http_get("/bindings/%2f/e/amq.direct/e/amq.headers/rooting", ?NOT_FOUND),
+ ok.
+
+%% Administrator-only endpoints must 401 for a management-tagged user
+%% but succeed for administrators (the default guest user included).
+permissions_administrator_test() ->
+ http_put("/users/isadmin", [{password, <<"isadmin">>},
+ {tags, <<"administrator">>}], ?NO_CONTENT),
+ %% notadmin is first created as an administrator, then demoted via a
+ %% second PUT — this also exercises updating an existing user's tags.
+ http_put("/users/notadmin", [{password, <<"notadmin">>},
+ {tags, <<"administrator">>}], ?NO_CONTENT),
+ http_put("/users/notadmin", [{password, <<"notadmin">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ Test =
+ fun(Path) ->
+ http_get(Path, "notadmin", "notadmin", ?NOT_AUTHORISED),
+ http_get(Path, "isadmin", "isadmin", ?OK),
+ http_get(Path, "guest", "guest", ?OK)
+ end,
+ %% All users can get a list of vhosts. It may be filtered.
+ %%Test("/vhosts"),
+ Test("/vhosts/%2f"),
+ Test("/vhosts/%2f/permissions"),
+ Test("/users"),
+ Test("/users/guest"),
+ Test("/users/guest/permissions"),
+ Test("/permissions"),
+ Test("/permissions/%2f/guest"),
+ http_delete("/users/notadmin", ?NO_CONTENT),
+ http_delete("/users/isadmin", ?NO_CONTENT),
+ ok.
+
+%% Listing endpoints must be filtered per-vhost: a user with permissions
+%% only on myvhost1 must never see myvhost2 resources, and direct access
+%% to myvhost2 paths must be refused.
+permissions_vhost_test() ->
+ QArgs = [],
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put("/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/vhosts/myvhost1", none, ?NO_CONTENT),
+ http_put("/vhosts/myvhost2", none, ?NO_CONTENT),
+ http_put("/permissions/myvhost1/myuser", PermArgs, ?NO_CONTENT),
+ http_put("/permissions/myvhost1/guest", PermArgs, ?NO_CONTENT),
+ http_put("/permissions/myvhost2/guest", PermArgs, ?NO_CONTENT),
+ assert_list([[{name, <<"/">>}],
+ [{name, <<"myvhost1">>}],
+ [{name, <<"myvhost2">>}]], http_get("/vhosts", ?OK)),
+ %% myuser only sees the one vhost it has permissions on.
+ assert_list([[{name, <<"myvhost1">>}]],
+ http_get("/vhosts", "myuser", "myuser", ?OK)),
+ http_put("/queues/myvhost1/myqueue", QArgs, ?NO_CONTENT),
+ http_put("/queues/myvhost2/myqueue", QArgs, ?NO_CONTENT),
+ %% Test1: list endpoints must not leak items from myvhost2.
+ Test1 =
+ fun(Path) ->
+ Results = http_get(Path, "myuser", "myuser", ?OK),
+ [case pget(vhost, Result) of
+ <<"myvhost2">> ->
+ throw({got_result_from_vhost2_in, Path, Result});
+ _ ->
+ ok
+ end || Result <- Results]
+ end,
+ %% Test2: per-vhost paths succeed for myvhost1, 401 for myvhost2.
+ Test2 =
+ fun(Path1, Path2) ->
+ http_get(Path1 ++ "/myvhost1/" ++ Path2, "myuser", "myuser",
+ ?OK),
+ http_get(Path1 ++ "/myvhost2/" ++ Path2, "myuser", "myuser",
+ ?NOT_AUTHORISED)
+ end,
+ Test1("/exchanges"),
+ Test2("/exchanges", ""),
+ Test2("/exchanges", "amq.direct"),
+ Test1("/queues"),
+ Test2("/queues", ""),
+ Test2("/queues", "myqueue"),
+ Test1("/bindings"),
+ Test2("/bindings", ""),
+ Test2("/queues", "myqueue/bindings"),
+ Test2("/exchanges", "amq.default/bindings/source"),
+ Test2("/exchanges", "amq.default/bindings/destination"),
+ Test2("/bindings", "e/amq.default/q/myqueue"),
+ Test2("/bindings", "e/amq.default/q/myqueue/myqueue"),
+ http_delete("/vhosts/myvhost1", ?NO_CONTENT),
+ http_delete("/vhosts/myvhost2", ?NO_CONTENT),
+ http_delete("/users/myuser", ?NO_CONTENT),
+ ok.
+
+%% AMQP-level permissions apply to HTTP resource creation too: a user
+%% whose configure pattern is "foo.*" cannot declare "bar-queue", and an
+%% unknown user is refused outright.
+permissions_amqp_test() ->
+ %% Just test that it works at all, not that it works in all possible cases.
+ QArgs = [],
+ PermArgs = [{configure, <<"foo.*">>}, {write, <<"foo.*">>},
+ {read, <<"foo.*">>}],
+ http_put("/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/permissions/%2f/myuser", PermArgs, ?NO_CONTENT),
+ http_put("/queues/%2f/bar-queue", QArgs, "myuser", "myuser",
+ ?NOT_AUTHORISED),
+ http_put("/queues/%2f/bar-queue", QArgs, "nonexistent", "nonexistent",
+ ?NOT_AUTHORISED),
+ http_delete("/users/myuser", ?NO_CONTENT),
+ ok.
+
+%% Open an AMQP connection as the given user and return it together
+%% with the URL-encoded management paths for that connection, its first
+%% channel, and its channel list. The paths embed the connection's local
+%% port, so they must be derived after the connection is up.
+get_conn(Username, Password) ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{
+ username = list_to_binary(Username),
+ password = list_to_binary(Password)}),
+ LocalPort = local_port(Conn),
+ %% %3A = ":", %20 = " " — the management names look like
+ %% "127.0.0.1:PORT -> 127.0.0.1:5672".
+ ConnPath = rabbit_misc:format(
+ "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672",
+ [LocalPort]),
+ ChPath = rabbit_misc:format(
+ "/channels/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672%20(1)",
+ [LocalPort]),
+ ConnChPath = rabbit_misc:format(
+ "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672/channels",
+ [LocalPort]),
+ {Conn, ConnPath, ChPath, ConnChPath}.
+
+%% Visibility and control of connections/channels by user tag:
+%% "management" users see and may close only their own connections,
+%% "monitoring" users see everything but may close nothing of others',
+%% administrators (guest) may see and close everything.
+permissions_connection_channel_test() ->
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put("/users/user", [{password, <<"user">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/permissions/%2f/user", PermArgs, ?NO_CONTENT),
+ http_put("/users/monitor", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], ?NO_CONTENT),
+ http_put("/permissions/%2f/monitor", PermArgs, ?NO_CONTENT),
+ {Conn1, UserConn, UserCh, UserConnCh} = get_conn("user", "user"),
+ {Conn2, MonConn, MonCh, MonConnCh} = get_conn("monitor", "monitor"),
+ {Conn3, AdmConn, AdmCh, AdmConnCh} = get_conn("guest", "guest"),
+ {ok, _Ch1} = amqp_connection:open_channel(Conn1),
+ {ok, _Ch2} = amqp_connection:open_channel(Conn2),
+ {ok, _Ch3} = amqp_connection:open_channel(Conn3),
+
+ %% Plain user sees only its own connection/channel; monitor and
+ %% guest see all three.
+ AssertLength = fun (Path, User, Len) ->
+ ?assertEqual(Len,
+ length(http_get(Path, User, User, ?OK)))
+ end,
+ [begin
+ AssertLength(P, "user", 1),
+ AssertLength(P, "monitor", 3),
+ AssertLength(P, "guest", 3)
+ end || P <- ["/connections", "/channels"]],
+
+ %% UserStatus is the expected status for the plain user; monitor and
+ %% guest can always read.
+ AssertRead = fun(Path, UserStatus) ->
+ http_get(Path, "user", "user", UserStatus),
+ http_get(Path, "monitor", "monitor", ?OK),
+ http_get(Path, ?OK)
+ end,
+ AssertRead(UserConn, ?OK),
+ AssertRead(MonConn, ?NOT_AUTHORISED),
+ AssertRead(AdmConn, ?NOT_AUTHORISED),
+ AssertRead(UserCh, ?OK),
+ AssertRead(MonCh, ?NOT_AUTHORISED),
+ AssertRead(AdmCh, ?NOT_AUTHORISED),
+ AssertRead(UserConnCh, ?OK),
+ AssertRead(MonConnCh, ?NOT_AUTHORISED),
+ AssertRead(AdmConnCh, ?NOT_AUTHORISED),
+
+ AssertClose = fun(Path, User, Status) ->
+ http_delete(Path, User, User, Status)
+ end,
+ AssertClose(UserConn, "monitor", ?NOT_AUTHORISED),
+ AssertClose(MonConn, "user", ?NOT_AUTHORISED),
+ AssertClose(AdmConn, "guest", ?NO_CONTENT),
+ AssertClose(MonConn, "guest", ?NO_CONTENT),
+ AssertClose(UserConn, "user", ?NO_CONTENT),
+
+ http_delete("/users/user", ?NO_CONTENT),
+ http_delete("/users/monitor", ?NO_CONTENT),
+ http_get("/connections/foo", ?NOT_FOUND),
+ http_get("/channels/foo", ?NOT_FOUND),
+ ok.
+
+%% defs/4: run the definitions round-trip (defs/5) with a plain DELETE
+%% as the cleanup step.
+defs(Key, URI, CreateMethod, Args) ->
+ defs(Key, URI, CreateMethod, Args,
+ fun(URI2) -> http_delete(URI2, ?NO_CONTENT) end).
+
+%% defs_v/4: run the same round-trip against both the default vhost and
+%% a freshly created "test" vhost. URI contains a "<vhost>" placeholder;
+%% Args may contain a bare 'vhost' value that is substituted likewise.
+defs_v(Key, URI, CreateMethod, Args) ->
+ Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
+ Rep2 = fun (L, V2) -> lists:keymap(fun (vhost) -> V2;
+ (V) -> V end, 2, L) end,
+ %% Test against default vhost
+ defs(Key, Rep1(URI, "%2f"), CreateMethod, Rep2(Args, <<"/">>)),
+
+ %% Test against new vhost
+ http_put("/vhosts/test", none, ?NO_CONTENT),
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put("/permissions/test/guest", PermArgs, ?NO_CONTENT),
+ defs(Key, Rep1(URI, "test"), CreateMethod, Rep2(Args, <<"test">>),
+ fun(URI2) -> http_delete(URI2, ?NO_CONTENT),
+ http_delete("/vhosts/test", ?NO_CONTENT) end).
+
+%% Round-trip one item through /definitions: create it (PUT at URI, or
+%% POST resolving the returned Location header), check it appears under
+%% Key in the exported definitions, delete it, re-import the export and
+%% verify the item is recreated intact, then delete it again.
+defs(Key, URI, CreateMethod, Args, DeleteFun) ->
+ %% Create the item
+ URI2 = case CreateMethod of
+ put -> http_put(URI, Args, ?NO_CONTENT),
+ URI;
+ post -> Headers = http_post(URI, Args, ?CREATED),
+ rabbit_web_dispatch_util:unrelativise(
+ URI, pget("location", Headers))
+ end,
+ %% Make sure it ends up in definitions
+ Definitions = http_get("/definitions", ?OK),
+ true = lists:any(fun(I) -> test_item(Args, I) end, pget(Key, Definitions)),
+
+ %% Delete it
+ DeleteFun(URI2),
+
+ %% Post the definitions back, it should get recreated in correct form
+ http_post("/definitions", Definitions, ?CREATED),
+ assert_item(Args, http_get(URI2, ?OK)),
+
+ %% And delete it again
+ DeleteFun(URI2),
+
+ ok.
+
+%% Export/import round-trips for every definition type (queues,
+%% exchanges, bindings, policies, parameters, users, vhosts,
+%% permissions), plus a rejection check for an invalid import.
+definitions_test() ->
+ %% Register the "test" parameter component and policy validator so
+ %% parameter/policy PUTs are accepted.
+ rabbit_runtime_parameters_test:register(),
+ rabbit_runtime_parameters_test:register_policy_validator(),
+
+ defs_v(queues, "/queues/<vhost>/my-queue", put,
+ [{name, <<"my-queue">>},
+ {durable, true}]),
+ defs_v(exchanges, "/exchanges/<vhost>/my-exchange", put,
+ [{name, <<"my-exchange">>},
+ {type, <<"direct">>}]),
+ defs_v(bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
+ [{routing_key, <<"routing">>}, {arguments, []}]),
+ defs_v(policies, "/policies/<vhost>/my-policy", put,
+ [{vhost, vhost},
+ {name, <<"my-policy">>},
+ {pattern, <<".*">>},
+ {definition, [{testpos, [1, 2, 3]}]},
+ {priority, 1}]),
+ defs_v(parameters, "/parameters/test/<vhost>/good", put,
+ [{vhost, vhost},
+ {component, <<"test">>},
+ {name, <<"good">>},
+ {value, <<"ignore">>}]),
+ defs(users, "/users/myuser", put,
+ [{name, <<"myuser">>},
+ {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+ {tags, <<"management">>}]),
+ defs(vhosts, "/vhosts/myvhost", put,
+ [{name, <<"myvhost">>}]),
+ defs(permissions, "/permissions/%2f/guest", put,
+ [{user, <<"guest">>},
+ {vhost, <<"/">>},
+ {configure, <<"c">>},
+ {write, <<"w">>},
+ {read, <<"r">>}]),
+
+ %% We just messed with guest's permissions
+ http_put("/permissions/%2f/guest",
+ [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}], ?NO_CONTENT),
+
+ %% An import containing an exchange of an unknown type must be
+ %% rejected with 400.
+ BrokenConfig =
+ [{users, []},
+ {vhosts, []},
+ {permissions, []},
+ {queues, []},
+ {exchanges, [[{name, <<"amq.direct">>},
+ {vhost, <<"/">>},
+ {type, <<"definitely not direct">>},
+ {durable, true},
+ {auto_delete, false},
+ {arguments, []}
+ ]]},
+ {bindings, []}],
+ http_post("/definitions", BrokenConfig, ?BAD_REQUEST),
+
+ rabbit_runtime_parameters_test:unregister_policy_validator(),
+ rabbit_runtime_parameters_test:unregister(),
+ ok.
+
+%% Exclusive queues are transient and must NOT appear in the exported
+%% definitions (nor drag exchanges/bindings in with them).
+definitions_remove_things_test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:call(Ch, #'queue.declare'{ queue = <<"my-exclusive">>,
+ exclusive = true }),
+ http_get("/queues/%2f/my-exclusive", ?OK),
+ Definitions = http_get("/definitions", ?OK),
+ [] = pget(queues, Definitions),
+ [] = pget(exchanges, Definitions),
+ [] = pget(bindings, Definitions),
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+ ok.
+
+%% A durable server-named (amq.gen-*) queue must survive an export /
+%% delete / import cycle under its generated name.
+definitions_server_named_queue_test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{ queue = QName } =
+ amqp_channel:call(Ch, #'queue.declare'{}),
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+ %% The generated name contains characters that need URL-encoding.
+ Path = "/queues/%2f/" ++ mochiweb_util:quote_plus(QName),
+ http_get(Path, ?OK),
+ Definitions = http_get("/definitions", ?OK),
+ http_delete(Path, ?NO_CONTENT),
+ http_get(Path, ?NOT_FOUND),
+ http_post("/definitions", Definitions, ?CREATED),
+ http_get(Path, ?OK),
+ http_delete(Path, ?NO_CONTENT),
+ ok.
+
+%% The aliveness check declares/uses a queue named "aliveness-test" in
+%% the given vhost; unknown vhosts 404. Clean up the queue afterwards.
+aliveness_test() ->
+ [{status, <<"ok">>}] = http_get("/aliveness-test/%2f", ?OK),
+ http_get("/aliveness-test/foo", ?NOT_FOUND),
+ http_delete("/queues/%2f/aliveness-test", ?NO_CONTENT),
+ ok.
+
+%% Exchange/queue/binding arguments must survive a definitions
+%% export/import round-trip with their values (and types) intact.
+arguments_test() ->
+ XArgs = [{type, <<"headers">>},
+ {arguments, [{'alternate-exchange', <<"amq.direct">>}]}],
+ QArgs = [{arguments, [{'x-expires', 1800000}]}],
+ BArgs = [{routing_key, <<"">>},
+ {arguments, [{'x-match', <<"all">>},
+ {foo, <<"bar">>}]}],
+ http_put("/exchanges/%2f/myexchange", XArgs, ?NO_CONTENT),
+ http_put("/queues/%2f/myqueue", QArgs, ?NO_CONTENT),
+ http_post("/bindings/%2f/e/myexchange/q/myqueue", BArgs, ?CREATED),
+ Definitions = http_get("/definitions", ?OK),
+ http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ http_post("/definitions", Definitions, ?CREATED),
+ [{'alternate-exchange', <<"amq.direct">>}] =
+ pget(arguments, http_get("/exchanges/%2f/myexchange", ?OK)),
+ [{'x-expires', 1800000}] =
+ pget(arguments, http_get("/queues/%2f/myqueue", ?OK)),
+ %% Binding arguments come back in unspecified order; the suffix is
+ %% the server-computed properties key for these arguments.
+ true = lists:sort([{'x-match', <<"all">>}, {foo, <<"bar">>}]) =:=
+ lists:sort(pget(arguments,
+ http_get("/bindings/%2f/e/myexchange/q/myqueue/" ++
+ "~nXOkVwqZzUOdS9_HcBWheg", ?OK))),
+ http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ ok.
+
+%% Array-valued (table) arguments must also round-trip through
+%% /definitions unchanged.
+arguments_table_test() ->
+ Args = [{'upstreams', [<<"amqp://localhost/%2f/upstream1">>,
+ <<"amqp://localhost/%2f/upstream2">>]}],
+ XArgs = [{type, <<"headers">>},
+ {arguments, Args}],
+ http_put("/exchanges/%2f/myexchange", XArgs, ?NO_CONTENT),
+ Definitions = http_get("/definitions", ?OK),
+ http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
+ http_post("/definitions", Definitions, ?CREATED),
+ Args = pget(arguments, http_get("/exchanges/%2f/myexchange", ?OK)),
+ http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
+ ok.
+
+%% DELETE /queues/VHost/Name/contents purges the queue; purging an
+%% unknown queue 404s and purging (or deleting) another connection's
+%% exclusive queue is a 400.
+queue_purge_test() ->
+ QArgs = [],
+ http_put("/queues/%2f/myqueue", QArgs, ?NO_CONTENT),
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Publish = fun() ->
+ amqp_channel:call(
+ Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"myqueue">>},
+ #amqp_msg{payload = <<"message">>})
+ end,
+ Publish(),
+ Publish(),
+ amqp_channel:call(
+ Ch, #'queue.declare'{queue = <<"exclusive">>, exclusive = true}),
+ %% One message is un-acked at purge time; the purge still leaves the
+ %% queue empty for the subsequent basic.get.
+ {#'basic.get_ok'{}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
+ http_delete("/queues/%2f/myqueue/contents", ?NO_CONTENT),
+ http_delete("/queues/%2f/badqueue/contents", ?NOT_FOUND),
+ http_delete("/queues/%2f/exclusive/contents", ?BAD_REQUEST),
+ http_delete("/queues/%2f/exclusive", ?BAD_REQUEST),
+ #'basic.get_empty'{} =
+ amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ ok.
+
+%% POST /queues/VHost/Name/actions accepts the known actions (sync,
+%% cancel_sync) and rejects anything else with 400.
+queue_actions_test() ->
+ http_put("/queues/%2f/q", [], ?NO_CONTENT),
+ http_post("/queues/%2f/q/actions", [{action, sync}], ?NO_CONTENT),
+ http_post("/queues/%2f/q/actions", [{action, cancel_sync}], ?NO_CONTENT),
+ http_post("/queues/%2f/q/actions", [{action, change_colour}], ?BAD_REQUEST),
+ http_delete("/queues/%2f/q", ?NO_CONTENT),
+ ok.
+
+%% Smoke test: listing queues while an exclusive consumer exists must
+%% not crash the queue-listing code.
+exclusive_consumer_test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{ queue = QName } =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName,
+ exclusive = true}, self()),
+ timer:sleep(1000), %% Sadly we need to sleep to let the stats update
+ http_get("/queues/%2f/"), %% Just check we don't blow up
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+ ok.
+
+%% The sort / sort_reverse query parameters order queue listings; the
+%% default order groups by vhost ("/" before "vh1"), sort=name orders
+%% alphabetically, and sort_reverse=true reverses either ordering.
+sorting_test() ->
+ QArgs = [],
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put("/vhosts/vh1", none, ?NO_CONTENT),
+ http_put("/permissions/vh1/guest", PermArgs, ?NO_CONTENT),
+ http_put("/queues/%2f/test0", QArgs, ?NO_CONTENT),
+ http_put("/queues/vh1/test1", QArgs, ?NO_CONTENT),
+ http_put("/queues/%2f/test2", QArgs, ?NO_CONTENT),
+ http_put("/queues/vh1/test3", QArgs, ?NO_CONTENT),
+ assert_list([[{name, <<"test0">>}],
+ [{name, <<"test2">>}],
+ [{name, <<"test1">>}],
+ [{name, <<"test3">>}]], http_get("/queues", ?OK)),
+ assert_list([[{name, <<"test0">>}],
+ [{name, <<"test1">>}],
+ [{name, <<"test2">>}],
+ [{name, <<"test3">>}]], http_get("/queues?sort=name", ?OK)),
+ assert_list([[{name, <<"test0">>}],
+ [{name, <<"test2">>}],
+ [{name, <<"test1">>}],
+ [{name, <<"test3">>}]], http_get("/queues?sort=vhost", ?OK)),
+ assert_list([[{name, <<"test3">>}],
+ [{name, <<"test1">>}],
+ [{name, <<"test2">>}],
+ [{name, <<"test0">>}]], http_get("/queues?sort_reverse=true", ?OK)),
+ assert_list([[{name, <<"test3">>}],
+ [{name, <<"test2">>}],
+ [{name, <<"test1">>}],
+ [{name, <<"test0">>}]], http_get("/queues?sort=name&sort_reverse=true", ?OK)),
+ assert_list([[{name, <<"test3">>}],
+ [{name, <<"test1">>}],
+ [{name, <<"test2">>}],
+ [{name, <<"test0">>}]], http_get("/queues?sort=vhost&sort_reverse=true", ?OK)),
+ %% Rather poor but at least test it doesn't blow up with dots
+ http_get("/queues?sort=owner_pid_details.name", ?OK),
+ http_delete("/queues/%2f/test0", ?NO_CONTENT),
+ http_delete("/queues/vh1/test1", ?NO_CONTENT),
+ http_delete("/queues/%2f/test2", ?NO_CONTENT),
+ http_delete("/queues/vh1/test3", ?NO_CONTENT),
+ http_delete("/vhosts/vh1", ?NO_CONTENT),
+ ok.
+
+%% The columns query parameter restricts both list and single-item
+%% responses to the named fields, with dotted paths selecting nested
+%% keys (arguments.foo).
+columns_test() ->
+ http_put("/queues/%2f/test", [{arguments, [{<<"foo">>, <<"bar">>}]}],
+ ?NO_CONTENT),
+ [[{name, <<"test">>}, {arguments, [{foo, <<"bar">>}]}]] =
+ http_get("/queues?columns=arguments.foo,name", ?OK),
+ [{name, <<"test">>}, {arguments, [{foo, <<"bar">>}]}] =
+ http_get("/queues/%2f/test?columns=arguments.foo,name", ?OK),
+ http_delete("/queues/%2f/test", ?NO_CONTENT),
+ ok.
+
+%% POST /queues/VHost/Name/get: truncation of payloads, preservation of
+%% nested header tables, requeue semantics, and draining to empty.
+get_test() ->
+ %% Real world example...
+ Headers = [{<<"x-forwarding">>, array,
+ [{table,
+ [{<<"uri">>, longstr,
+ <<"amqp://localhost/%2f/upstream">>}]}]}],
+ http_put("/queues/%2f/myqueue", [], ?NO_CONTENT),
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Publish = fun (Payload) ->
+ amqp_channel:cast(
+ Ch, #'basic.publish'{exchange = <<>>,
+ routing_key = <<"myqueue">>},
+ #amqp_msg{props = #'P_basic'{headers = Headers},
+ payload = Payload})
+ end,
+ Publish(<<"1aaa">>),
+ Publish(<<"2aaa">>),
+ Publish(<<"3aaa">>),
+ amqp_connection:close(Conn),
+ %% truncate=1 cuts the 4-byte payload down to its first byte.
+ [Msg] = http_post("/queues/%2f/myqueue/get", [{requeue, false},
+ {count, 1},
+ {encoding, auto},
+ {truncate, 1}], ?OK),
+ false = pget(redelivered, Msg),
+ <<>> = pget(exchange, Msg),
+ <<"myqueue">> = pget(routing_key, Msg),
+ <<"1">> = pget(payload, Msg),
+ [{'x-forwarding',
+ [[{uri,<<"amqp://localhost/%2f/upstream">>}]]}] =
+ pget(headers, pget(properties, Msg)),
+
+ %% requeue=true returns the remaining messages but leaves them on
+ %% the queue, so the next non-requeueing get still sees 2 messages.
+ [M2, M3] = http_post("/queues/%2f/myqueue/get", [{requeue, true},
+ {count, 5},
+ {encoding, auto}], ?OK),
+ <<"2aaa">> = pget(payload, M2),
+ <<"3aaa">> = pget(payload, M3),
+ 2 = length(http_post("/queues/%2f/myqueue/get", [{requeue, false},
+ {count, 5},
+ {encoding, auto}], ?OK)),
+ [] = http_post("/queues/%2f/myqueue/get", [{requeue, false},
+ {count, 5},
+ {encoding, auto}], ?OK),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ ok.
+
+%% A management-tagged user without read permission on the queue must
+%% get 401 from the get endpoint.
+get_fail_test() ->
+ http_put("/users/myuser", [{password, <<"password">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_put("/queues/%2f/myqueue", [], ?NO_CONTENT),
+ http_post("/queues/%2f/myqueue/get",
+ [{requeue, false},
+ {count, 1},
+ {encoding, auto}], "myuser", "password", ?NOT_AUTHORISED),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ http_delete("/users/myuser", ?NO_CONTENT),
+ ok.
+
+%% Publish via the HTTP API, read the message back via get, and verify
+%% the get response can itself be re-published unchanged (round-trip).
+publish_test() ->
+ Headers = [{'x-forwarding', [[{uri,<<"amqp://localhost/%2f/upstream">>}]]}],
+ Msg = msg(<<"myqueue">>, Headers, <<"Hello world">>),
+ http_put("/queues/%2f/myqueue", [], ?NO_CONTENT),
+ ?assertEqual([{routed, true}],
+ http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK)),
+ [Msg2] = http_post("/queues/%2f/myqueue/get", [{requeue, false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg2),
+ %% The get response is a superset of the publish body; posting it
+ %% back must work as-is.
+ http_post("/exchanges/%2f/amq.default/publish", Msg2, ?OK),
+ [Msg3] = http_post("/queues/%2f/myqueue/get", [{requeue, false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg3),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ ok.
+
+%% Publishing must be rejected when unauthorised, when impersonating
+%% another user_id, and when properties or payload are malformed.
+publish_fail_test() ->
+ Msg = msg(<<"myqueue">>, [], <<"Hello world">>),
+ http_put("/queues/%2f/myqueue", [], ?NO_CONTENT),
+ http_put("/users/myuser", [{password, <<"password">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ http_post("/exchanges/%2f/amq.default/publish", Msg, "myuser", "password",
+ ?NOT_AUTHORISED),
+ %% user_id must match the authenticated user; "foo" is rejected.
+ Msg2 = [{exchange, <<"">>},
+ {routing_key, <<"myqueue">>},
+ {properties, [{user_id, <<"foo">>}]},
+ {payload, <<"Hello world">>},
+ {payload_encoding, <<"string">>}],
+ http_post("/exchanges/%2f/amq.default/publish", Msg2, ?BAD_REQUEST),
+ %% Payload must be a JSON string, not an array.
+ Msg3 = [{exchange, <<"">>},
+ {routing_key, <<"myqueue">>},
+ {properties, []},
+ {payload, [<<"not a string">>]},
+ {payload_encoding, <<"string">>}],
+ http_post("/exchanges/%2f/amq.default/publish", Msg3, ?BAD_REQUEST),
+ %% Each property with a wrongly-typed value is rejected individually.
+ MsgTemplate = [{exchange, <<"">>},
+ {routing_key, <<"myqueue">>},
+ {payload, <<"Hello world">>},
+ {payload_encoding, <<"string">>}],
+ [http_post("/exchanges/%2f/amq.default/publish",
+ [{properties, [BadProp]} | MsgTemplate], ?BAD_REQUEST)
+ || BadProp <- [{priority, <<"really high">>},
+ {timestamp, <<"recently">>},
+ {expiration, 1234}]],
+ http_delete("/users/myuser", ?NO_CONTENT),
+ ok.
+
+%% payload_encoding=base64 decodes the payload before publishing;
+%% invalid base64 data or an unknown encoding is a 400.
+publish_base64_test() ->
+ Msg = msg(<<"myqueue">>, [], <<"YWJjZA==">>, <<"base64">>),
+ BadMsg1 = msg(<<"myqueue">>, [], <<"flibble">>, <<"base64">>),
+ BadMsg2 = msg(<<"myqueue">>, [], <<"YWJjZA==">>, <<"base99">>),
+ http_put("/queues/%2f/myqueue", [], ?NO_CONTENT),
+ http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK),
+ http_post("/exchanges/%2f/amq.default/publish", BadMsg1, ?BAD_REQUEST),
+ http_post("/exchanges/%2f/amq.default/publish", BadMsg2, ?BAD_REQUEST),
+ [Msg2] = http_post("/queues/%2f/myqueue/get", [{requeue, false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ %% "YWJjZA==" is base64 for "abcd".
+ ?assertEqual(<<"abcd">>, pget(payload, Msg2)),
+ http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
+ ok.
+
+%% Publishing to a routing key with no matching queue reports
+%% routed=false rather than an error.
+publish_unrouted_test() ->
+ Msg = msg(<<"hmmm">>, [], <<"Hello world">>),
+ ?assertEqual([{routed, false}],
+ http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK)).
+
+%% Runtime-parameter CRUD: the registered "test" component accepts the
+%% value <<"good">>/<<"ignore">> and rejects others; unknown vhosts and
+%% components 404; listings are scoped by component and vhost.
+parameters_test() ->
+ rabbit_runtime_parameters_test:register(),
+
+ http_put("/parameters/test/%2f/good", [{value, <<"ignore">>}], ?NO_CONTENT),
+ http_put("/parameters/test/%2f/maybe", [{value, <<"good">>}], ?NO_CONTENT),
+ http_put("/parameters/test/%2f/maybe", [{value, <<"bad">>}], ?BAD_REQUEST),
+ http_put("/parameters/test/%2f/bad", [{value, <<"good">>}], ?BAD_REQUEST),
+ http_put("/parameters/test/um/good", [{value, <<"ignore">>}], ?NOT_FOUND),
+
+ Good = [{vhost, <<"/">>},
+ {component, <<"test">>},
+ {name, <<"good">>},
+ {value, <<"ignore">>}],
+ Maybe = [{vhost, <<"/">>},
+ {component, <<"test">>},
+ {name, <<"maybe">>},
+ {value, <<"good">>}],
+ List = [Good, Maybe],
+
+ assert_list(List, http_get("/parameters")),
+ assert_list(List, http_get("/parameters/test")),
+ assert_list(List, http_get("/parameters/test/%2f")),
+ assert_list([], http_get("/parameters/oops")),
+ http_get("/parameters/test/oops", ?NOT_FOUND),
+
+ assert_item(Good, http_get("/parameters/test/%2f/good", ?OK)),
+ assert_item(Maybe, http_get("/parameters/test/%2f/maybe", ?OK)),
+
+ http_delete("/parameters/test/%2f/good", ?NO_CONTENT),
+ http_delete("/parameters/test/%2f/maybe", ?NO_CONTENT),
+ %% "bad" was never created (its PUT was rejected above).
+ http_delete("/parameters/test/%2f/bad", ?NOT_FOUND),
+
+ 0 = length(http_get("/parameters")),
+ 0 = length(http_get("/parameters/test")),
+ 0 = length(http_get("/parameters/test/%2f")),
+ rabbit_runtime_parameters_test:unregister(),
+ ok.
+
+%% Policy CRUD: create two policies validated by the registered test
+%% validator, read them back individually and via listings, then delete
+%% and confirm the listings are empty.
+policy_test() ->
+ rabbit_runtime_parameters_test:register_policy_validator(),
+ PolicyPos = [{vhost, <<"/">>},
+ {name, <<"policy_pos">>},
+ {pattern, <<".*">>},
+ {definition, [{testpos,[1,2,3]}]},
+ {priority, 10}],
+ PolicyEven = [{vhost, <<"/">>},
+ {name, <<"policy_even">>},
+ {pattern, <<".*">>},
+ {definition, [{testeven,[1,2,3,4]}]},
+ {priority, 10}],
+ http_put(
+ "/policies/%2f/policy_pos",
+ lists:keydelete(key, 1, PolicyPos),
+ ?NO_CONTENT),
+ http_put(
+ "/policies/%2f/policy_even",
+ lists:keydelete(key, 1, PolicyEven),
+ ?NO_CONTENT),
+ assert_item(PolicyPos, http_get("/policies/%2f/policy_pos", ?OK)),
+ assert_item(PolicyEven, http_get("/policies/%2f/policy_even", ?OK)),
+ List = [PolicyPos, PolicyEven],
+ assert_list(List, http_get("/policies", ?OK)),
+ assert_list(List, http_get("/policies/%2f", ?OK)),
+
+ http_delete("/policies/%2f/policy_pos", ?NO_CONTENT),
+ http_delete("/policies/%2f/policy_even", ?NO_CONTENT),
+ 0 = length(http_get("/policies")),
+ 0 = length(http_get("/policies/%2f")),
+ rabbit_runtime_parameters_test:unregister_policy_validator(),
+ ok.
+
+%% Who may manage policies and parameters: administrators and
+%% policymakers may (within vhosts they can access), monitoring and
+%% management users may not, and nobody but the test's own guest admin
+%% may touch the "/" vhost here (none of the four users has permissions
+%% on "/").
+policy_permissions_test() ->
+ rabbit_runtime_parameters_test:register(),
+
+ http_put("/users/admin", [{password, <<"admin">>},
+ {tags, <<"administrator">>}], ?NO_CONTENT),
+ http_put("/users/mon", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], ?NO_CONTENT),
+ http_put("/users/policy", [{password, <<"policy">>},
+ {tags, <<"policymaker">>}], ?NO_CONTENT),
+ http_put("/users/mgmt", [{password, <<"mgmt">>},
+ {tags, <<"management">>}], ?NO_CONTENT),
+ Perms = [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}],
+ http_put("/vhosts/v", none, ?NO_CONTENT),
+ http_put("/permissions/v/admin", Perms, ?NO_CONTENT),
+ http_put("/permissions/v/mon", Perms, ?NO_CONTENT),
+ http_put("/permissions/v/policy", Perms, ?NO_CONTENT),
+ http_put("/permissions/v/mgmt", Perms, ?NO_CONTENT),
+
+ Policy = [{pattern, <<".*">>},
+ {definition, [{<<"ha-mode">>, <<"all">>}]}],
+ Param = [{value, <<"">>}],
+
+ %% Created as guest in "/" — used by AlwaysNeg below.
+ http_put("/policies/%2f/HA", Policy, ?NO_CONTENT),
+ http_put("/parameters/test/%2f/good", Param, ?NO_CONTENT),
+
+ %% Pos: full policy/parameter access within vhost "v".
+ Pos = fun (U) ->
+ http_put("/policies/v/HA", Policy, U, U, ?NO_CONTENT),
+ http_put(
+ "/parameters/test/v/good", Param, U, U, ?NO_CONTENT),
+ 1 = length(http_get("/policies", U, U, ?OK)),
+ 1 = length(http_get("/parameters/test", U, U, ?OK)),
+ 1 = length(http_get("/parameters", U, U, ?OK)),
+ 1 = length(http_get("/policies/v", U, U, ?OK)),
+ 1 = length(http_get("/parameters/test/v", U, U, ?OK)),
+ http_get("/policies/v/HA", U, U, ?OK),
+ http_get("/parameters/test/v/good", U, U, ?OK)
+ end,
+ %% Neg: no policy/parameter access at all, even in vhost "v".
+ Neg = fun (U) ->
+ http_put("/policies/v/HA", Policy, U, U, ?NOT_AUTHORISED),
+ http_put(
+ "/parameters/test/v/good", Param, U, U, ?NOT_AUTHORISED),
+ http_put(
+ "/parameters/test/v/admin", Param, U, U, ?NOT_AUTHORISED),
+ http_get("/policies", U, U, ?NOT_AUTHORISED),
+ http_get("/policies/v", U, U, ?NOT_AUTHORISED),
+ http_get("/parameters", U, U, ?NOT_AUTHORISED),
+ http_get("/parameters/test", U, U, ?NOT_AUTHORISED),
+ http_get("/parameters/test/v", U, U, ?NOT_AUTHORISED),
+ http_get("/policies/v/HA", U, U, ?NOT_AUTHORISED),
+ http_get("/parameters/test/v/good", U, U, ?NOT_AUTHORISED)
+ end,
+ %% AlwaysNeg: vhost "/" is off-limits to all four users (no perms).
+ AlwaysNeg =
+ fun (U) ->
+ http_put("/policies/%2f/HA", Policy, U, U, ?NOT_AUTHORISED),
+ http_put(
+ "/parameters/test/%2f/good", Param, U, U, ?NOT_AUTHORISED),
+ http_get("/policies/%2f/HA", U, U, ?NOT_AUTHORISED),
+ http_get("/parameters/test/%2f/good", U, U, ?NOT_AUTHORISED)
+ end,
+
+ [Neg(U) || U <- ["mon", "mgmt"]],
+ [Pos(U) || U <- ["admin", "policy"]],
+ [AlwaysNeg(U) || U <- ["mon", "mgmt", "admin", "policy"]],
+
+ %% This one is deliberately different between admin and policymaker.
+ http_put("/parameters/test/v/admin", Param, "admin", "admin", ?NO_CONTENT),
+ http_put("/parameters/test/v/admin", Param, "policy", "policy",
+ ?BAD_REQUEST),
+
+ http_delete("/vhosts/v", ?NO_CONTENT),
+ http_delete("/users/admin", ?NO_CONTENT),
+ http_delete("/users/mon", ?NO_CONTENT),
+ http_delete("/users/policy", ?NO_CONTENT),
+ http_delete("/users/mgmt", ?NO_CONTENT),
+ http_delete("/policies/%2f/HA", ?NO_CONTENT),
+
+ rabbit_runtime_parameters_test:unregister(),
+ ok.
+
+
+%% /extensions lists the registered UI extension modules; only the core
+%% dispatcher is expected here.
+extensions_test() ->
+ [[{javascript,<<"dispatcher.js">>}]] = http_get("/extensions", ?OK),
+ ok.
+
+%%---------------------------------------------------------------------------
+
+%% Build a publish request body for amq.default with the given routing
+%% key, AMQP headers and payload; msg/3 defaults to string encoding.
+msg(Key, Headers, Body) ->
+ msg(Key, Headers, Body, <<"string">>).
+
+msg(Key, Headers, Body, Enc) ->
+ [{exchange, <<"">>},
+ {routing_key, Key},
+ {properties, [{delivery_mode, 2},
+ {headers, Headers}]},
+ {payload, Body},
+ {payload_encoding, Enc}].
+
+%% Return the local TCP port of an AMQP connection's socket — used to
+%% construct the connection's management resource name.
+local_port(Conn) ->
+ [{sock, Sock}] = amqp_connection:info(Conn, [sock]),
+ {ok, Port} = inet:port(Sock),
+ Port.
+
+%%---------------------------------------------------------------------------
+%% HTTP helpers: each request variant asserts the response status code
+%% against the caller's expectation and returns either the decoded JSON
+%% body (on 200) or the raw response headers (any other status).
+%% Defaults: guest/guest credentials, 200 expected.
+http_get(Path) ->
+ http_get(Path, ?OK).
+
+http_get(Path, CodeExp) ->
+ http_get(Path, "guest", "guest", CodeExp).
+
+http_get(Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(get, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_put(Path, List, CodeExp) ->
+ http_put_raw(Path, format_for_upload(List), CodeExp).
+
+http_put(Path, List, User, Pass, CodeExp) ->
+ http_put_raw(Path, format_for_upload(List), User, Pass, CodeExp).
+
+http_post(Path, List, CodeExp) ->
+ http_post_raw(Path, format_for_upload(List), CodeExp).
+
+http_post(Path, List, User, Pass, CodeExp) ->
+ http_post_raw(Path, format_for_upload(List), User, Pass, CodeExp).
+
+%% 'none' means "send an empty body" (e.g. vhost PUTs take no payload);
+%% anything else is JSON-encoded as an object.
+format_for_upload(none) ->
+ <<"">>;
+format_for_upload(List) ->
+ iolist_to_binary(mochijson2:encode({struct, List})).
+
+http_put_raw(Path, Body, CodeExp) ->
+ http_upload_raw(put, Path, Body, "guest", "guest", CodeExp).
+
+http_put_raw(Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(put, Path, Body, User, Pass, CodeExp).
+
+http_post_raw(Path, Body, CodeExp) ->
+ http_upload_raw(post, Path, Body, "guest", "guest", CodeExp).
+
+http_post_raw(Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(post, Path, Body, User, Pass, CodeExp).
+
+http_upload_raw(Type, Path, Body, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Type, Path, [auth_header(User, Pass)], Body),
+ assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_delete(Path, CodeExp) ->
+ http_delete(Path, "guest", "guest", CodeExp).
+
+http_delete(Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(delete, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+%% Succeed iff the actual status code equals the expected one;
+%% otherwise throw a descriptive tuple identifying the request.
+assert_code(Code, Code, _Type, _Path, _Body) ->
+    ok;
+assert_code(CodeExp, CodeAct, Type, Path, Body) ->
+    throw({expected, CodeExp, got, CodeAct, type, Type,
+           path, Path, body, Body}).
+
+%% Perform an HTTP request against ?PREFIX ++ Path via httpc.
+%% The 4-arity form sends Body with content type application/json.
+req(Type, Path, Headers) ->
+    httpc:request(Type, {?PREFIX ++ Path, Headers}, ?HTTPC_OPTS, []).
+
+req(Type, Path, Headers, Body) ->
+    httpc:request(Type, {?PREFIX ++ Path, Headers, "application/json", Body},
+                  ?HTTPC_OPTS, []).
+
+%% On an expected ?OK, decode the JSON body (atomising keys via
+%% cleanup/1); for any other expected code, return the headers.
+decode(?OK, _Headers, ResBody) -> cleanup(mochijson2:decode(ResBody));
+decode(_, Headers, _ResBody) -> Headers.
+
+%% Strip mochijson2 {struct, ...} wrappers and turn binary keys into
+%% atoms, recursively. Unbounded atom creation is acceptable here:
+%% this is test-only code operating on trusted server responses.
+cleanup({struct, Props}) ->
+    cleanup(Props);
+cleanup({Key, Value}) when is_binary(Key) ->
+    {binary_to_atom(Key, latin1), cleanup(Value)};
+cleanup(Items) when is_list(Items) ->
+    lists:map(fun cleanup/1, Items);
+cleanup(Other) ->
+    Other.
+
+%% Build an HTTP basic-auth header for the given credentials.
+auth_header(Username, Password) ->
+    Credentials = Username ++ ":" ++ Password,
+    Encoded = base64:encode_to_string(Credentials),
+    {"Authorization", "Basic " ++ Encoded}.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_unit).
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% rabbit_mgmt_format:tokenise/1 splits a string on '~'; adjacent
+%% separators yield empty tokens.
+tokenise_test() ->
+    [] = rabbit_mgmt_format:tokenise(""),
+    ["foo"] = rabbit_mgmt_format:tokenise("foo"),
+    ["foo", "bar"] = rabbit_mgmt_format:tokenise("foo~bar"),
+    ["foo", "", "bar"] = rabbit_mgmt_format:tokenise("foo~~bar"),
+    ok.
+
+%% Packing percent-encodes '~' (%7E) and '/' (%2F) in the routing
+%% key; an empty key with no args packs to the bare separator "~".
+pack_binding_test() ->
+    assert_binding(<<"~">>,
+                   <<"">>, []),
+    assert_binding(<<"foo">>,
+                   <<"foo">>, []),
+    assert_binding(<<"foo%7Ebar%2Fbash">>,
+                   <<"foo~bar/bash">>, []),
+    assert_binding(<<"foo%7Ebar%7Ebash">>,
+                   <<"foo~bar~bash">>, []),
+    ok.
+
+%% JSON <-> AMQP table conversion round-trips, including a nested
+%% array-of-tables value (the shape used by x-forwarding headers).
+amqp_table_test() ->
+    assert_table({struct, []}, []),
+    assert_table({struct, [{<<"x-expires">>, 1000}]},
+                 [{<<"x-expires">>, long, 1000}]),
+    assert_table({struct,
+                  [{<<"x-forwarding">>,
+                    [{struct,
+                      [{<<"uri">>, <<"amqp://localhost/%2f/upstream">>}]}]}]},
+                 [{<<"x-forwarding">>, array,
+                   [{table, [{<<"uri">>, longstr,
+                              <<"amqp://localhost/%2f/upstream">>}]}]}]).
+
+%% Assert the JSON <-> AMQP table conversion in both directions.
+assert_table(JSON, AMQP) ->
+    ?assertEqual(JSON, rabbit_mgmt_format:amqp_table(AMQP)),
+    ?assertEqual(AMQP, rabbit_mgmt_format:to_amqp_table(JSON)).
+
+%%--------------------------------------------------------------------
+
+%% Assert that packing Routing/Args yields exactly Packed, throwing a
+%% descriptive tuple otherwise.
+assert_binding(Packed, Routing, Args) ->
+    Actual = rabbit_mgmt_format:pack_binding_props(Routing, Args),
+    case Actual of
+        Packed -> ok;
+        _      -> throw({pack, Routing, Args, expected, Packed, got, Actual})
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2012 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_util).
+
+-export([assert_list/2, assert_item/2, test_item/2]).
+
+%% Assert that Act and Exp have the same length and that each
+%% expected item is matched by exactly one actual element
+%% (per test_item/2); throws a descriptive tuple otherwise.
+assert_list(Exp, Act) ->
+    length(Exp) == length(Act) orelse throw({expected, Exp, actual, Act}),
+    [begin
+         Matches = [ActI || ActI <- Act, test_item(ExpI, ActI)],
+         case Matches of
+             [_] -> ok;
+             _   -> throw({found, length(Matches), ExpI, in, Act})
+         end
+     end || ExpI <- Exp].
+
+%% Throw the list of missing expectations unless every expected item
+%% is present in Act.
+assert_item(Exp, Act) ->
+    Missing = test_item0(Exp, Act),
+    Missing =:= [] orelse throw(Missing),
+    ok.
+
+%% True iff every item of Exp occurs in Act.
+test_item(Exp, Act) ->
+    test_item0(Exp, Act) =:= [].
+
+%% Return a {did_not_find, Item, in, Act} marker for each expected
+%% item absent from Act, in the order the expectations were given.
+test_item0(Exp, Act) ->
+    lists:foldr(fun (ExpI, Missing) ->
+                        case lists:member(ExpI, Act) of
+                            true  -> Missing;
+                            false -> [{did_not_find, ExpI, in, Act} | Missing]
+                        end
+                end, [], Exp).
--- /dev/null
+#!/usr/bin/env python
+
+import unittest
+import os
+import os.path
+import socket
+import subprocess
+import sys
+import shutil
+
+# TODO test: SSL, depth, config file, encodings(?), completion(???)
+
+class TestRabbitMQAdmin(unittest.TestCase):
+    """Integration tests for the rabbitmqadmin CLI.
+
+    Requires a running broker with the management plugin (HTTP on
+    15672, AMQP on 5672) and the default guest/guest user on
+    localhost.  Written for Python 2 (see the print statement in
+    __main__).  Tests create and delete real resources, so they
+    assume a pristine broker.
+    """
+    def test_no_args(self):
+        # No arguments at all is a usage error.
+        self.run_fail([])
+
+    def test_help(self):
+        self.run_success(['--help'])
+        self.run_success(['help', 'subcommands'])
+        self.run_success(['help', 'config'])
+        # Unknown help topic must fail.
+        self.run_fail(['help', 'astronomy'])
+
+    def test_host(self):
+        self.run_success(['show', 'overview'])
+        self.run_success(['--host', 'localhost', 'show', 'overview'])
+        self.run_fail(['--host', 'some-host-that-does-not-exist', 'show', 'overview'])
+
+    def test_port(self):
+        # Test port selection
+        self.run_success(['--port', '15672', 'show', 'overview'])
+        # Test port not open
+        self.run_fail(['--port', '15673', 'show', 'overview'])
+        # Test port open but not talking HTTP
+        self.run_fail(['--port', '5672', 'show', 'overview'])
+
+    def test_config(self):
+        # Point HOME at a temp dir containing .rabbitmqadmin.conf so
+        # the implicit config-file lookup is exercised.
+        original_home = os.getenv('HOME')
+        tmpdir = os.getenv("TMPDIR") or os.getenv("TEMP") or "/tmp"
+        shutil.copyfile(os.path.dirname(__file__) + os.sep + "default-config",
+                        tmpdir + os.sep + ".rabbitmqadmin.conf")
+        os.environ['HOME'] = tmpdir
+
+        self.run_fail(['--config', '/tmp/no-such-config-file', 'show', 'overview'])
+
+        cf = os.path.dirname(__file__) + os.sep + "test-config"
+        self.run_success(['--config', cf, '--node', 'host_normal', 'show', 'overview'])
+
+        # test 'default node in the config file' where "default" uses an invalid host
+        self.run_fail(['--config', cf, 'show', 'overview'])
+        self.run_success(["show", "overview"])
+        self.run_fail(['--node', 'non_default', "show", "overview"])
+        # NOTE(review): if HOME was unset, original_home is None and this
+        # assignment raises TypeError; restoration is also skipped when an
+        # assertion above fails -- consider try/finally.
+        os.environ['HOME'] = original_home
+
+    def test_user(self):
+        self.run_success(['--user', 'guest', '--password', 'guest', 'show', 'overview'])
+        self.run_fail(['--user', 'no', '--password', 'guest', 'show', 'overview'])
+        self.run_fail(['--user', 'guest', '--password', 'no', 'show', 'overview'])
+
+    def test_fmt_long(self):
+        self.assert_output("""
+--------------------------------------------------------------------------------
+
+   name: /
+tracing: False
+
+--------------------------------------------------------------------------------
+
+""", ['--format', 'long', 'list', 'vhosts', 'name', 'tracing'])
+
+    def test_fmt_kvp(self):
+        self.assert_output("""name="/" tracing="False"
+""", ['--format', 'kvp', 'list', 'vhosts', 'name', 'tracing'])
+
+    def test_fmt_tsv(self):
+        self.assert_output("""name	tracing
+/	False
+""", ['--format', 'tsv', 'list', 'vhosts', 'name', 'tracing'])
+
+    def test_fmt_table(self):
+        # 'table' is also the default format.
+        out = """+------+---------+
+| name | tracing |
++------+---------+
+| /    | False   |
++------+---------+
+"""
+        self.assert_output(out, ['list', 'vhosts', 'name', 'tracing'])
+        self.assert_output(out, ['--format', 'table', 'list', 'vhosts', 'name', 'tracing'])
+
+    def test_fmt_bash(self):
+        self.assert_output("""/
+""", ['--format', 'bash', 'list', 'vhosts', 'name', 'tracing'])
+
+    def test_vhosts(self):
+        self.assert_list(['/'], l('vhosts'))
+        self.run_success(['declare', 'vhost', 'name=foo'])
+        self.assert_list(['/', 'foo'], l('vhosts'))
+        self.run_success(['delete', 'vhost', 'name=foo'])
+        self.assert_list(['/'], l('vhosts'))
+
+    def test_users(self):
+        self.assert_list(['guest'], l('users'))
+        # A user without a password/tags is rejected.
+        self.run_fail(['declare', 'user', 'name=foo'])
+        self.run_success(['declare', 'user', 'name=foo', 'password=pass', 'tags='])
+        self.assert_list(['foo', 'guest'], l('users'))
+        self.run_success(['delete', 'user', 'name=foo'])
+        self.assert_list(['guest'], l('users'))
+
+    def test_permissions(self):
+        self.run_success(['declare', 'vhost', 'name=foo'])
+        self.run_success(['declare', 'user', 'name=bar', 'password=pass', 'tags='])
+        self.assert_table([['guest', '/']], ['list', 'permissions', 'user', 'vhost'])
+        self.run_success(['declare', 'permission', 'user=bar', 'vhost=foo', 'configure=.*', 'write=.*', 'read=.*'])
+        self.assert_table([['guest', '/'], ['bar', 'foo']], ['list', 'permissions', 'user', 'vhost'])
+        self.run_success(['delete', 'user', 'name=bar'])
+        self.run_success(['delete', 'vhost', 'name=foo'])
+
+    def test_alt_vhost(self):
+        # --vhost switches the target vhost for declare/delete/list.
+        self.run_success(['declare', 'vhost', 'name=foo'])
+        self.run_success(['declare', 'permission', 'user=guest', 'vhost=foo', 'configure=.*', 'write=.*', 'read=.*'])
+        self.run_success(['declare', 'queue', 'name=in_/'])
+        self.run_success(['--vhost', 'foo', 'declare', 'queue', 'name=in_foo'])
+        self.assert_table([['/', 'in_/'], ['foo', 'in_foo']], ['list', 'queues', 'vhost', 'name'])
+        self.run_success(['--vhost', 'foo', 'delete', 'queue', 'name=in_foo'])
+        self.run_success(['delete', 'queue', 'name=in_/'])
+        self.run_success(['delete', 'vhost', 'name=foo'])
+
+    def test_exchanges(self):
+        self.run_success(['declare', 'exchange', 'name=foo', 'type=direct'])
+        self.assert_list(['', 'amq.direct', 'amq.fanout', 'amq.headers', 'amq.match', 'amq.rabbitmq.log', 'amq.rabbitmq.trace', 'amq.topic', 'foo'], l('exchanges'))
+        self.run_success(['delete', 'exchange', 'name=foo'])
+
+    def test_queues(self):
+        self.run_success(['declare', 'queue', 'name=foo'])
+        self.assert_list(['foo'], l('queues'))
+        self.run_success(['delete', 'queue', 'name=foo'])
+
+    def test_bindings(self):
+        # Declaring a queue also creates its default-exchange binding,
+        # hence the ['', 'foo', 'queue', 'foo'] row.
+        self.run_success(['declare', 'queue', 'name=foo'])
+        self.run_success(['declare', 'binding', 'source=amq.direct', 'destination=foo', 'destination_type=queue', 'routing_key=test'])
+        self.assert_table([['', 'foo', 'queue', 'foo'], ['amq.direct', 'foo', 'queue', 'test']], ['list', 'bindings', 'source', 'destination', 'destination_type', 'routing_key'])
+        self.run_success(['delete', 'queue', 'name=foo'])
+
+    def test_policies(self):
+        self.run_success(['declare', 'policy', 'name=ha', 'pattern=.*', 'definition={"ha-mode":"all"}'])
+        self.assert_table([['ha', '/', '.*', '{"ha-mode": "all"}']], ['list', 'policies', 'name', 'vhost', 'pattern', 'definition'])
+        self.run_success(['delete', 'policy', 'name=ha'])
+
+    def test_parameters(self):
+        # Registers/unregisters a test runtime-parameter component via
+        # rabbitmqctl eval.
+        self.ctl(['eval', 'rabbit_runtime_parameters_test:register().'])
+        self.run_success(['declare', 'parameter', 'component=test', 'name=good', 'value=123'])
+        self.assert_table([['test', 'good', '/', '123']], ['list', 'parameters', 'component', 'name', 'vhost', 'value'])
+        self.run_success(['delete', 'parameter', 'component=test', 'name=good'])
+        self.ctl(['eval', 'rabbit_runtime_parameters_test:unregister().'])
+
+    def test_publish(self):
+        self.run_success(['declare', 'queue', 'name=test'])
+        self.run_success(['publish', 'routing_key=test', 'payload=test_1'])
+        self.run_success(['publish', 'routing_key=test', 'payload=test_2'])
+        # Payload may also be fed via stdin.
+        self.run_success(['publish', 'routing_key=test'], stdin='test_3')
+        # Exercise requeue semantics: test_2 is requeued once and comes
+        # back redelivered.
+        self.assert_table([exp_msg('test', 2, False, 'test_1')], ['get', 'queue=test', 'requeue=false'])
+        self.assert_table([exp_msg('test', 1, False, 'test_2')], ['get', 'queue=test', 'requeue=true'])
+        self.assert_table([exp_msg('test', 1, True, 'test_2')], ['get', 'queue=test', 'requeue=false'])
+        self.assert_table([exp_msg('test', 0, False, 'test_3')], ['get', 'queue=test', 'requeue=false'])
+        self.run_success(['publish', 'routing_key=test'], stdin='test_4')
+        # payload_file writes the payload to disk instead of stdout.
+        filename = '/tmp/rabbitmq-test/get.txt'
+        self.run_success(['get', 'queue=test', 'requeue=false', 'payload_file=' + filename])
+        with open(filename) as f:
+            self.assertEqual('test_4', f.read())
+        os.remove(filename)
+        self.run_success(['delete', 'queue', 'name=test'])
+
+    def test_ignore_vhost(self):
+        # --vhost is accepted but irrelevant for these commands.
+        self.run_success(['--vhost', '/', 'show', 'overview'])
+        self.run_success(['--vhost', '/', 'list', 'users'])
+        self.run_success(['--vhost', '/', 'list', 'vhosts'])
+        self.run_success(['--vhost', '/', 'list', 'nodes'])
+        self.run_success(['--vhost', '/', 'list', 'permissions'])
+        self.run_success(['--vhost', '/', 'declare', 'user', 'name=foo', 'password=pass', 'tags='])
+        self.run_success(['delete', 'user', 'name=foo'])
+
+    def test_sort(self):
+        self.run_success(['declare', 'queue', 'name=foo'])
+        self.run_success(['declare', 'binding', 'source=amq.direct', 'destination=foo', 'destination_type=queue', 'routing_key=bbb'])
+        self.run_success(['declare', 'binding', 'source=amq.topic', 'destination=foo', 'destination_type=queue', 'routing_key=aaa'])
+        self.assert_table([['', 'foo'], ['amq.direct', 'bbb'], ['amq.topic', 'aaa']], ['--sort', 'source', 'list', 'bindings', 'source', 'routing_key'])
+        self.assert_table([['amq.topic', 'aaa'], ['amq.direct', 'bbb'], ['', 'foo']], ['--sort', 'routing_key', 'list', 'bindings', 'source', 'routing_key'])
+        self.assert_table([['amq.topic', 'aaa'], ['amq.direct', 'bbb'], ['', 'foo']], ['--sort', 'source', '--sort-reverse', 'list', 'bindings', 'source', 'routing_key'])
+        self.run_success(['delete', 'queue', 'name=foo'])
+
+    # ---------------------------------------------------------------------------
+
+    def run_success(self, args, **kwargs):
+        # Run rabbitmqadmin; fail the test (showing its output) on a
+        # non-zero exit code.
+        (stdout, ret) = self.admin(args, **kwargs)
+        if ret != 0:
+            self.fail(stdout)
+
+    def run_fail(self, args):
+        # Run rabbitmqadmin; fail the test if it unexpectedly succeeds.
+        (stdout, ret) = self.admin(args)
+        if ret == 0:
+            self.fail(stdout)
+
+    def assert_output(self, expected, args):
+        self.assertEqual(expected, self.admin(args)[0])
+
+    def assert_list(self, expected, args0):
+        # Compare single-column 'list' output (quiet tsv) line by line.
+        args = ['-f', 'tsv', '-q']
+        args.extend(args0)
+        self.assertEqual(expected, self.admin(args)[0].splitlines())
+
+    def assert_table(self, expected, args0):
+        # Compare multi-column output as a list of tab-split rows.
+        args = ['-f', 'tsv', '-q']
+        args.extend(args0)
+        self.assertEqual(expected, [l.split('\t') for l in self.admin(args)[0].splitlines()])
+
+    def admin(self, args, stdin=None):
+        return run('../../../bin/rabbitmqadmin', args, stdin)
+
+    def ctl(self, args0, stdin=None):
+        # Run rabbitmqctl against the 'rabbit-test' node.
+        args = ['-n', 'rabbit-test']
+        args.extend(args0)
+        (stdout, ret) = run('../../../../rabbitmq-server/scripts/rabbitmqctl', args, stdin)
+        if ret != 0:
+            self.fail(stdout)
+
+def run(cmd, args, stdin):
+    """Run cmd (a path relative to this script's directory) and return
+    (stdout + stderr, returncode).
+
+    os.path.join(cwd, sys.argv[0], cmd) deliberately treats the script
+    path as if it were a directory; because cmd starts with '../',
+    normpath strips the script filename back off, resolving cmd
+    relative to the script's location regardless of the cwd.
+    """
+    path = os.path.normpath(os.path.join(os.getcwd(), sys.argv[0], cmd))
+    cmdline = [path]
+    cmdline.extend(args)
+    proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    (stdout, stderr) = proc.communicate(stdin)
+    returncode = proc.returncode
+    return (stdout + stderr, returncode)
+
+def l(thing):
+ return ['list', thing, 'name']
+
+def exp_msg(key, count, redelivered, payload):
+ # routing_key, exchange, message_count, payload, payload_bytes, payload_encoding, properties, redelivered
+ return [key, '', str(count), payload, str(len(payload)), 'string', '', str(redelivered)]
+
+if __name__ == '__main__':
+    # Python 2 print statement: this script will not run under Python 3.
+    print "\nrabbitmqadmin tests\n===================\n"
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestRabbitMQAdmin)
+    unittest.TextTestRunner(verbosity=2).run(suite)
--- /dev/null
+# rabbitmqadmin.conf.example START
+
+[host_normal]
+hostname = localhost
+port = 15672
+username = guest
+password = guest
+declare_vhost = / # Used as default for declare / delete only
+vhost = / # Used as default for declare / delete / list
+
+[default]
+hostname = localhost
+port = 99999
+username = guest
+password = guest
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+# RabbitMQ MQTT adapter
+
+The MQTT adapter is included in the RabbitMQ distribution. To enable
+it, use <a href="http://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html">rabbitmq-plugins</a>:
+
+ rabbitmq-plugins enable rabbitmq_mqtt
+
+Full usage instructions can be found at
+<http://www.rabbitmq.com/mqtt.html>.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% 23 characters is the client-id limit in the MQTT v3.1 spec.
+-define(CLIENT_ID_MAXLEN, 23).
+
+%% reader state
+-record(state, { socket,
+                 conn_name,
+                 await_recv,        %% NOTE(review): presumably tracks an
+                                    %% outstanding async recv -- confirm in reader
+                 connection_state,
+                 keepalive,
+                 keepalive_sup,
+                 conserve,
+                 parse_state,       %% rabbit_mqtt_frame parser continuation
+                 proc_state }).     %% #proc_state{} of rabbit_mqtt_processor
+
+%% processor state (initialised in rabbit_mqtt_processor:initial_state/1)
+-record(proc_state, { socket,
+                      subscriptions,    %% dict()
+                      consumer_tags,    %% 2-tuple, both initially undefined
+                      unacked_pubs,     %% gb_tree
+                      awaiting_ack,     %% gb_tree
+                      awaiting_seqno,
+                      message_id,       %% starts at 1
+                      client_id,
+                      clean_sess,
+                      will_msg,
+                      channels,         %% 2-tuple, both initially undefined
+                      connection,
+                      exchange }).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Protocol level -> protocol name: 3 = "MQIsdp" (MQTT 3.1),
+%% 4 = "MQTT" (MQTT 3.1.1).
+-define(PROTOCOL_NAMES, [{3, "MQIsdp"}, {4, "MQTT"}]).
+
+%% frame types (fixed-header control packet type, 1..14)
+
+-define(CONNECT, 1).
+-define(CONNACK, 2).
+-define(PUBLISH, 3).
+-define(PUBACK, 4).
+-define(PUBREC, 5).
+-define(PUBREL, 6).
+-define(PUBCOMP, 7).
+-define(SUBSCRIBE, 8).
+-define(SUBACK, 9).
+-define(UNSUBSCRIBE, 10).
+-define(UNSUBACK, 11).
+-define(PINGREQ, 12).
+-define(PINGRESP, 13).
+-define(DISCONNECT, 14).
+
+%% connect return codes
+
+-define(CONNACK_ACCEPT, 0).
+-define(CONNACK_PROTO_VER, 1). %% unacceptable protocol version
+-define(CONNACK_INVALID_ID, 2). %% identifier rejected
+-define(CONNACK_SERVER, 3). %% server unavailable
+-define(CONNACK_CREDENTIALS, 4). %% bad user name or password
+-define(CONNACK_AUTH, 5). %% not authorized
+
+%% qos levels
+
+-define(QOS_0, 0).
+-define(QOS_1, 1).
+-define(QOS_2, 2).
+
+%% A parsed MQTT control packet: fixed header, variable header, payload.
+-record(mqtt_frame, {fixed,
+                     variable,
+                     payload}).
+
+%% Fixed-header fields/flags.
+-record(mqtt_frame_fixed, {type = 0,
+                           dup = 0,
+                           qos = 0,
+                           retain = 0}).
+
+%% CONNECT variable header and payload fields.
+-record(mqtt_frame_connect, {proto_ver,
+                             will_retain,
+                             will_qos,
+                             will_flag,
+                             clean_sess,
+                             keep_alive,
+                             client_id,
+                             will_topic,
+                             will_msg,
+                             username,
+                             password}).
+
+-record(mqtt_frame_connack, {return_code}).
+
+%% PUBLISH/PUBACK variable header.
+-record(mqtt_frame_publish, {topic_name,
+                             message_id}).
+
+%% SUBSCRIBE/UNSUBSCRIBE payload: the list of requested topics.
+-record(mqtt_frame_subscribe,{message_id,
+                              topic_table}).
+
+%% SUBACK/UNSUBACK: granted QoS values, one per topic.
+-record(mqtt_frame_suback, {message_id,
+                            qos_table = []}).
+
+-record(mqtt_topic, {name,
+                     qos}).
+
+-record(mqtt_frame_other, {other}).
+
+%% An application message (e.g. a last-will message).
+-record(mqtt_msg, {retain,
+                   qos,
+                   topic,
+                   dup,
+                   message_id,
+                   payload}).
--- /dev/null
+RELEASABLE:=true
+DEPS:=rabbitmq-erlang-client
+
+RABBITMQ_TEST_PATH=$(PACKAGE_DIR)/../../rabbitmq-test
+WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/test.sh
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% application callback: start the top-level supervisor with the
+%% configured TCP and SSL listener specs.
+start(normal, []) ->
+    {ok, TcpListeners} = application:get_env(tcp_listeners),
+    {ok, SslListeners} = application:get_env(ssl_listeners),
+    rabbit_mqtt_sup:start_link({TcpListeners, SslListeners}, []).
+
+%% application callback: nothing to clean up on stop.
+stop(_State) ->
+    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_collector).
+
+-behaviour(gen_server).
+
+-export([start_link/0, register/2, unregister/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% client_ids: ClientId -> {Pid, MonitorRef} of connected clients.
+-record(state, {client_ids}).
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+%% Start the singleton collector, locally registered as ?SERVER.
+start_link() ->
+    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% Register ClientId as owned by Pid, displacing any previous owner.
+%% Uses the ?SERVER macro for consistency with start_link/0 (the
+%% hard-coded module name said the same thing twice).
+register(ClientId, Pid) ->
+    gen_server:call(?SERVER, {register, ClientId, Pid}, infinity).
+
+%% Remove the ClientId -> Pid mapping; a stale mismatch is a no-op.
+unregister(ClientId, Pid) ->
+    gen_server:call(?SERVER, {unregister, ClientId, Pid}, infinity).
+
+%%----------------------------------------------------------------------------
+
+%% The mapping starts empty; entries are added via 'register' calls.
+init([]) ->
+    {ok, #state{client_ids = dict:new()}}. % clientid -> {pid, monitor}
+
+%%--------------------------------------------------------------------------
+
+%% Register: if a *different* connection already holds this client id,
+%% tell it (best-effort cast) to shut down with 'duplicate_id' and drop
+%% its monitor, then store the new {Pid, MonitorRef}.
+%% NOTE(review): a repeat registration by the *same* pid matches neither
+%% case clause (the {ok, ...} clause is guarded by Pid =/= OldPid) and
+%% would crash with case_clause -- presumably callers never do this;
+%% confirm.
+handle_call({register, ClientId, Pid}, _From,
+            State = #state{client_ids = Ids}) ->
+    Ids1 = case dict:find(ClientId, Ids) of
+               {ok, {OldPid, MRef}} when Pid =/= OldPid ->
+                   catch gen_server2:cast(OldPid, duplicate_id),
+                   erlang:demonitor(MRef),
+                   dict:erase(ClientId, Ids);
+               error ->
+                   Ids
+           end,
+    Ids2 = dict:store(ClientId, {Pid, erlang:monitor(process, Pid)}, Ids1),
+    {reply, ok, State#state{client_ids = Ids2}};
+
+%% Unregister only if the id is still mapped to this exact Pid; a
+%% stale unregister is a no-op. Always replies ok.
+handle_call({unregister, ClientId, Pid}, _From, State = #state{client_ids = Ids}) ->
+    {Reply, Ids1} = case dict:find(ClientId, Ids) of
+                        {ok, {Pid, MRef}} -> erlang:demonitor(MRef),
+                                             {ok, dict:erase(ClientId, Ids)};
+                        _                 -> {ok, Ids}
+                    end,
+    {reply, Reply, State#state{ client_ids = Ids1 }};
+
+%% No other calls are part of the protocol: crash loudly.
+handle_call(Msg, _From, State) ->
+    {stop, {unhandled_call, Msg}, State}.
+
+%% No casts are part of the protocol: crash loudly.
+handle_cast(Msg, State) ->
+    {stop, {unhandled_cast, Msg}, State}.
+
+handle_info({'EXIT', _, {shutdown, closed}}, State) ->
+    {stop, {shutdown, closed}, State};
+
+%% A monitored client process died: drop its entry (matching both the
+%% pid and the monitor ref) and log the disconnect.
+%% NOTE(review): there is no catch-all handle_info clause, so any other
+%% message crashes the collector -- confirm that is intended.
+handle_info({'DOWN', MRef, process, DownPid, _Reason},
+            State = #state{client_ids = Ids}) ->
+    Ids1 = dict:filter(fun (ClientId, {Pid, M})
+                             when Pid =:= DownPid, MRef =:= M ->
+                               rabbit_log:warning("MQTT disconnect from ~p~n",
+                                                  [ClientId]),
+                               false;
+                           (_, _) ->
+                               true
+                       end, Ids),
+    {noreply, State #state{ client_ids = Ids1 }}.
+
+%% Nothing to clean up; monitors die with the process.
+terminate(_Reason, _State) ->
+    ok.
+
+%% No state migration needed between versions.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_connection_sup).
+
+-behaviour(supervisor2).
+
+-define(MAX_WAIT, 16#ffffffff).
+
+-export([start_link/0, start_keepalive_link/0]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+%% Start a per-connection supervision tree: a reader worker plus a
+%% nested supervisor for keepalive processes. Both children are
+%% 'intrinsic', so when either terminates the whole tree goes down.
+%% Returns {ok, SupPid, {KeepaliveSup, ReaderPid}} so the caller can
+%% hand both pids to the reader.
+start_link() ->
+    {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+    {ok, ReaderPid} = supervisor2:start_child(
+                        SupPid,
+                        {rabbit_mqtt_reader,
+                         {rabbit_mqtt_reader, start_link, []},
+                         intrinsic, ?MAX_WAIT, worker, [rabbit_mqtt_reader]}),
+    {ok, KeepaliveSup} = supervisor2:start_child(
+                           SupPid,
+                           {rabbit_keepalive_sup,
+                            {rabbit_mqtt_connection_sup, start_keepalive_link, []},
+                            intrinsic, infinity, supervisor, [rabbit_keepalive_sup]}),
+    {ok, SupPid, {KeepaliveSup, ReaderPid}}.
+
+%% Entry point for the nested keepalive supervisor child above.
+start_keepalive_link() ->
+    supervisor2:start_link(?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+%% Children are added dynamically by start_link/0; one_for_all with
+%% zero allowed restarts means any crash takes the connection down.
+init([]) ->
+    {ok, {{one_for_all, 0, 1}, []}}.
+
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_frame).
+
+-export([parse/2, initial_state/0]).
+-export([serialise/1]).
+
+-include("rabbit_mqtt_frame.hrl").
+
+-define(RESERVED, 0).
+%% The remaining-length varint is at most 4 bytes of 7 payload bits.
+-define(MAX_LEN, 16#fffffff).
+-define(HIGHBIT, 2#10000000).
+-define(LOWBITS, 2#01111111).
+
+%% Parser continuation start state: at a frame boundary.
+initial_state() -> none.
+
+%% Feed bytes to the parser. 'none' means we are at a frame boundary;
+%% otherwise the state is a continuation fun from an earlier
+%% {more, Cont}. The first byte is the fixed header: 4 bits type,
+%% then dup/qos/retain flags.
+parse(<<>>, none) ->
+    {more, fun(Bin) -> parse(Bin, none) end};
+parse(<<MessageType:4, Dup:1, QoS:2, Retain:1, Rest/binary>>, none) ->
+    parse_remaining_len(Rest, #mqtt_frame_fixed{ type = MessageType,
+                                                 dup = bool(Dup),
+                                                 qos = QoS,
+                                                 retain = bool(Retain) });
+parse(Bin, Cont) -> Cont(Bin).
+
+%% Decode the MQTT "remaining length" varint: 7 value bits per byte,
+%% least-significant group first, high bit set = more bytes follow.
+%% Rejects values above ?MAX_LEN; returns {more, Cont} on short input.
+parse_remaining_len(<<>>, Fixed) ->
+    {more, fun(Bin) -> parse_remaining_len(Bin, Fixed) end};
+parse_remaining_len(Rest, Fixed) ->
+    parse_remaining_len(Rest, Fixed, 1, 0).
+
+parse_remaining_len(_Bin, _Fixed, _Multiplier, Length)
+  when Length > ?MAX_LEN ->
+    {error, invalid_mqtt_frame_len};
+parse_remaining_len(<<>>, Fixed, Multiplier, Length) ->
+    {more, fun(Bin) -> parse_remaining_len(Bin, Fixed, Multiplier, Length) end};
+parse_remaining_len(<<1:1, Len:7, Rest/binary>>, Fixed, Multiplier, Value) ->
+    parse_remaining_len(Rest, Fixed, Multiplier * ?HIGHBIT, Value + Len * Multiplier);
+parse_remaining_len(<<0:1, Len:7, Rest/binary>>, Fixed, Multiplier, Value) ->
+    parse_frame(Rest, Fixed, Value + Len * Multiplier).
+
+%% Parse the variable header and payload of a frame whose fixed
+%% header and remaining Length are known. If fewer than Length bytes
+%% are buffered, returns {more, Cont} which accumulates input.
+parse_frame(Bin, #mqtt_frame_fixed{ type = Type,
+                                    qos  = Qos } = Fixed, Length) ->
+    case {Type, Bin} of
+        {?CONNECT, <<FrameBin:Length/binary, Rest/binary>>} ->
+            {ProtoName, Rest1} = parse_utf(FrameBin),
+            <<ProtoVersion : 8, Rest2/binary>> = Rest1,
+            %% the connect-flags byte, then the 16-bit keepalive
+            <<UsernameFlag : 1,
+              PasswordFlag : 1,
+              WillRetain   : 1,
+              WillQos      : 2,
+              WillFlag     : 1,
+              CleanSession : 1,
+              _Reserved    : 1,
+              KeepAlive    : 16/big,
+              Rest3/binary>>   = Rest2,
+            {ClientId,  Rest4} = parse_utf(Rest3),
+            %% will topic/message and credentials are present only when
+            %% the corresponding flag bit is 1
+            {WillTopic, Rest5} = parse_utf(Rest4, WillFlag),
+            {WillMsg,   Rest6} = parse_msg(Rest5, WillFlag),
+            {UserName,  Rest7} = parse_utf(Rest6, UsernameFlag),
+            {PasssWord, <<>>}  = parse_utf(Rest7, PasswordFlag),
+            case protocol_name_approved(ProtoVersion, ProtoName) of
+                true ->
+                    wrap(Fixed,
+                         #mqtt_frame_connect{
+                           proto_ver   = ProtoVersion,
+                           will_retain = bool(WillRetain),
+                           will_qos    = WillQos,
+                           will_flag   = bool(WillFlag),
+                           clean_sess  = bool(CleanSession),
+                           keep_alive  = KeepAlive,
+                           client_id   = ClientId,
+                           will_topic  = WillTopic,
+                           will_msg    = WillMsg,
+                           username    = UserName,
+                           password    = PasssWord}, Rest);
+                false ->
+                    {error, protocol_header_corrupt}
+            end;
+        {?PUBLISH, <<FrameBin:Length/binary, Rest/binary>>} ->
+            {TopicName, Rest1} = parse_utf(FrameBin),
+            %% QoS 0 publishes carry no message id
+            {MessageId, Payload} = case Qos of
+                                       0 -> {undefined, Rest1};
+                                       _ -> <<M:16/big, R/binary>> = Rest1,
+                                            {M, R}
+                                   end,
+            wrap(Fixed, #mqtt_frame_publish { topic_name = TopicName,
+                                              message_id = MessageId },
+                 Payload, Rest);
+        {?PUBACK, <<FrameBin:Length/binary, Rest/binary>>} ->
+            <<MessageId:16/big>> = FrameBin,
+            wrap(Fixed, #mqtt_frame_publish { message_id = MessageId }, Rest);
+        {Subs, <<FrameBin:Length/binary, Rest/binary>>}
+          when Subs =:= ?SUBSCRIBE orelse Subs =:= ?UNSUBSCRIBE ->
+            %% (UN)SUBSCRIBE must be sent with QoS 1 -- assert it
+            1 = Qos,
+            <<MessageId:16/big, Rest1/binary>> = FrameBin,
+            Topics = parse_topics(Subs, Rest1, []),
+            wrap(Fixed, #mqtt_frame_subscribe { message_id  = MessageId,
+                                                topic_table = Topics }, Rest);
+        {Minimal, Rest}
+          when Minimal =:= ?DISCONNECT orelse Minimal =:= ?PINGREQ ->
+            %% these frames have no variable header or payload
+            Length = 0,
+            wrap(Fixed, Rest);
+        {_, TooShortBin} ->
+            {more, fun(BinMore) ->
+                       parse_frame(<<TooShortBin/binary, BinMore/binary>>,
+                                   Fixed, Length)
+                   end}
+    end.
+
+%% Parse the topic list of a (UN)SUBSCRIBE payload; SUBSCRIBE entries
+%% carry a requested QoS byte after each topic name. Note that topics
+%% are accumulated head-first, so the result is in reverse wire order.
+parse_topics(_, <<>>, Topics) ->
+    Topics;
+parse_topics(?SUBSCRIBE = Sub, Bin, Topics) ->
+    {Name, <<_:6, QoS:2, Rest/binary>>} = parse_utf(Bin),
+    parse_topics(Sub, Rest, [#mqtt_topic { name = Name, qos = QoS } | Topics]);
+parse_topics(?UNSUBSCRIBE = Sub, Bin, Topics) ->
+    {Name, <<Rest/binary>>} = parse_utf(Bin),
+    parse_topics(Sub, Rest, [#mqtt_topic { name = Name } | Topics]).
+
+%% Package a completed frame as {ok, #mqtt_frame{}, RemainingBytes}.
+wrap(Fixed, Variable, Payload, Rest) ->
+    {ok, #mqtt_frame { variable = Variable, fixed = Fixed, payload = Payload }, Rest}.
+wrap(Fixed, Variable, Rest) ->
+    {ok, #mqtt_frame { variable = Variable, fixed = Fixed }, Rest}.
+wrap(Fixed, Rest) ->
+    {ok, #mqtt_frame { fixed = Fixed }, Rest}.
+
+%% Conditional string field: a flag of 0 means the field is absent.
+parse_utf(Bin, 0) ->
+    {undefined, Bin};
+parse_utf(Bin, _) ->
+    parse_utf(Bin).
+
+%% 16-bit big-endian length-prefixed string, returned as a list.
+parse_utf(<<Len:16/big, Str:Len/binary, Rest/binary>>) ->
+    {binary_to_list(Str), Rest}.
+
+%% Like parse_utf/2 but the payload stays a binary.
+parse_msg(Bin, 0) ->
+    {undefined, Bin};
+parse_msg(<<Len:16/big, Msg:Len/binary, Rest/binary>>, _) ->
+    {Msg, Rest}.
+
+%% Wire bit -> boolean; any other value is a deliberate crash.
+bool(0) -> false;
+bool(1) -> true.
+
+%% serialisation
+
+%% Serialise a frame record back to its wire representation.
+serialise(#mqtt_frame{ fixed = Fixed,
+                       variable = Variable,
+                       payload = Payload }) ->
+    serialise_variable(Fixed, Variable, serialise_payload(Payload)).
+
+%% An undefined payload serialises to no bytes at all.
+serialise_payload(undefined) -> <<>>;
+serialise_payload(B) when is_binary(B) -> B.
+
+%% Serialise the variable header per frame type (asserting via the
+%% <<>> payload matches which types may not carry a payload), then
+%% delegate to serialise_fixed/3.
+serialise_variable(#mqtt_frame_fixed { type = ?CONNACK } = Fixed,
+                   #mqtt_frame_connack { return_code = ReturnCode },
+                   <<>> = PayloadBin) ->
+    VariableBin = <<?RESERVED:8, ReturnCode:8>>,
+    serialise_fixed(Fixed, VariableBin, PayloadBin);
+
+serialise_variable(#mqtt_frame_fixed { type = SubAck } = Fixed,
+                   #mqtt_frame_suback { message_id = MessageId,
+                                        qos_table = Qos },
+                   <<>> = _PayloadBin)
+  when SubAck =:= ?SUBACK orelse SubAck =:= ?UNSUBACK ->
+    VariableBin = <<MessageId:16/big>>,
+    %% one granted-QoS byte per topic
+    QosBin = << <<?RESERVED:6, Q:2>> || Q <- Qos >>,
+    serialise_fixed(Fixed, VariableBin, QosBin);
+
+serialise_variable(#mqtt_frame_fixed { type = ?PUBLISH,
+                                       qos = Qos } = Fixed,
+                   #mqtt_frame_publish { topic_name = TopicName,
+                                         message_id = MessageId },
+                   PayloadBin) ->
+    TopicBin = serialise_utf(TopicName),
+    %% QoS 0 publishes carry no message id
+    MessageIdBin = case Qos of
+                       0 -> <<>>;
+                       1 -> <<MessageId:16/big>>
+                   end,
+    serialise_fixed(Fixed, <<TopicBin/binary, MessageIdBin/binary>>, PayloadBin);
+
+serialise_variable(#mqtt_frame_fixed { type = ?PUBACK } = Fixed,
+                   #mqtt_frame_publish { message_id = MessageId },
+                   PayloadBin) ->
+    MessageIdBin = <<MessageId:16/big>>,
+    serialise_fixed(Fixed, MessageIdBin, PayloadBin);
+
+%% frames with neither variable header nor payload (e.g. PINGRESP)
+serialise_variable(#mqtt_frame_fixed {} = Fixed,
+                   undefined,
+                   <<>> = _PayloadBin) ->
+    serialise_fixed(Fixed, <<>>, <<>>).
+
+%% Emit the fixed header (type nibble + dup/qos/retain bits +
+%% remaining-length varint) followed by the already-serialised
+%% variable header and payload. Guards that Type is a known control
+%% packet type and asserts the length fits the varint encoding.
+serialise_fixed(#mqtt_frame_fixed{ type  = Type,
+                                   dup   = Dup,
+                                   qos   = Qos,
+                                   retain = Retain }, VariableBin, PayloadBin)
+  when is_integer(Type) andalso ?CONNECT =< Type andalso Type =< ?DISCONNECT ->
+    Len = size(VariableBin) + size(PayloadBin),
+    true = (Len =< ?MAX_LEN),
+    LenBin = serialise_len(Len),
+    <<Type:4, (opt(Dup)):1, (opt(Qos)):2, (opt(Retain)):1,
+      LenBin/binary, VariableBin/binary, PayloadBin/binary>>.
+
+serialise_utf(String) ->
+ StringBin = unicode:characters_to_binary(String),
+ Len = size(StringBin),
+ true = (Len =< 16#ffff),
+ <<Len:16/big, StringBin/binary>>.
+
+serialise_len(N) when N =< ?LOWBITS ->
+ <<0:1, N:7>>;
+serialise_len(N) ->
+ <<1:1, (N rem ?HIGHBIT):7, (serialise_len(N div ?HIGHBIT))/binary>>.
+
+opt(undefined) -> ?RESERVED;
+opt(false) -> 0;
+opt(true) -> 1;
+opt(X) when is_integer(X) -> X.
+
+protocol_name_approved(Ver, Name) ->
+ lists:member({Ver, Name}, ?PROTOCOL_NAMES).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_processor).
+
+-export([info/2, initial_state/1,
+ process_frame/2, amqp_pub/2, amqp_callback/2, send_will/1,
+ close_connection/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_mqtt_frame.hrl").
+-include("rabbit_mqtt.hrl").
+
+-define(FRAME_TYPE(Frame, Type),
+ Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }}).
+
+%% Build the processor state for a freshly-accepted socket. Channels,
+%% consumer tags and the client id are filled in later, on CONNECT and
+%% SUBSCRIBE; message ids start at 1 (0 is not a valid MQTT message id).
+initial_state(Socket) ->
+ #proc_state{ unacked_pubs = gb_trees:empty(),
+ awaiting_ack = gb_trees:empty(),
+ message_id = 1,
+ subscriptions = dict:new(),
+ consumer_tags = {undefined, undefined},
+ channels = {undefined, undefined},
+ exchange = rabbit_mqtt_util:env(exchange),
+ socket = Socket }.
+
+%% Expose selected state fields; only client_id is supported.
+info(client_id, #proc_state{ client_id = ClientId }) -> ClientId.
+
+%% Reject any frame arriving before a successful CONNECT.
+process_frame(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
+ PState = #proc_state{ connection = undefined } )
+ when Type =/= ?CONNECT ->
+ {error, connect_expected, PState};
+%% Dispatch on the frame type; any exception from processing closes the
+%% AMQP connection and is reported as a 2-tuple {error, Reason} (note:
+%% without the processor state, unlike the 3-tuple above).
+process_frame(Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
+ PState ) ->
+ %%rabbit_log:info("MQTT received frame ~p ~n", [Frame]),
+ try process_request(Type, Frame, PState) of
+ Result -> Result
+ catch _:Error ->
+ close_connection(PState),
+ {error, Error}
+ end.
+
+%% CONNECT: validate protocol version and client id, authenticate against
+%% the broker, and on success open a channel, register the client id,
+%% apply the prefetch limit and start keepalive monitoring. A CONNACK
+%% with the resulting return code is always sent back.
+process_request(?CONNECT,
+ #mqtt_frame{ variable = #mqtt_frame_connect{
+ username = Username,
+ password = Password,
+ proto_ver = ProtoVersion,
+ clean_sess = CleanSess,
+ client_id = ClientId0,
+ keep_alive = Keepalive} = Var}, PState) ->
+ %% An empty client id is auto-generated (only legal with clean_sess).
+ ClientId = case ClientId0 of
+ [] -> rabbit_mqtt_util:gen_client_id();
+ [_|_] -> ClientId0
+ end,
+ {ReturnCode, PState1} =
+ case {lists:member(ProtoVersion, proplists:get_keys(?PROTOCOL_NAMES)),
+ ClientId0 =:= [] andalso CleanSess =:= false} of
+ {false, _} ->
+ {?CONNACK_PROTO_VER, PState};
+ {_, true} ->
+ {?CONNACK_INVALID_ID, PState};
+ _ ->
+ case creds(Username, Password) of
+ nocreds ->
+ rabbit_log:error("MQTT login failed - no credentials~n"),
+ {?CONNACK_CREDENTIALS, PState};
+ {UserBin, PassBin} ->
+ case process_login(UserBin, PassBin, ProtoVersion, PState) of
+ {?CONNACK_ACCEPT, Conn} ->
+ %% Link so a dying AMQP connection takes us down.
+ link(Conn),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ ok = rabbit_mqtt_collector:register(
+ ClientId, self()),
+ Prefetch = rabbit_mqtt_util:env(prefetch),
+ #'basic.qos_ok'{} = amqp_channel:call(
+ Ch, #'basic.qos'{prefetch_count = Prefetch}),
+ rabbit_mqtt_reader:start_keepalive(self(), Keepalive),
+ {?CONNACK_ACCEPT,
+ maybe_clean_sess(
+ PState #proc_state{ will_msg = make_will_msg(Var),
+ clean_sess = CleanSess,
+ channels = {Ch, undefined},
+ connection = Conn,
+ client_id = ClientId })};
+ ConnAck ->
+ {ConnAck, PState}
+ end
+ end
+ end,
+ send_client(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?CONNACK},
+ variable = #mqtt_frame_connack{
+ return_code = ReturnCode }}, PState1),
+ {ok, PState1};
+
+%% PUBACK from the client: ack the corresponding AMQP delivery (looked
+%% up by MQTT message id) and drop it from the awaiting_ack tree.
+process_request(?PUBACK,
+ #mqtt_frame{
+ variable = #mqtt_frame_publish{ message_id = MessageId }},
+ #proc_state{ channels = {Channel, _},
+ awaiting_ack = Awaiting } = PState) ->
+ Tag = gb_trees:get(MessageId, Awaiting),
+ amqp_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = Tag }),
+ {ok, PState #proc_state{ awaiting_ack = gb_trees:delete( MessageId, Awaiting)}};
+
+%% QoS 2 publishes are not supported by this adapter.
+process_request(?PUBLISH,
+ #mqtt_frame{
+ fixed = #mqtt_frame_fixed{ qos = ?QOS_2 }}, PState) ->
+ {error, qos2_not_supported, PState};
+%% PUBLISH: republish the MQTT message into the configured AMQP exchange.
+process_request(?PUBLISH,
+ #mqtt_frame{
+ fixed = #mqtt_frame_fixed{ qos = Qos,
+ retain = Retain,
+ dup = Dup },
+ variable = #mqtt_frame_publish{ topic_name = Topic,
+ message_id = MessageId },
+ payload = Payload }, PState) ->
+ {ok, amqp_pub(#mqtt_msg{ retain = Retain,
+ qos = Qos,
+ topic = Topic,
+ dup = Dup,
+ message_id = MessageId,
+ payload = Payload }, PState)};
+
+%% SUBSCRIBE: for each topic, ensure the per-qos queue/consumer exists,
+%% bind the queue to the topic's AMQP routing key, and record the
+%% subscription. Replies with a SUBACK carrying the granted qos levels
+%% (accumulated in reverse order of the request's topic list).
+process_request(?SUBSCRIBE,
+ #mqtt_frame{
+ variable = #mqtt_frame_subscribe{ message_id = MessageId,
+ topic_table = Topics },
+ payload = undefined },
+ #proc_state{ channels = {Channel, _},
+ exchange = Exchange} = PState0) ->
+ {QosResponse, PState1} =
+ lists:foldl(fun (#mqtt_topic{ name = TopicName,
+ qos = Qos }, {QosList, PState}) ->
+ SupportedQos = supported_subs_qos(Qos),
+ {Queue, #proc_state{ subscriptions = Subs } = PState1} =
+ ensure_queue(SupportedQos, PState),
+ Binding = #'queue.bind'{
+ queue = Queue,
+ exchange = Exchange,
+ routing_key = rabbit_mqtt_util:mqtt2amqp(
+ TopicName)},
+ #'queue.bind_ok'{} = amqp_channel:call(Channel, Binding),
+ {[SupportedQos | QosList],
+ PState1 #proc_state{ subscriptions =
+ dict:append(TopicName, SupportedQos, Subs) }}
+ end, {[], PState0}, Topics),
+ send_client(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?SUBACK },
+ variable = #mqtt_frame_suback{
+ message_id = MessageId,
+ qos_table = QosResponse }}, PState1),
+
+ {ok, PState1};
+
+%% UNSUBSCRIBE: for each topic, unbind every queue that was bound for a
+%% previously-granted qos level and drop the topic from the subscription
+%% map, then reply with an UNSUBACK.
+process_request(?UNSUBSCRIBE,
+ #mqtt_frame{
+ variable = #mqtt_frame_subscribe{ message_id = MessageId,
+ topic_table = Topics },
+ payload = undefined }, #proc_state{ channels = {Channel, _},
+ exchange = Exchange,
+ client_id = ClientId,
+ subscriptions = Subs0} = PState) ->
+ Queues = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ Subs1 =
+ lists:foldl(
+ fun (#mqtt_topic{ name = TopicName }, Subs) ->
+ %% usort dedups qos levels recorded by repeated SUBSCRIBEs
+ QosSubs = case dict:find(TopicName, Subs) of
+ {ok, Val} when is_list(Val) -> lists:usort(Val);
+ error -> []
+ end,
+ lists:foreach(
+ fun (QosSub) ->
+ %% queue tuple is {QueueQos0, QueueQos1}; qos+1 indexes it
+ Queue = element(QosSub + 1, Queues),
+ Binding = #'queue.unbind'{
+ queue = Queue,
+ exchange = Exchange,
+ routing_key =
+ rabbit_mqtt_util:mqtt2amqp(TopicName)},
+ #'queue.unbind_ok'{} = amqp_channel:call(Channel, Binding)
+ end, QosSubs),
+ dict:erase(TopicName, Subs)
+ end, Subs0, Topics),
+ send_client(#mqtt_frame{ fixed = #mqtt_frame_fixed { type = ?UNSUBACK },
+ variable = #mqtt_frame_suback{ message_id = MessageId }},
+ PState),
+ {ok, PState #proc_state{ subscriptions = Subs1 }};
+
+%% PINGREQ is answered immediately with PINGRESP.
+process_request(?PINGREQ, #mqtt_frame{}, PState) ->
+ send_client(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?PINGRESP }},
+ PState),
+ {ok, PState};
+
+%% DISCONNECT: signal the reader to stop normally.
+process_request(?DISCONNECT, #mqtt_frame{}, PState) ->
+ {stop, PState}.
+
+%%----------------------------------------------------------------------------
+
+%% Handle an AMQP delivery destined for the MQTT client. The effective
+%% delivery qos is min(publish qos, subscription qos); redeliveries of a
+%% message downgraded to qos 0 on a qos 1 subscription are acked and
+%% dropped rather than re-sent (the client saw them with no dup flag).
+amqp_callback({#'basic.deliver'{ consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ routing_key = RoutingKey },
+ #amqp_msg{ props = #'P_basic'{ headers = Headers },
+ payload = Payload }} = Delivery,
+ #proc_state{ channels = {Channel, _},
+ awaiting_ack = Awaiting,
+ message_id = MsgId } = PState) ->
+ case {delivery_dup(Delivery), delivery_qos(ConsumerTag, Headers, PState)} of
+ {true, {?QOS_0, ?QOS_1}} ->
+ amqp_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = DeliveryTag }),
+ {ok, PState};
+ {true, {?QOS_0, ?QOS_0}} ->
+ {ok, PState};
+ {Dup, {DeliveryQos, _SubQos} = Qos} ->
+ send_client(
+ #mqtt_frame{ fixed = #mqtt_frame_fixed{
+ type = ?PUBLISH,
+ qos = DeliveryQos,
+ dup = Dup },
+ variable = #mqtt_frame_publish{
+ message_id =
+ case DeliveryQos of
+ ?QOS_0 -> undefined;
+ ?QOS_1 -> MsgId
+ end,
+ topic_name =
+ rabbit_mqtt_util:amqp2mqtt(
+ RoutingKey) },
+ payload = Payload}, PState),
+ case Qos of
+ {?QOS_0, ?QOS_0} ->
+ {ok, PState};
+ %% qos 1 to a qos 1 sub: remember the delivery tag until
+ %% the client PUBACKs this MsgId.
+ {?QOS_1, ?QOS_1} ->
+ {ok,
+ next_msg_id(
+ PState #proc_state{
+ awaiting_ack =
+ gb_trees:insert(MsgId, DeliveryTag, Awaiting)})};
+ %% downgraded to qos 0: ack on the client's behalf now.
+ {?QOS_0, ?QOS_1} ->
+ amqp_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = DeliveryTag }),
+ {ok, PState}
+ end
+ end;
+
+%% Publisher confirm covering multiple seqnos: PUBACK every pending
+%% publish whose seqno is =< Tag, smallest first, by recursing with the
+%% same 'multiple' ack until the tree's smallest entry exceeds Tag.
+amqp_callback(#'basic.ack'{ multiple = true, delivery_tag = Tag } = Ack,
+ PState = #proc_state{ unacked_pubs = UnackedPubs }) ->
+ case gb_trees:size(UnackedPubs) > 0 andalso
+ gb_trees:take_smallest(UnackedPubs) of
+ {TagSmall, MsgId, UnackedPubs1} when TagSmall =< Tag ->
+ send_client(
+ #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?PUBACK },
+ variable = #mqtt_frame_publish{ message_id = MsgId }},
+ PState),
+ amqp_callback(Ack, PState #proc_state{ unacked_pubs = UnackedPubs1 });
+ _ ->
+ {ok, PState}
+ end;
+
+%% Single publisher confirm: PUBACK the one message mapped to this seqno.
+amqp_callback(#'basic.ack'{ multiple = false, delivery_tag = Tag },
+ PState = #proc_state{ unacked_pubs = UnackedPubs }) ->
+ send_client(
+ #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?PUBACK },
+ variable = #mqtt_frame_publish{
+ message_id = gb_trees:get(
+ Tag, UnackedPubs) }}, PState),
+ {ok, PState #proc_state{ unacked_pubs = gb_trees:delete(Tag, UnackedPubs) }}.
+
+%% The MQTT dup flag for a delivery: AMQP redelivery, or the x-mqtt-dup
+%% header the publishing side stamped on the message.
+delivery_dup({#'basic.deliver'{ redelivered = Redelivered },
+ #amqp_msg{ props = #'P_basic'{ headers = Headers }}}) ->
+ case rabbit_mqtt_util:table_lookup(Headers, <<"x-mqtt-dup">>) of
+ undefined -> Redelivered;
+ {bool, Dup} -> Redelivered orelse Dup
+ end.
+
+%% Advance the outgoing message id, wrapping from 16#ffff back to 1
+%% (0 is not a valid MQTT message id).
+next_msg_id(PState = #proc_state{ message_id = 16#ffff }) ->
+ PState #proc_state{ message_id = 1 };
+next_msg_id(PState = #proc_state{ message_id = MsgId }) ->
+ PState #proc_state{ message_id = MsgId + 1 }.
+
+%% decide at which qos level to deliver based on subscription
+%% and the message publish qos level. non-MQTT publishes are
+%% assumed to be qos 1, regardless of delivery_mode.
+%% Dispatches on which consumer tag matched: first tag = qos 0 queue,
+%% second tag = qos 1 queue.
+delivery_qos(Tag, _Headers, #proc_state{ consumer_tags = {Tag, _} }) ->
+ {?QOS_0, ?QOS_0};
+delivery_qos(Tag, Headers, #proc_state{ consumer_tags = {_, Tag} }) ->
+ case rabbit_mqtt_util:table_lookup(Headers, <<"x-mqtt-publish-qos">>) of
+ {byte, Qos} -> {lists:min([Qos, ?QOS_1]), ?QOS_1};
+ undefined -> {?QOS_1, ?QOS_1}
+ end.
+
+%% clean_sess = false: make sure the durable qos 1 queue exists so the
+%% session can accumulate messages while the client is away.
+maybe_clean_sess(PState = #proc_state { clean_sess = false }) ->
+ {_Queue, PState1} = ensure_queue(?QOS_1, PState),
+ PState1;
+%% clean_sess = true: delete any leftover durable qos 1 queue from a
+%% previous session. Deletion happens on a throwaway channel because a
+%% failing delete (e.g. queue absent) kills the channel it ran on.
+maybe_clean_sess(PState = #proc_state { clean_sess = true,
+ connection = Conn,
+ client_id = ClientId }) ->
+ {_, Queue} = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ {ok, Channel} = amqp_connection:open_channel(Conn),
+ try amqp_channel:call(Channel, #'queue.delete'{ queue = Queue }) of
+ #'queue.delete_ok'{} -> ok = amqp_channel:close(Channel)
+ catch
+ exit:_Error -> ok
+ end,
+ PState.
+
+%%----------------------------------------------------------------------------
+
+%% Build the stored will message from the CONNECT frame, or undefined
+%% when no will flag was set.
+make_will_msg(#mqtt_frame_connect{ will_flag = false }) ->
+ undefined;
+make_will_msg(#mqtt_frame_connect{ will_retain = Retain,
+ will_qos = Qos,
+ will_topic = Topic,
+ will_msg = Msg }) ->
+ #mqtt_msg{ retain = Retain,
+ qos = Qos,
+ topic = Topic,
+ dup = false,
+ payload = Msg }.
+
+%% Authenticate against the broker via a direct AMQP connection. Returns
+%% {?CONNACK_ACCEPT, Connection} on success, or a bare CONNACK error code
+%% on failure. Only callable before any channel exists.
+process_login(UserBin, PassBin, ProtoVersion,
+ #proc_state{ channels = {undefined, undefined},
+ socket = Sock }) ->
+ {VHost, UsernameBin} = get_vhost_username(UserBin),
+ case amqp_connection:start(#amqp_params_direct{
+ username = UsernameBin,
+ password = PassBin,
+ virtual_host = VHost,
+ adapter_info = adapter_info(Sock, ProtoVersion)}) of
+ {ok, Connection} ->
+ %% loopback-restricted users may only connect from localhost
+ case rabbit_access_control:check_user_loopback(UsernameBin, Sock) of
+ ok -> {?CONNACK_ACCEPT, Connection};
+ not_allowed -> amqp_connection:close(Connection),
+ rabbit_log:warning(
+ "MQTT login failed for ~p access_refused "
+ "(access must be from localhost)~n",
+ [binary_to_list(UsernameBin)]),
+ ?CONNACK_AUTH
+ end;
+ {error, {auth_failure, Explanation}} ->
+ rabbit_log:error("MQTT login failed for ~p auth_failure: ~s~n",
+ [binary_to_list(UserBin), Explanation]),
+ ?CONNACK_CREDENTIALS;
+ {error, access_refused} ->
+ rabbit_log:warning("MQTT login failed for ~p access_refused "
+ "(vhost access not allowed)~n",
+ [binary_to_list(UserBin)]),
+ ?CONNACK_AUTH
+ end.
+
+%% An MQTT username of the form "vhost:user" selects a virtual host;
+%% otherwise the configured default vhost is used.
+get_vhost_username(UserBin) ->
+ %% split at the last colon, disallowing colons in username
+ case re:split(UserBin, ":(?!.*?:)") of
+ [Vhost, UserName] -> {Vhost, UserName};
+ [UserBin] -> {rabbit_mqtt_util:env(vhost), UserBin}
+ end.
+
+%% Resolve effective credentials: use client-supplied values when given,
+%% fall back to the configured default user/pass only when anonymous
+%% access is allowed, else 'nocreds'. A user without a password gets the
+%% atom 'none' as password.
+creds(User, Pass) ->
+ DefaultUser = rabbit_mqtt_util:env(default_user),
+ DefaultPass = rabbit_mqtt_util:env(default_pass),
+ Anon = rabbit_mqtt_util:env(allow_anonymous),
+ U = case {User =/= undefined, is_binary(DefaultUser), Anon =:= true} of
+ {true, _, _ } -> list_to_binary(User);
+ {false, true, true} -> DefaultUser;
+ _ -> nocreds
+ end,
+ case U of
+ nocreds ->
+ nocreds;
+ _ ->
+ case {Pass =/= undefined, is_binary(DefaultPass), Anon =:= true} of
+ {true, _, _ } -> {U, list_to_binary(Pass)};
+ {false, true, true} -> {U, DefaultPass};
+ _ -> {U, none}
+ end
+ end.
+
+%% Requested subscription qos is capped at qos 1 (qos 2 downgrades).
+supported_subs_qos(?QOS_0) -> ?QOS_0;
+supported_subs_qos(?QOS_1) -> ?QOS_1;
+supported_subs_qos(?QOS_2) -> ?QOS_1.
+
+%% Map MQTT qos to AMQP delivery_mode (1 = transient, 2 = persistent).
+delivery_mode(?QOS_0) -> 1;
+delivery_mode(?QOS_1) -> 2.
+
+%% different qos subscriptions are received in different queues
+%% with appropriate durability and timeout arguments
+%% this will lead to duplicate messages for overlapping subscriptions
+%% with different qos values - todo: prevent duplicates
+%% different qos subscriptions are received in different queues
+%% with appropriate durability and timeout arguments
+%% this will lead to duplicate messages for overlapping subscriptions
+%% with different qos values - todo: prevent duplicates
+%%
+%% Declares and consumes from the per-client queue for the given qos if
+%% no consumer tag exists for it yet; otherwise returns the existing
+%% queue name unchanged. The new consumer tag is stored at position
+%% Qos+1 of the consumer_tags tuple.
+ensure_queue(Qos, #proc_state{ channels = {Channel, _},
+ client_id = ClientId,
+ clean_sess = CleanSess,
+ consumer_tags = {TagQ0, TagQ1} = Tags} = PState) ->
+ {QueueQ0, QueueQ1} = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ %% non-clean sessions get an x-expires TTL on the durable qos 1 queue
+ Qos1Args = case {rabbit_mqtt_util:env(subscription_ttl), CleanSess} of
+ {undefined, _} ->
+ [];
+ {Ms, false} when is_integer(Ms) ->
+ [{<<"x-expires">>, long, Ms}];
+ _ ->
+ []
+ end,
+ QueueSetup =
+ case {TagQ0, TagQ1, Qos} of
+ %% qos 0: transient auto-delete queue, consumed with no_ack
+ {undefined, _, ?QOS_0} ->
+ {QueueQ0,
+ #'queue.declare'{ queue = QueueQ0,
+ durable = false,
+ auto_delete = true },
+ #'basic.consume'{ queue = QueueQ0,
+ no_ack = true }};
+ %% qos 1: durable queue, explicit acks; auto-deleted only for
+ %% clean sessions
+ {_, undefined, ?QOS_1} ->
+ {QueueQ1,
+ #'queue.declare'{ queue = QueueQ1,
+ durable = true,
+ auto_delete = CleanSess,
+ arguments = Qos1Args },
+ #'basic.consume'{ queue = QueueQ1,
+ no_ack = false }};
+ {_, _, ?QOS_0} ->
+ {exists, QueueQ0};
+ {_, _, ?QOS_1} ->
+ {exists, QueueQ1}
+ end,
+ case QueueSetup of
+ {Queue, Declare, Consume} ->
+ #'queue.declare_ok'{} = amqp_channel:call(Channel, Declare),
+ #'basic.consume_ok'{ consumer_tag = Tag } =
+ amqp_channel:call(Channel, Consume),
+ {Queue, PState #proc_state{ consumer_tags = setelement(Qos+1, Tags, Tag) }};
+ {exists, Q} ->
+ {Q, PState}
+ end.
+
+%% Publish the stored will message, if any (amqp_pub ignores undefined).
+send_will(PState = #proc_state{ will_msg = WillMsg }) ->
+ amqp_pub(WillMsg, PState).
+
+%% No message (e.g. no will configured): nothing to do.
+amqp_pub(undefined, PState) ->
+ PState;
+
+%% set up a qos1 publishing channel if necessary
+%% this channel will only be used for publishing, not consuming
+amqp_pub(Msg = #mqtt_msg{ qos = ?QOS_1 },
+ PState = #proc_state{ channels = {ChQos0, undefined},
+ awaiting_seqno = undefined,
+ connection = Conn }) ->
+ {ok, Channel} = amqp_connection:open_channel(Conn),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Channel, self()),
+ amqp_pub(Msg, PState #proc_state{ channels = {ChQos0, Channel},
+ awaiting_seqno = 1 });
+
+%% Publish into the configured exchange, stamping qos and dup as headers
+%% so they survive the AMQP round trip. qos 1 publishes with a message
+%% id go over the confirm-mode channel and are tracked in unacked_pubs
+%% (confirm seqno -> MQTT message id) until the broker confirms them.
+amqp_pub(#mqtt_msg{ qos = Qos,
+ topic = Topic,
+ dup = Dup,
+ message_id = MessageId,
+ payload = Payload },
+ PState = #proc_state{ channels = {ChQos0, ChQos1},
+ exchange = Exchange,
+ unacked_pubs = UnackedPubs,
+ awaiting_seqno = SeqNo }) ->
+ Method = #'basic.publish'{ exchange = Exchange,
+ routing_key =
+ rabbit_mqtt_util:mqtt2amqp(Topic)},
+ Headers = [{<<"x-mqtt-publish-qos">>, byte, Qos},
+ {<<"x-mqtt-dup">>, bool, Dup}],
+ Msg = #amqp_msg{ props = #'P_basic'{ headers = Headers,
+ delivery_mode = delivery_mode(Qos)},
+ payload = Payload },
+ {UnackedPubs1, Ch, SeqNo1} =
+ case Qos =:= ?QOS_1 andalso MessageId =/= undefined of
+ true -> {gb_trees:enter(SeqNo, MessageId, UnackedPubs), ChQos1,
+ SeqNo + 1};
+ false -> {UnackedPubs, ChQos0, SeqNo}
+ end,
+ %% cast_flow participates in credit flow so a slow broker
+ %% back-pressures the reader
+ amqp_channel:cast_flow(Ch, Method, Msg),
+ PState #proc_state{ unacked_pubs = UnackedPubs1,
+ awaiting_seqno = SeqNo1 }.
+
+%% Adapter info shown in management/connection listings for this socket.
+adapter_info(Sock, ProtoVer) ->
+ amqp_connection:socket_adapter_info(
+ Sock, {'MQTT', integer_to_list(ProtoVer)}).
+
+%% Serialise a frame and push it down the socket.
+send_client(Frame, #proc_state{ socket = Sock }) ->
+ %rabbit_log:info("MQTT sending frame ~p ~n", [Frame]),
+ rabbit_net:port_command(Sock, rabbit_mqtt_frame:serialise(Frame)).
+
+%% Idempotent teardown: unregister the client id and close the AMQP
+%% connection, tolerating an already-dead connection.
+close_connection(PState = #proc_state{ connection = undefined }) ->
+ PState;
+close_connection(PState = #proc_state{ connection = Connection,
+ client_id = ClientId }) ->
+ % todo: maybe clean session
+ case ClientId of
+ undefined -> ok;
+ _ -> ok = rabbit_mqtt_collector:unregister(ClientId, self())
+ end,
+ %% ignore noproc or other exceptions to avoid debris
+ catch amqp_connection:close(Connection),
+ PState #proc_state{ channels = {undefined, undefined},
+ connection = undefined }.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_reader).
+-behaviour(gen_server2).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ code_change/3, terminate/2]).
+
+-export([conserve_resources/3, start_keepalive/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_mqtt.hrl").
+
+%%----------------------------------------------------------------------------
+
+%% Start an (initially idle) reader; the socket arrives later via the
+%% {go, ...} cast from rabbit_mqtt_sup:start_client/2.
+start_link() ->
+ gen_server2:start_link(?MODULE, [], []).
+
+%% rabbit_alarm callback: notify the reader to throttle/unthrottle.
+conserve_resources(Pid, _, Conserve) ->
+ Pid ! {conserve_resources, Conserve},
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% State stays 'undefined' until the {go, ...} cast delivers the socket;
+%% the backoff tuple configures gen_server2 hibernation.
+init([]) ->
+ {ok, undefined, hibernate, {backoff, 1000, 1000, 10000}}.
+
+%% No synchronous API: any call is a programming error.
+handle_call(Msg, From, State) ->
+ {stop, {mqtt_unexpected_call, Msg, From}, State}.
+
+%% Take ownership of the freshly-accepted socket: optionally upgrade it
+%% (SSL transform), register for resource alarms, build parser/processor
+%% state and start the receive loop via control_throttle/1.
+handle_cast({go, Sock0, SockTransform, KeepaliveSup}, undefined) ->
+ process_flag(trap_exit, true),
+ case rabbit_net:connection_string(Sock0, inbound) of
+ {ok, ConnStr} ->
+ log(info, "accepting MQTT connection ~p (~s)~n", [self(), ConnStr]),
+ case SockTransform(Sock0) of
+ {ok, Sock} ->
+ rabbit_alarm:register(
+ self(), {?MODULE, conserve_resources, []}),
+ ProcessorState = rabbit_mqtt_processor:initial_state(Sock),
+ {noreply,
+ control_throttle(
+ #state{socket = Sock,
+ conn_name = ConnStr,
+ await_recv = false,
+ connection_state = running,
+ keepalive = {none, none},
+ keepalive_sup = KeepaliveSup,
+ conserve = false,
+ parse_state = rabbit_mqtt_frame:initial_state(),
+ proc_state = ProcessorState }),
+ hibernate};
+ {error, Reason} ->
+ rabbit_net:fast_close(Sock0),
+ {stop, {network_error, Reason, ConnStr}, undefined}
+ end;
+ {network_error, Reason} ->
+ rabbit_net:fast_close(Sock0),
+ {stop, {shutdown, Reason}, undefined};
+ {error, enotconn} ->
+ rabbit_net:fast_close(Sock0),
+ {stop, shutdown, undefined};
+ {error, Reason} ->
+ rabbit_net:fast_close(Sock0),
+ {stop, {network_error, Reason}, undefined}
+ end;
+
+%% Another connection claimed the same client id; this one must go.
+handle_cast(duplicate_id,
+ State = #state{ proc_state = PState,
+ conn_name = ConnName }) ->
+ log(warning, "MQTT disconnecting duplicate client id ~p (~p)~n",
+ [rabbit_mqtt_processor:info(client_id, PState), ConnName]),
+ {stop, {shutdown, duplicate_id}, State};
+
+handle_cast(Msg, State) ->
+ {stop, {mqtt_unexpected_cast, Msg}, State}.
+
+%% AMQP deliveries and publisher confirms are forwarded to the processor.
+handle_info({#'basic.deliver'{}, #amqp_msg{}} = Delivery,
+ State = #state{ proc_state = ProcState }) ->
+ callback_reply(State, rabbit_mqtt_processor:amqp_callback(Delivery, ProcState));
+
+handle_info(#'basic.ack'{} = Ack, State = #state{ proc_state = ProcState }) ->
+ callback_reply(State, rabbit_mqtt_processor:amqp_callback(Ack, ProcState));
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State, hibernate};
+
+%% The broker cancelled our consumer (e.g. queue deleted): stop.
+handle_info(#'basic.cancel'{}, State) ->
+ {stop, {shutdown, subscription_cancelled}, State};
+
+%% Linked AMQP connection died (we trap exits): stop with its reason.
+handle_info({'EXIT', _Conn, Reason}, State) ->
+ {stop, {connection_died, Reason}, State};
+
+handle_info({inet_reply, _Ref, ok}, State) ->
+ {noreply, State, hibernate};
+
+%% Async recv completed: parse the bytes, then re-arm the socket.
+handle_info({inet_async, Sock, _Ref, {ok, Data}},
+ State = #state{ socket = Sock }) ->
+ process_received_bytes(
+ Data, control_throttle(State #state{ await_recv = false }));
+
+handle_info({inet_async, _Sock, _Ref, {error, Reason}}, State = #state {}) ->
+ network_error(Reason, State);
+
+handle_info({inet_reply, _Sock, {error, Reason}}, State = #state {}) ->
+ network_error(Reason, State);
+
+%% Memory/disk alarm toggled: recompute throttling.
+handle_info({conserve_resources, Conserve}, State) ->
+ {noreply, control_throttle(State #state{ conserve = Conserve }), hibernate};
+
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ {noreply, control_throttle(State), hibernate};
+
+handle_info({start_keepalives, Keepalive},
+ State = #state { keepalive_sup = KeepaliveSup, socket = Sock }) ->
+ %% Only the client has the responsibility for sending keepalives
+ SendFun = fun() -> ok end,
+ Parent = self(),
+ ReceiveFun = fun() -> Parent ! keepalive_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ KeepaliveSup, Sock, 0, SendFun, Keepalive, ReceiveFun),
+ {noreply, State #state { keepalive = Heartbeater }};
+
+handle_info(keepalive_timeout, State = #state { conn_name = ConnStr }) ->
+ log(error, "closing MQTT connection ~p (keepalive timeout)~n", [ConnStr]),
+ {stop, {shutdown, keepalive_timeout}, State};
+
+handle_info(Msg, State) ->
+ {stop, {mqtt_unexpected_msg, Msg}, State}.
+
+%% Connection teardown logging: TLS upgrade failures get tailored
+%% messages, generic network errors fall through, and any shutdown that
+%% reached a running state closes the processor's AMQP connection.
+terminate({network_error, {ssl_upgrade_error, closed}, ConnStr}, _State) ->
+ log(error, "MQTT detected TLS upgrade error on ~s: connection closed~n",
+ [ConnStr]);
+
+terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, "handshake failure"}}, ConnStr}, _State) ->
+ log(error, "MQTT detected TLS upgrade error on ~s: handshake failure~n",
+ [ConnStr]);
+
+terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, "unknown ca"}}, ConnStr}, _State) ->
+ log(error, "MQTT detected TLS certificate verification error on ~s: alert 'unknown CA'~n",
+ [ConnStr]);
+
+terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, Alert}}, ConnStr}, _State) ->
+ log(error, "MQTT detected TLS upgrade error on ~s: alert ~s~n",
+ [ConnStr, Alert]);
+
+terminate({network_error, {ssl_upgrade_error, Reason}, ConnStr}, _State) ->
+ log(error, "MQTT detected TLS upgrade error on ~s: ~p~n",
+ [ConnStr, Reason]);
+
+terminate({network_error, Reason, ConnStr}, _State) ->
+ log(error, "MQTT detected network error on ~s: ~p~n",
+ [ConnStr, Reason]);
+
+terminate({network_error, Reason}, _State) ->
+ log(error, "MQTT detected network error: ~p~n", [Reason]);
+
+%% fixed: the next two clauses bound State without using it, which
+%% produced "variable 'State' is unused" compiler warnings.
+terminate(normal, #state{proc_state = ProcState,
+ conn_name = ConnName}) ->
+ rabbit_mqtt_processor:close_connection(ProcState),
+ log(info, "closing MQTT connection ~p (~s)~n", [self(), ConnName]),
+ ok;
+
+terminate(_Reason, #state{proc_state = ProcState}) ->
+ rabbit_mqtt_processor:close_connection(ProcState),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Feed received bytes through the MQTT frame parser; complete frames go
+%% to rabbit_mqtt_processor:process_frame/2, the parse state is reset
+%% after each whole frame, and leftover bytes are handled recursively.
+%% Returns a gen_server2 handle_info-style reply.
+process_received_bytes(<<>>, State) ->
+ {noreply, State, hibernate};
+process_received_bytes(Bytes,
+ State = #state{ parse_state = ParseState,
+ proc_state = ProcState,
+ conn_name = ConnStr }) ->
+ case rabbit_mqtt_frame:parse(Bytes, ParseState) of
+ {more, ParseState1} ->
+ {noreply,
+ control_throttle( State #state{ parse_state = ParseState1 }),
+ hibernate};
+ {ok, Frame, Rest} ->
+ case rabbit_mqtt_processor:process_frame(Frame, ProcState) of
+ {ok, ProcState1} ->
+ PS = rabbit_mqtt_frame:initial_state(),
+ process_received_bytes(
+ Rest,
+ State #state{ parse_state = PS,
+ proc_state = ProcState1 });
+ {error, Reason, ProcState1} ->
+ %% fixed: format arguments were swapped, printing the
+ %% connection string as the error and vice versa
+ log(info, "MQTT protocol error ~p for connection ~p~n",
+ [Reason, ConnStr]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ {error, Error} ->
+ log(error, "MQTT detected framing error '~p' for connection ~p~n",
+ [Error, ConnStr]),
+ {stop, {shutdown, Error}, State};
+ {stop, ProcState1} ->
+ {stop, normal, pstate(State, ProcState1)}
+ end;
+ {error, Error} ->
+ %% fixed: same argument swap as above (and now consistent with
+ %% the identical message in the processor-error branch)
+ log(error, "MQTT detected framing error '~p' for connection ~p~n",
+ [Error, ConnStr]),
+ {stop, {shutdown, Error}, State}
+ end.
+
+%% Translate a processor amqp_callback result into a gen_server2 reply.
+%% NOTE(review): the amqp_callback clauses visible in this file only ever
+%% return {ok, _}; confirm the {err, _, _} shape (vs {error, _, _}) is
+%% really what the processor can produce.
+callback_reply(State, {ok, ProcState}) ->
+ {noreply, pstate(State, ProcState), hibernate};
+callback_reply(State, {err, Reason, ProcState}) ->
+ {stop, Reason, pstate(State, ProcState)}.
+
+%% A keepalive interval of 0 means the client disabled keepalives.
+start_keepalive(_, 0 ) -> ok;
+start_keepalive(Pid, Keepalive) -> Pid ! {start_keepalives, Keepalive}.
+
+%% Store an updated processor state in the reader state.
+pstate(State = #state {}, PState = #proc_state{}) ->
+ State #state{ proc_state = PState }.
+
+%%----------------------------------------------------------------------------
+
+%% Log to the broker's connection log category.
+log(Level, Fmt) -> rabbit_log:log(connection, Level, Fmt, []).
+log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
+
+%% Publish the client's will message (if any), then stop the reader.
+send_will_and_terminate(PState, State) ->
+ rabbit_mqtt_processor:send_will(PState),
+ % todo: flush channel after publish
+ {stop, {shutdown, conn_closed}, State}.
+
+%% Network failure: log, fire the will, and shut down.
+network_error(closed,
+ State = #state{ conn_name = ConnStr,
+ proc_state = PState }) ->
+ log(info, "MQTT detected network error for ~p: peer closed TCP connection~n",
+ [ConnStr]),
+ send_will_and_terminate(PState, State);
+
+network_error(Reason,
+ State = #state{ conn_name = ConnStr,
+ proc_state = PState }) ->
+ log(info, "MQTT detected network error for ~p: ~p~n", [ConnStr, Reason]),
+ send_will_and_terminate(PState, State).
+
+%% Re-arm the async receive, unless throttled or one is already pending.
+run_socket(State = #state{ connection_state = blocked }) ->
+ State;
+run_socket(State = #state{ await_recv = true }) ->
+ State;
+run_socket(State = #state{ socket = Sock }) ->
+ rabbit_net:async_recv(Sock, 0, infinity),
+ State#state{ await_recv = true }.
+
+%% Flip between running and blocked based on resource alarms and credit
+%% flow; pausing the heartbeat monitor while blocked prevents spurious
+%% keepalive timeouts when we deliberately stop reading.
+control_throttle(State = #state{ connection_state = Flow,
+ conserve = Conserve }) ->
+ case {Flow, Conserve orelse credit_flow:blocked()} of
+ {running, true} -> ok = rabbit_heartbeat:pause_monitor(
+ State#state.keepalive),
+ State #state{ connection_state = blocked };
+ {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
+ State#state.keepalive),
+ run_socket(State #state{
+ connection_state = running });
+ {_, _} -> run_socket(State)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_sup).
+-behaviour(supervisor2).
+
+-define(MAX_WAIT, 16#ffffffff).
+
+-export([start_link/2, init/1]).
+
+-export([start_client/1, start_ssl_client/2]).
+
+%% Top-level plugin supervisor.
+start_link(Listeners, []) ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, [Listeners]).
+
+%% Children: the client-id collector, a simple-one-for-one client
+%% supervisor, and one listener spec per configured TCP/SSL address.
+%% SSL options are only resolved when SSL listeners are configured.
+init([{Listeners, SslListeners}]) ->
+ {ok, SocketOpts} = application:get_env(rabbitmq_mqtt, tcp_listen_options),
+ SslOpts = case SslListeners of
+ [] -> none;
+ _ -> rabbit_networking:ensure_ssl()
+ end,
+ {ok, {{one_for_all, 10, 10},
+ [{collector,
+ {rabbit_mqtt_collector, start_link, []},
+ transient, ?MAX_WAIT, worker, [rabbit_mqtt_collector]},
+ {rabbit_mqtt_client_sup,
+ {rabbit_client_sup, start_link, [{local, rabbit_mqtt_client_sup},
+ {rabbit_mqtt_connection_sup, start_link, []}]},
+ transient, infinity, supervisor, [rabbit_client_sup]} |
+ listener_specs(fun tcp_listener_spec/1,
+ [SocketOpts], Listeners) ++
+ listener_specs(fun ssl_listener_spec/1,
+ [SocketOpts, SslOpts], SslListeners)]}}.
+
+%% One child spec per resolved listener address (a configured listener
+%% may expand to several addresses, e.g. IPv4 + IPv6).
+listener_specs(Fun, Args, Listeners) ->
+ [Fun([Address | Args]) ||
+ Listener <- Listeners,
+ Address <- rabbit_networking:tcp_listener_addresses(Listener)].
+
+%% Plain TCP listener; accepted sockets go to start_client/1.
+tcp_listener_spec([Address, SocketOpts]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_mqtt_listener_sup, Address, SocketOpts,
+ mqtt, "MQTT TCP Listener",
+ {?MODULE, start_client, []}).
+
+%% SSL listener; accepted sockets go to start_ssl_client/2 with SslOpts.
+ssl_listener_spec([Address, SocketOpts, SslOpts]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_mqtt_listener_sup, Address, SocketOpts,
+ 'mqtt/ssl', "MQTT SSL Listener",
+ {?MODULE, start_ssl_client, [SslOpts]}).
+
+%% Spawn a {keepalive sup, reader} pair for an accepted socket, hand the
+%% socket over to the reader, and kick it off with a {go, ...} cast that
+%% carries the (possibly SSL-upgrading) socket transform.
+start_client(Sock, SockTransform) ->
+ {ok, _, {KeepaliveSup, Reader}} =
+ supervisor2:start_child(rabbit_mqtt_client_sup, []),
+ ok = rabbit_net:controlling_process(Sock, Reader),
+ ok = gen_server2:cast(Reader, {go, Sock, SockTransform, KeepaliveSup}),
+
+ %% see comment in rabbit_networking:start_client/2
+ gen_event:which_handlers(error_logger),
+ Reader.
+
+%% Plain TCP: identity socket transform.
+start_client(Sock) ->
+ start_client(Sock, fun (S) -> {ok, S} end).
+
+%% SSL: the transform performs the TLS upgrade in the reader process.
+start_ssl_client(SslOpts, Sock) ->
+ Transform = rabbit_networking:ssl_transform_fun(SslOpts),
+ start_client(Sock, Transform).
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mqtt_util).
+
+-include("rabbit_mqtt.hrl").
+
+-compile(export_all).
+
+%% Per-client queue names, one per qos level, e.g.
+%% "mqtt-subscription-<id>qos0" / "...qos1". (Note: the function name's
+%% "subcription" spelling is established API within this plugin.)
+subcription_queue_name(ClientId) ->
+ Base = "mqtt-subscription-" ++ ClientId ++ "qos",
+ {list_to_binary(Base ++ "0"), list_to_binary(Base ++ "1")}.
+
+%% amqp mqtt descr
+%% * + match one topic level
+%% # # match multiple topic levels
+%% . / topic level separator
+mqtt2amqp(Topic) ->
+ erlang:iolist_to_binary(
+ re:replace(re:replace(Topic, "/", ".", [global]),
+ "[\+]", "*", [global])).
+
+%% Inverse of mqtt2amqp: map AMQP topic syntax back to MQTT.
+amqp2mqtt(Topic) ->
+ erlang:iolist_to_binary(
+ re:replace(re:replace(Topic, "[\*]", "+", [global]),
+ "[\.]", "/", [global])).
+
+%% Random client id; nthtail(1, ...) drops the leading character of the
+%% guid string form.
+gen_client_id() ->
+ lists:nthtail(1, rabbit_guid:string(rabbit_guid:gen_secure(), [])).
+
+%% Read a rabbitmq_mqtt application env var, 'undefined' if unset.
+env(Key) ->
+ case application:get_env(rabbitmq_mqtt, Key) of
+ {ok, Val} -> Val;
+ undefined -> undefined
+ end.
+
+%% table_lookup that tolerates messages published without any headers.
+table_lookup(undefined, _Key) ->
+ undefined;
+table_lookup(Table, Key) ->
+ rabbit_misc:table_lookup(Table, Key).
--- /dev/null
+{application, rabbitmq_mqtt,
+ [{description, "RabbitMQ MQTT Adapter"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_mqtt, []}},
+ %% Defaults: anonymous logins fall back to the guest user on the "/"
+ %% vhost, and messages are routed through the amq.topic exchange.
+ {env, [{default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {allow_anonymous, true},
+ {vhost, <<"/">>},
+ {exchange, <<"amq.topic">>},
+ {subscription_ttl, 1800000}, % 30 min
+ {prefetch, 10},
+ %% 1883 is the IANA-registered MQTT port; no SSL by default.
+ {ssl_listeners, []},
+ {tcp_listeners, [1883]},
+ {tcp_listen_options, [binary,
+ {packet, raw},
+ {reuseaddr, true},
+ {backlog, 128},
+ {nodelay, true}]}]},
+ {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
--- /dev/null
+# Upstream Paho MQTT Java client, pinned to a known-good revision,
+# used as the driver for the broker-facing test suite.
+UPSTREAM_GIT=https://git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.java.git
+REVISION=00b5b2f99ae8410b7d96d106e080a092c5f92546
+
+JC=javac
+
+TEST_SRC=src
+CHECKOUT_DIR=test_client
+PAHO_JAR_NAME=org.eclipse.paho.client.mqttv3.jar
+PAHO_JAR=$(CHECKOUT_DIR)/org.eclipse.paho.client.mqttv3/$(PAHO_JAR_NAME)
+JUNIT_JAR=../lib/junit.jar
+JAVA_AMQP_DIR=../../rabbitmq-java-client/
+JAVA_AMQP_CLASSES=$(JAVA_AMQP_DIR)build/classes/
+
+# Test sources and their compiled classes; ':=' evaluates once.
+TEST_SRCS:=$(shell find $(TEST_SRC) -name '*.java')
+ALL_CLASSES:=$(foreach f,$(shell find src -name '*.class'),'$(f)')
+TEST_CLASSES:=$(TEST_SRCS:.java=.class)
+CP:=$(PAHO_JAR):$(JUNIT_JAR):$(TEST_SRC):$(JAVA_AMQP_CLASSES)
+
+# Map a compiled class path (src/a/b/C.class) to its class name (a.b.C).
+define class_from_path
+$(subst .class,,$(subst src.,,$(subst /,.,$(1))))
+endef
+
+.PHONY: test
+# Run each compiled test class under the JUnit text runner.
+test: $(TEST_CLASSES) build_java_amqp
+ $(foreach test,$(TEST_CLASSES),CLASSPATH=$(CP) java junit.textui.TestRunner -text $(call class_from_path,$(test)))
+
+clean:
+ rm -rf $(PAHO_JAR) $(ALL_CLASSES)
+
+distclean: clean
+ rm -rf $(CHECKOUT_DIR)
+
+# Fetch the Paho client at the pinned revision; discard a checkout
+# that fails to reach that revision so a retry starts clean.
+$(CHECKOUT_DIR):
+ git clone $(UPSTREAM_GIT) $@
+ (cd $@ && git checkout $(REVISION)) || rm -rf $@
+
+# Build the Paho client jar using its own ant build file.
+$(PAHO_JAR): $(CHECKOUT_DIR)
+ ant -buildfile $</org.eclipse.paho.client.mqttv3/build.xml \
+ -Dship.folder=. -Dmqttv3-client-jar=$(PAHO_JAR_NAME) full
+
+%.class: %.java $(PAHO_JAR) $(JUNIT_JAR)
+ $(JC) -cp $(CP) $<
+.PHONY: build_java_amqp
+# Build the Java AMQP client classes the interop tests link against.
+# Use $(MAKE) rather than a literal 'make' so command-line flags and
+# the jobserver propagate correctly into the sub-make.
+build_java_amqp:
+ $(MAKE) -C $(JAVA_AMQP_DIR)
--- /dev/null
+// The contents of this file are subject to the Mozilla Public License
+// Version 1.1 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at http://www.mozilla.org/MPL/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is GoPivotal, Inc.
+// Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+//
+
+package com.rabbitmq.mqtt.test;
+
+import com.rabbitmq.client.*;
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken;
+import org.eclipse.paho.client.mqttv3.MqttCallback;
+import org.eclipse.paho.client.mqttv3.MqttClient;
+import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
+import org.eclipse.paho.client.mqttv3.MqttDeliveryToken;
+import org.eclipse.paho.client.mqttv3.MqttException;
+import org.eclipse.paho.client.mqttv3.MqttMessage;
+import org.eclipse.paho.client.mqttv3.MqttTopic;
+import org.eclipse.paho.client.mqttv3.internal.NetworkModule;
+import org.eclipse.paho.client.mqttv3.internal.TCPNetworkModule;
+import org.eclipse.paho.client.mqttv3.internal.wire.MqttInputStream;
+import org.eclipse.paho.client.mqttv3.internal.wire.MqttOutputStream;
+import org.eclipse.paho.client.mqttv3.internal.wire.MqttPingReq;
+
+import javax.net.SocketFactory;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/***
+ * MQTT v3.1 tests
+ * TODO: synchronise access to variables
+ */
+
+public class MqttTest extends TestCase implements MqttCallback {
+
+ // Broker endpoint under test; assumes a local broker with the MQTT
+ // adapter listening on the default port 1883.
+ private final String host = "localhost";
+ private final int port = 1883;
+ private final String brokerUrl = "tcp://" + host + ":" + port;
+ private String clientId;
+ private String clientId2;
+ private MqttClient client;
+ private MqttClient client2;
+ private MqttConnectOptions conOpt;
+ // Messages recorded by messageArrived(); asserted on after a delay.
+ private ArrayList<MqttMessage> receivedMessages;
+
+ private final byte[] payload = "payload".getBytes();
+ private final String topic = "test-topic";
+ // How long tests wait for asynchronous deliveries before asserting.
+ private int testDelay = 2000;
+ private long lastReceipt;
+ // Set by tests that deliberately kill the connection, so that
+ // connectionLost() does not fail the test.
+ private boolean expectConnectionFailure;
+
+ private ConnectionFactory connectionFactory;
+ private Connection conn;
+ private Channel ch;
+
+ // override 10s limit
+ private class MyConnOpts extends MqttConnectOptions {
+ private int keepAliveInterval = 60;
+ @Override
+ public void setKeepAliveInterval(int keepAliveInterval) {
+ this.keepAliveInterval = keepAliveInterval;
+ }
+ @Override
+ public int getKeepAliveInterval() {
+ return keepAliveInterval;
+ }
+ }
+
+ @Override
+ public void setUp() throws MqttException {
+ // Randomised client ids so concurrent/successive runs don't clash.
+ clientId = getClass().getSimpleName() + ((int) (10000*Math.random()));
+ clientId2 = clientId + "-2";
+ client = new MqttClient(brokerUrl, clientId, null);
+ client2 = new MqttClient(brokerUrl, clientId2, null);
+ conOpt = new MyConnOpts();
+ setConOpts(conOpt);
+ receivedMessages = new ArrayList();
+ expectConnectionFailure = false;
+ }
+
+ @Override
+ public void tearDown() throws MqttException {
+ // clean any sticky sessions
+ setConOpts(conOpt);
+ client = new MqttClient(brokerUrl, clientId, null);
+ try {
+ client.connect(conOpt);
+ client.disconnect();
+ } catch (Exception _) {}
+
+ client2 = new MqttClient(brokerUrl, clientId2, null);
+ try {
+ client2.connect(conOpt);
+ client2.disconnect();
+ } catch (Exception _) {}
+ }
+
+ // Open an AMQP connection and channel for the interop tests.
+ private void setUpAmqp() throws IOException {
+ connectionFactory = new ConnectionFactory();
+ connectionFactory.setHost(host);
+ conn = connectionFactory.newConnection();
+ ch = conn.createChannel();
+ }
+
+ private void tearDownAmqp() throws IOException {
+ conn.close();
+ }
+
+ private void setConOpts(MqttConnectOptions conOpts) {
+ // provide authentication if the broker needs it
+ // conOpts.setUserName("guest");
+ // conOpts.setPassword("guest".toCharArray());
+ conOpts.setCleanSession(true);
+ conOpts.setKeepAliveInterval(60);
+ }
+
+ // The broker must reject any packet sent before CONNECT.
+ public void testConnectFirst() throws MqttException, IOException, InterruptedException {
+ NetworkModule networkModule = new TCPNetworkModule(SocketFactory.getDefault(), host, port, "");
+ networkModule.start();
+ MqttInputStream mqttIn = new MqttInputStream (networkModule.getInputStream());
+ MqttOutputStream mqttOut = new MqttOutputStream(networkModule.getOutputStream());
+ try {
+ mqttOut.write(new MqttPingReq());
+ mqttOut.flush();
+ mqttIn.readMqttWireMessage();
+ fail("Error expected if CONNECT is not first packet");
+ } catch (IOException _) {}
+ }
+
+ public void testInvalidUser() throws MqttException {
+ conOpt.setUserName("invalid-user");
+ try {
+ client.connect(conOpt);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ Assert.assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ }
+ }
+
+ public void testInvalidPassword() throws MqttException {
+ conOpt.setUserName("invalid-user");
+ conOpt.setPassword("invalid-password".toCharArray());
+ try {
+ client.connect(conOpt);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ Assert.assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ }
+ }
+
+
+ public void testSubscribeQos0() throws MqttException, InterruptedException {
+ client.connect(conOpt);
+ client.setCallback(this);
+ client.subscribe(topic, 0);
+
+ publish(client, topic, 0, payload);
+ Thread.sleep(testDelay);
+ Assert.assertEquals(1, receivedMessages.size());
+ Assert.assertEquals(true, Arrays.equals(receivedMessages.get(0).getPayload(), payload));
+ Assert.assertEquals(0, receivedMessages.get(0).getQos());
+ client.disconnect();
+ }
+
+ public void testSubscribeUnsubscribe() throws MqttException, InterruptedException {
+ client.connect(conOpt);
+ client.setCallback(this);
+ client.subscribe(topic, 0);
+
+ publish(client, topic, 1, payload);
+ Thread.sleep(testDelay);
+ Assert.assertEquals(1, receivedMessages.size());
+ Assert.assertEquals(true, Arrays.equals(receivedMessages.get(0).getPayload(), payload));
+ Assert.assertEquals(0, receivedMessages.get(0).getQos());
+
+ client.unsubscribe(topic);
+ publish(client, topic, 0, payload);
+ Thread.sleep(testDelay);
+ Assert.assertEquals(1, receivedMessages.size());
+ client.disconnect();
+ }
+
+ public void testSubscribeQos1() throws MqttException, InterruptedException {
+ client.connect(conOpt);
+ client.setCallback(this);
+ client.subscribe(topic, 1);
+
+ publish(client, topic, 0, payload);
+ publish(client, topic, 1, payload);
+ Thread.sleep(testDelay);
+
+ Assert.assertEquals(2, receivedMessages.size());
+ MqttMessage msg1 = receivedMessages.get(0);
+ MqttMessage msg2 = receivedMessages.get(1);
+
+ Assert.assertEquals(true, Arrays.equals(msg1.getPayload(), payload));
+ Assert.assertEquals(0, msg1.getQos());
+
+ Assert.assertEquals(true, Arrays.equals(msg2.getPayload(), payload));
+ Assert.assertEquals(1, msg2.getQos());
+
+ client.disconnect();
+ }
+
+ // Wildcard routing: '+' matches one level, '#' matches many.
+ public void testTopics() throws MqttException, InterruptedException {
+ client.connect(conOpt);
+ client.setCallback(this);
+ client.subscribe("/+/mid/#");
+ String cases[] = {"/pre/mid2", "/mid", "/a/mid/b/c/d", "/frob/mid"};
+ List<String> expected = Arrays.asList("/a/mid/b/c/d", "/frob/mid");
+ for(String example : cases){
+ publish(client, example, 0, example.getBytes());
+ }
+ Thread.sleep(testDelay);
+ Assert.assertEquals(expected.size(), receivedMessages.size());
+ for (MqttMessage m : receivedMessages){
+ expected.contains(new String(m.getPayload()));
+ }
+ client.disconnect();
+ }
+
+ public void testNonCleanSession() throws MqttException, InterruptedException {
+ conOpt.setCleanSession(false);
+ client.connect(conOpt);
+ client.subscribe(topic, 1);
+ client.disconnect();
+
+ client2.connect(conOpt);
+ publish(client2, topic, 1, payload);
+ client2.disconnect();
+
+ client.setCallback(this);
+ client.connect(conOpt);
+
+ Thread.sleep(testDelay);
+ Assert.assertEquals(1, receivedMessages.size());
+ Assert.assertEquals(true, Arrays.equals(receivedMessages.get(0).getPayload(), payload));
+ client.disconnect();
+ }
+
+ public void testCleanSession() throws MqttException, InterruptedException {
+ conOpt.setCleanSession(false);
+ client.connect(conOpt);
+ client.subscribe(topic, 1);
+ client.disconnect();
+
+ client2.connect(conOpt);
+ publish(client2, topic, 1, payload);
+ client2.disconnect();
+
+ conOpt.setCleanSession(true);
+ client.connect(conOpt);
+ client.setCallback(this);
+ client.subscribe(topic, 1);
+
+ Thread.sleep(testDelay);
+ Assert.assertEquals(0, receivedMessages.size());
+ client.unsubscribe(topic);
+ client.disconnect();
+ }
+
+ // A second connection with the same client id must evict the first.
+ public void testMultipleClientIds() throws MqttException, InterruptedException {
+ client.connect(conOpt);
+ client2 = new MqttClient(brokerUrl, clientId, null);
+ client2.connect(conOpt);
+ Thread.sleep(testDelay);
+ Assert.assertFalse(client.isConnected());
+ client2.disconnect();
+ }
+
+ public void testPing() throws MqttException, InterruptedException {
+ conOpt.setKeepAliveInterval(1);
+ client.connect(conOpt);
+ Thread.sleep(3000);
+ Assert.assertEquals(true, client.isConnected());
+ client.disconnect();
+ }
+
+ // Kill the underlying socket and expect the broker to publish the
+ // client's will message to the subscribed second client.
+ public void testWill() throws MqttException, InterruptedException, IOException {
+ client2.connect(conOpt);
+ client2.subscribe(topic);
+ client2.setCallback(this);
+
+ final SocketFactory factory = SocketFactory.getDefault();
+ final ArrayList<Socket> sockets = new ArrayList<Socket>();
+ SocketFactory testFactory = new SocketFactory() {
+ public Socket createSocket(String s, int i) throws IOException {
+ Socket sock = factory.createSocket(s, i);
+ sockets.add(sock);
+ return sock;
+ }
+ public Socket createSocket(String s, int i, InetAddress a, int i1) throws IOException {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i) throws IOException {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i, InetAddress a1, int i1) throws IOException {
+ return null;
+ }
+ @Override
+ public Socket createSocket() throws IOException {
+ Socket sock = new Socket();
+ sockets.add(sock);
+ return sock;
+ }
+ };
+ conOpt.setSocketFactory(testFactory);
+ MqttTopic willTopic = client.getTopic(topic);
+ conOpt.setWill(willTopic, payload, 0, false);
+ conOpt.setCleanSession(false);
+ client.connect(conOpt);
+
+ Assert.assertEquals(1, sockets.size());
+ expectConnectionFailure = true;
+ sockets.get(0).close();
+ Thread.sleep(testDelay);
+
+ Assert.assertEquals(1, receivedMessages.size());
+ Assert.assertEquals(true, Arrays.equals(receivedMessages.get(0).getPayload(), payload));
+ client2.disconnect();
+ }
+
+ public void testSubscribeMultiple() throws MqttException {
+ client.connect(conOpt);
+ publish(client, "/topic/1", 1, "msq1-qos1".getBytes());
+
+ client2.connect(conOpt);
+ client2.setCallback(this);
+ client2.subscribe("/topic/#");
+ client2.subscribe("/topic/#");
+
+ publish(client, "/topic/2", 0, "msq2-qos0".getBytes());
+ publish(client, "/topic/3", 1, "msq3-qos1".getBytes());
+ publish(client, topic, 0, "msq4-qos0".getBytes());
+ publish(client, topic, 1, "msq4-qos1".getBytes());
+
+ Assert.assertEquals(2, receivedMessages.size());
+ client.disconnect();
+ client2.disconnect();
+ }
+
+ public void testPublishMultiple() throws MqttException, InterruptedException {
+ int pubCount = 50;
+ for (int subQos=0; subQos < 2; subQos++){
+ for (int pubQos=0; pubQos < 2; pubQos++){
+ client.connect(conOpt);
+ client.subscribe(topic, subQos);
+ client.setCallback(this);
+ long start = System.currentTimeMillis();
+ for (int i=0; i<pubCount; i++){
+ publish(client, topic, pubQos, payload);
+ }
+ Thread.sleep(testDelay);
+ Assert.assertEquals(pubCount, receivedMessages.size());
+ System.out.println("publish QOS" + pubQos + " subscribe QOS" + subQos +
+ ", " + pubCount + " msgs took " +
+ (lastReceipt - start)/1000.0 + "sec");
+ client.disconnect();
+ receivedMessages.clear();
+ }
+ }
+ }
+
+ // MQTT publish should be consumable from a queue bound to amq.topic.
+ public void testInteropM2A() throws MqttException, IOException, InterruptedException {
+ setUpAmqp();
+ String queue = ch.queueDeclare().getQueue();
+ ch.queueBind(queue, "amq.topic", topic);
+
+ client.connect(conOpt);
+ publish(client, topic, 1, payload);
+ client.disconnect();
+ Thread.sleep(testDelay);
+
+ GetResponse response = ch.basicGet(queue, true);
+ assertTrue(Arrays.equals(payload, response.getBody()));
+ assertNull(ch.basicGet(queue, true));
+ tearDownAmqp();
+ }
+
+ // AMQP publish to amq.topic should reach an MQTT subscriber.
+ public void testInteropA2M() throws MqttException, IOException, InterruptedException {
+ client.connect(conOpt);
+ client.setCallback(this);
+ client.subscribe(topic, 1);
+
+ setUpAmqp();
+ ch.basicPublish("amq.topic", topic, MessageProperties.MINIMAL_BASIC, payload);
+ tearDownAmqp();
+ Thread.sleep(testDelay);
+
+ Assert.assertEquals(1, receivedMessages.size());
+ client.disconnect();
+ }
+
+ // Publish synchronously: block until the delivery token completes.
+ private void publish(MqttClient client, String topicName, int qos, byte[] payload) throws MqttException {
+ MqttTopic topic = client.getTopic(topicName);
+ MqttMessage message = new MqttMessage(payload);
+ message.setQos(qos);
+ MqttDeliveryToken token = topic.publish(message);
+ token.waitForCompletion();
+ }
+
+ // MqttCallback: fail fast unless a test deliberately killed the link.
+ public void connectionLost(Throwable cause) {
+ if (!expectConnectionFailure)
+ fail("Connection unexpectedly lost");
+ }
+
+ // MqttCallback: record the message and the receipt time for assertions.
+ public void messageArrived(String topic, MqttMessage message) throws Exception {
+ lastReceipt = System.currentTimeMillis();
+ receivedMessages.add(message);
+ }
+
+ public void deliveryComplete(IMqttDeliveryToken token) {
+ }
+}
--- /dev/null
+#!/bin/sh
+# Build the Java AMQP client, then run the MQTT test suite.
+# set -e: stop immediately if the build fails instead of running the
+# tests against stale classes; quoting "$(dirname "$0")" keeps paths
+# containing spaces working.
+set -e
+dir="$(dirname "$0")"
+make -C "$dir" build_java_amqp
+make -C "$dir" test
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Adds information on shovel status to the management plugin. Build it
+like any other plugin.
+
+If you have a heterogeneous cluster (where the nodes have different
+plugins installed), this plugin should be installed on the same nodes
+as the management plugin.
+
+Strictly speaking the shovel plugin does not need to be installed for
+this plugin to work, but without it there will be no shovel status to
+report.
+
+The HTTP API is very simple: GET /api/shovels.
--- /dev/null
+%% Broker test configuration: declares a single static shovel
+%% ('my-static') that consumes from queue <<"static">> on the local
+%% broker and republishes via the default exchange with routing key
+%% <<"static2">>.
+[
+ {rabbitmq_shovel,
+ [{shovels,
+ [{'my-static',
+ %% Source: the local broker; declare the source queue up front.
+ [{sources, [{broker, "amqp://"},
+ {declarations, [{'queue.declare', [{queue, <<"static">>}]}]}
+ ]},
+ %% Destination: also the local broker.
+ {destinations, [{broker, "amqp://"}]},
+ {queue, <<"static">>},
+ %% Republish via the default exchange under a different key.
+ {publish_fields, [ {exchange, <<"">>},
+ {routing_key, <<"static2">>}
+ ]}
+ ]}
+ ]}
+ ]}
+].
--- /dev/null
+# This plugin is releasable as part of the broker distribution.
+RELEASABLE:=true
+DEPS:=rabbitmq-management rabbitmq-shovel
+WITH_BROKER_TEST_COMMANDS:=rabbit_shovel_mgmt_test_all:all_tests()
+WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/etc/rabbit-test
+
+# Ship everything under priv/ (the management UI assets) with the app.
+CONSTRUCT_APP_PREREQS:=$(shell find $(PACKAGE_DIR)/priv -type f)
+define construct_app_commands
+ cp -r $(PACKAGE_DIR)/priv $(APP_DIR)
+endef
--- /dev/null
+// Register the shovel plugin's routes with the management UI router.
+dispatcher_add(function(sammy) {
+ // Status listing of all running shovels (static and dynamic).
+ sammy.get('#/shovels', function() {
+ render({'shovels': {path: '/shovels',
+ options: {vhost:true}}},
+ 'shovels', '#/shovels');
+ });
+ // Listing of dynamic shovel definitions (runtime parameters).
+ sammy.get('#/dynamic-shovels', function() {
+ render({'shovels': {path: '/parameters/shovel',
+ options:{vhost:true}},
+ 'vhosts': '/vhosts'},
+ 'dynamic-shovels', '#/dynamic-shovels');
+ });
+ // Detail page for a single dynamic shovel.
+ sammy.get('#/dynamic-shovels/:vhost/:id', function() {
+ render({'shovel': '/parameters/shovel/' + esc(this.params['vhost']) + '/' + esc(this.params['id'])},
+ 'dynamic-shovel', '#/dynamic-shovels');
+ });
+ // Create a dynamic shovel; listed keys are coerced from form
+ // strings to numbers / booleans / arrays before being PUT.
+ sammy.put('#/shovel-parameters', function() {
+ var num_keys = ['prefetch-count', 'reconnect-delay'];
+ var bool_keys = ['add-forward-headers'];
+ var arrayable_keys = ['src-uri', 'dest-uri'];
+ put_parameter(this, [], num_keys, bool_keys, arrayable_keys);
+ return false;
+ });
+ // Delete a dynamic shovel, then return to the listing.
+ sammy.del('#/shovel-parameters', function() {
+ if (sync_delete(this, '/parameters/:component/:vhost/:name'))
+ go_to('#/dynamic-shovels');
+ return false;
+ });
+});
+
+
+// Admin-tab entries: status page needs 'monitoring', management page
+// needs 'policymaker' permissions.
+NAVIGATION['Admin'][0]['Shovel Status'] = ['#/shovels', "monitoring"];
+NAVIGATION['Admin'][0]['Shovel Management'] = ['#/dynamic-shovels', "policymaker"];
+
+// Inline help texts shown next to the corresponding form fields.
+HELP['shovel-uri'] =
+ 'Both source and destination can be either a local or remote broker. See the "URI examples" pane for examples of how to construct URIs. If connecting to a cluster, you can enter several URIs here separated by spaces.';
+
+HELP['shovel-queue-exchange'] =
+ 'You can set both source and destination as either a queue or an exchange. If you choose "queue", it will be declared beforehand; if you choose "exchange" it will not, but an appropriate binding and queue will be created when the source is an exchange.';
+
+HELP['shovel-prefetch'] =
+ 'Maximum number of unacknowledged messages that may be in flight over a shovel at one time. Defaults to 1000 if not set.';
+
+HELP['shovel-reconnect'] =
+ 'Time in seconds to wait after a shovel goes down before attempting reconnection. Defaults to 1 if not set.';
+
+HELP['shovel-forward-headers'] =
+ 'Whether to add headers to the shovelled messages indicating where they have been shovelled from and to. Defaults to false if not set.';
+
+HELP['shovel-ack-mode'] =
+ '<dl>\
+ <dt><code>on-confirm</code></dt>\
+ <dd>Messages are acknowledged at the source after they have been confirmed at the destination. Handles network errors and broker failures without losing messages. The slowest option, and the default.</dd>\
+ <dt><code>on-publish</code></dt>\
+ <dd>Messages are acknowledged at the source after they have been published at the destination. Handles network errors without losing messages, but may lose messages in the event of broker failures.</dd>\
+ <dt><code>no-ack</code></dt>\
+ <dd>Message acknowledgements are not used. The fastest option, but may lose messages in the event of network or broker failures.</dd>\
+</dl>';
+
+HELP['shovel-delete-after'] =
+ '<dl>\
+ <dt><code>Never</code></dt>\
+ <dd>The shovel never deletes itself; it will persist until it is explicitly removed.</dd>\
+ <dt><code>After initial length transferred</code></dt>\
+ <dd>The shovel will check the length of the queue when it starts up. It will transfer that many messages, and then delete itself.</dd>\
+</dl>';
+
+function link_shovel(vhost, name) {
+ return _link_to(fmt_escape_html(name), '#/dynamic-shovels/' + esc(vhost) + '/' + esc(name));
+}
+
+function fmt_shovel_endpoint(prefix, shovel) {
+ var txt = '';
+ if (shovel[prefix + '-queue']) {
+ txt += fmt_string(shovel[prefix + '-queue']) + '<sub>queue</sub>';
+ } else {
+ if (shovel[prefix + '-exchange']) {
+ txt += fmt_string(shovel[prefix + '-exchange']);
+ } else {
+ txt += '<i>as published</i>';
+ }
+ if (shovel[prefix + '-exchange-key']) {
+ txt += ' : ' + fmt_string(shovel[prefix + '-exchange-key']);
+ }
+ txt += '<sub>exchange</sub>';
+ }
+ return txt;
+}
--- /dev/null
+<h1>Dynamic Shovel: <b><%= fmt_string(shovel.name) %></b></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <table class="facts">
+ <tr>
+ <th>Virtual host</th>
+ <td><%= fmt_string(shovel.vhost) %></td>
+ </tr>
+ <tr>
+ <th>Source</th>
+ <td><%= fmt_string(shovel.value['src-uri']) %></td>
+ </tr>
+ <tr>
+ <th> </th>
+ <td><%= fmt_shovel_endpoint('src', shovel.value) %></td>
+ </tr>
+ <tr>
+ <th>Destination</th>
+ <td><%= fmt_string(shovel.value['dest-uri']) %></td>
+ </tr>
+ <tr>
+ <th> </th>
+ <td><%= fmt_shovel_endpoint('dest', shovel.value) %></td>
+ </tr>
+ <tr>
+ <th>Prefetch count</th>
+ <td><%= fmt_string(shovel.value['prefetch-count']) %></td>
+ </tr>
+ <tr>
+ <th>Reconnect delay</th>
+ <td><%= fmt_time(shovel.value['reconnect-delay'], 's') %></td>
+ </tr>
+ <tr>
+ <th>Add headers</th>
+ <td><%= fmt_boolean(shovel.value['add-forward-headers']) %></td>
+ </tr>
+ <tr>
+ <th>Ack mode</th>
+ <td><%= fmt_string(shovel.value['ack-mode']) %></td>
+ </tr>
+ <tr>
+ <th>Auto-delete</th>
+ <td><%= fmt_string(shovel.value['delete-after']) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this shovel</h2>
+ <div class="hider">
+ <form action="#/shovel-parameters" method="delete" class="confirm">
+ <input type="hidden" name="component" value="shovel"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(shovel.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(shovel.name) %>"/>
+ <input type="submit" value="Delete this shovel"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+<h1>Dynamic Shovels</h1>
+<div class="section">
+ <h2>Shovels</h2>
+ <div class="hider updatable">
+<% if (shovels.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th colspan="2">Source</th>
+ <th colspan="2">Destination</th>
+ <th>Prefetch Count</th>
+ <th>Reconnect Delay</th>
+ <th>Add headers</th>
+ <th>Ack mode</th>
+ <th>Auto-delete</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < shovels.length; i++) {
+ var shovel = shovels[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(shovel.vhost) %></td>
+<% } %>
+ <td><%= link_shovel(shovel.vhost, shovel.name) %></td>
+ <td><%= fmt_shortened_uri(shovel.value['src-uri']) %></td>
+ <td><%= fmt_shovel_endpoint('src', shovel.value) %></td>
+ <td><%= fmt_shortened_uri(shovel.value['dest-uri']) %></td>
+ <td><%= fmt_shovel_endpoint('dest', shovel.value) %></td>
+ <td class="r"><%= shovel.value['prefetch-count'] %></td>
+ <td class="r"><%= fmt_time(shovel.value['reconnect-delay'], 's') %></td>
+ <td class="c"><%= fmt_boolean(shovel.value['add-forward-headers']) %></td>
+ <td class="c"><%= fmt_string(shovel.value['ack-mode']) %></td>
+ <td><%= fmt_string(shovel.value['delete-after']) %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no shovels ...</p>
+<% } %>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new shovel</h2>
+ <div class="hider">
+ <form action="#/shovel-parameters" method="put">
+ <input type="hidden" name="component" value="shovel"/>
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th>Source:</th>
+ <td>
+ <table class="subform">
+ <tr>
+ <th>
+ URI
+ <span class="help" id="shovel-uri"></span>
+ </th>
+ <th>
+ <select name="queue-or-exchange" class="narrow controls-appearance">
+ <option value="src-queue">Queue:</option>
+ <option value="src-exchange">Exchange:</option>
+ </select>
+ <span class="help" id="shovel-queue-exchange"></span>
+ </th>
+ </tr>
+ <tr>
+ <td><input type="text" name="src-uri" value="amqp://"/><span class="mand">*</span></td>
+ <td>
+ <div id="src-queue-div">
+ <input type="text" name="src-queue"/>
+ </div>
+ <div id="src-exchange-div" style="display: none;">
+ <input type="text" name="src-exchange"/>
+ (Routing key: <input type="text" name="src-exchange-key"/>)
+ </div>
+ </td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <th>Destination:</th>
+ <td>
+ <table class="subform">
+ <tr>
+ <th>
+ URI
+ <span class="help" id="shovel-uri"></span>
+ </th>
+ <th>
+ <select name="queue-or-exchange" class="narrow controls-appearance">
+ <option value="dest-queue">Queue:</option>
+ <option value="dest-exchange">Exchange:</option>
+ </select>
+ <span class="help" id="shovel-queue-exchange"></span>
+ </th>
+ </tr>
+ <tr>
+ <td><input type="text" name="dest-uri" value="amqp://"/><span class="mand">*</span></td>
+ <td>
+ <div id="dest-queue-div">
+ <input type="text" name="dest-queue"/>
+ </div>
+ <div id="dest-exchange-div" style="display: none;">
+ <input type="text" name="dest-exchange"/>
+ (Routing key: <input type="text" name="dest-exchange-key"/>)
+ </div>
+ </td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Prefetch count:
+ <span class="help" id="shovel-prefetch"></span>
+ </label>
+ </th>
+ <td><input type="text" name="prefetch-count"/></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Reconnect delay:
+ <span class="help" id="shovel-reconnect"></span>
+ </label>
+ </th>
+ <td><input type="text" name="reconnect-delay"/> s</td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Add forwarding headers:
+ <span class="help" id="shovel-forward-headers"></span>
+ </label>
+ </th>
+ <td>
+ <select name="add-forward-headers">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Acknowledgement mode:
+ <span class="help" id="shovel-ack-mode"></span>
+ </label>
+ </th>
+ <td>
+ <select name="ack-mode">
+ <option value="on-confirm">On confirm</option>
+ <option value="on-publish">On publish</option>
+ <option value="no-ack">No ack</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Auto-delete
+ <span class="help" id="shovel-delete-after"></span>
+ </label>
+ </th>
+ <td>
+ <select name="delete-after">
+ <option value="never">Never</option>
+ <option value="queue-length">After initial length transferred</option>
+ </select>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add shovel"/>
+ </form>
+ </div>
+</div>
+<div class="section-hidden">
+ <h2>URI examples</h2>
+ <div class="hider">
+ <ul>
+ <li>
+ <code>amqp://</code><br/>
+ connect to local server as default user
+ </li>
+ <li>
+ <code>amqp://user@/my-vhost</code><br/>
+ connect to local server with alternate user and virtual host
+ (passwords are not required for local connections)
+ </li>
+ <li>
+ <code>amqp://server-name</code><br/>
+ connect to server-name, without SSL and default credentials
+ </li>
+ <li>
+ <code>amqp://user:password@server-name/my-vhost</code><br/>
+ connect to server-name, with credentials and overridden
+ virtual host
+ </li>
+ <li>
+ <code>amqps://user:password@server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer</code><br/>
+ connect to server-name, with credentials and SSL
+ </li>
+ <li>
+ <code>amqps://server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external</code><br/>
+ connect to server-name, with SSL and EXTERNAL authentication
+ </li>
+ </ul>
+ </div>
+</div>
--- /dev/null
+<h1>Shovel Status</h1>
+<%
+ var extra_width = 0;
+ if (vhosts_interesting) extra_width++;
+ if (nodes_interesting) extra_width++;
+%>
+<div class="updatable">
+<% if (shovels.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th>Name</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>State</th>
+ <th colspan="2">Source</th>
+ <th colspan="2">Destination</th>
+ <th>Last changed</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < shovels.length; i++) {
+ var shovel = shovels[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= fmt_string(shovel.name) %>
+ <sub><%= fmt_string(shovel.type) %></sub>
+ </td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(shovel.node) %></td>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(shovel.vhost, '') %></td>
+<% } %>
+<% if (shovel.state == 'terminated') { %>
+ <td colspan="5"><%= fmt_state('red', shovel.state) %></td>
+ <td><%= shovel.timestamp %></td>
+ </tr>
+ <tr>
+ <td colspan="<%= 8 + extra_width %>">
+ <pre><%= fmt_string(shovel.reason) %></pre>
+ </td>
+ </tr>
+<% } else { %>
+ <td><%= fmt_state('green', shovel.state) %></td>
+ <% if (shovel.definition == undefined) { %>
+ <td colspan="2"><%= fmt_string(shovel.src_uri) %></td>
+ <td colspan="2"><%= fmt_string(shovel.dest_uri) %></td>
+ <% } else { %>
+ <td><%= fmt_string(shovel.src_uri) %></td>
+ <td><%= fmt_shovel_endpoint('src', shovel.definition) %></td>
+ <td><%= fmt_string(shovel.dest_uri) %></td>
+ <td><%= fmt_shovel_endpoint('dest', shovel.definition) %></td>
+ <% } %>
+ <td><%= shovel.timestamp %></td>
+ </tr>
+<% } %>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no shovels ...</p>
+<% } %>
+</div>
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_mgmt).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+-export([init/1, to_json/2, resource_exists/2, content_types_provided/2,
+ is_authorized/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%% Webmachine dispatch table: /shovels and /shovels/<vhost>.
+dispatcher() ->
+    [{["shovels"],        ?MODULE, []},
+     {["shovels", vhost], ?MODULE, []}].
+web_ui() -> [{javascript, <<"shovel.js">>}].
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+%% This resource is only rendered as JSON (via to_json/2).
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+%% The resource exists unless the URL names a vhost that is unknown.
+resource_exists(ReqData, Context) ->
+    Exists = rabbit_mgmt_util:vhost(ReqData) =/= not_found,
+    {Exists, ReqData, Context}.
+
+%% Emit the shovel status list, restricted to the requested vhost (if
+%% any) and to what the user is authorised to see, as JSON.
+to_json(ReqData, Context) ->
+    Status  = status(ReqData, Context),
+    Visible = filter_vhost_req(Status, ReqData),
+    rabbit_mgmt_util:reply_list(Visible, ReqData, Context).
+
+%% Require at least 'monitoring' permissions to view shovel status.
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
%% Restrict the listing to the vhost named in the request path; a
%% request without a vhost component sees everything.
filter_vhost_req(List, ReqData) ->
    case rabbit_mgmt_util:vhost(ReqData) of
        none  -> List;
        VHost -> lists:filter(fun (Item) -> pget(vhost, Item) =:= VHost end,
                              List)
    end.
+
%% Allow users to see things in the vhosts they are authorised. But
%% static shovels do not have a vhost, so only allow admins (not
%% monitors) to see them.
filter_vhost_user(List, _ReqData, #context{user = User = #user{tags = Tags}}) ->
    VisibleVHosts = rabbit_mgmt_util:list_login_vhosts(User),
    IsAdmin = lists:member(administrator, Tags),
    lists:filter(
      fun (Item) ->
              case pget(vhost, Item) of
                  undefined -> IsAdmin;
                  VHost     -> lists:member(VHost, VisibleVHosts)
              end
      end, List).
+
%% Gather shovel status from every node in the cluster, then filter it
%% down to what this user may see.
status(ReqData, Context) ->
    AllNodes = [node() | nodes()],
    Statuses = lists:append([status(N) || N <- AllNodes]),
    filter_vhost_user(Statuses, ReqData, Context).
+
%% Fetch and format shovel status from one node. Any RPC failure -
%% node down, timeout, or a crash in rabbit_shovel_status (e.g. the
%% shovel plugin not enabled there) - yields an empty list rather than
%% failing the whole HTTP request. The original code only matched
%% {badrpc, {'EXIT', _}}, so e.g. {badrpc, nodedown} fell through to
%% the comprehension and crashed.
status(Node) ->
    case rpc:call(Node, rabbit_shovel_status, status, [], infinity) of
        {badrpc, _} ->
            [];
        Status ->
            [format(Node, I) || I <- Status]
    end.
+
%% Turn one rabbit_shovel_status entry into a proplist for the JSON
%% reply.
format(Node, {Name, Type, Info, TS}) ->
    lists:append([[{node, Node}, {timestamp, format_ts(TS)}],
                  format_name(Type, Name),
                  format_info(Info, Type, Name)]).
+
%% Static shovels are identified by name alone; dynamic ones also
%% carry the vhost their runtime parameter lives in.
format_name(static, Name) ->
    [{name, Name}, {type, static}];
format_name(dynamic, {VHost, Name}) ->
    [{name, Name}, {vhost, VHost}, {type, dynamic}].
+
%% Describe the worker's current state; running shovels additionally
%% report their source/destination definition and runtime properties.
format_info(starting, _Type, _Name) ->
    [{state, starting}];
format_info({running, Props}, Type, Name) ->
    lists:append([[{state, running}],
                  lookup_src_dest(Type, Name),
                  Props]);
format_info({terminated, Reason}, _Type, _Name) ->
    [{state, terminated},
     {reason, print("~p", [Reason])}].
+
%% Render a calendar datetime as "YYYY-MM-DD HH:MM:SS". The hour is
%% now zero-padded like every other field; the original used "~w" for
%% the hour, producing e.g. "2014-01-02 3:04:05".
format_ts({{Y, M, D}, {H, Min, S}}) ->
    print("~w-~2.2.0w-~2.2.0w ~2.2.0w:~2.2.0w:~2.2.0w", [Y, M, D, H, Min, S]).

%% Format Args with Fmt and return the result as a binary.
print(Fmt, Val) ->
    list_to_binary(io_lib:format(Fmt, Val)).
+
%% For dynamic shovels, pull the interesting source/destination keys
%% out of the runtime parameter so the UI can display them.
lookup_src_dest(static, _Name) ->
    %% This is too messy to do, the config may be on another node and anyway
    %% does not necessarily tell us the source and destination very clearly.
    [];
lookup_src_dest(dynamic, {VHost, Name}) ->
    Param = rabbit_runtime_parameters:lookup(VHost, <<"shovel">>, Name),
    Def = pget(value, Param),
    Interesting = [<<"src-queue">>, <<"src-exchange">>, <<"src-exchange-key">>,
                   <<"dest-queue">>, <<"dest-exchange">>,
                   <<"dest-exchange-key">>],
    [{definition, [KV || {K, _V} = KV <- Def,
                         lists:member(K, Interesting)]}].
--- /dev/null
%% OTP application resource file for the shovel management plugin.
%% %%VSN%% is substituted with the real version at build time, and the
%% modules list is filled in by the build system.
{application, rabbitmq_shovel_management,
 [{description, "Shovel Status"},
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  {applications, [kernel, stdlib, rabbit, rabbitmq_management]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_mgmt_test_all).
+
+-export([all_tests/0]).
+
%% Entry point used by the broker test harness: run every *_test
%% function of the HTTP test module under eunit, with a per-test
%% timeout.
all_tests() ->
    ok = eunit:test(tests(rabbit_shovel_mgmt_test_http, 60), [verbose]).

%% Build a foreach fixture of every exported function of Module whose
%% name ends in "_test", each wrapped in a Timeout.
tests(Module, Timeout) ->
    IsTestName = fun (F) -> lists:suffix("_test", atom_to_list(F)) end,
    {foreach, fun () -> ok end,
     [{timeout, Timeout, fun Module:F/0} ||
         {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
         IsTestName(F)]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_mgmt_test_http).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
%% End-to-end HTTP test for the /shovels listings.
%%
%% Sets up an "admin" (administrator) and a "mon" (monitoring) user
%% plus a vhost "v", declares a dynamic shovel in both "/" and "v",
%% then checks which shovels each user can see:
%%   - guest sees the static shovel and both dynamic ones;
%%   - admin (only granted permissions on "v") sees the static shovel
%%     and the "v" shovel;
%%   - mon sees only the "v" shovel - static shovels are hidden from
%%     non-administrators.
%%
%% NOTE(review): the shovel named "my-static" is expected to already
%% exist in the broker's test configuration; it is not created here.
%% Also this module has no -export attribute, so it presumably relies
%% on the test build compiling with export_all - confirm.
shovels_test() ->
    http_put("/users/admin", [{password, <<"admin">>},
                              {tags, <<"administrator">>}], ?NO_CONTENT),
    http_put("/users/mon", [{password, <<"mon">>},
                            {tags, <<"monitoring">>}], ?NO_CONTENT),
    http_put("/vhosts/v", none, ?NO_CONTENT),
    Perms = [{configure, <<".*">>},
             {write, <<".*">>},
             {read, <<".*">>}],
    http_put("/permissions/v/guest", Perms, ?NO_CONTENT),
    http_put("/permissions/v/admin", Perms, ?NO_CONTENT),
    http_put("/permissions/v/mon", Perms, ?NO_CONTENT),

    %% One identical dynamic shovel in "/" (%2f) and in "v".
    [http_put("/parameters/shovel/" ++ V ++ "/my-dynamic",
              [{value, [{'src-uri', <<"amqp://">>},
                        {'dest-uri', <<"amqp://">>},
                        {'src-queue', <<"test">>},
                        {'dest-queue', <<"test2">>}]}], ?NO_CONTENT)
     || V <- ["%2f", "v"]],
    Static = [{name, <<"my-static">>},
              {type, <<"static">>}],
    Dynamic1 = [{name, <<"my-dynamic">>},
                {vhost, <<"/">>},
                {type, <<"dynamic">>}],
    Dynamic2 = [{name, <<"my-dynamic">>},
                {vhost, <<"v">>},
                {type, <<"dynamic">>}],
    %% Assert the listing at Req, fetched as User, is exactly Res.
    Assert = fun (Req, User, Res) ->
                     assert_list(Res, http_get(Req, User, User, ?OK))
             end,
    Assert("/shovels", "guest", [Static, Dynamic1, Dynamic2]),
    Assert("/shovels/%2f", "guest", [Dynamic1]),
    Assert("/shovels/v", "guest", [Dynamic2]),
    Assert("/shovels", "admin", [Static, Dynamic2]),
    Assert("/shovels/%2f", "admin", []),
    Assert("/shovels/v", "admin", [Dynamic2]),
    Assert("/shovels", "mon", [Dynamic2]),
    Assert("/shovels/%2f", "mon", []),
    Assert("/shovels/v", "mon", [Dynamic2]),

    http_delete("/vhosts/v", ?NO_CONTENT),
    http_delete("/users/admin", ?NO_CONTENT),
    http_delete("/users/mon", ?NO_CONTENT),
    ok.
+
+%%---------------------------------------------------------------------------
+%% TODO this is all copypasta from the mgmt tests
+
%% Thin HTTP helpers around httpc (copied from the management plugin's
%% tests). All default to guest/guest credentials, assert the response
%% status and decode JSON bodies on 200.

http_get(Path) ->
    http_get(Path, ?OK).

http_get(Path, Expected) ->
    http_get(Path, "guest", "guest", Expected).

http_get(Path, User, Pass, Expected) ->
    {ok, {{_Vsn, Actual, _Phrase}, Headers, Body}} =
        req(get, Path, [auth_header(User, Pass)]),
    assert_code(Expected, Actual, "GET", Path, Body),
    decode(Expected, Headers, Body).

%% PUT/POST a proplist as a JSON document.
http_put(Path, List, Expected) ->
    http_put_raw(Path, format_for_upload(List), Expected).

http_put(Path, List, User, Pass, Expected) ->
    http_put_raw(Path, format_for_upload(List), User, Pass, Expected).

http_post(Path, List, Expected) ->
    http_post_raw(Path, format_for_upload(List), Expected).

http_post(Path, List, User, Pass, Expected) ->
    http_post_raw(Path, format_for_upload(List), User, Pass, Expected).

%% 'none' means an empty request body.
format_for_upload(none) ->
    <<>>;
format_for_upload(List) ->
    iolist_to_binary(mochijson2:encode({struct, List})).

http_put_raw(Path, Body, Expected) ->
    http_upload_raw(put, Path, Body, "guest", "guest", Expected).

http_put_raw(Path, Body, User, Pass, Expected) ->
    http_upload_raw(put, Path, Body, User, Pass, Expected).

http_post_raw(Path, Body, Expected) ->
    http_upload_raw(post, Path, Body, "guest", "guest", Expected).

http_post_raw(Path, Body, User, Pass, Expected) ->
    http_upload_raw(post, Path, Body, User, Pass, Expected).

http_upload_raw(Method, Path, ReqBody, User, Pass, Expected) ->
    {ok, {{_Vsn, Actual, _Phrase}, Headers, ResBody}} =
        req(Method, Path, [auth_header(User, Pass)], ReqBody),
    assert_code(Expected, Actual, Method, Path, ResBody),
    decode(Expected, Headers, ResBody).
+
http_delete(Path, Expected) ->
    http_delete(Path, "guest", "guest", Expected).

http_delete(Path, User, Pass, Expected) ->
    {ok, {{_Vsn, Actual, _Phrase}, Headers, Body}} =
        req(delete, Path, [auth_header(User, Pass)]),
    assert_code(Expected, Actual, "DELETE", Path, Body),
    decode(Expected, Headers, Body).

%% Throw a descriptive term if the response status differs from the
%% expectation.
assert_code(Expected, Actual, Method, Path, Body) ->
    case Actual of
        Expected -> ok;
        _        -> throw({expected, Expected, got, Actual, type, Method,
                           path, Path, body, Body})
    end.
+
%% Perform the actual httpc request against the management API root.
req(Method, Path, Headers) ->
    httpc:request(Method, {?PREFIX ++ Path, Headers}, ?HTTPC_OPTS, []).

req(Method, Path, Headers, Body) ->
    httpc:request(Method, {?PREFIX ++ Path, Headers, "application/json", Body},
                  ?HTTPC_OPTS, []).

%% Parse the JSON body of successful responses; otherwise hand back
%% the headers for inspection.
decode(?OK, _Headers, Body)  -> cleanup(mochijson2:decode(Body));
decode(_,    Headers, _Body) -> Headers.

%% Strip mochijson2 'struct' wrappers and atomise keys so tests can
%% pattern-match plain proplists. (Test-only code: list_to_atom on
%% response keys is acceptable here.)
cleanup(Items) when is_list(Items) ->
    [cleanup(I) || I <- Items];
cleanup({struct, Props}) ->
    cleanup(Props);
cleanup({K, V}) when is_binary(K) ->
    {list_to_atom(binary_to_list(K)), cleanup(V)};
cleanup(Other) ->
    Other.
+
%% HTTP basic-auth header for Username/Password.
auth_header(Username, Password) ->
    Credentials = base64:encode(Username ++ ":" ++ Password),
    {"Authorization", "Basic " ++ binary_to_list(Credentials)}.

%% Check that Act contains exactly one match for each expected item
%% (and no extras, by comparing lengths).
assert_list(Exp, Act) ->
    case length(Exp) == length(Act) of
        true  -> ok;
        false -> throw({expected, Exp, actual, Act})
    end,
    [case length([A || A <- Act, test_item(E, A)]) of
         1 -> ok;
         N -> throw({found, N, E, in, Act})
     end || E <- Exp].

%% Throw unless every expected property appears in Act.
assert_item(Exp, Act) ->
    case test_item0(Exp, Act) of
        []      -> ok;
        Missing -> throw(Missing)
    end.

%% Boolean form of assert_item/2.
test_item(Exp, Act) ->
    test_item0(Exp, Act) =:= [].

%% List the expected properties that are absent from Act.
test_item0(Exp, Act) ->
    [{did_not_find, E, in, Act} || E <- Exp, not lists:member(E, Act)].
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Generic build instructions are at:
+ http://www.rabbitmq.com/plugin-development.html
+
+See the http://www.rabbitmq.com/shovel.html page for full instructions.
--- /dev/null
+#!/usr/bin/env escript
+%% -*- erlang -*-
+-mode(compile).
+
%% Generate a Makefile fragment (TargetFile) mapping each beam in
%% EbinDir to the .erl it is built from plus the headers and local
%% behaviour beams it depends on.
main([IncludeDir, ErlDir, EbinDir, TargetFile]) ->
    ErlDirContents = filelib:wildcard("*.erl", ErlDir),
    ErlFiles = [filename:join(ErlDir, FileName) || FileName <- ErlDirContents],
    %% Module names living in this directory - candidate local
    %% behaviour dependencies.
    Modules = sets:from_list(
                [list_to_atom(filename:basename(FileName, ".erl")) ||
                    FileName <- ErlDirContents]),
    Headers = sets:from_list(
                [filename:join(IncludeDir, FileName) ||
                    FileName <- filelib:wildcard("*.hrl", IncludeDir)]),
    Deps = lists:foldl(
             fun (Path, Deps1) ->
                     dict:store(Path, detect_deps(IncludeDir, EbinDir,
                                                  Modules, Headers, Path),
                                Deps1)
             end, dict:new(), ErlFiles),
    {ok, Hdl} = file:open(TargetFile, [write, delayed_write]),
    dict:fold(
      %% NOTE(review): detect_deps/5 returns a sets set, never a
      %% literal [], so this clause can never match and every module
      %% gets a rule line - confirm whether that was the intent.
      fun (_Path, [], ok) ->
              ok;
          (Path, Dep, ok) ->
              Module = filename:basename(Path, ".erl"),
              ok = file:write(Hdl, [EbinDir, "/", Module, ".beam: ",
                                    Path]),
              ok = sets:fold(fun (E, ok) -> file:write(Hdl, [" ", E]) end,
                             ok, Dep),
              %% Match the result so a write error aborts the script
              %% instead of being silently ignored (as the other
              %% writes already do).
              ok = file:write(Hdl, ["\n"])
      end, ok, Deps),
    %% Regenerate the fragment whenever this script itself changes.
    ok = file:write(Hdl, [TargetFile, ": ", escript:script_name(), "\n"]),
    ok = file:sync(Hdl),
    ok = file:close(Hdl).
+
%% Scan one .erl file and return a set of build dependencies:
%%   - the .beam of any behaviour the file declares, when that
%%     behaviour is itself one of the local Modules;
%%   - any header in Headers pulled in via include directives (these
%%     appear as 'file' attributes in the preprocessed forms).
detect_deps(IncludeDir, EbinDir, Modules, Headers, Path) ->
    %% Run the preprocessor so included files show up as attributes.
    {ok, Forms} = epp:parse_file(Path, [IncludeDir], [{use_specs, true}]),
    lists:foldl(
      %% Both spellings of the behaviour attribute are accepted.
      fun ({attribute, _LineNumber, Attribute, Behaviour}, Deps)
            when Attribute =:= behaviour orelse Attribute =:= behavior ->
              case sets:is_element(Behaviour, Modules) of
                  true -> sets:add_element(
                            [EbinDir, "/", atom_to_list(Behaviour), ".beam"],
                            Deps);
                  false -> Deps
              end;
          ({attribute, _LineNumber, file, {FileName, _LineNumber1}}, Deps) ->
              case sets:is_element(FileName, Headers) of
                  true -> sets:add_element(FileName, Deps);
                  false -> Deps
              end;
          (_Form, Deps) ->
              Deps
      end, sets:new(), Forms).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
%% One end of a shovel: the candidate broker URIs to connect to, and a
%% fun that (re)declares any required resources on a fresh connection.
-record(endpoint,
        {uris,
         resource_declaration
        }).

%% A parsed shovel definition (built by rabbit_shovel_config /
%% rabbit_shovel_parameters).
-record(shovel,
        {sources,              %% #endpoint{} consumed from
         destinations,         %% #endpoint{} published to
         prefetch_count,
         ack_mode,             %% no_ack | on_publish | on_confirm
         publish_fields,       %% fun rewriting the #'basic.publish'{} method
         publish_properties,   %% fun rewriting the #'P_basic'{} properties
         queue,
         reconnect_delay,
         delete_after = never  %% never | 'queue-length' | integer
        }).
--- /dev/null
# Umbrella build settings for the rabbitmq-shovel plugin.
RELEASABLE:=true
DEPS:=rabbitmq-erlang-client
# Test entry point, run inside a started broker by the umbrella.
WITH_BROKER_TEST_COMMANDS:=rabbit_shovel_test_all:all_tests()
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel).
+
+-export([start/0, stop/0, start/2, stop/1]).
+
%% Manual start/stop entry points (e.g. from the shell).
%% NOTE(review): start/0 discards the result of start_link/0 and
%% always returns ok - presumably intentional so repeated starts are
%% harmless; confirm.
start() -> rabbit_shovel_sup:start_link(), ok.

stop() -> ok.

%% OTP application behaviour callbacks.
start(normal, []) -> rabbit_shovel_sup:start_link().

stop(_State) -> ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_config).
+
+-export([parse/2,
+ ensure_defaults/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-define(IGNORE_FIELDS, [delete_after]).
+
%% Parse one static shovel configuration stanza into a #shovel{}
%% record, filling in defaults from the shovel application
%% environment. Returns {ok, Shovel} or
%% {error, {invalid_shovel_configuration, ShovelName, Reason}}.
parse(ShovelName, Config) ->
    {ok, Defaults} = application:get_env(defaults),
    try
        %% Each stage either transforms the state or throws
        %% {error, Reason} (see the state monad helpers below).
        {ok, run_state_monad(
               [fun enrich_shovel_config/1,
                fun parse_shovel_config_proplist/1,
                fun parse_shovel_config_dict/1],
               {Config, Defaults})}
    catch throw:{error, Reason} ->
            {error, {invalid_shovel_configuration, ShovelName, Reason}}
    end.
+
%% ensures that any defaults that have been applied to a parsed
%% shovel, are written back to the original proplist
ensure_defaults(ShovelConfig, ParsedShovel) ->
    Delay = ParsedShovel#shovel.reconnect_delay,
    lists:keystore(reconnect_delay, 1, ShovelConfig,
                   {reconnect_delay, Delay}).
+
%% Normalise the raw config: unfold bare atoms into {Atom, true},
%% reject non-pair entries and duplicate keys, then merge in the
%% defaults (existing keys win, via ukeysort on the concatenation).
enrich_shovel_config({Config, Defaults}) ->
    Unfolded = proplists:unfold(Config),
    NotAPair = fun (E) -> not (is_tuple(E) andalso tuple_size(E) == 2) end,
    case lists:filter(NotAPair, Unfolded) of
        [] ->
            case duplicate_keys(Unfolded) of
                []   -> return(lists:ukeysort(1, Unfolded ++ Defaults));
                Dups -> fail({duplicate_parameters, Dups})
            end;
        Invalid ->
            fail({invalid_parameters, Invalid})
    end.
+
%% Check that the config's keys are exactly the #shovel{} field names
%% (minus ?IGNORE_FIELDS), and tag each value with the position its
%% field occupies in the record, yielding a dict of
%% Key -> {Value, RecordPos}. Positions start at 2 because element 1
%% of a record tuple is the record tag.
parse_shovel_config_proplist(Config) ->
    Dict = dict:from_list(Config),
    Fields = record_info(fields, shovel) -- ?IGNORE_FIELDS,
    Keys = dict:fetch_keys(Dict),
    case {Keys -- Fields, Fields -- Keys} of
        {[], []} -> {_Pos, Dict1} =
                        lists:foldl(
                          fun (FieldName, {Pos, Acc}) ->
                                  {Pos + 1,
                                   dict:update(FieldName,
                                               fun (V) -> {V, Pos} end,
                                               Acc)}
                          end, {2, Dict}, Fields),
                    return(Dict1);
        {[], Missing} -> fail({missing_parameters, Missing});
        {Unknown, _} -> fail({unrecognised_parameters, Unknown})
    end.

%% Run each field's parser over its {Value, Pos} pair and write the
%% parsed value into a fresh #shovel{} at that position. A parser
%% throwing {error, Reason} is re-thrown naming the offending key.
parse_shovel_config_dict(Dict) ->
    run_state_monad(
      [fun (Shovel) -> {ok, Value} = dict:find(Key, Dict),
                       try {ParsedValue, Pos} = Fun(Value),
                           return(setelement(Pos, Shovel, ParsedValue))
                       catch throw:{error, Reason} ->
                               fail({invalid_parameter_value, Key, Reason})
                       end
       end || {Fun, Key} <-
                  [{fun parse_endpoint/1, sources},
                   {fun parse_endpoint/1, destinations},
                   {fun parse_non_negative_integer/1, prefetch_count},
                   {fun parse_ack_mode/1, ack_mode},
                   {fun parse_binary/1, queue},
                   make_parse_publish(publish_fields),
                   make_parse_publish(publish_properties),
                   {fun parse_non_negative_number/1, reconnect_delay}]],
      #shovel{}).
+
%% --=: Plain state monad implementation start :=--
%% Thread State through FunList: each fun maps StateN to StateN+1.
run_state_monad(FunList, State) ->
    lists:foldl(fun (StepFun, StateN) -> StepFun(StateN) end, State, FunList).

%% 'return' is just the identity...
return(V) -> V.

%% ...and failure is a throw, unwound by the caller of the monad.
fail(Reason) -> throw({error, Reason}).
%% --=: end :=--
+
%% Parse a source/destination endpoint proplist. Requires 'broker' (a
%% single URI) or 'brokers' (a list of URIs); each must parse with
%% amqp_uri. The optional 'declarations' list of AMQP methods is
%% compiled into a fun that replays them on a freshly opened channel.
parse_endpoint({Endpoint, Pos}) when is_list(Endpoint) ->
    Brokers = case proplists:get_value(brokers, Endpoint) of
                  undefined ->
                      case proplists:get_value(broker, Endpoint) of
                          undefined -> fail({missing_endpoint_parameter,
                                             broker_or_brokers});
                          B -> [B]
                      end;
                  Bs when is_list(Bs) ->
                      Bs;
                  B ->
                      fail({expected_list, brokers, B})
              end,
    %% check_uri/1 is run once per URI, threading {Remaining, Checked}.
    {[], Brokers1} = run_state_monad(
                       lists:duplicate(length(Brokers),
                                       fun check_uri/1),
                       {Brokers, []}),

    ResourceDecls =
        case proplists:get_value(declarations, Endpoint, []) of
            Decls when is_list(Decls) ->
                Decls;
            Decls ->
                fail({expected_list, declarations, Decls})
        end,
    {[], ResourceDecls1} =
        run_state_monad(
          lists:duplicate(length(ResourceDecls), fun parse_declaration/1),
          {ResourceDecls, []}),

    %% The declarations accumulated in reverse; replay them in the
    %% configured order.
    DeclareFun =
        fun (_Conn, Ch) ->
                [amqp_channel:call(Ch, M) || M <- lists:reverse(ResourceDecls1)]
        end,
    return({#endpoint{uris = Brokers1,
                      resource_declaration = DeclareFun},
            Pos});
parse_endpoint({Endpoint, _Pos}) ->
    fail({require_list, Endpoint}).
+
%% Validate the head URI with amqp_uri and move it onto the checked
%% list; threads {Remaining, Checked} through the state monad.
check_uri({[Uri | Rest], Checked}) ->
    case amqp_uri:parse(Uri) of
        {ok, _Params} ->
            return({Rest, [Uri | Checked]});
        {error, _} = Error ->
            throw(Error)
    end.
+
%% Turn one declaration - Method or {Method, Props} - into the
%% corresponding AMQP method record, with Props overriding the
%% record's defaults field by field. Unknown methods or fields fail
%% the parse. Threads {Remaining, Acc} like check_uri/1.
parse_declaration({[{Method, Props} | Rest], Acc}) when is_list(Props) ->
    FieldNames = try rabbit_framing_amqp_0_9_1:method_fieldnames(Method)
                 catch exit:Reason -> fail(Reason)
                 end,
    case proplists:get_keys(Props) -- FieldNames of
        [] -> ok;
        UnknownFields -> fail({unknown_fields, Method, UnknownFields})
    end,
    %% Element 1 of a record tuple is the tag, so values start at 2.
    {Res, _Idx} = lists:foldl(
                    fun (K, {R, Idx}) ->
                            NewR = case proplists:get_value(K, Props) of
                                       undefined -> R;
                                       V -> setelement(Idx, R, V)
                                   end,
                            {NewR, Idx + 1}
                    end, {rabbit_framing_amqp_0_9_1:method_record(Method), 2},
                    FieldNames),
    return({Rest, [Res | Acc]});
parse_declaration({[{Method, Props} | _Rest], _Acc}) ->
    fail({expected_method_field_list, Method, Props});
%% A bare method name is shorthand for {Method, []}.
parse_declaration({[Method | Rest], Acc}) ->
    parse_declaration({[{Method, []} | Rest], Acc}).
+
%% Validators for scalar shovel settings. Each takes and returns the
%% {Value, RecordPos} pair threaded by parse_shovel_config_dict/1.

parse_non_negative_integer({N, Pos}) when is_integer(N), N >= 0 ->
    return({N, Pos});
parse_non_negative_integer({N, _Pos}) ->
    fail({require_non_negative_integer, N}).

parse_non_negative_number({N, Pos}) when is_number(N), N >= 0 ->
    return({N, Pos});
parse_non_negative_number({N, _Pos}) ->
    fail({require_non_negative_number, N}).

parse_binary({Bin, Pos}) when is_binary(Bin) ->
    return({Bin, Pos});
parse_binary({NotABinary, _Pos}) ->
    fail({require_binary, NotABinary}).

parse_ack_mode({Mode, Pos}) when Mode =:= no_ack;
                                 Mode =:= on_publish;
                                 Mode =:= on_confirm ->
    return({Mode, Pos});
parse_ack_mode({WrongVal, _Pos}) ->
    fail({ack_mode_value_requires_one_of, {no_ack, on_publish, on_confirm},
          WrongVal}).
+
%% Build the {ParserFun, Key} pair for publish_fields (fields of
%% #'basic.publish'{}) or publish_properties (fields of #'P_basic'{}).
make_parse_publish(publish_fields) ->
    {make_parse_publish1(record_info(fields, 'basic.publish')), publish_fields};
make_parse_publish(publish_properties) ->
    {make_parse_publish1(record_info(fields, 'P_basic')), publish_properties}.

%% The configured value must be a proplist of field overrides.
make_parse_publish1(ValidFields) ->
    fun ({Fields, Pos}) when is_list(Fields) ->
            make_publish_fun(Fields, Pos, ValidFields);
        ({Fields, _Pos}) ->
            fail({require_list, Fields})
    end.

%% Compile the overrides into a fun that rewrites a publish record at
%% runtime (one setelement per overridden field).
make_publish_fun(Fields, Pos, ValidFields) ->
    SuppliedFields = proplists:get_keys(Fields),
    case SuppliedFields -- ValidFields of
        [] ->
            FieldIndices = make_field_indices(ValidFields, Fields),
            Fun = fun (_SrcUri, _DestUri, Publish) ->
                          lists:foldl(fun ({Pos1, Value}, Pub) ->
                                              setelement(Pos1, Pub, Value)
                                      end, Publish, FieldIndices)
                  end,
            return({Fun, Pos});
        Unexpected ->
            fail({unexpected_fields, Unexpected, ValidFields})
    end.
+
%% Map each configured {Field, Value} to {RecordPos, Value}, where
%% RecordPos is the field's position in the record (starting at 2).
make_field_indices(Valid, Fields) ->
    IndexByField = field_map(Valid, 2),
    lists:map(fun ({Key, Value}) ->
                      {dict:fetch(Key, IndexByField), Value}
              end, Fields).

%% Dict of Field -> Index, numbering the fields from Idx0 upwards.
field_map(Fields, Idx0) ->
    {IndexDict, _IdxMax} =
        lists:foldl(fun (Field, {DictAcc, Idx}) ->
                            {dict:store(Field, Idx, DictAcc), Idx + 1}
                    end, {dict:new(), Idx0}, Fields),
    IndexDict.
+
%% Keys that occur more than once in PropList: delete the first
%% occurrence of every key, then whatever keys remain were duplicated.
duplicate_keys(PropList) ->
    Keys = proplists:get_keys(PropList),
    Remainder = lists:foldl(fun (Key, Rest) -> lists:keydelete(Key, 1, Rest) end,
                            PropList, Keys),
    proplists:get_keys(Remainder).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_dyn_worker_sup).
+-behaviour(supervisor2).
+
+-export([start_link/2, init/1]).
+
+-import(rabbit_misc, [pget/3]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, ?MODULE).
+
%% Start a supervisor owning one dynamic shovel worker.
start_link(Name, Config) ->
    supervisor2:start_link(?MODULE, [Name, Config]).

%%----------------------------------------------------------------------------

%% One rabbit_shovel_worker child. A positive integer reconnect-delay
%% makes the child {transient, N} (supervisor2's delayed-restart form;
%% presumably N seconds - confirm against supervisor2), otherwise the
%% child is temporary and never restarted. The 16#ffffffff shutdown
%% value effectively waits indefinitely for the worker to stop.
init([Name, Config]) ->
    {ok, {{one_for_one, 1, ?MAX_WAIT},
          [{Name,
            {rabbit_shovel_worker, start_link, [dynamic, Name, Config]},
            case pget(<<"reconnect-delay">>, Config, 1) of
                N when is_integer(N) andalso N > 0 -> {transient, N};
                _ -> temporary
            end,
            16#ffffffff, worker, [rabbit_shovel_worker]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_dyn_worker_sup_sup).
+-behaviour(mirrored_supervisor).
+
+-export([start_link/0, init/1, adjust/2, stop_child/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, ?MODULE).
+
%% Start the mirrored supervisor, then (re)create a child for every
%% dynamic shovel parameter already defined in the cluster.
start_link() ->
    {ok, Pid} = mirrored_supervisor:start_link(
                  {local, ?SUPERVISOR}, ?SUPERVISOR,
                  fun rabbit_misc:execute_mnesia_transaction/1, ?MODULE, []),
    Shovels = rabbit_runtime_parameters:list_component(<<"shovel">>),
    %% Children are keyed on {VHost, ShovelName}.
    [start_child({pget(vhost, Shovel), pget(name, Shovel)},
                 pget(value, Shovel)) || Shovel <- Shovels],
    {ok, Pid}.
+
%% Apply a new or changed definition: tear down any existing child for
%% Name first, then start afresh with Def.
adjust(Name, Def) ->
    case child_exists(Name) of
        true  -> stop_child(Name);
        false -> ok
    end,
    start_child(Name, Def).

%% Idempotent child start: an already-running child is not an error.
start_child(Name, Def) ->
    ChildSpec = {Name,
                 {rabbit_shovel_dyn_worker_sup, start_link, [Name, Def]},
                 transient, ?MAX_WAIT, worker, [rabbit_shovel_dyn_worker_sup]},
    case mirrored_supervisor:start_child(?SUPERVISOR, ChildSpec) of
        {ok, _Pid}                       -> ok;
        {error, {already_started, _Pid}} -> ok
    end.
+
%% True if the mirrored supervisor already has a child named Name.
child_exists(Name) ->
    lists:any(fun ({N, _, _, _}) -> N =:= Name end,
              mirrored_supervisor:which_children(?SUPERVISOR)).

%% Stop, deregister and forget the child's status - unless we are
%% being called from an autodeleting worker (flagged via the process
%% dictionary); see footnote [1] below.
stop_child(Name) ->
    case get(shovel_worker_autodelete) of
        true -> ok; %% [1]
        _    -> ok = mirrored_supervisor:terminate_child(?SUPERVISOR, Name),
                ok = mirrored_supervisor:delete_child(?SUPERVISOR, Name),
                rabbit_shovel_status:remove(Name)
    end.
+
+%% [1] An autodeleting worker removes its own parameter, and thus ends
+%% up here via the parameter callback. It is a transient worker that
+%% is just about to terminate normally - so we don't need to tell the
+%% supervisor to stop us - and as usual if we call into our own
+%% supervisor we risk deadlock.
+%%
+%% See rabbit_shovel_worker:maybe_autodelete/1
+
+%%----------------------------------------------------------------------------
+
%% Children are added dynamically via start_child/2; start empty.
init([]) ->
    RestartStrategy = {one_for_one, 3, 10},
    {ok, {RestartStrategy, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_parameters).
+-behaviour(rabbit_runtime_parameter).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-export([validate/5, notify/4, notify_clear/3]).
+-export([register/0, parse/2]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-define(ROUTING_HEADER, <<"x-shovelled">>).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "shovel parameters"},
+ {mfa, {rabbit_shovel_parameters, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
%% Boot-step hook: make this module the handler for the "shovel"
%% runtime parameter component.
register() ->
    rabbit_registry:register(runtime_parameter, <<"shovel">>, ?MODULE).
+
%% Validate a dynamic shovel definition. Returns a list of check
%% results (ok | {error, Fmt, Args}): exactly one of src-exchange /
%% src-queue must be given; at most one of dest-exchange / dest-queue;
%% a numerical delete-after cannot be combined with no-ack; then the
%% per-field validators from validation/1 are applied.
validate(_VHost, <<"shovel">>, Name, Def, User) ->
    [case pget2(<<"src-exchange">>, <<"src-queue">>, Def) of
         zero -> {error, "Must specify 'src-exchange' or 'src-queue'", []};
         one -> ok;
         both -> {error, "Cannot specify 'src-exchange' and 'src-queue'", []}
     end,
     case pget2(<<"dest-exchange">>, <<"dest-queue">>, Def) of
         zero -> ok;
         one -> ok;
         both -> {error, "Cannot specify 'dest-exchange' and 'dest-queue'", []}
     end,
     case {pget(<<"delete-after">>, Def), pget(<<"ack-mode">>, Def)} of
         {N, <<"no-ack">>} when is_integer(N) ->
             {error, "Cannot specify 'no-ack' and numerical 'delete-after'", []};
         _ ->
             ok
     end | rabbit_parameter_validation:proplist(Name, validation(User), Def)];

%% Any other component/term is not ours.
validate(_VHost, _Component, Name, _Term, _User) ->
    {error, "name not recognised: ~p", [Name]}.
+
%% How many of the two keys are present in Defs: zero, one or both.
pget2(K1, K2, Defs) ->
    Present1 = pget(K1, Defs) =/= undefined,
    Present2 = pget(K2, Defs) =/= undefined,
    case {Present1, Present2} of
        {false, false} -> zero;
        {true,  true}  -> both;
        _              -> one
    end.
+
%% Runtime-parameter callbacks: a set or cleared "shovel" parameter
%% starts/updates or stops the corresponding dynamic shovel worker.
notify(VHost, <<"shovel">>, Name, Definition) ->
    rabbit_shovel_dyn_worker_sup_sup:adjust({VHost, Name}, Definition).

notify_clear(VHost, <<"shovel">>, Name) ->
    rabbit_shovel_dyn_worker_sup_sup:stop_child({VHost, Name}).
+
+%%----------------------------------------------------------------------------
+
%% Per-field validation table: {Key, ValidatorFun, mandatory|optional}.
validation(User) ->
    [{<<"src-uri">>, validate_uri_fun(User), mandatory},
     {<<"dest-uri">>, validate_uri_fun(User), mandatory},
     {<<"src-exchange">>, fun rabbit_parameter_validation:binary/2,optional},
     {<<"src-exchange-key">>,fun rabbit_parameter_validation:binary/2,optional},
     {<<"src-queue">>, fun rabbit_parameter_validation:binary/2,optional},
     {<<"dest-exchange">>, fun rabbit_parameter_validation:binary/2,optional},
     {<<"dest-exchange-key">>,fun rabbit_parameter_validation:binary/2,optional},
     {<<"dest-queue">>, fun rabbit_parameter_validation:binary/2,optional},
     {<<"prefetch-count">>, fun rabbit_parameter_validation:number/2,optional},
     {<<"reconnect-delay">>, fun rabbit_parameter_validation:number/2,optional},
     {<<"add-forward-headers">>, fun rabbit_parameter_validation:boolean/2,optional},
     {<<"ack-mode">>, rabbit_parameter_validation:enum(
                        ['no-ack', 'on-publish', 'on-confirm']), optional},
     {<<"delete-after">>, fun validate_delete_after/2, optional}
    ].

%% Close over the defining user so URI validation can check vhost
%% access for direct connections.
validate_uri_fun(User) ->
    fun (Name, Term) -> validate_uri(Name, Term, User) end.

%% A URI value is either a single binary or a list of them; each must
%% parse with amqp_uri and pass the user/vhost check. For a list the
%% first error found (if any) is returned.
validate_uri(Name, Term, User) when is_binary(Term) ->
    case rabbit_parameter_validation:binary(Name, Term) of
        ok -> case amqp_uri:parse(binary_to_list(Term)) of
                  {ok, P} -> validate_params_user(P, User);
                  {error, E} -> {error, "\"~s\" not a valid URI: ~p", [Term, E]}
              end;
        E -> E
    end;
validate_uri(Name, Term, User) ->
    case rabbit_parameter_validation:list(Name, Term) of
        ok -> case [V || URI <- Term,
                         V <- [validate_uri(Name, URI, User)],
                         element(1, V) =:= error] of
                  [] -> ok;
                  [E | _] -> E
              end;
        E -> E
    end.

%% Direct (intra-cluster) URIs connect as the defining user, so that
%% user must be allowed on the target vhost - skipped when there is no
%% user context (User =:= none). Network URIs carry their own
%% credentials, which the remote broker will check.
validate_params_user(#amqp_params_direct{}, none) ->
    ok;
validate_params_user(#amqp_params_direct{virtual_host = VHost},
                     User = #user{username = Username,
                                  auth_backend = M}) ->
    case rabbit_vhost:exists(VHost) andalso M:check_vhost_access(User, VHost) of
        true -> ok;
        false -> {error, "user \"~s\" may not connect to vhost \"~s\"",
                  [Username, VHost]}
    end;
validate_params_user(#amqp_params_network{}, _User) ->
    ok.
+
%% delete-after may be "never", "queue-length" or an integer count.
validate_delete_after(_Name, Term) when Term =:= <<"never">>;
                                        Term =:= <<"queue-length">> ->
    ok;
validate_delete_after(_Name, Term) when is_integer(Term) ->
    ok;
validate_delete_after(Name, Term) ->
    {error, "~s should be number, \"never\" or \"queue-length\", actually was "
     "~p", [Name, Term]}.
+
+%%----------------------------------------------------------------------------
+
%% Translate a validated dynamic shovel definition ({VHost, Name} plus
%% proplist Def) into {ok, #shovel{}}.
%%
%% Source side: with no src-queue we declare a server-named exclusive
%% queue and bind it to src-exchange; with a src-queue we just make
%% sure that queue exists. Destination side: publishing goes to
%% dest-exchange / dest-exchange-key unless dest-queue is set, in
%% which case we publish via the default exchange (<<>>) with the
%% queue name as routing key.
parse({VHost, Name}, Def) ->
    SrcURIs = get_uris(<<"src-uri">>, Def),
    DestURIs = get_uris(<<"dest-uri">>, Def),
    SrcX = pget(<<"src-exchange">>, Def, none),
    SrcXKey = pget(<<"src-exchange-key">>, Def, <<>>), %% [1]
    SrcQ = pget(<<"src-queue">>, Def, none),
    DestX = pget(<<"dest-exchange">>, Def, none),
    DestXKey = pget(<<"dest-exchange-key">>, Def, none),
    DestQ = pget(<<"dest-queue">>, Def, none),
    %% [1] src-exchange-key is never ignored if src-exchange is set
    {SrcFun, Queue, Table1} =
        case SrcQ of
            none -> {fun (_Conn, Ch) ->
                             Ms = [#'queue.declare'{exclusive = true},
                                   #'queue.bind'{routing_key = SrcXKey,
                                                 exchange = SrcX}],
                             [amqp_channel:call(Ch, M) || M <- Ms]
                     end, <<>>, [{<<"src-exchange">>, SrcX},
                                 {<<"src-exchange-key">>, SrcXKey}]};
            _ -> {fun (Conn, _Ch) ->
                          ensure_queue(Conn, SrcQ)
                  end, SrcQ, [{<<"src-queue">>, SrcQ}]}
        end,
    DestFun = fun (Conn, _Ch) ->
                      case DestQ of
                          none -> ok;
                          _ -> ensure_queue(Conn, DestQ)
                      end
              end,
    %% Publish via the default exchange with the queue name as routing
    %% key when a dest-queue is configured.
    {X, Key} = case DestQ of
                   none -> {DestX, DestXKey};
                   _ -> {<<>>, DestQ}
               end,
    Table2 = [{K, V} || {K, V} <- [{<<"dest-exchange">>, DestX},
                                   {<<"dest-exchange-key">>, DestXKey},
                                   {<<"dest-queue">>, DestQ}],
                        V =/= none],
    %% Rewrite each forwarded basic.publish to the configured target.
    PubFun = fun (_SrcURI, _DestURI, P0) ->
                     P1 = case X of
                              none -> P0;
                              _ -> P0#'basic.publish'{exchange = X}
                          end,
                     case Key of
                         none -> P1;
                         _ -> P1#'basic.publish'{routing_key = Key}
                     end
             end,
    AddHeaders = pget(<<"add-forward-headers">>, Def, false),
    Table0 = [{<<"shovelled-by">>, rabbit_nodes:cluster_name()},
              {<<"shovel-name">>, Name},
              {<<"shovel-vhost">>, VHost}],
    %% Optionally record the shovel's identity and endpoints in an
    %% x-shovelled header on each forwarded message.
    PubPropsFun = fun (SrcURI, DestURI, P = #'P_basic'{headers = H}) ->
                          case AddHeaders of
                              true -> H1 = update_headers(
                                             Table0, Table1 ++ Table2,
                                             SrcURI, DestURI, H),
                                      P#'P_basic'{headers = H1};
                              false -> P
                          end
                  end,
    {ok, #shovel{
       sources = #endpoint{uris = SrcURIs,
                           resource_declaration = SrcFun},
       destinations = #endpoint{uris = DestURIs,
                                resource_declaration = DestFun},
       prefetch_count = pget(<<"prefetch-count">>, Def, 1000),
       ack_mode = translate_ack_mode(
                    pget(<<"ack-mode">>, Def, <<"on-confirm">>)),
       publish_fields = PubFun,
       publish_properties = PubPropsFun,
       queue = Queue,
       reconnect_delay = pget(<<"reconnect-delay">>, Def, 1),
       delete_after = opt_b2a(pget(<<"delete-after">>, Def, <<"never">>))
      }}.
+
%% Fetch the URI(s) stored under Key in the definition. The value may be
%% a single binary URI or a list of binary URIs; either way the result
%% is a list of string URIs (amqp_uri:parse/1 expects strings).
get_uris(Key, Def) ->
    case pget(Key, Def) of
        Bin when is_binary(Bin) ->
            [binary_to_list(Bin)];
        Uris when is_list(Uris) ->
            [binary_to_list(Uri) || Uri <- Uris]
    end.
+
%% Map the wire-format ack-mode binary to its internal atom. Any other
%% value crashes; validation elsewhere restricts the input to these
%% three values.
translate_ack_mode(Mode) ->
    case Mode of
        <<"on-confirm">> -> on_confirm;
        <<"on-publish">> -> on_publish;
        <<"no-ack">>     -> no_ack
    end.
+
%% Make sure Queue exists on the broker behind Conn. First probe with a
%% passive declare; if the server closes the channel with NOT_FOUND,
%% open a fresh channel (the probe killed the first one) and declare the
%% queue durably. Channel closes are best-effort: the old-style 'catch'
%% deliberately swallows failures on already-dead channels.
ensure_queue(Conn, Queue) ->
    {ok, Ch} = amqp_connection:open_channel(Conn),
    try
        amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
                                               passive = true})
    catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Text}}, _} ->
            %% Queue is missing: create it for real on a new channel.
            {ok, Ch2} = amqp_connection:open_channel(Conn),
            amqp_channel:call(Ch2, #'queue.declare'{queue = Queue,
                                                    durable = true}),
            catch amqp_channel:close(Ch2)
    after
        catch amqp_channel:close(Ch)
    end.
+
%% Prepend shovel metadata to the message's ?ROUTING_HEADER table
%% header: the static Table0 entries, the credential-stripped source and
%% destination URIs, and the per-definition Table1 entries, all encoded
%% as longstr fields.
update_headers(Table0, Table1, SrcURI, DestURI, Headers) ->
    Table = Table0 ++ [{<<"src-uri">>, SrcURI},
                       {<<"dest-uri">>, DestURI}] ++ Table1,
    rabbit_basic:prepend_table_header(
      ?ROUTING_HEADER, [{K, longstr, V} || {K, V} <- Table],
      Headers).
+
%% Convert a binary option value to an atom (latin1, matching the
%% behaviour of list_to_atom over binary_to_list); any non-binary value
%% is passed through unchanged.
opt_b2a(Bin) when is_binary(Bin) -> binary_to_atom(Bin, latin1);
opt_b2a(Other)                   -> Other.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_status).
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([report/3, remove/1, status/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-record(state, {}).
+-record(entry, {name, type, info, timestamp}).
+
start_link() ->
    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).

%% Record (or overwrite) the status of shovel Name, timestamped with the
%% caller's local time. Fire-and-forget.
report(Name, Type, Info) ->
    gen_server:cast(?SERVER, {report, Name, Type, Info, calendar:local_time()}).

%% Remove the status entry for Name (e.g. when the shovel goes away).
remove(Name) ->
    gen_server:cast(?SERVER, {remove, Name}).

%% Synchronously fetch all known shovel statuses as a list of
%% {Name, Type, Info, Timestamp} tuples.
status() ->
    gen_server:call(?SERVER, status, infinity).

init([]) ->
    %% Private named ETS table keyed on #entry.name; only this process
    %% reads or writes it, so no concurrency options are needed.
    ?ETS_NAME = ets:new(?ETS_NAME,
                        [named_table, {keypos, #entry.name}, private]),
    {ok, #state{}}.
+
%% Dump the whole status table as {Name, Type, Info, Timestamp} tuples.
handle_call(status, _From, State) ->
    Report = [{N, T, I, TS}
              || #entry{name = N, type = T, info = I, timestamp = TS}
                     <- ets:tab2list(?ETS_NAME)],
    {reply, Report, State}.
+
%% Upsert the status entry and broadcast a shovel_worker_status event
%% containing the flattened name and status.
handle_cast({report, Name, Type, Info, Timestamp}, State) ->
    true = ets:insert(?ETS_NAME, #entry{name = Name, type = Type, info = Info,
                                        timestamp = Timestamp}),
    rabbit_event:notify(shovel_worker_status,
                        split_name(Name) ++ split_status(Info)),
    {noreply, State};

%% Delete the entry and broadcast shovel_worker_removed.
handle_cast({remove, Name}, State) ->
    true = ets:delete(?ETS_NAME, Name),
    rabbit_event:notify(shovel_worker_removed, split_name(Name)),
    {noreply, State}.

%% Drain any unexpected messages.
handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
%% Flatten a status term into a proplist suitable for event emission:
%% 'running' carries extra info through; 'terminated' carries its
%% reason; any bare atom becomes a lone status pair.
split_status({running, Extra}) ->
    [{status, running} | Extra];
split_status({terminated, Why}) ->
    [{status, terminated}, {reason, Why}];
split_status(Plain) when is_atom(Plain) ->
    [{status, Plain}].
+
%% Flatten a shovel name into event fields: dynamic shovels are named by
%% a {VHost, Name} pair, static shovels by a bare atom.
split_name({VHostPart, NamePart}) ->
    [{name, NamePart}, {vhost, VHostPart}];
split_name(StaticName) when is_atom(StaticName) ->
    [{name, StaticName}].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_sup).
+-behaviour(supervisor2).
+
+-export([start_link/0, init/1]).
+
+-import(rabbit_shovel_config, [ensure_defaults/2]).
+
+-include("rabbit_shovel.hrl").
+
%% Parse the static shovel configuration from the application env; if it
%% is valid start the top-level supervisor, otherwise surface the
%% configuration error as the start failure.
start_link() ->
    case parse_configuration(application:get_env(shovels)) of
        {ok, Configurations} ->
            supervisor2:start_link({local, ?MODULE}, ?MODULE,
                                   [Configurations]);
        {error, _Reason} = Error ->
            Error
    end.
+
%% Supervise the status server, the dynamic-shovel supervisor, and one
%% worker supervisor per statically configured shovel. Restart intensity
%% scales with the shovel count: 2*Len restarts allowed per 2 seconds.
init([Configurations]) ->
    Len = dict:size(Configurations),
    ChildSpecs = [{rabbit_shovel_status,
                   {rabbit_shovel_status, start_link, []},
                   transient, 16#ffffffff, worker,
                   [rabbit_shovel_status]},
                  {rabbit_shovel_dyn_worker_sup_sup,
                   {rabbit_shovel_dyn_worker_sup_sup, start_link, []},
                   transient, 16#ffffffff, supervisor,
                   [rabbit_shovel_dyn_worker_sup_sup]} |
                  make_child_specs(Configurations)],
    {ok, {{one_for_one, 2*Len, 2}, ChildSpecs}}.
+
%% Build a supervisor child spec for every configured static shovel:
%% each gets its own permanent rabbit_shovel_worker_sup.
make_child_specs(Configurations) ->
    Spec = fun (Name, Config) ->
                   {Name,
                    {rabbit_shovel_worker_sup, start_link, [Name, Config]},
                    permanent,
                    16#ffffffff,
                    supervisor,
                    [rabbit_shovel_worker_sup]}
           end,
    dict:fold(fun (Name, Config, Acc) -> [Spec(Name, Config) | Acc] end,
              [], Configurations).
+
%% Turn the application's 'shovels' env value (absent, or a proplist of
%% {ShovelName, Config}) into {ok, Dict} of validated configurations or
%% the first {error, Reason} encountered.
parse_configuration(undefined) ->
    {ok, dict:new()};
parse_configuration({ok, Env}) ->
    {ok, Defaults} = application:get_env(defaults),
    parse_configuration(Defaults, Env, dict:new()).

parse_configuration(_Defaults, [], Acc) ->
    {ok, Acc};
parse_configuration(Defaults, [{ShovelName, ShovelConfig} | Env], Acc)
  when is_atom(ShovelName) andalso is_list(ShovelConfig) ->
    case dict:is_key(ShovelName, Acc) of
        true  -> {error, {duplicate_shovel_definition, ShovelName}};
        false -> case validate_shovel_config(ShovelName, ShovelConfig) of
                     {ok, Shovel} ->
                         %% make sure the config we accumulate has any
                         %% relevant default values (discovered during
                         %% validation), applied back to it
                         UpdatedConfig = ensure_defaults(ShovelConfig, Shovel),
                         Acc2 = dict:store(ShovelName, UpdatedConfig, Acc),
                         parse_configuration(Defaults, Env, Acc2);
                     Error ->
                         Error
                 end
    end;
%% Anything that is not a list of {atom, list} pairs is malformed.
parse_configuration(_Defaults, _, _Acc) ->
    {error, require_list_of_shovel_configurations}.

%% Delegate to the static-config parser; yields {ok, #shovel{}} or a
%% descriptive {error, ...}.
validate_shovel_config(ShovelName, ShovelConfig) ->
    rabbit_shovel_config:parse(ShovelName, ShovelConfig).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_worker).
+-behaviour(gen_server2).
+
+-export([start_link/3]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-define(MAX_CONNECTION_CLOSE_TIMEOUT, 10000).
+
+-record(state, {inbound_conn, inbound_ch, outbound_conn, outbound_ch,
+ name, type, config, inbound_uri, outbound_uri, unacked,
+ remaining, %% [1]
+ remaining_unacked}). %% [2]
+
+%% [1] Counts down until we shut down in all modes
+%% [2] Counts down until we stop publishing in on-confirm mode
+
%% Start a shovel worker of the given Type (static | dynamic), first
%% reporting 'starting' to the status table.
start_link(Type, Name, Config) ->
    ok = rabbit_shovel_status:report(Name, Type, starting),
    gen_server2:start_link(?MODULE, [Type, Name, Config], []).

%%---------------------------
%% Gen Server Implementation
%%---------------------------

%% Parse the raw configuration up front but defer all connection setup
%% to an 'init' cast, so start_link/3 returns promptly.
init([Type, Name, Config]) ->
    gen_server2:cast(self(), init),
    {ok, Shovel} = parse(Type, Name, Config),
    {ok, #state{name = Name, type = Type, config = Shovel}}.

%% Static shovel definitions come from the application env; dynamic ones
%% from runtime parameters. Both yield a #shovel{} record.
parse(static, Name, Config)  -> rabbit_shovel_config:parse(Name, Config);
parse(dynamic, Name, Config) -> rabbit_shovel_parameters:parse(Name, Config).

%% NOTE(review): no synchronous calls are expected; replying {noreply, _}
%% would leave any caller blocked until its timeout -- presumably
%% intentional, but confirm against gen_server2 usage.
handle_call(_Msg, _From, State) ->
    {noreply, State}.
+
%% Deferred initialisation (cast from init/1): connect both ends,
%% declare resources, configure QoS/confirms per ack mode, then start
%% consuming from the source.
handle_cast(init, State = #state{config = Config}) ->
    %% NOTE(review): random/now() are the pre-OTP-18 idioms used
    %% throughout this code; rand/erlang:timestamp on modern OTP.
    random:seed(now()),
    #shovel{sources = Sources, destinations = Destinations} = Config,
    {InboundConn, InboundChan, InboundURI} =
        make_conn_and_chan(Sources#endpoint.uris),
    {OutboundConn, OutboundChan, OutboundURI} =
        make_conn_and_chan(Destinations#endpoint.uris),

    %% Don't trap exits until we have established connections so that
    %% if we try to shut down while waiting for a connection to be
    %% established then we don't block
    process_flag(trap_exit, true),

    (Sources#endpoint.resource_declaration)(InboundConn, InboundChan),
    (Destinations#endpoint.resource_declaration)(OutboundConn, OutboundChan),

    %% Prefetch only makes sense when the broker expects acks from us.
    NoAck = Config#shovel.ack_mode =:= no_ack,
    case NoAck of
        false -> Prefetch = Config#shovel.prefetch_count,
                 #'basic.qos_ok'{} =
                     amqp_channel:call(
                       InboundChan, #'basic.qos'{prefetch_count = Prefetch});
        true  -> ok
    end,

    %% on-confirm mode needs publisher confirms on the outbound channel,
    %% delivered to this process as basic.ack / basic.nack messages.
    case Config#shovel.ack_mode of
        on_confirm ->
            #'confirm.select_ok'{} =
                amqp_channel:call(OutboundChan, #'confirm.select'{}),
            ok = amqp_channel:register_confirm_handler(OutboundChan, self());
        _ ->
            ok
    end,

    %% How many messages to move before autodelete (or 'unlimited').
    Remaining = remaining(InboundChan, Config),

    #'basic.consume_ok'{} =
        amqp_channel:subscribe(
          InboundChan, #'basic.consume'{queue  = Config#shovel.queue,
                                        no_ack = NoAck},
          self()),

    State1 =
        State#state{inbound_conn = InboundConn, inbound_ch = InboundChan,
                    outbound_conn = OutboundConn, outbound_ch = OutboundChan,
                    inbound_uri = InboundURI,
                    outbound_uri = OutboundURI,
                    remaining = Remaining,
                    remaining_unacked = Remaining,
                    unacked = gb_trees:empty()},
    ok = report_running(State1),
    {noreply, State1}.
+
%% consume_ok for our own subscription: nothing to do.
handle_info(#'basic.consume_ok'{}, State) ->
    {noreply, State};

%% A delivery from the source: apply the configured publish-field and
%% publish-property rewriting functions and republish downstream.
handle_info({#'basic.deliver'{delivery_tag = Tag,
                              exchange = Exchange, routing_key = RoutingKey},
             Msg = #amqp_msg{props = Props = #'P_basic'{}}},
            State = #state{inbound_uri = InboundURI,
                           outbound_uri = OutboundURI,
                           config = #shovel{publish_properties = PropsFun,
                                            publish_fields = FieldsFun}}) ->
    Method = #'basic.publish'{exchange = Exchange, routing_key = RoutingKey},
    Method1 = FieldsFun(InboundURI, OutboundURI, Method),
    Msg1 = Msg#amqp_msg{props = PropsFun(InboundURI, OutboundURI, Props)},
    {noreply, publish(Tag, Method1, Msg1, State)};

%% Publisher confirm from the destination (on-confirm mode only): relay
%% it back to the source as a basic.ack, honouring 'multiple'.
handle_info(#'basic.ack'{delivery_tag = Seq, multiple = Multiple},
            State = #state{config = #shovel{ack_mode = on_confirm}}) ->
    {noreply, confirm_to_inbound(
                fun (DTag, Multi) ->
                        #'basic.ack'{delivery_tag = DTag, multiple = Multi}
                end, Seq, Multiple, State)};

%% Negative confirm: relay as a basic.nack so the source can requeue.
handle_info(#'basic.nack'{delivery_tag = Seq, multiple = Multiple},
            State = #state{config = #shovel{ack_mode = on_confirm}}) ->
    {noreply, confirm_to_inbound(
                fun (DTag, Multi) ->
                        #'basic.nack'{delivery_tag = DTag, multiple = Multi}
                end, Seq, Multiple, State)};

%% The broker cancelled our consumer (e.g. the source queue was
%% deleted): stop and let the supervisor restart us.
handle_info(#'basic.cancel'{}, State = #state{name = Name}) ->
    rabbit_log:warning("Shovel ~p received 'basic.cancel' from the broker~n",
                       [Name]),
    {stop, {shutdown, restart}, State};

%% Either linked connection dying takes the worker down (we trap exits
%% once initialisation is complete).
handle_info({'EXIT', InboundConn, Reason},
            State = #state{inbound_conn = InboundConn}) ->
    {stop, {inbound_conn_died, Reason}, State};

handle_info({'EXIT', OutboundConn, Reason},
            State = #state{outbound_conn = OutboundConn}) ->
    {stop, {outbound_conn_died, Reason}, State}.
+
%% Died before connections were established: just report; there is
%% nothing to close.
terminate(Reason, #state{inbound_conn = undefined, inbound_ch = undefined,
                         outbound_conn = undefined, outbound_ch = undefined,
                         name = Name, type = Type}) ->
    rabbit_shovel_status:report(Name, Type, {terminated, Reason}),
    ok;
terminate(Reason, State) ->
    %% For dynamic autodelete shovels this also clears the runtime
    %% parameter (see maybe_autodelete/2).
    maybe_autodelete(Reason, State),
    %% Best-effort closes: 'catch' swallows errors on dead connections.
    catch amqp_connection:close(State#state.inbound_conn,
                                ?MAX_CONNECTION_CLOSE_TIMEOUT),
    catch amqp_connection:close(State#state.outbound_conn,
                                ?MAX_CONNECTION_CLOSE_TIMEOUT),
    rabbit_shovel_status:report(State#state.name, State#state.type,
                                {terminated, Reason}),
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
+%%---------------------------
+%% Helpers
+%%---------------------------
+
%% Relay a confirm (ack or nack, built by MsgCtr) from the outbound
%% channel back to the inbound channel, translating the publish seqno to
%% the stored delivery tag via the 'unacked' gb_tree, then account for
%% the messages thereby settled.
confirm_to_inbound(MsgCtr, Seq, Multiple, State =
                       #state{inbound_ch = InboundChan, unacked = Unacked}) ->
    ok = amqp_channel:cast(
           InboundChan, MsgCtr(gb_trees:get(Seq, Unacked), Multiple)),
    {Unacked1, Removed} = remove_delivery_tags(Seq, Multiple, Unacked, 0),
    decr_remaining(Removed, State#state{unacked = Unacked1}).
+
%% Drop confirmed entries from the unacked tree, returning
%% {NewTree, RemovedCount}.
%%
%% multiple = false: exactly the entry for Seq is removed (it must be
%% present -- gb_trees:delete/2 crashes otherwise).
%% multiple = true: every entry with key =< Seq is removed, mirroring
%% AMQP multiple-confirm semantics; higher keys are retained.
remove_delivery_tags(Seq, false, Unacked, 0) ->
    {gb_trees:delete(Seq, Unacked), 1};
remove_delivery_tags(Seq, true, Unacked, Count) ->
    case gb_trees:size(Unacked) of
        0 ->
            {Unacked, Count};
        _ ->
            {Lowest, _Val, Rest} = gb_trees:take_smallest(Unacked),
            if Lowest > Seq -> {Unacked, Count};
               true         -> remove_delivery_tags(Seq, true, Rest, Count + 1)
            end
    end.
+
%% Publish 'running' status to the status table, including the
%% credential-stripped source and destination URIs actually in use.
report_running(State) ->
    rabbit_shovel_status:report(
      State#state.name, State#state.type,
      {running, [{src_uri, State#state.inbound_uri},
                 {dest_uri, State#state.outbound_uri}]}).
+
publish(_Tag, _Method, _Msg, State = #state{remaining_unacked = 0}) ->
    %% We are in on-confirm mode, and are autodelete. We have
    %% published all the messages we need to; we just wait for acks to
    %% come back. So drop subsequent messages on the floor to be
    %% requeued later.
    State;

%% Publish one delivery to the destination, then account for it per ack
%% mode:
%%   no_ack     - nothing to ack; just count it against the quota
%%   on_confirm - remember Seq -> Tag until the broker confirms
%%   on_publish - ack the source immediately after publishing
publish(Tag, Method, Msg,
        State = #state{inbound_ch = InboundChan, outbound_ch = OutboundChan,
                       config = Config, unacked = Unacked}) ->
    %% Grab the seqno *before* publishing so it matches this message.
    Seq = case Config#shovel.ack_mode of
              on_confirm -> amqp_channel:next_publish_seqno(OutboundChan);
              _          -> undefined
          end,
    ok = amqp_channel:call(OutboundChan, Method, Msg),
    decr_remaining_unacked(
      case Config#shovel.ack_mode of
          no_ack     -> decr_remaining(1, State);
          on_confirm -> State#state{unacked = gb_trees:insert(
                                                Seq, Tag, Unacked)};
          on_publish -> ok = amqp_channel:cast(
                               InboundChan, #'basic.ack'{delivery_tag = Tag}),
                        decr_remaining(1, State)
      end).
+
%% Pick one of the candidate URIs at random, connect, link this process
%% to the connection, and open a channel. The URI is returned with
%% credentials stripped, for use in status reports and forward headers.
make_conn_and_chan(URIs) ->
    URI = lists:nth(random:uniform(length(URIs)), URIs),
    {ok, AmqpParam} = amqp_uri:parse(URI),
    {ok, Conn} = amqp_connection:start(AmqpParam),
    link(Conn),
    {ok, Chan} = amqp_connection:open_channel(Conn),
    {Conn, Chan, list_to_binary(amqp_uri:remove_credentials(URI))}.
+
%% How many messages this shovel should move before deleting itself,
%% derived from the delete-after setting:
%%   never          - run forever ('unlimited')
%%   'queue-length' - snapshot the source queue depth (passive declare)
%%                    and move exactly that many messages
%%   Count          - move exactly Count messages
%%
%% Fix: the first and last clauses did not use the channel argument;
%% renamed Ch to _Ch there to silence "variable unused" compiler
%% warnings.
remaining(_Ch, #shovel{delete_after = never}) ->
    unlimited;
remaining(Ch, #shovel{delete_after = 'queue-length', queue = Queue}) ->
    #'queue.declare_ok'{message_count = N} =
        amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
                                               passive = true}),
    N;
remaining(_Ch, #shovel{delete_after = Count}) ->
    Count.
+
%% Count N settled messages against the remaining quota. When the quota
%% is exhausted, exit with {shutdown, autodelete} so terminate/2 runs
%% its autodelete cleanup.
decr_remaining(_N, State = #state{remaining = unlimited}) ->
    State;
decr_remaining(N, State = #state{remaining = M}) ->
    case M > N of
        true  -> State#state{remaining = M - N};
        false -> exit({shutdown, autodelete})
    end.
+
%% Count one published (but not yet confirmed) message against the
%% remaining-unacked quota. 'unlimited' never counts down, and the
%% counter saturates at zero (publish/4 drops deliveries once it hits
%% zero in on-confirm mode).
decr_remaining_unacked(State = #state{remaining_unacked = Left}) ->
    case Left of
        unlimited -> State;
        0         -> State;
        N         -> State#state{remaining_unacked = N - 1}
    end.
+
%% On an autodelete shutdown of a dynamic shovel, clear its runtime
%% parameter so the definition disappears with the worker. The process
%% dictionary flag is presumably read by
%% rabbit_shovel_dyn_worker_sup_sup:stop_child/1 to tell this
%% self-initiated deletion apart -- see that function.
maybe_autodelete({shutdown, autodelete}, #state{name = {VHost, Name},
                                                type = dynamic}) ->
    %% See rabbit_shovel_dyn_worker_sup_sup:stop_child/1
    put(shovel_worker_autodelete, true),
    rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name);
maybe_autodelete(_Reason, _State) ->
    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_worker_sup).
+-behaviour(mirrored_supervisor).
+
+-export([start_link/2, init/1]).
+
+-include("rabbit_shovel.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
%% Mirrored supervisor for one static shovel, registered locally under
%% the shovel's own name.
start_link(ShovelName, ShovelConfig) ->
    mirrored_supervisor:start_link({local, ShovelName}, ShovelName,
                                   fun rabbit_misc:execute_mnesia_transaction/1,
                                   ?MODULE, [ShovelName, ShovelConfig]).

%% A positive integer reconnect_delay makes the worker {permanent, N}
%% (restarted N seconds after failure); any other value makes it
%% temporary, i.e. never restarted.
init([Name, Config]) ->
    ChildSpecs = [{Name,
                   {rabbit_shovel_worker, start_link, [static, Name, Config]},
                   case proplists:get_value(reconnect_delay, Config, none) of
                       N when is_integer(N) andalso N > 0 -> {permanent, N};
                       _                                  -> temporary
                   end,
                   16#ffffffff,
                   worker,
                   [rabbit_shovel_worker]}],
    {ok, {{one_for_one, 1, ?MAX_WAIT}, ChildSpecs}}.
--- /dev/null
%% Application resource file for the RabbitMQ shovel plugin.
{application, rabbitmq_shovel,
 [{description, "Data Shovel for RabbitMQ"},
  %% %%VSN%% is substituted with the real version at build time.
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  %% Defaults applied to each static shovel definition during
  %% validation (see rabbit_shovel_sup / ensure_defaults).
  {env, [{defaults, [{prefetch_count, 1000},
                     {ack_mode, on_confirm},
                     {publish_fields, []},
                     {publish_properties, []},
                     {reconnect_delay, 5}]
         }]},
  {mod, {rabbit_shovel, []}},
  {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_test).
+-export([test/0]).
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(EXCHANGE, <<"test_exchange">>).
+-define(TO_SHOVEL, <<"to_the_shovel">>).
+-define(FROM_SHOVEL, <<"from_the_shovel">>).
+-define(UNSHOVELLED, <<"unshovelled">>).
+-define(SHOVELLED, <<"shovelled">>).
+-define(TIMEOUT, 1000).
+
%% End-to-end test of a statically configured shovel: first exercises
%% every configuration-validation error path, then starts a real shovel
%% between two local connections and checks a message round-trips with
%% the configured publish fields and properties applied.
main_test() ->
    %% it may already be running. Stop if possible
    application:stop(rabbitmq_shovel),

    %% shovel can be started with zero shovels configured
    ok = application:start(rabbitmq_shovel),
    ok = application:stop(rabbitmq_shovel),

    %% various ways of breaking the config
    require_list_of_shovel_configurations =
        test_broken_shovel_configs(invalid_config),

    require_list_of_shovel_configurations =
        test_broken_shovel_configs([{test_shovel, invalid_shovel_config}]),

    Config = [{sources, [{broker, "amqp://"}]},
              {destinations, [{broker, "amqp://"}]},
              {queue, <<"">>}],

    {duplicate_shovel_definition, test_shovel} =
        test_broken_shovel_configs(
          [{test_shovel, Config}, {test_shovel, Config}]),

    {invalid_parameters, [{invalid, invalid, invalid}]} =
        test_broken_shovel_config([{invalid, invalid, invalid} | Config]),

    {duplicate_parameters, [queue]} =
        test_broken_shovel_config([{queue, <<"">>} | Config]),

    {missing_parameters, Missing} =
        test_broken_shovel_config([]),
    [destinations, queue, sources] = lists:sort(Missing),

    {unrecognised_parameters, [invalid]} =
        test_broken_shovel_config([{invalid, invalid} | Config]),

    {require_list, invalid} =
        test_broken_shovel_sources(invalid),

    {missing_endpoint_parameter, broker_or_brokers} =
        test_broken_shovel_sources([]),

    {expected_list, brokers, invalid} =
        test_broken_shovel_sources([{brokers, invalid}]),

    {expected_string_uri, 42} =
        test_broken_shovel_sources([{brokers, [42]}]),

    {{unexpected_uri_scheme, "invalid"}, "invalid://"} =
        test_broken_shovel_sources([{broker, "invalid://"}]),

    {{unable_to_parse_uri, no_scheme}, "invalid"} =
        test_broken_shovel_sources([{broker, "invalid"}]),

    {expected_list,declarations, invalid} =
        test_broken_shovel_sources([{broker, "amqp://"},
                                    {declarations, invalid}]),
    {unknown_method_name, 42} =
        test_broken_shovel_sources([{broker, "amqp://"},
                                    {declarations, [42]}]),

    {expected_method_field_list, 'queue.declare', 42} =
        test_broken_shovel_sources([{broker, "amqp://"},
                                    {declarations, [{'queue.declare', 42}]}]),

    {unknown_fields, 'queue.declare', [invalid]} =
        test_broken_shovel_sources(
          [{broker, "amqp://"},
           {declarations, [{'queue.declare', [invalid]}]}]),

    {{invalid_amqp_params_parameter, heartbeat, "text",
      [{"heartbeat", "text"}], {not_an_integer, "text"}}, _} =
        test_broken_shovel_sources(
          [{broker, "amqp://localhost/?heartbeat=text"}]),

    {{invalid_amqp_params_parameter, username, "text",
      [{"username", "text"}],
      {parameter_unconfigurable_in_query, username, "text"}}, _} =
        test_broken_shovel_sources([{broker, "amqp://?username=text"}]),

    {invalid_parameter_value, prefetch_count,
     {require_non_negative_integer, invalid}} =
        test_broken_shovel_config([{prefetch_count, invalid} | Config]),

    {invalid_parameter_value, ack_mode,
     {ack_mode_value_requires_one_of,
      {no_ack, on_publish, on_confirm}, invalid}} =
        test_broken_shovel_config([{ack_mode, invalid} | Config]),

    {invalid_parameter_value, queue,
     {require_binary, invalid}} =
        test_broken_shovel_config([{sources, [{broker, "amqp://"}]},
                                   {destinations, [{broker, "amqp://"}]},
                                   {queue, invalid}]),

    {invalid_parameter_value, publish_properties,
     {require_list, invalid}} =
        test_broken_shovel_config([{publish_properties, invalid} | Config]),

    {invalid_parameter_value, publish_properties,
     {unexpected_fields, [invalid], _}} =
        test_broken_shovel_config([{publish_properties, [invalid]} | Config]),

    {{invalid_ssl_parameter, fail_if_no_peer_cert, "42", _,
      {require_boolean, '42'}}, _} =
        test_broken_shovel_sources([{broker, "amqps://username:password@host:5673/vhost?cacertfile=/path/to/cacert.pem&certfile=/path/to/certfile.pem&keyfile=/path/to/keyfile.pem&verify=verify_peer&fail_if_no_peer_cert=42"}]),

    %% a working config
    application:set_env(
      rabbitmq_shovel,
      shovels,
      [{test_shovel,
        [{sources,
          [{broker, "amqp:///%2f?heartbeat=5"},
           {declarations,
            [{'queue.declare', [exclusive, auto_delete]},
             {'exchange.declare', [{exchange, ?EXCHANGE}, auto_delete]},
             {'queue.bind', [{queue, <<>>}, {exchange, ?EXCHANGE},
                             {routing_key, ?TO_SHOVEL}]}
            ]}]},
         {destinations,
          [{broker, "amqp:///%2f"}]},
         {queue, <<>>},
         {ack_mode, on_confirm},
         {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]},
         {publish_properties, [{delivery_mode, 2},
                               {cluster_id, <<"my-cluster">>},
                               {content_type, ?SHOVELLED}]}
        ]}],
      infinity),

    ok = application:start(rabbitmq_shovel),

    await_running_shovel(test_shovel),

    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
    {ok, Chan} = amqp_connection:open_channel(Conn),

    %% Bind a fresh exclusive queue to both routing keys so we observe
    %% the shovelled copy and the original message.
    #'queue.declare_ok'{ queue = Q } =
        amqp_channel:call(Chan, #'queue.declare' { exclusive = true }),
    #'queue.bind_ok'{} =
        amqp_channel:call(Chan, #'queue.bind' { queue = Q, exchange = ?EXCHANGE,
                                                routing_key = ?FROM_SHOVEL }),
    #'queue.bind_ok'{} =
        amqp_channel:call(Chan, #'queue.bind' { queue = Q, exchange = ?EXCHANGE,
                                                routing_key = ?TO_SHOVEL }),

    #'basic.consume_ok'{ consumer_tag = CTag } =
        amqp_channel:subscribe(Chan,
                               #'basic.consume' { queue = Q, exclusive = true },
                               self()),
    receive
        #'basic.consume_ok'{ consumer_tag = CTag } -> ok
    after ?TIMEOUT -> throw(timeout_waiting_for_consume_ok)
    end,

    ok = amqp_channel:call(Chan,
                           #'basic.publish' { exchange = ?EXCHANGE,
                                              routing_key = ?TO_SHOVEL },
                           #amqp_msg { payload = <<42>>,
                                       props = #'P_basic' {
                                         delivery_mode = 2,
                                         content_type = ?UNSHOVELLED }
                                     }),

    %% The shovelled copy must carry the configured routing key and
    %% publish properties.
    receive
        {#'basic.deliver' { consumer_tag = CTag, delivery_tag = AckTag,
                            routing_key = ?FROM_SHOVEL },
         #amqp_msg { payload = <<42>>,
                     props = #'P_basic' { delivery_mode = 2,
                                          content_type = ?SHOVELLED }
                   }} ->
            ok = amqp_channel:call(Chan, #'basic.ack'{ delivery_tag = AckTag })
    after ?TIMEOUT -> throw(timeout_waiting_for_deliver1)
    end,

    [{test_shovel, static, {running, _Info}, _Time}] =
        rabbit_shovel_status:status(),

    %% The untouched original must also arrive, with its original
    %% routing key and properties.
    receive
        {#'basic.deliver' { consumer_tag = CTag, delivery_tag = AckTag1,
                            routing_key = ?TO_SHOVEL },
         #amqp_msg { payload = <<42>>,
                     props = #'P_basic' { delivery_mode = 2,
                                          content_type = ?UNSHOVELLED }
                   }} ->
            ok = amqp_channel:call(Chan, #'basic.ack'{ delivery_tag = AckTag1 })
    after ?TIMEOUT -> throw(timeout_waiting_for_deliver2)
    end,

    amqp_channel:close(Chan),
    amqp_connection:close(Conn),

    ok.
+
%% Install Configs as the 'shovels' env, assert the application then
%% fails to start, and return the failure reason.
test_broken_shovel_configs(Configs) ->
    application:set_env(rabbitmq_shovel, shovels, Configs),
    {error, {Error, _}} = application:start(rabbitmq_shovel),
    Error.

%% As above for a single shovel named test_shovel; unwraps and returns
%% the inner invalid_shovel_configuration reason.
test_broken_shovel_config(Config) ->
    {invalid_shovel_configuration, test_shovel, Error} =
        test_broken_shovel_configs([{test_shovel, Config}]),
    Error.

%% As above with only the sources endpoint varying; unwraps and returns
%% the inner invalid_parameter_value reason.
test_broken_shovel_sources(Sources) ->
    {invalid_parameter_value, sources, Error} =
        test_broken_shovel_config([{sources, Sources},
                                   {destinations, [{broker, "amqp://"}]},
                                   {queue, <<"">>}]),
    Error.
+
%% Poll the status table every 100ms until the shovel called Name
%% reports a {running, _} status. Never gives up on its own; the
%% enclosing EUnit timeout bounds it.
await_running_shovel(Name) ->
    IsRunning = fun ({N, _Type, {running, _}, _Time}) -> N =:= Name;
                    (_Other)                          -> false
                end,
    case lists:any(IsRunning, rabbit_shovel_status:status()) of
        true  -> ok;
        false -> timer:sleep(100),
                 await_running_shovel(Name)
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_test_all).
+
+-export([all_tests/0]).
+
%% Run both shovel EUnit suites verbosely; a badmatch crash signals
%% failure of either suite.
all_tests() ->
    ok = eunit:test(tests(rabbit_shovel_test, 60), [verbose]),
    ok = eunit:test(tests(rabbit_shovel_test_dyn, 60), [verbose]).

%% Build an EUnit 'foreach' spec for Module: every zero-arity function
%% ending in "_test", plus every test produced by "_test_" generator
%% functions, each wrapped in a per-test Timeout (seconds).
tests(Module, Timeout) ->
    {foreach, fun() -> ok end,
     [{timeout, Timeout, fun Module:F/0} || F <- funs(Module, "_test")] ++
     [{timeout, Timeout, Fun} || Gen <- funs(Module, "_test_"),
                                 Fun <- Module:Gen()]}.
+
%% List the exported function names of Module whose names end with
%% Suffix (arity is ignored; duplicates appear once per arity).
funs(Module, Suffix) ->
    Exports = proplists:get_value(exports, Module:module_info()),
    [F || {F, _Arity} <- Exports,
          lists:suffix(Suffix, atom_to_list(F))].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_shovel_test_dyn).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
%% Queue-to-queue shovel over the default exchange.
simple_test() ->
    with_ch(
      fun (Ch) ->
              set_param(<<"test">>, [{<<"src-queue">>, <<"src">>},
                                     {<<"dest-queue">>, <<"dest">>}]),
              publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
      end).

%% Exchange-to-exchange shovel, with and without a dest-exchange-key
%% override of the routing key.
exchange_test() ->
    with_ch(
      fun (Ch) ->
              amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue">>,
                                                     durable = true}),
              amqp_channel:call(
                Ch, #'queue.bind'{queue = <<"queue">>,
                                  exchange = <<"amq.topic">>,
                                  routing_key = <<"test-key">>}),
              set_param(<<"test">>, [{<<"src-exchange">>, <<"amq.direct">>},
                                     {<<"src-exchange-key">>,<<"test-key">>},
                                     {<<"dest-exchange">>, <<"amq.topic">>}]),
              publish_expect(Ch, <<"amq.direct">>, <<"test-key">>,
                             <<"queue">>, <<"hello">>),
              %% With dest-exchange-key set the message is republished
              %% under "new-key", so the old binding no longer matches.
              set_param(<<"test">>, [{<<"src-exchange">>, <<"amq.direct">>},
                                     {<<"src-exchange-key">>, <<"test-key">>},
                                     {<<"dest-exchange">>, <<"amq.topic">>},
                                     {<<"dest-exchange-key">>,<<"new-key">>}]),
              publish(Ch, <<"amq.direct">>, <<"test-key">>, <<"hello">>),
              expect_empty(Ch, <<"queue">>),
              amqp_channel:call(
                Ch, #'queue.bind'{queue = <<"queue">>,
                                  exchange = <<"amq.topic">>,
                                  routing_key = <<"new-key">>}),
              publish_expect(Ch, <<"amq.direct">>, <<"test-key">>,
                             <<"queue">>, <<"hello">>)
      end).

%% A shovel must come back and keep working after its connections die.
restart_test() ->
    with_ch(
      fun (Ch) ->
              set_param(<<"test">>, [{<<"src-queue">>, <<"src">>},
                                     {<<"dest-queue">>, <<"dest">>}]),
              %% The catch is because connections link to the shovel,
              %% so one connection will die, kill the shovel, kill
              %% the other connection, then we can't close it
              [catch amqp_connection:close(C) || C <- rabbit_direct:list()],
              publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
      end).

%% Re-setting the parameter redirects traffic; clearing it stops
%% shovelling entirely.
change_definition_test() ->
    with_ch(
      fun (Ch) ->
              set_param(<<"test">>, [{<<"src-queue">>, <<"src">>},
                                     {<<"dest-queue">>, <<"dest">>}]),
              publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>),
              set_param(<<"test">>, [{<<"src-queue">>, <<"src">>},
                                     {<<"dest-queue">>, <<"dest2">>}]),
              publish_expect(Ch, <<>>, <<"src">>, <<"dest2">>, <<"hello">>),
              expect_empty(Ch, <<"dest">>),
              clear_param(<<"test">>),
              publish_expect(Ch, <<>>, <<"src">>, <<"src">>, <<"hello">>),
              expect_empty(Ch, <<"dest">>),
              expect_empty(Ch, <<"dest2">>)
      end).

%% delete-after in every compatible ack mode: each case is
%% {AckMode, DeleteAfter, MsgsLeftOnSrc, MsgsExpectedOnDest} starting
%% from 100 messages on the source queue.
autodelete_test_() ->
    [autodelete_case({<<"on-confirm">>, <<"queue-length">>, 0, 100}),
     autodelete_case({<<"on-confirm">>, 50, 50, 50}),
     autodelete_case({<<"on-publish">>, <<"queue-length">>, 0, 100}),
     autodelete_case({<<"on-publish">>, 50, 50, 50}),
     %% no-ack is not compatible with explicit count
     autodelete_case({<<"no-ack">>, <<"queue-length">>, 0, 100})].

autodelete_case(Args) ->
    fun () -> with_ch(autodelete_do(Args)) end.

autodelete_do({AckMode, After, ExpSrc, ExpDest}) ->
    fun (Ch) ->
            amqp_channel:call(Ch, #'confirm.select'{}),
            amqp_channel:call(Ch, #'queue.declare'{queue = <<"src">>}),
            publish_count(Ch, <<>>, <<"src">>, <<"hello">>, 100),
            amqp_channel:wait_for_confirms(Ch),
            set_param_nowait(<<"test">>, [{<<"src-queue">>, <<"src">>},
                                          {<<"dest-queue">>, <<"dest">>},
                                          {<<"ack-mode">>, AckMode},
                                          {<<"delete-after">>, After}]),
            await_autodelete(<<"test">>),
            expect_count(Ch, <<"src">>, <<"hello">>, ExpSrc),
            expect_count(Ch, <<"dest">>, <<"hello">>, ExpDest)
    end.
+
%% Static checks on dynamic shovel parameters: each of these must be
%% accepted or rejected by parameter validation alone.
validation_test() ->
    URIs = [{<<"src-uri">>,  <<"amqp://">>},
            {<<"dest-uri">>, <<"amqp://">>}],

    %% Need valid src and dest URIs
    invalid_param([]),
    invalid_param([{<<"src-queue">>, <<"test">>},
                   {<<"src-uri">>,   <<"derp">>},
                   {<<"dest-uri">>,  <<"amqp://">>}]),
    invalid_param([{<<"src-queue">>, <<"test">>},
                   {<<"src-uri">>,   [<<"derp">>]},
                   {<<"dest-uri">>,  <<"amqp://">>}]),
    invalid_param([{<<"src-queue">>, <<"test">>},
                   {<<"dest-uri">>,  <<"amqp://">>}]),

    %% Also need src exchange or queue
    invalid_param(URIs),
    valid_param([{<<"src-exchange">>, <<"test">>} | URIs]),
    QURIs = [{<<"src-queue">>, <<"test">>} | URIs],
    valid_param(QURIs),

    %% But not both
    invalid_param([{<<"src-exchange">>, <<"test">>} | QURIs]),

    %% Check these are of right type
    invalid_param([{<<"prefetch-count">>,  <<"three">>} | QURIs]),
    invalid_param([{<<"reconnect-delay">>, <<"three">>} | QURIs]),
    invalid_param([{<<"ack-mode">>,        <<"whenever">>} | QURIs]),
    invalid_param([{<<"delete-after">>,    <<"whenever">>} | QURIs]),

    %% Can't use explicit message count and no-ack together
    invalid_param([{<<"delete-after">>,    1},
                   {<<"ack-mode">>,        <<"no-ack">>} | QURIs]),
    ok.

%% Validation must enforce the *setting* user's permissions on the
%% vhosts named in the URIs: user "a" may only shovel within vhost "a".
security_validation_test() ->
    [begin
         rabbit_vhost:add(U),
         rabbit_auth_backend_internal:add_user(U, <<>>),
         rabbit_auth_backend_internal:set_permissions(
           U, U, <<".*">>, <<".*">>, <<".*">>)
     end || U <- [<<"a">>, <<"b">>]],

    Qs = [{<<"src-queue">>, <<"test">>},
          {<<"dest-queue">>, <<"test2">>}],

    A = lookup_user(<<"a">>),
    valid_param([{<<"src-uri">>,  <<"amqp:///a">>},
                 {<<"dest-uri">>, <<"amqp:///a">>} | Qs], A),
    invalid_param([{<<"src-uri">>,  <<"amqp:///a">>},
                   {<<"dest-uri">>, <<"amqp:///b">>} | Qs], A),
    invalid_param([{<<"src-uri">>,  <<"amqp:///b">>},
                   {<<"dest-uri">>, <<"amqp:///a">>} | Qs], A),
    [begin
         rabbit_vhost:delete(U),
         rabbit_auth_backend_internal:delete_user(U)
     end || U <- [<<"a">>, <<"b">>]],
    ok.
+
+%%----------------------------------------------------------------------------
+
+with_ch(Fun) ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Fun(Ch),
+ amqp_connection:close(Conn),
+ cleanup(),
+ ok.
+
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+ publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+ amqp_channel:cast(Ch, #'basic.publish'{exchange = X,
+ routing_key = Key}, Msg).
+
+publish_expect(Ch, X, Key, Q, Payload) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, Payload).
+
+expect(Ch, Q, Payload) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = true}, self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} -> ok
+ end,
+ receive
+ {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ ok
+ after 1000 ->
+ exit({not_received, Payload})
+ end,
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}).
+
+expect_empty(Ch, Q) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{ queue = Q })).
+
+publish_count(Ch, X, Key, M, Count) ->
+ [publish(Ch, X, Key, M) || _ <- lists:seq(1, Count)].
+
+expect_count(Ch, Q, M, Count) ->
+ [expect(Ch, Q, M) || _ <- lists:seq(1, Count)],
+ expect_empty(Ch, Q).
+
+set_param(Name, Value) ->
+ set_param_nowait(Name, Value),
+ await_shovel(Name).
+
+set_param_nowait(Name, Value) ->
+ ok = rabbit_runtime_parameters:set(
+ <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>, <<"amqp://">>},
+ {<<"dest-uri">>, [<<"amqp://">>]} |
+ Value], none).
+
+invalid_param(Value, User) ->
+ {error_string, _} = rabbit_runtime_parameters:set(
+ <<"/">>, <<"shovel">>, <<"invalid">>, Value, User).
+
+valid_param(Value, User) ->
+ ok = rabbit_runtime_parameters:set(
+ <<"/">>, <<"shovel">>, <<"a">>, Value, User),
+ ok = rabbit_runtime_parameters:clear(<<"/">>, <<"shovel">>, <<"a">>).
+
+invalid_param(Value) -> invalid_param(Value, none).
+valid_param(Value) -> valid_param(Value, none).
+
+lookup_user(Name) ->
+ {ok, User} = rabbit_auth_backend_internal:check_user_login(Name, []),
+ User.
+
+clear_param(Name) ->
+ rabbit_runtime_parameters:clear(<<"/">>, <<"shovel">>, Name).
+
+cleanup() ->
+ [rabbit_runtime_parameters:clear(pget(vhost, P),
+ pget(component, P),
+ pget(name, P)) ||
+ P <- rabbit_runtime_parameters:list()],
+ [rabbit_amqqueue:delete(Q, false, false) || Q <- rabbit_amqqueue:list()].
+
+await_shovel(Name) ->
+ await(fun () -> lists:member(Name, shovels_from_status()) end).
+
+await_autodelete(Name) ->
+ await(fun () -> not lists:member(Name, shovels_from_parameters()) end),
+ await(fun () -> not lists:member(Name, shovels_from_status()) end).
+
+await(Pred) ->
+ case Pred() of
+ true -> ok;
+ false -> timer:sleep(100),
+ await(Pred)
+ end.
+
+shovels_from_status() ->
+ S = rabbit_shovel_status:status(),
+ [N || {{<<"/">>, N}, dynamic, {running, _}, _} <- S].
+
+shovels_from_parameters() ->
+ L = rabbit_runtime_parameters:list(<<"/">>, <<"shovel">>),
+ [pget(name, Shovel) || Shovel <- L].
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+Comments from Sean Treadway, 2 June 2008, on the rabbitmq-discuss list:
+
+ - On naming, extensibility, and headers:
+
+ "STOMP looked like it was MQ agnostic and extensible while keeping
+ the core headers well defined (ack=client, message_id, etc...),
+ but my application was not MQ agnostic. Plus I saw some of the
+ ActiveMQ headers weren't available or necessary in RabbitMQ.
+
+ "Keeping the AMQP naming is the best way to piggy back on the AMQP
+ documentation. For those that need simple, transient queues, the
+ existing STOMP documentation would be sufficient."
+
+ ...
+
+ "I only have experience with RabbitMQ, so I'm fine with exposing
+ AMQP rather than try to come to some agreement over the extension
+ names of standard STOMP headers."
+
+ - On queue deletion over STOMP:
+
+ "Here, I would stick with the verbs defined in STOMP and extend the
+ verbs with headers. One possibility is to use UNSUBSCRIBE
+ messages to change the queue properties before sending the
+ 'basic.cancel' method. Another possibility is to change queue
+ properties on a SUBSCRIBE message. Neither seem nice to me. Third
+ option is to do nothing, and delete the queues outside of the
+ STOMP protocol"
+
+Comments from Darien Kindlund, 11 February 2009, on the rabbitmq-discuss list:
+
+ - On testing of connection establishment:
+
+ "[O]nce I switched each perl process over to re-using their
+ existing STOMP connection, things worked much, much better. As
+ such, I'm continuing development. In your unit testing, you may
+ want to include rapid connect/disconnect behavior or otherwise
+ explicitly warn developers to avoid this scenario."
+
+Comments from Novak Joe, 11 September 2008, on the rabbitmq-discuss list:
+
+ - On broadcast send:
+
+ "That said, I think it would also be useful to add to the STOMP
+ wiki page an additional note on broadcast SEND. In particular I
+ found that in order to send a message to a broadcast exchange it
+ needs look something like:
+
+ ---------------------------------
+ SEND
+ destination:x.mytopic
+ exchange:amq.topic
+
+ my message
+ \x00
+ --------------------------------
+
+ "However my initial newb intuition was that it should look more like:
+
+ ---------------------------------
+ SEND
+ destination:
+ exchange:amq.topic
+ routing_key:x.mytopic
+
+ my message
+ \x00
+ --------------------------------
+
+ "The ruby examples cleared this up but not before I experienced a
+ bit of confusion on the subject."
--- /dev/null
+# RabbitMQ STOMP adapter
+
+The STOMP adapter is included in the RabbitMQ distribution. To enable
+it, use <a href="http://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html">rabbitmq-plugins</a>:
+
+ rabbitmq-plugins enable rabbitmq_stomp
+
+Binaries for previous versions of the STOMP adapter can be obtained
+from
+<http://www.rabbitmq.com/plugins.html#rabbitmq-stomp>.
+
+Full usage instructions can be found at
+<http://www.rabbitmq.com/stomp.html>.
--- /dev/null
+UPSTREAM_HG=https://stomppy.googlecode.com/hg/
+REVISION=16a4000624a7
+
+LIB_DIR=stomppy
+CHECKOUT_DIR=stomppy-hg
+
+TARGETS=$(LIB_DIR)
+
+all: $(TARGETS)
+
+clean:
+ rm -rf $(LIB_DIR)
+
+distclean: clean
+ rm -rf $(CHECKOUT_DIR)
+
+$(LIB_DIR) : $(CHECKOUT_DIR) rabbit.patch
+ rm -rf $@
+ cp -R $< $@
+ cd $@ && patch -p1 < ../rabbit.patch
+
+$(CHECKOUT_DIR):
+ hg clone $(UPSTREAM_HG) $@
+ (cd $@ && hg up $(REVISION)) || rm -rf $@
+
+echo-revision:
+ @echo $(REVISION)
+
--- /dev/null
+diff -r 16a4000624a7 stomp/connect.py
+--- a/stomp/connect.py Sun May 02 18:15:34 2010 +0100
++++ b/stomp/connect.py Fri Aug 26 15:35:33 2011 +0100
+@@ -88,7 +88,10 @@
+ ssl_key_file = None,
+ ssl_cert_file = None,
+ ssl_ca_certs = None,
+- ssl_cert_validator = None):
++ ssl_cert_validator = None,
++ version = None,
++ heartbeat = None,
++ virtual_host = None):
+ """
+ Initialize and start this connection.
+
+@@ -159,6 +162,16 @@
+
+ where OK is a boolean, and cert is a certificate structure
+ as returned by ssl.SSLSocket.getpeercert()
++
++ \param version
++ (optional) stomp version header to send (comma separated)
++
++ \param heartbeat
++ (optional) heartbeat header to send (STOMP 1.1)
++
++ \param virtual_host
++ (optional) virtual_host header to send (STOMP 1.1)
++
+ """
+
+ sorted_host_and_ports = []
+@@ -205,6 +218,15 @@
+ self.__connect_headers['login'] = user
+ self.__connect_headers['passcode'] = passcode
+
++ if version is not None:
++ self.__connect_headers['accept-version'] = version
++
++ if heartbeat is not None:
++ self.__connect_headers['heart-beat'] = heartbeat
++
++ if virtual_host is not None:
++ self.__connect_headers['host'] = virtual_host
++
+ self.__socket = None
+ self.__socket_semaphore = threading.BoundedSemaphore(1)
+ self.__current_host_and_port = None
+@@ -383,6 +405,10 @@
+ """
+ self.__send_frame_helper('DISCONNECT', '', utils.merge_headers([self.__connect_headers, headers, keyword_headers]), [ ])
+ self.__running = False
++ self.close_socket()
++ self.__current_host_and_port = None
++
++ def close_socket(self):
+ if self.__socket is not None:
+ if self.__ssl:
+ #
+@@ -390,20 +416,23 @@
+ #
+ try:
+ self.__socket = self.__socket.unwrap()
+- except Exception:
++ except Exception as e:
+ #
+ # unwrap seems flaky on Win with the backported ssl mod, so catch any exception and log it
+ #
+- _, e, _ = sys.exc_info()
+- log.warn(e)
++ log.warning("socket unwrap() threw exception: %s" % e)
+ elif hasattr(socket, 'SHUT_RDWR'):
+- self.__socket.shutdown(socket.SHUT_RDWR)
++ try:
++ self.__socket.shutdown(socket.SHUT_RDWR)
++ except Exception as e:
++ log.warning("socket shutdown() threw exception: %s" % e)
+ #
+- # split this into a separate check, because sometimes the socket is nulled between shutdown and this call
++ # caution, because sometimes the socket is nulled between shutdown and this call
+ #
+- if self.__socket is not None:
++ try:
+ self.__socket.close()
+- self.__current_host_and_port = None
++ except Exception as e:
++ log.warning("socket close() threw exception: %s" % e)
+
+ def __convert_dict(self, payload):
+ """
+@@ -449,6 +478,9 @@
+ raise KeyError("Command %s requires header %r" % (command, required_header_key))
+ self.__send_frame(command, headers, payload)
+
++ def send_frame(self, command, headers={}, payload=''):
++ self.__send_frame(command, headers, payload)
++
+ def __send_frame(self, command, headers={}, payload=''):
+ """
+ Send a STOMP frame.
+@@ -680,4 +712,4 @@
+ sleep_exp += 1
+
+ if not self.__socket:
+- raise exception.ReconnectFailedException
+\ No newline at end of file
++ raise exception.ReconnectFailedException
--- /dev/null
+#!/usr/bin/perl -w
+# subscribe to messages from the queue 'foo'
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+$stomp->subscribe({'destination'=>'/queue/foo', 'ack'=>'client'});
+while (1) {
+ my $frame = $stomp->receive_frame;
+ print $frame->body . "\n";
+ $stomp->ack({frame=>$frame});
+ last if $frame->body eq 'QUIT';
+}
+$stomp->disconnect;
--- /dev/null
+#!/usr/bin/perl -w
+
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+
+my $private_q_name = "/queue/c-" . time() . "-" . rand();
+
+$stomp->subscribe({destination => $private_q_name});
+$stomp->send({destination => '/queue/rabbitmq_stomp_rpc_service',
+ 'reply-to' => $private_q_name,
+ body => "request from $private_q_name"});
+print "Reply: " . $stomp->receive_frame->body;
+
+$stomp->disconnect;
--- /dev/null
+#!/usr/bin/perl -w
+
+use Net::Stomp;
+
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+
+$stomp->subscribe({'destination'=>'/queue/rabbitmq_stomp_rpc_service', 'ack'=>'client'});
+while (1) {
+ print "Waiting for request...\n";
+ my $frame = $stomp->receive_frame;
+ print "Received message, reply_to = " . $frame->headers->{"reply-to"} . "\n";
+ print $frame->body . "\n";
+
+ $stomp->send({destination => $frame->headers->{"reply-to"}, bytes_message => 1,
+ body => "Got body: " . $frame->body});
+ $stomp->ack({frame=>$frame});
+ last if $frame->body eq 'QUIT';
+}
+
+$stomp->disconnect;
--- /dev/null
+#!/usr/bin/perl -w
+# send a message to the amq.fanout exchange
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+$stomp->send({destination=>'/exchange/amq.fanout',
+ bytes_message=>1,
+ body=>($ARGV[0] or "test\0message")});
+$stomp->disconnect;
--- /dev/null
+#!/usr/bin/perl -w
+# send a message to the queue 'foo'
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+for (my $i = 0; $i < 10000; $i++) {
+ $stomp->send({destination=>'/queue/foo',
+ bytes_message=>1,
+ body=>($ARGV[0] or "message $i")});
+}
+$stomp->disconnect;
--- /dev/null
+#!/usr/bin/perl -w
+# subscribe to messages from the queue 'foo'
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest', prefetch=>1});
+$stomp->subscribe({'destination'=>'/queue/foo', 'ack'=>'client'});
+while (1) {
+ my $frame = $stomp->receive_frame;
+ print $frame->body . "\n";
+ sleep 1;
+ $stomp->ack({frame=>$frame});
+ last if $frame->body eq 'QUIT';
+}
+$stomp->disconnect;
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+conn.subscribe('/queue/carl')
+while mesg = conn.receive
+ puts mesg.body
+end
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+10000.times { |i| client.publish '/queue/carl', "Test Message number #{i}"}
+client.publish '/queue/carl', "All Done!"
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+# Note: requires support for connect_headers hash in the STOMP gem's connection.rb
+conn = Stomp::Connection.open('guest', 'guest', 'localhost', 61613, false, 5, {:prefetch => 1})
+conn.subscribe('/queue/carl', {:ack => 'client'})
+while mesg = conn.receive
+ puts mesg.body
+ puts 'Sleeping...'
+ sleep 0.2
+ puts 'Awake again. Acking.'
+ conn.ack mesg.headers['message-id']
+end
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+conn.subscribe('/queue/durable', :'auto-delete' => false, :durable => true)
+
+puts "Waiting for messages..."
+
+while mesg = conn.receive
+ puts mesg.body
+end
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+# Use this case to test durable queues
+#
+# Start the sender - 11 messages will be sent to /queue/durable and the sender exits
+# Stop the server - 11 messages will be written to disk
+# Start the server
+# Start the receiver - 11 messages should be received and the receiver - interrupt the receive loop
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+10.times { |i| client.publish '/queue/durable', "Test Message number #{i} sent at #{Time.now}", 'delivery-mode' => '2'}
+client.publish '/queue/durable', "All Done!"
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+topic = ARGV[0] || 'x'
+puts "Binding to /topic/#{topic}"
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+conn.subscribe("/topic/#{topic}")
+while mesg = conn.receive
+ puts mesg.body
+end
--- /dev/null
+require 'rubygems'
+require 'stomp' # this is a gem
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+puts "Subscribing to /topic/x"
+conn.subscribe('/topic/x')
+puts 'Receiving...'
+mesg = conn.receive
+puts mesg.body
+puts "Unsubscribing from /topic/x"
+conn.unsubscribe('/topic/x')
+puts 'Sleeping 5 seconds...'
+sleep 5
--- /dev/null
+require 'rubygems'
+require 'stomp'
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+client.publish '/topic/x.y', 'first message'
+client.publish '/topic/x.z', 'second message'
+client.publish '/topic/x', 'third message'
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-record(stomp_configuration, {default_login,
+ default_passcode,
+ implicit_connect,
+ ssl_cert_login}).
+
+-define(SUPPORTED_VERSIONS, ["1.0", "1.1", "1.2"]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-record(stomp_frame, {command, headers, body_iolist}).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-define(HEADER_ACCEPT_VERSION, "accept-version").
+-define(HEADER_ACK, "ack").
+-define(HEADER_AMQP_MESSAGE_ID, "amqp-message-id").
+-define(HEADER_APP_ID, "app-id").
+-define(HEADER_CONTENT_ENCODING, "content-encoding").
+-define(HEADER_CONTENT_LENGTH, "content-length").
+-define(HEADER_CONTENT_TYPE, "content-type").
+-define(HEADER_CORRELATION_ID, "correlation-id").
+-define(HEADER_DESTINATION, "destination").
+-define(HEADER_EXPIRATION, "expiration").
+-define(HEADER_HEART_BEAT, "heart-beat").
+-define(HEADER_HOST, "host").
+-define(HEADER_ID, "id").
+-define(HEADER_LOGIN, "login").
+-define(HEADER_MESSAGE_ID, "message-id").
+-define(HEADER_PASSCODE, "passcode").
+-define(HEADER_PERSISTENT, "persistent").
+-define(HEADER_PREFETCH_COUNT, "prefetch-count").
+-define(HEADER_PRIORITY, "priority").
+-define(HEADER_RECEIPT, "receipt").
+-define(HEADER_REPLY_TO, "reply-to").
+-define(HEADER_SERVER, "server").
+-define(HEADER_SESSION, "session").
+-define(HEADER_SUBSCRIPTION, "subscription").
+-define(HEADER_TIMESTAMP, "timestamp").
+-define(HEADER_TRANSACTION, "transaction").
+-define(HEADER_TYPE, "type").
+-define(HEADER_USER_ID, "user-id").
+-define(HEADER_VERSION, "version").
+
+-define(MESSAGE_ID_SEPARATOR, "@@").
+
+-define(HEADERS_NOT_ON_SEND, [?HEADER_MESSAGE_ID]).
+
+-define(TEMP_QUEUE_ID_PREFIX, "/temp-queue/").
--- /dev/null
+RELEASABLE:=true
+DEPS:=rabbitmq-server rabbitmq-erlang-client
+STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose])
+WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/test.py $(PACKAGE_DIR)/test/src/test_connect_options.py
+WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests()
+
+RABBITMQ_TEST_PATH=$(PACKAGE_DIR)/../rabbitmq-test
+ABS_PACKAGE_DIR:=$(abspath $(PACKAGE_DIR))
+
+CERTS_DIR:=$(ABS_PACKAGE_DIR)/test/certs
+CAN_RUN_SSL:=$(shell if [ -d $(RABBITMQ_TEST_PATH) ]; then echo "true"; else echo "false"; fi)
+
+TEST_CONFIG_PATH=$(TEST_EBIN_DIR)/test.config
+WITH_BROKER_TEST_CONFIG:=$(TEST_EBIN_DIR)/test
+
+.PHONY: $(TEST_CONFIG_PATH)
+
+ifeq ($(CAN_RUN_SSL),true)
+
+WITH_BROKER_TEST_SCRIPTS += $(PACKAGE_DIR)/test/src/test_ssl.py
+
+$(TEST_CONFIG_PATH): $(CERTS_DIR) $(ABS_PACKAGE_DIR)/test/src/ssl.config
+ sed -e "s|%%CERTS_DIR%%|$(CERTS_DIR)|g" < $(ABS_PACKAGE_DIR)/test/src/ssl.config > $@
+ @echo "\nRunning SSL tests\n"
+
+$(CERTS_DIR):
+ mkdir -p $(CERTS_DIR)
+ make -C $(RABBITMQ_TEST_PATH)/certs all PASSWORD=test DIR=$(CERTS_DIR)
+
+else
+$(TEST_CONFIG_PATH): $(ABS_PACKAGE_DIR)/test/src/non_ssl.config
+ cp $(ABS_PACKAGE_DIR)/test/src/non_ssl.config $@
+ @echo "\nNOT running SSL tests - looked in $(RABBITMQ_TEST_PATH) \n"
+
+endif
+
+define package_rules
+
+$(PACKAGE_DIR)+pre-test:: $(TEST_CONFIG_PATH)
+ make -C $(PACKAGE_DIR)/deps/stomppy
+
+$(PACKAGE_DIR)+clean::
+ rm -rf $(CERTS_DIR)
+
+$(PACKAGE_DIR)+clean-with-deps::
+ make -C $(PACKAGE_DIR)/deps/stomppy distclean
+
+endef
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp).
+
+-include("rabbit_stomp.hrl").
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-define(DEFAULT_CONFIGURATION,
+ #stomp_configuration{
+ default_login = undefined,
+ default_passcode = undefined,
+ implicit_connect = false,
+ ssl_cert_login = false}).
+
+start(normal, []) ->
+ Config = parse_configuration(),
+ Listeners = parse_listener_configuration(),
+ rabbit_stomp_sup:start_link(Listeners, Config).
+
+stop(_State) ->
+ ok.
+
+parse_listener_configuration() ->
+ {ok, Listeners} = application:get_env(tcp_listeners),
+ {ok, SslListeners} = application:get_env(ssl_listeners),
+ {Listeners, SslListeners}.
+
+parse_configuration() ->
+ {ok, UserConfig} = application:get_env(default_user),
+ Conf0 = parse_default_user(UserConfig, ?DEFAULT_CONFIGURATION),
+ {ok, SSLLogin} = application:get_env(ssl_cert_login),
+ {ok, ImplicitConnect} = application:get_env(implicit_connect),
+ Conf = Conf0#stomp_configuration{ssl_cert_login = SSLLogin,
+ implicit_connect = ImplicitConnect},
+ report_configuration(Conf),
+ Conf.
+
+parse_default_user([], Configuration) ->
+ Configuration;
+parse_default_user([{login, Login} | Rest], Configuration) ->
+ parse_default_user(Rest, Configuration#stomp_configuration{
+ default_login = Login});
+parse_default_user([{passcode, Passcode} | Rest], Configuration) ->
+ parse_default_user(Rest, Configuration#stomp_configuration{
+ default_passcode = Passcode});
+parse_default_user([Unknown | Rest], Configuration) ->
+ rabbit_log:warning("rabbit_stomp: ignoring invalid default_user "
+ "configuration option: ~p~n", [Unknown]),
+ parse_default_user(Rest, Configuration).
+
+report_configuration(#stomp_configuration{
+ default_login = Login,
+ implicit_connect = ImplicitConnect,
+ ssl_cert_login = SSLCertLogin}) ->
+ case Login of
+ undefined -> ok;
+ _ -> rabbit_log:info("rabbit_stomp: default user '~s' "
+ "enabled~n", [Login])
+ end,
+
+ case ImplicitConnect of
+ true -> rabbit_log:info("rabbit_stomp: implicit connect enabled~n");
+ false -> ok
+ end,
+
+ case SSLCertLogin of
+ true -> rabbit_log:info("rabbit_stomp: ssl_cert_login enabled~n");
+ false -> ok
+ end,
+
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_client_sup).
+-behaviour(supervisor2).
+
+-define(MAX_WAIT, 16#ffffffff).
+-export([start_link/1, init/1]).
+
+start_link(Configuration) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, HelperPid} =
+ supervisor2:start_child(SupPid,
+ {rabbit_stomp_heartbeat_sup,
+ {rabbit_connection_helper_sup, start_link, []},
+ intrinsic, infinity, supervisor,
+ [rabbit_connection_helper_sup]}),
+ %% The processor is intrinsic. When it exits, the supervisor goes too.
+ {ok, ProcessorPid} =
+ supervisor2:start_child(SupPid,
+ {rabbit_stomp_processor,
+ {rabbit_stomp_processor, start_link,
+ [Configuration]},
+ intrinsic, ?MAX_WAIT, worker,
+ [rabbit_stomp_processor]}),
+ %% We want the reader to be transient since when it exits normally
+ %% the processor may have some work still to do (and the reader
+ %% tells the processor to exit). However, if the reader terminates
+ %% abnormally then we want to take everything down.
+ {ok, ReaderPid} = supervisor2:start_child(
+ SupPid,
+ {rabbit_stomp_reader,
+ {rabbit_stomp_reader,
+ start_link, [HelperPid, ProcessorPid, Configuration]},
+ transient, ?MAX_WAIT, worker,
+ [rabbit_stomp_reader]}),
+
+ {ok, SupPid, ReaderPid}.
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% stomp_frame implements the STOMP framing protocol "version 1.0", as
+%% per http://stomp.codehaus.org/Protocol
+
+-module(rabbit_stomp_frame).
+
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-export([parse/2, initial_state/0]).
+-export([header/2, header/3,
+ boolean_header/2, boolean_header/3,
+ integer_header/2, integer_header/3,
+ binary_header/2, binary_header/3]).
+-export([serialize/1]).
+
+initial_state() -> none.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% STOMP 1.1 frames basic syntax
+%% Rabbit modifications:
+%% o CR LF is equivalent to LF in all element terminators (eol).
+%% o Escape codes for header names and values include \r for CR
+%% and CR is not allowed.
+%% o Header names and values are not limited to UTF-8 strings.
+%% o Header values may contain unescaped colons
+%%
+%% frame_seq ::= *(noise frame)
+%% noise ::= *(NUL | eol)
+%% eol ::= LF | CR LF
+%% frame ::= cmd hdrs body NUL
+%% body ::= *OCTET
+%% cmd ::= 1*NOTEOL eol
+%% hdrs ::= *hdr eol
+%% hdr ::= hdrname COLON hdrvalue eol
+%% hdrname ::= 1*esc_char
+%% hdrvalue ::= *esc_char
+%% esc_char ::= HDROCT | BACKSLASH ESCCODE
+%%
+%% Terms in CAPS all represent sets (alternatives) of single octets.
+%% They are defined here using a small extension of BNF, minus (-):
+%%
+%% term1 - term2 denotes any of the possibilities in term1
+%% excluding those in term2.
+%% In this grammar minus is only used for sets of single octets.
+%%
+%% OCTET ::= '00'x..'FF'x % any octet
+%% NUL ::= '00'x % the zero octet
+%% LF ::= '\n' % '0a'x newline or linefeed
+%% CR ::= '\r' % '0d'x carriage return
+%% NOTEOL ::= OCTET - (CR | LF) % any octet except CR or LF
+%% BACKSLASH ::= '\\' % '5c'x
+%% ESCCODE ::= 'c' | 'n' | 'r' | BACKSLASH
+%% COLON ::= ':'
+%% HDROCT ::= NOTEOL - (COLON | BACKSLASH)
+%% % octets allowed in a header
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% explicit frame characters
+-define(NUL, 0).
+-define(CR, $\r).
+-define(LF, $\n).
+-define(BSL, $\\).
+-define(COLON, $:).
+
+%% header escape codes
+-define(LF_ESC, $n).
+-define(BSL_ESC, $\\).
+-define(COLON_ESC, $c).
+-define(CR_ESC, $r).
+
+%% parser state
+-record(state, {acc, cmd, hdrs, hdrname}).
+
+parse(Content, {resume, Continuation}) -> Continuation(Content);
+parse(Content, none ) -> parser(Content, noframe, #state{}).
+
+more(Continuation) -> {more, {resume, Continuation}}.
+
+%% Single-function parser: Term :: noframe | command | headers | hdrname | hdrvalue
+%% general more and line-end detection
+parser(<<>>, Term , State) -> more(fun(Rest) -> parser(Rest, Term, State) end);
+parser(<<?CR>>, Term , State) -> more(fun(Rest) -> parser(<<?CR, Rest/binary>>, Term, State) end);
+parser(<<?CR, ?LF, Rest/binary>>, Term , State) -> parser(<<?LF, Rest/binary>>, Term, State);
+parser(<<?CR, Ch:8, _Rest/binary>>, Term , _State) -> {error, {unexpected_chars(Term), [?CR, Ch]}};
+%% escape processing (only in hdrname and hdrvalue terms)
+parser(<<?BSL>>, Term , State) -> more(fun(Rest) -> parser(<<?BSL, Rest/binary>>, Term, State) end);
+parser(<<?BSL, Ch:8, Rest/binary>>, Term , State)
+ when Term == hdrname;
+ Term == hdrvalue -> unescape(Ch, fun(Ech) -> parser(Rest, Term, accum(Ech, State)) end);
+%% inter-frame noise
+parser(<<?NUL, Rest/binary>>, noframe , State) -> parser(Rest, noframe, State);
+parser(<<?LF, Rest/binary>>, noframe , State) -> parser(Rest, noframe, State);
+%% detect transitions
+parser( Rest, noframe , State) -> goto(noframe, command, Rest, State);
+parser(<<?LF, Rest/binary>>, command , State) -> goto(command, headers, Rest, State);
+parser(<<?LF, Rest/binary>>, headers , State) -> goto(headers, body, Rest, State);
+parser( Rest, headers , State) -> goto(headers, hdrname, Rest, State);
+parser(<<?COLON, Rest/binary>>, hdrname , State) -> goto(hdrname, hdrvalue, Rest, State);
+parser(<<?LF, Rest/binary>>, hdrname , State) -> goto(hdrname, headers, Rest, State);
+parser(<<?LF, Rest/binary>>, hdrvalue, State) -> goto(hdrvalue, headers, Rest, State);
+%% accumulate
+parser(<<Ch:8, Rest/binary>>, Term , State) -> parser(Rest, Term, accum(Ch, State)).
+
+%% state transitions
+%% goto/4 performs the bookkeeping attached to moving between
+%% syntactic elements: it resets or harvests the accumulator (which
+%% is built in reverse, hence the lists:reverse calls) and, on
+%% headers->body, hands over to the body parser with the completed
+%% frame skeleton.
+goto(noframe, command, Rest, State ) -> parser(Rest, command, State#state{acc = []});
+goto(command, headers, Rest, State = #state{acc = Acc} ) -> parser(Rest, headers, State#state{cmd = lists:reverse(Acc), hdrs = []});
+goto(headers, body, Rest, #state{cmd = Cmd, hdrs = Hdrs}) -> parse_body(Rest, #stomp_frame{command = Cmd, headers = Hdrs});
+goto(headers, hdrname, Rest, State ) -> parser(Rest, hdrname, State#state{acc = []});
+goto(hdrname, hdrvalue, Rest, State = #state{acc = Acc} ) -> parser(Rest, hdrvalue, State#state{acc = [], hdrname = lists:reverse(Acc)});
+goto(hdrname, headers, _Rest, #state{acc = Acc} ) -> {error, {header_no_value, lists:reverse(Acc)}}; % badly formed header -- fatal error
+goto(hdrvalue, headers, Rest, State = #state{acc = Acc, hdrs = Headers, hdrname = HdrName}) ->
+ parser(Rest, headers, State#state{hdrs = insert_header(Headers, HdrName, lists:reverse(Acc))}).
+
+%% error atom
+%% Map the element being parsed to a descriptive atom for the
+%% {error, {Atom, Chars}} tuples produced by parser/3.
+unexpected_chars(noframe) -> unexpected_chars_between_frames;
+unexpected_chars(command) -> unexpected_chars_in_command;
+unexpected_chars(hdrname) -> unexpected_chars_in_header;
+unexpected_chars(hdrvalue) -> unexpected_chars_in_header;
+unexpected_chars(_Term) -> unexpected_chars.
+
+%% general accumulation
+%% Bytes are consed onto the accumulator head, so each element is
+%% reversed when harvested (see goto/4).
+accum(Ch, State = #state{acc = Acc}) -> State#state{acc = [Ch | Acc]}.
+
+%% resolve escapes (with error processing)
+%% Only the escape sequences defined by STOMP are legal; any other
+%% backslash sequence aborts the frame with a bad_escape error.
+unescape(?LF_ESC, Fun) -> Fun(?LF);
+unescape(?BSL_ESC, Fun) -> Fun(?BSL);
+unescape(?COLON_ESC, Fun) -> Fun(?COLON);
+unescape(?CR_ESC, Fun) -> Fun(?CR);
+unescape(Ch, _Fun) -> {error, {bad_escape, [?BSL, Ch]}}.
+
+%% Add a header only when no header with that name exists yet: per
+%% the STOMP spec, the first occurrence of a repeated header wins.
+insert_header(Headers, Name, Value) ->
+    case lists:keyfind(Name, 1, Headers) of
+        false -> [{Name, Value} | Headers];
+        _     -> Headers  % already present; keep the first value
+    end.
+
+%% Body parsing: if the frame carried a content-length header we read
+%% exactly that many bytes and expect a NUL terminator right after;
+%% otherwise the body extends up to the first NUL byte.
+parse_body(Content, Frame) ->
+ parse_body(Content, Frame, [],
+ integer_header(Frame, ?HEADER_CONTENT_LENGTH, unknown)).
+
+%% No content-length: scan this chunk for the terminating NUL.
+parse_body(Content, Frame, Chunks, unknown) ->
+ parse_body2(Content, Frame, Chunks, case firstnull(Content) of
+ -1 -> {more, unknown};
+ Pos -> {done, Pos}
+ end);
+%% Known content-length: count down the bytes still owed.  When the
+%% whole chunk is owed (Remaining >= Size) keep accumulating and wait
+%% for the NUL that must follow the body.
+parse_body(Content, Frame, Chunks, Remaining) ->
+ Size = byte_size(Content),
+ parse_body2(Content, Frame, Chunks, case Remaining >= Size of
+ true -> {more, Remaining - Size};
+ false -> {done, Remaining}
+ end).
+
+parse_body2(Content, Frame, Chunks, {more, Left}) ->
+ Chunks1 = finalize_chunk(Content, Chunks),
+ more(fun(Rest) -> parse_body(Rest, Frame, Chunks1, Left) end);
+parse_body2(Content, Frame, Chunks, {done, Pos}) ->
+ %% the byte at Pos must be the NUL frame terminator; a wrong
+ %% content-length makes this match fail (badmatch) intentionally
+ <<Chunk:Pos/binary, 0, Rest/binary>> = Content,
+ Body = lists:reverse(finalize_chunk(Chunk, Chunks)),
+ {ok, Frame#stomp_frame{body_iolist = Body}, Rest}.
+
+%% Avoid accumulating empty binaries in the body iolist.
+finalize_chunk(<<>>, Chunks) -> Chunks;
+finalize_chunk(Chunk, Chunks) -> [Chunk | Chunks].
+
+%% Fold a {ok, V} | not_found lookup result into a plain value.
+default_value({ok, Value}, _DefaultValue) -> Value;
+default_value(not_found, DefaultValue) -> DefaultValue.
+
+%% Look up a header by name; returns {ok, String} | not_found.
+header(#stomp_frame{headers = Headers}, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ {value, {_, Str}} -> {ok, Str};
+ _ -> not_found
+ end.
+
+%% As header/2 but with a caller-supplied default.
+header(F, K, D) -> default_value(header(F, K), D).
+
+%% Header whose value must be the literal string "true" or "false";
+%% any other value is treated as absent.
+boolean_header(#stomp_frame{headers = Headers}, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ {value, {_, "true"}} -> {ok, true};
+ {value, {_, "false"}} -> {ok, false};
+ _ -> not_found
+ end.
+
+boolean_header(F, K, D) -> default_value(boolean_header(F, K), D).
+
+%% Integer-valued header; surrounding whitespace is stripped, but a
+%% non-numeric value raises badarg from list_to_integer/1.
+internal_integer_header(Headers, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ {value, {_, Str}} -> {ok, list_to_integer(string:strip(Str))};
+ _ -> not_found
+ end.
+
+integer_header(#stomp_frame{headers = Headers}, Key) ->
+ internal_integer_header(Headers, Key).
+
+integer_header(F, K, D) -> default_value(integer_header(F, K), D).
+
+%% Header value converted from string to binary.
+binary_header(F, K) ->
+ case header(F, K) of
+ {ok, Str} -> {ok, list_to_binary(Str)};
+ not_found -> not_found
+ end.
+
+binary_header(F, K, D) -> default_value(binary_header(F, K), D).
+
+%% Serialise a frame back into an iolist.  Any user-supplied
+%% content-length header is dropped and regenerated from the actual
+%% body size (omitted entirely for an empty body); header names and
+%% values are escaped.  The frame ends with LF + body + NUL.
+serialize(#stomp_frame{command = Command,
+ headers = Headers,
+ body_iolist = BodyFragments}) ->
+ Len = iolist_size(BodyFragments),
+ [Command, ?LF,
+ lists:map(fun serialize_header/1,
+ lists:keydelete(?HEADER_CONTENT_LENGTH, 1, Headers)),
+ if
+ Len > 0 -> [?HEADER_CONTENT_LENGTH ++ ":", integer_to_list(Len), ?LF];
+ true -> []
+ end,
+ ?LF, BodyFragments, 0].
+
+%% Integer values are rendered without escaping (digits contain no
+%% reserved characters); string values are escaped.
+serialize_header({K, V}) when is_integer(V) -> hdr(escape(K), integer_to_list(V));
+serialize_header({K, V}) when is_list(V) -> hdr(escape(K), escape(V)).
+
+hdr(K, V) -> [K, ?COLON, V, ?LF].
+
+%% Escape the reserved characters (colon, backslash, LF, CR) in a
+%% header name or value; other characters pass through unchanged.
+escape(Str) -> [escape1(Ch) || Ch <- Str].
+
+escape1(?COLON) -> [?BSL, ?COLON_ESC];
+escape1(?BSL) -> [?BSL, ?BSL_ESC];
+escape1(?LF) -> [?BSL, ?LF_ESC];
+escape1(?CR) -> [?BSL, ?CR_ESC];
+escape1(Ch) -> Ch.
+
+%% Offset of the first NUL byte in a binary, or -1 when none exists.
+firstnull(Content) -> firstnull(Content, 0).
+
+firstnull(<<>>, _Offset) -> -1;
+firstnull(<<0, _Tail/binary>>, Offset) -> Offset;
+firstnull(<<_Byte, Tail/binary>>, Offset) -> firstnull(Tail, Offset + 1).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_processor).
+-behaviour(gen_server2).
+
+-export([start_link/1, init_arg/2, process_frame/2, flush_and_die/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ code_change/3, terminate/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-record(state, {session_id, channel, connection, subscriptions,
+ version, start_heartbeat_fun, pending_receipts,
+ config, route_state, reply_queues, frame_transformer,
+ adapter_info, send_fun, ssl_login_name, peer_addr}).
+
+-record(subscription, {dest_hdr, ack_mode, multi_ack, description}).
+
+-define(FLUSH_TIMEOUT, 60000).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+start_link(Args) ->
+ gen_server2:start_link(?MODULE, Args, []).
+
+%% Deliver the connection-specific arguments (send fun, adapter info,
+%% heartbeat starter, SSL login name, peer address) asynchronously;
+%% handled by the {init, ...} handle_cast clause.
+init_arg(ProcessorPid, InitArgs) ->
+ gen_server2:cast(ProcessorPid, {init, InitArgs}).
+
+%% SEND frames participate in credit flow control so a fast publisher
+%% gets throttled; all other commands are cast without flow control.
+process_frame(Pid, Frame = #stomp_frame{command = "SEND"}) ->
+ credit_flow:send(Pid),
+ gen_server2:cast(Pid, {"SEND", Frame, self()});
+process_frame(Pid, Frame = #stomp_frame{command = Command}) ->
+ gen_server2:cast(Pid, {Command, Frame, noflow}).
+
+flush_and_die(Pid) ->
+ gen_server2:cast(Pid, flush_and_die).
+
+%%----------------------------------------------------------------------------
+%% Basic gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+%% gen_server2 init: build an empty processor state; the real
+%% connection-level arguments arrive later via init_arg/2, keeping
+%% this callback fast.  Exits are trapped so AMQP connection death is
+%% delivered as an 'EXIT' message to handle_info/2.
+init(Configuration) ->
+ process_flag(trap_exit, true),
+ {ok,
+ #state {
+ session_id = none,
+ channel = none,
+ connection = none,
+ subscriptions = dict:new(),
+ version = none,
+ pending_receipts = undefined,
+ config = Configuration,
+ route_state = rabbit_routing_util:init_state(),
+ reply_queues = dict:new(),
+ frame_transformer = undefined},
+ hibernate,
+ {backoff, 1000, 1000, 10000}
+ }.
+
+%% Always tear down the AMQP connection (if any) on termination.
+terminate(_Reason, State) ->
+ close_connection(State).
+
+%% Second phase of initialisation: stash the connection-specific
+%% callbacks and metadata delivered by init_arg/2.
+handle_cast({init, [SendFun, AdapterInfo, StartHeartbeatFun, SSLLoginName,
+ PeerAddr]},
+ State) ->
+ {noreply, State #state { send_fun = SendFun,
+ adapter_info = AdapterInfo,
+ start_heartbeat_fun = StartHeartbeatFun,
+ ssl_login_name = SSLLoginName,
+ peer_addr = PeerAddr}};
+
+handle_cast(flush_and_die, State) ->
+ {stop, normal, close_connection(State)};
+
+%% "STOMP" (1.2 style) and "CONNECT" both initiate a session.
+handle_cast({"STOMP", Frame, noflow}, State) ->
+ process_connect(no_implicit, Frame, State);
+
+handle_cast({"CONNECT", Frame, noflow}, State) ->
+ process_connect(no_implicit, Frame, State);
+
+%% No channel yet but implicit connect is configured: connect with an
+%% empty CONNECT frame first, then retry the original request.
+handle_cast(Request, State = #state{channel = none,
+ config = #stomp_configuration{
+ implicit_connect = true}}) ->
+ {noreply, State1 = #state{channel = Ch}, _} =
+ process_connect(implicit, #stomp_frame{headers = []}, State),
+ case Ch of
+ none -> {stop, normal, State1};
+ _ -> handle_cast(Request, State1)
+ end;
+
+%% No channel and no implicit connect: client must CONNECT first.
+handle_cast(_Request, State = #state{channel = none,
+ config = #stomp_configuration{
+ implicit_connect = false}}) ->
+ {noreply,
+ send_error("Illegal command",
+ "You must log in using CONNECT first",
+ State),
+ hibernate};
+
+%% Connected: acknowledge credit flow (SEND only), run the frame
+%% through the version-specific transformer, validate and dispatch.
+handle_cast({Command, Frame, FlowPid},
+ State = #state{frame_transformer = FT}) ->
+ case FlowPid of
+ noflow -> ok;
+ _ -> credit_flow:ack(FlowPid)
+ end,
+ Frame1 = FT(Frame),
+ process_request(
+ fun(StateN) ->
+ case validate_frame(Command, Frame1, StateN) of
+ R = {error, _, _, _} -> R;
+ _ -> handle_frame(Command, Frame1, StateN)
+ end
+ end,
+ fun(StateM) -> ensure_receipt(Frame1, StateM) end,
+ State);
+
+%% Raised by the heartbeater when the client goes quiet.
+handle_cast(client_timeout,
+ State = #state{adapter_info = #amqp_adapter_info{name = S}}) ->
+ rabbit_log:warning("STOMP detected missed client heartbeat(s) "
+ "on connection ~s, closing it~n", [S]),
+ {stop, {shutdown, client_heartbeat_timeout}, close_connection(State)}.
+
+%% Messages from the AMQP client library and the broker.
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State, hibernate};
+handle_info(#'basic.cancel_ok'{}, State) ->
+ {noreply, State, hibernate};
+%% Publisher confirm: release any pending SEND receipts it covers.
+handle_info(#'basic.ack'{delivery_tag = Tag, multiple = IsMulti}, State) ->
+ {noreply, flush_pending_receipts(Tag, IsMulti, State), hibernate};
+%% A delivery for one of our consumers: relay it as a MESSAGE frame.
+handle_info({Delivery = #'basic.deliver'{},
+ #amqp_msg{props = Props, payload = Payload}}, State) ->
+ {noreply, send_delivery(Delivery, Props, Payload, State), hibernate};
+%% Broker-initiated consumer cancellation (e.g. the queue was deleted).
+handle_info(#'basic.cancel'{consumer_tag = Ctag}, State) ->
+ process_request(
+ fun(StateN) -> server_cancel_consumer(Ctag, StateN) end, State);
+handle_info({'EXIT', Conn,
+ {shutdown, {server_initiated_close, Code, Explanation}}},
+ State = #state{connection = Conn}) ->
+ amqp_death(Code, Explanation, State);
+handle_info({'EXIT', Conn, Reason}, State = #state{connection = Conn}) ->
+ send_error("AMQP connection died", "Reason: ~p", [Reason], State),
+ {stop, {conn_died, Reason}, State};
+handle_info({inet_reply, _, ok}, State) ->
+ {noreply, State, hibernate};
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ {noreply, State, hibernate};
+%% A failed socket write reported by the port: stop with that status.
+handle_info({inet_reply, _, Status}, State) ->
+ {stop, Status, State}.
+
+process_request(ProcessFun, State) ->
+ process_request(ProcessFun, fun (StateM) -> StateM end, State).
+
+%% Run ProcessFun(State), converting AMQP channel crashes into STOMP
+%% ERROR frames, then interpret its {ok, Frame, _} / {error, ...} /
+%% {stop, ...} result.  SuccessFun (receipt handling) runs only on
+%% non-error outcomes.  NOTE: old-style 'catch' is deliberate here so
+%% that exits surface as {'EXIT', Reason} tuples we can match on.
+process_request(ProcessFun, SuccessFun, State) ->
+ Res = case catch ProcessFun(State) of
+ {'EXIT',
+ {{shutdown,
+ {server_initiated_close, ReplyCode, Explanation}}, _}} ->
+ amqp_death(ReplyCode, Explanation, State);
+ {'EXIT', Reason} ->
+ priv_error("Processing error", "Processing error",
+ Reason, State);
+ Result ->
+ Result
+ end,
+ case Res of
+ {ok, Frame, NewState} ->
+ case Frame of
+ none -> ok;
+ _ -> send_frame(Frame, NewState)
+ end,
+ {noreply, SuccessFun(NewState), hibernate};
+ {error, Message, Detail, NewState} ->
+ {noreply, send_error(Message, Detail, NewState), hibernate};
+ {stop, normal, NewState} ->
+ {stop, normal, SuccessFun(NewState)};
+ {stop, R, NewState} ->
+ {stop, R, NewState}
+ end.
+
+%% Negotiate protocol version, pick credentials (password headers vs
+%% SSL certificate name), then attempt login.  For an implicit
+%% connect a successful login is silent (no CONNECTED frame).
+process_connect(Implicit, Frame,
+ State = #state{channel = none,
+ config = Config,
+ ssl_login_name = SSLLoginName,
+ adapter_info = AdapterInfo}) ->
+ process_request(
+ fun(StateN) ->
+ case negotiate_version(Frame) of
+ {ok, Version} ->
+ FT = frame_transformer(Version),
+ Frame1 = FT(Frame),
+ {Username, Passwd} = creds(Frame1, SSLLoginName, Config),
+ {ok, DefaultVHost} = application:get_env(
+ rabbitmq_stomp, default_vhost),
+ {ProtoName, _} = AdapterInfo#amqp_adapter_info.protocol,
+ Res = do_login(
+ Username, Passwd,
+ login_header(Frame1, ?HEADER_HOST, DefaultVHost),
+ login_header(Frame1, ?HEADER_HEART_BEAT, "0,0"),
+ AdapterInfo#amqp_adapter_info{
+ protocol = {ProtoName, Version}}, Version,
+ StateN#state{frame_transformer = FT}),
+ case {Res, Implicit} of
+ {{ok, _, StateN1}, implicit} -> ok(StateN1);
+ _ -> Res
+ end;
+ {error, no_common_version} ->
+ error("Version mismatch",
+ "Supported versions are ~s~n",
+ [string:join(?SUPPORTED_VERSIONS, ",")],
+ StateN)
+ end
+ end,
+ State).
+
+%% Choose credentials: explicit login/passcode headers win; otherwise
+%% fall back to the SSL certificate name when available, finally to
+%% the configured defaults.
+creds(Frame, SSLLoginName,
+ #stomp_configuration{default_login = DefLogin,
+ default_passcode = DefPasscode}) ->
+ PasswordCreds = {login_header(Frame, ?HEADER_LOGIN, DefLogin),
+ login_header(Frame, ?HEADER_PASSCODE, DefPasscode)},
+ case {rabbit_stomp_frame:header(Frame, ?HEADER_LOGIN), SSLLoginName} of
+ {not_found, none} -> PasswordCreds;
+ {not_found, SSLName} -> {SSLName, none};
+ _ -> PasswordCreds
+ end.
+
+%% Fetch a login-time header as a binary, preserving 'undefined'.
+login_header(Frame, Key, Default) when is_binary(Default) ->
+ login_header(Frame, Key, binary_to_list(Default));
+login_header(Frame, Key, Default) ->
+ case rabbit_stomp_frame:header(Frame, Key, Default) of
+ undefined -> undefined;
+ Hdr -> list_to_binary(Hdr)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Frame Transformation
+%%----------------------------------------------------------------------------
+%% STOMP 1.0 tolerates padded headers, so they get trimmed; later
+%% protocol versions pass frames through untouched.
+frame_transformer("1.0") -> fun rabbit_stomp_util:trim_headers/1;
+frame_transformer(_) -> fun(Frame) -> Frame end.
+
+%%----------------------------------------------------------------------------
+%% Frame Validation
+%%----------------------------------------------------------------------------
+
+%% Durable (persistent) subscriptions need an 'id' header so the
+%% corresponding durable queue can be identified later.
+validate_frame(Command, Frame, State)
+ when Command =:= "SUBSCRIBE" orelse Command =:= "UNSUBSCRIBE" ->
+ Hdr = fun(Name) -> rabbit_stomp_frame:header(Frame, Name) end,
+ case {Hdr(?HEADER_PERSISTENT), Hdr(?HEADER_ID)} of
+ {{ok, "true"}, not_found} ->
+ error("Missing Header",
+ "Header 'id' is required for durable subscriptions", State);
+ _ ->
+ ok(State)
+ end;
+validate_frame(_Command, _Frame, State) ->
+ ok(State).
+
+%%----------------------------------------------------------------------------
+%% Frame handlers
+%%----------------------------------------------------------------------------
+
+%% Dispatch on the STOMP command name; an unknown command produces an
+%% ERROR frame rather than crashing the processor.
+handle_frame("DISCONNECT", _Frame, State) ->
+ {stop, normal, close_connection(State)};
+
+handle_frame("SUBSCRIBE", Frame, State) ->
+ with_destination("SUBSCRIBE", Frame, State, fun do_subscribe/4);
+
+handle_frame("UNSUBSCRIBE", Frame, State) ->
+ ConsumerTag = rabbit_stomp_util:consumer_tag(Frame),
+ cancel_subscription(ConsumerTag, Frame, State);
+
+handle_frame("SEND", Frame, State) ->
+ without_headers(?HEADERS_NOT_ON_SEND, "SEND", Frame, State,
+ fun (_Command, Frame1, State1) ->
+ with_destination("SEND", Frame1, State1, fun do_send/4)
+ end);
+
+handle_frame("ACK", Frame, State) ->
+ ack_action("ACK", Frame, State, fun create_ack_method/2);
+
+handle_frame("NACK", Frame, State) ->
+ ack_action("NACK", Frame, State, fun create_nack_method/2);
+
+handle_frame("BEGIN", Frame, State) ->
+ transactional_action(Frame, "BEGIN", fun begin_transaction/2, State);
+
+handle_frame("COMMIT", Frame, State) ->
+ transactional_action(Frame, "COMMIT", fun commit_transaction/2, State);
+
+handle_frame("ABORT", Frame, State) ->
+ transactional_action(Frame, "ABORT", fun abort_transaction/2, State);
+
+handle_frame(Command, _Frame, State) ->
+ error("Bad command",
+ "Could not interpret command ~p~n",
+ [Command],
+ State).
+
+%%----------------------------------------------------------------------------
+%% Internal helpers for processing frames callbacks
+%%----------------------------------------------------------------------------
+
+%% Shared implementation of ACK and NACK: parse the message id back
+%% into consumer tag + delivery tag, look up the owning subscription,
+%% and either perform the ack/nack immediately or record it inside
+%% the enclosing STOMP transaction.
+ack_action(Command, Frame,
+ State = #state{subscriptions = Subs,
+ channel = Channel,
+ version = Version}, MethodFun) ->
+ AckHeader = rabbit_stomp_util:ack_header_name(Version),
+ case rabbit_stomp_frame:header(Frame, AckHeader) of
+ {ok, AckValue} ->
+ case rabbit_stomp_util:parse_message_id(AckValue) of
+ {ok, {ConsumerTag, _SessionId, DeliveryTag}} ->
+ case dict:find(ConsumerTag, Subs) of
+ {ok, Sub} ->
+ Method = MethodFun(DeliveryTag, Sub),
+ case transactional(Frame) of
+ {yes, Transaction} ->
+ extend_transaction(
+ Transaction, {Method}, State);
+ no ->
+ amqp_channel:call(Channel, Method),
+ ok(State)
+ end;
+ error ->
+ error("Subscription not found",
+ "Message with id ~p has no subscription",
+ [AckValue],
+ State)
+ end;
+ _ ->
+ error("Invalid header",
+ "~p must include a valid ~p header~n",
+ [Command, AckHeader],
+ State)
+ end;
+ not_found ->
+ error("Missing header",
+ "~p must include the ~p header~n",
+ [Command, AckHeader],
+ State)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Internal helpers for processing frames callbacks
+%%----------------------------------------------------------------------------
+%% The broker cancelled one of our consumers: tell the client via an
+%% error frame and drop the local subscription bookkeeping.
+server_cancel_consumer(ConsumerTag, State = #state{subscriptions = Subs}) ->
+ case dict:find(ConsumerTag, Subs) of
+ error ->
+ error("Server cancelled unknown subscription",
+ "Consumer tag ~p is not associated with a subscription.~n",
+ [ConsumerTag],
+ State);
+ {ok, Subscription = #subscription{description = Description}} ->
+ Id = case rabbit_stomp_util:tag_to_id(ConsumerTag) of
+ {ok, {_, Id1}} -> Id1;
+ {error, {_, Id1}} -> "Unknown[" ++ Id1 ++ "]"
+ end,
+ send_error_frame("Server cancelled subscription",
+ [{?HEADER_SUBSCRIPTION, Id}],
+ "The server has canceled a subscription.~n"
+ "No more messages will be delivered for ~p.~n",
+ [Description],
+ State),
+ tidy_canceled_subscription(ConsumerTag, Subscription,
+ #stomp_frame{}, State)
+ end.
+
+%% Client-driven UNSUBSCRIBE.  The first two clauses report failures
+%% from consumer-tag derivation; the third performs the basic.cancel
+%% and tidies local state.
+cancel_subscription({error, invalid_prefix}, _Frame, State) ->
+ error("Invalid id",
+ "UNSUBSCRIBE 'id' may not start with ~s~n",
+ [?TEMP_QUEUE_ID_PREFIX],
+ State);
+
+cancel_subscription({error, _}, _Frame, State) ->
+ error("Missing destination or id",
+ "UNSUBSCRIBE must include a 'destination' or 'id' header",
+ State);
+
+cancel_subscription({ok, ConsumerTag, Description}, Frame,
+ State = #state{subscriptions = Subs,
+ channel = Channel}) ->
+ case dict:find(ConsumerTag, Subs) of
+ error ->
+ error("No subscription found",
+ "UNSUBSCRIBE must refer to an existing subscription.~n"
+ "Subscription to ~p not found.~n",
+ [Description],
+ State);
+ {ok, Subscription = #subscription{description = Descr}} ->
+ case amqp_channel:call(Channel,
+ #'basic.cancel'{
+ consumer_tag = ConsumerTag}) of
+ #'basic.cancel_ok'{consumer_tag = ConsumerTag} ->
+ tidy_canceled_subscription(ConsumerTag, Subscription,
+ Frame, State);
+ _ ->
+ error("Failed to cancel subscription",
+ "UNSUBSCRIBE to ~p failed.~n",
+ [Descr],
+ State)
+ end
+ end.
+
+%% Remove the subscription from local state and, for durable topic
+%% subscriptions being explicitly removed, delete the backing queue.
+tidy_canceled_subscription(ConsumerTag, #subscription{dest_hdr = DestHdr},
+ Frame, State = #state{subscriptions = Subs}) ->
+ Subs1 = dict:erase(ConsumerTag, Subs),
+ {ok, Dest} = rabbit_routing_util:parse_endpoint(DestHdr),
+ maybe_delete_durable_sub(Dest, Frame, State#state{subscriptions = Subs1}).
+
+%% Only topic destinations carry durable subscriptions, and the queue
+%% is deleted only when the frame says persistent=true.
+maybe_delete_durable_sub({topic, Name}, Frame,
+ State = #state{channel = Channel}) ->
+ case rabbit_stomp_frame:boolean_header(Frame,
+ ?HEADER_PERSISTENT, false) of
+ true ->
+ {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
+ QName = rabbit_stomp_util:durable_subscription_queue(Name, Id),
+ amqp_channel:call(Channel,
+ #'queue.delete'{queue = list_to_binary(QName),
+ nowait = false}),
+ ok(State);
+ false ->
+ ok(State)
+ end;
+maybe_delete_durable_sub(_Destination, _Frame, State) ->
+ ok(State).
+
+%% Parse the 'destination' header and hand the parsed endpoint to
+%% Fun(Destination, DestHdr, Frame, State), translating the various
+%% parse/endpoint failures into ERROR frames.
+with_destination(Command, Frame, State, Fun) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_DESTINATION) of
+ {ok, DestHdr} ->
+ case rabbit_routing_util:parse_endpoint(DestHdr) of
+ {ok, Destination} ->
+ case Fun(Destination, DestHdr, Frame, State) of
+ {error, invalid_endpoint} ->
+ error("Invalid destination",
+ "'~s' is not a valid destination for '~s'~n",
+ [DestHdr, Command],
+ State);
+ {error, {invalid_destination, Msg}} ->
+ error("Invalid destination",
+ "~s",
+ [Msg],
+ State);
+ {error, Reason} ->
+ throw(Reason);
+ Result ->
+ Result
+ end;
+ {error, {invalid_destination, Type, Content}} ->
+ error("Invalid destination",
+ "'~s' is not a valid ~p destination~n",
+ [Content, Type],
+ State);
+ {error, {unknown_destination, Content}} ->
+ error("Unknown destination",
+ "'~s' is not a valid destination.~n"
+ "Valid destination types are: ~s.~n",
+ [Content,
+ string:join(rabbit_routing_util:all_dest_prefixes(),
+ ", ")], State)
+ end;
+ not_found ->
+ error("Missing destination",
+ "~p must include a 'destination' header~n",
+ [Command],
+ State)
+ end.
+
+%% Reject the frame if it carries any of the forbidden headers;
+%% otherwise invoke Fun(Command, Frame, State).
+without_headers([Hdr | Hdrs], Command, Frame, State, Fun) ->
+ case rabbit_stomp_frame:header(Frame, Hdr) of
+ {ok, _} ->
+ error("Invalid header",
+ "'~s' is not allowed on '~s'.~n",
+ [Hdr, Command],
+ State);
+ not_found ->
+ without_headers(Hdrs, Command, Frame, State, Fun)
+ end;
+without_headers([], Command, Frame, State, Fun) ->
+ Fun(Command, Frame, State).
+
+%% Open the AMQP connection and channel for this STOMP session.  An
+%% undefined username means credentials could not be determined.
+do_login(undefined, _, _, _, _, _, State) ->
+ error("Bad CONNECT", "Missing login or passcode header(s)", State);
+do_login(Username, Passwd, VirtualHost, Heartbeat, AdapterInfo, Version,
+ State = #state{peer_addr = Addr}) ->
+ case start_connection(
+ #amqp_params_direct{username = Username,
+ password = Passwd,
+ virtual_host = VirtualHost,
+ adapter_info = AdapterInfo}, Username, Addr) of
+ {ok, Connection} ->
+ link(Connection),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ SessionId = rabbit_guid:string(rabbit_guid:gen_secure(), "session"),
+ {{SendTimeout, ReceiveTimeout}, State1} =
+ ensure_heartbeats(Heartbeat, State),
+ ok("CONNECTED",
+ [{?HEADER_SESSION, SessionId},
+ {?HEADER_HEART_BEAT,
+ io_lib:format("~B,~B", [SendTimeout, ReceiveTimeout])},
+ {?HEADER_SERVER, server_header()},
+ {?HEADER_VERSION, Version}],
+ "",
+ State1#state{session_id = SessionId,
+ channel = Channel,
+ connection = Connection,
+ version = Version});
+ {error, {auth_failure, _}} ->
+ rabbit_log:warning("STOMP login failed for user ~p~n",
+ [binary_to_list(Username)]),
+ error("Bad CONNECT", "Access refused for user '" ++
+ binary_to_list(Username) ++ "'~n", [], State);
+ {error, access_refused} ->
+ rabbit_log:warning("STOMP login failed - access_refused "
+ "(vhost access not allowed)~n"),
+ error("Bad CONNECT", "Virtual host '" ++
+ binary_to_list(VirtualHost) ++
+ "' access denied", State);
+ {error, not_loopback} ->
+ rabbit_log:warning("STOMP login failed - access_refused "
+ "(user must access over loopback)~n"),
+ error("Bad CONNECT", "non-loopback access denied", State)
+ end.
+
+%% Start the direct AMQP connection, additionally enforcing any
+%% loopback-only restriction configured for the user.
+start_connection(Params, Username, Addr) ->
+ case amqp_connection:start(Params) of
+ {ok, Conn} -> case rabbit_access_control:check_user_loopback(
+ Username, Addr) of
+ ok -> {ok, Conn};
+ not_allowed -> amqp_connection:close(Conn),
+ {error, not_loopback}
+ end;
+ {error, E} -> {error, E}
+ end.
+
+%% Value for the CONNECTED frame's 'server' header, e.g. "RabbitMQ/3.x".
+server_header() ->
+ {ok, Product} = application:get_key(rabbit, id),
+ {ok, Version} = application:get_key(rabbit, vsn),
+ rabbit_misc:format("~s/~s", [Product, Version]).
+
+%% SUBSCRIBE: ensure the source endpoint (queue) exists, apply any
+%% prefetch-count as basic.qos, start the consumer and bind the
+%% queue.  If consume/bind fails, queues we declared ourselves
+%% (exchange/topic destinations) are cleaned up before re-raising.
+do_subscribe(Destination, DestHdr, Frame,
+ State = #state{subscriptions = Subs,
+ route_state = RouteState,
+ channel = Channel}) ->
+ Prefetch =
+ rabbit_stomp_frame:integer_header(Frame, ?HEADER_PREFETCH_COUNT,
+ undefined),
+ {AckMode, IsMulti} = rabbit_stomp_util:ack_mode(Frame),
+ case ensure_endpoint(source, Destination, Frame, Channel, RouteState) of
+ {ok, Queue, RouteState1} ->
+ {ok, ConsumerTag, Description} =
+ rabbit_stomp_util:consumer_tag(Frame),
+ case Prefetch of
+ undefined -> ok;
+ _ -> amqp_channel:call(
+ Channel, #'basic.qos'{prefetch_count = Prefetch})
+ end,
+ ExchangeAndKey = rabbit_routing_util:parse_routing(Destination),
+ try
+ amqp_channel:subscribe(Channel,
+ #'basic.consume'{
+ queue = Queue,
+ consumer_tag = ConsumerTag,
+ no_local = false,
+ no_ack = (AckMode == auto),
+ exclusive = false,
+ arguments = []},
+ self()),
+ ok = rabbit_routing_util:ensure_binding(
+ Queue, ExchangeAndKey, Channel)
+ catch exit:Err ->
+ %% it's safe to delete this queue, it was server-named
+ %% and declared by us
+ case Destination of
+ {exchange, _} ->
+ ok = maybe_clean_up_queue(Queue, State);
+ {topic, _} ->
+ ok = maybe_clean_up_queue(Queue, State);
+ _ ->
+ ok
+ end,
+ exit(Err)
+ end,
+ ok(State#state{subscriptions =
+ dict:store(
+ ConsumerTag,
+ #subscription{dest_hdr = DestHdr,
+ ack_mode = AckMode,
+ multi_ack = IsMulti,
+ description = Description},
+ Subs),
+ route_state = RouteState1});
+ {error, _} = Err ->
+ Err
+ end.
+
+%% Best-effort deletion on a throwaway channel: the main channel may
+%% already be unusable after the error that brought us here.
+maybe_clean_up_queue(Queue, #state{connection = Connection}) ->
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ catch amqp_channel:call(Channel, #'queue.delete'{queue = Queue}),
+ catch amqp_channel:close(Channel),
+ ok.
+
+%% SEND: ensure the destination endpoint exists, rewrite any reply-to
+%% temp-queue header, then publish - immediately, or recorded into
+%% the enclosing STOMP transaction when one is named.
+do_send(Destination, _DestHdr,
+ Frame = #stomp_frame{body_iolist = BodyFragments},
+ State = #state{channel = Channel, route_state = RouteState}) ->
+ case ensure_endpoint(dest, Destination, Frame, Channel, RouteState) of
+
+ {ok, _Q, RouteState1} ->
+
+ {Frame1, State1} =
+ ensure_reply_to(Frame, State#state{route_state = RouteState1}),
+
+ Props = rabbit_stomp_util:message_properties(Frame1),
+
+ {Exchange, RoutingKey} =
+ rabbit_routing_util:parse_routing(Destination),
+
+ Method = #'basic.publish'{
+ exchange = list_to_binary(Exchange),
+ routing_key = list_to_binary(RoutingKey),
+ mandatory = false,
+ immediate = false},
+
+ case transactional(Frame1) of
+ {yes, Transaction} ->
+ extend_transaction(
+ Transaction,
+ fun(StateN) ->
+ maybe_record_receipt(Frame1, StateN)
+ end,
+ {Method, Props, BodyFragments},
+ State1);
+ no ->
+ ok(send_method(Method, Props, BodyFragments,
+ maybe_record_receipt(Frame1, State1)))
+ end;
+
+ {error, _} = Err ->
+
+ Err
+ end.
+
+create_ack_method(DeliveryTag, #subscription{multi_ack = IsMulti}) ->
+ #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti}.
+
+create_nack_method(DeliveryTag, #subscription{multi_ack = IsMulti}) ->
+ #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti}.
+
+%% Intersect the client's accept-version list (defaulting to "1.0")
+%% with the versions this plugin supports.
+negotiate_version(Frame) ->
+ ClientVers = re:split(rabbit_stomp_frame:header(
+ Frame, ?HEADER_ACCEPT_VERSION, "1.0"),
+ ",", [{return, list}]),
+ rabbit_stomp_util:negotiate_version(ClientVers, ?SUPPORTED_VERSIONS).
+
+
+%% Relay an AMQP delivery to the client as a MESSAGE frame, using the
+%% subscription's ack mode to shape the headers.
+send_delivery(Delivery = #'basic.deliver'{consumer_tag = ConsumerTag},
+ Properties, Body,
+ State = #state{session_id = SessionId,
+ subscriptions = Subs,
+ version = Version}) ->
+ case dict:find(ConsumerTag, Subs) of
+ {ok, #subscription{ack_mode = AckMode}} ->
+ send_frame(
+ "MESSAGE",
+ rabbit_stomp_util:headers(SessionId, Delivery, Properties,
+ AckMode, Version),
+ Body,
+ State);
+ error ->
+ send_error("Subscription not found",
+ "There is no current subscription with tag '~s'.",
+ [ConsumerTag],
+ State)
+ end.
+
+%% Synchronous AMQP method call on an explicit channel.
+send_method(Method, Channel, State) ->
+ amqp_channel:call(Channel, Method),
+ State.
+
+send_method(Method, State = #state{channel = Channel}) ->
+ send_method(Method, Channel, State).
+
+send_method(Method, Properties, BodyFragments,
+ State = #state{channel = Channel}) ->
+ send_method(Method, Channel, Properties, BodyFragments, State).
+
+%% Publishes use cast_flow so the channel can exert back-pressure.
+send_method(Method = #'basic.publish'{}, Channel, Properties, BodyFragments,
+ State) ->
+ amqp_channel:cast_flow(
+ Channel, Method,
+ #amqp_msg{props = Properties,
+ payload = list_to_binary(BodyFragments)}),
+ State.
+
+close_connection(State = #state{connection = none}) ->
+ State;
+%% Closing the connection will close the channel and subchannels
+close_connection(State = #state{connection = Connection}) ->
+ %% ignore noproc or other exceptions to avoid debris
+ catch amqp_connection:close(Connection),
+ State#state{channel = none, connection = none, subscriptions = none}.
+
+%%----------------------------------------------------------------------------
+%% Reply-To
+%%----------------------------------------------------------------------------
+%% If the SEND frame's reply-to names a temp-queue destination,
+%% declare (or reuse) the session's reply queue and substitute its
+%% real name into the header before publishing.
+ensure_reply_to(Frame = #stomp_frame{headers = Headers}, State) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_REPLY_TO) of
+ not_found ->
+ {Frame, State};
+ {ok, ReplyTo} ->
+ {ok, Destination} = rabbit_routing_util:parse_endpoint(ReplyTo),
+ case rabbit_routing_util:dest_temp_queue(Destination) of
+ none ->
+ {Frame, State};
+ TempQueueId ->
+ {ReplyQueue, State1} =
+ ensure_reply_queue(TempQueueId, State),
+ {Frame#stomp_frame{
+ headers = lists:keyreplace(
+ ?HEADER_REPLY_TO, 1, Headers,
+ {?HEADER_REPLY_TO, ReplyQueue})},
+ State1}
+ end
+ end.
+
+%% One exclusive, auto-delete queue per temp-queue id, created lazily
+%% and consumed in no-ack mode; a synthetic subscription entry lets
+%% its deliveries flow back through send_delivery/4.
+ensure_reply_queue(TempQueueId, State = #state{channel = Channel,
+ reply_queues = RQS,
+ subscriptions = Subs}) ->
+ case dict:find(TempQueueId, RQS) of
+ {ok, RQ} ->
+ {binary_to_list(RQ), State};
+ error ->
+ #'queue.declare_ok'{queue = Queue} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{auto_delete = true,
+ exclusive = true}),
+
+ ConsumerTag = rabbit_stomp_util:consumer_tag_reply_to(TempQueueId),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Channel,
+ #'basic.consume'{
+ queue = Queue,
+ consumer_tag = ConsumerTag,
+ no_ack = true,
+ nowait = false},
+ self()),
+
+ Destination = binary_to_list(Queue),
+
+ %% synthesise a subscription to the reply queue destination
+ Subs1 = dict:store(ConsumerTag,
+ #subscription{dest_hdr = Destination,
+ multi_ack = false},
+ Subs),
+
+ {Destination, State#state{
+ reply_queues = dict:store(TempQueueId, Queue, RQS),
+ subscriptions = Subs1}}
+ end.
+
+%%----------------------------------------------------------------------------
+%% Receipt Handling
+%%----------------------------------------------------------------------------
+
+%% If the frame asked for a receipt, emit it now (except for SEND,
+%% whose receipt waits for the publisher confirm).
+ensure_receipt(Frame = #stomp_frame{command = Command}, State) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_RECEIPT) of
+ {ok, Id} -> do_receipt(Command, Id, State);
+ not_found -> State
+ end.
+
+%% Emit a RECEIPT frame carrying the requested receipt id.  SEND
+%% receipts are deferred until the broker confirms the publish (see
+%% flush_pending_receipts/3), so they are a no-op here.
+do_receipt("SEND", _ReceiptId, State) ->
+ State;
+do_receipt(_Command, ReceiptId, State) ->
+ send_frame("RECEIPT", [{"receipt-id", ReceiptId}], "", State).
+
+%% On the first receipt-carrying SEND, switch the channel into
+%% confirm mode; then remember publish-seqno -> receipt-id so the
+%% receipt can be sent when the broker's basic.ack arrives.
+maybe_record_receipt(Frame, State = #state{channel = Channel,
+ pending_receipts = PR}) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_RECEIPT) of
+ {ok, Id} ->
+ PR1 = case PR of
+ undefined ->
+ amqp_channel:register_confirm_handler(
+ Channel, self()),
+ #'confirm.select_ok'{} =
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ gb_trees:empty();
+ _ ->
+ PR
+ end,
+ SeqNo = amqp_channel:next_publish_seqno(Channel),
+ State#state{pending_receipts = gb_trees:insert(SeqNo, Id, PR1)};
+ not_found ->
+ State
+ end.
+
+%% A publisher confirm arrived: send RECEIPT frames for every pending
+%% entry it covers (every tag =< DeliveryTag when IsMulti is true).
+flush_pending_receipts(DeliveryTag, IsMulti,
+ State = #state{pending_receipts = PR}) ->
+ {Receipts, PR1} = accumulate_receipts(DeliveryTag, IsMulti, PR),
+ State1 = lists:foldl(fun(ReceiptId, StateN) ->
+ do_receipt(none, ReceiptId, StateN)
+ end, State, Receipts),
+ State1#state{pending_receipts = PR1}.
+
+%% Collect the receipt ids confirmed by DeliveryTag.  Single confirm:
+%% exactly that tag.  Multiple confirm: walk the tree smallest-first,
+%% taking every entry whose key is =< DeliveryTag.
+accumulate_receipts(DeliveryTag, false, PR) ->
+ case gb_trees:lookup(DeliveryTag, PR) of
+ {value, ReceiptId} -> {[ReceiptId], gb_trees:delete(DeliveryTag, PR)};
+ none -> {[], PR}
+ end;
+
+accumulate_receipts(DeliveryTag, true, PR) ->
+ case gb_trees:is_empty(PR) of
+ true -> {[], PR};
+ false -> accumulate_receipts1(DeliveryTag,
+ gb_trees:take_smallest(PR), [])
+ end.
+
+%% Key > DeliveryTag: put the taken entry back and stop collecting.
+accumulate_receipts1(DeliveryTag, {Key, Value, PR}, Acc)
+ when Key > DeliveryTag ->
+ {lists:reverse(Acc), gb_trees:insert(Key, Value, PR)};
+accumulate_receipts1(DeliveryTag, {_Key, Value, PR}, Acc) ->
+ Acc1 = [Value | Acc],
+ case gb_trees:is_empty(PR) of
+ true -> {lists:reverse(Acc1), PR};
+ false -> accumulate_receipts1(DeliveryTag,
+ gb_trees:take_smallest(PR), Acc1)
+ end.
+
+
+%%----------------------------------------------------------------------------
+%% Transaction Support
+%%----------------------------------------------------------------------------
+
%% A frame takes part in a transaction iff it carries a 'transaction'
%% header; returns {yes, TxId} or no.
transactional(Frame) ->
    case rabbit_stomp_frame:header(Frame, ?HEADER_TRANSACTION) of
        {ok, TxId} -> {yes, TxId};
        not_found  -> no
    end.

%% Run Fun(TxId, State) for a frame that must carry a 'transaction'
%% header; report a client error otherwise.
transactional_action(Frame, Name, Fun, State) ->
    case transactional(Frame) of
        {yes, TxId} ->
            Fun(TxId, State);
        no ->
            error("Missing transaction",
                  "~p must include a 'transaction' header~n",
                  [Name],
                  State)
    end.

%% Look up the transaction's recorded actions in the process
%% dictionary and hand them to Fun; error if the id is unknown.
with_transaction(TxId, State, Fun) ->
    case get({transaction, TxId}) of
        undefined ->
            error("Bad transaction",
                  "Invalid transaction identifier: ~p~n",
                  [TxId],
                  State);
        Actions ->
            Fun(Actions, State)
    end.
+
%% Record a fresh, empty transaction in the process dictionary.
begin_transaction(Transaction, State) ->
    put({transaction, Transaction}, []),
    ok(State).

%% Queue a callback-wrapped action on an open transaction.
extend_transaction(Transaction, Callback, Action, State) ->
    extend_transaction(Transaction, {callback, Callback, Action}, State).

%% Prepend Action to the transaction's pending list; actions are
%% stored newest-first and replayed with foldr on commit.
extend_transaction(Transaction, Action, State0) ->
    with_transaction(
      Transaction, State0,
      fun (Pending, State) ->
              put({transaction, Transaction}, [Action | Pending]),
              ok(State)
      end).
+
%% Replay all recorded actions in submission order (foldr over the
%% newest-first list), then drop the transaction.
commit_transaction(Transaction, State0) ->
    with_transaction(
      Transaction, State0,
      fun (Pending, State) ->
              Committed = lists:foldr(fun perform_transaction_action/2,
                                      State,
                                      Pending),
              erase({transaction, Transaction}),
              ok(Committed)
      end).

%% Discard all recorded actions without performing any of them.
abort_transaction(Transaction, State0) ->
    with_transaction(
      Transaction, State0,
      fun (_Pending, State) ->
              erase({transaction, Transaction}),
              ok(State)
      end).
+
%% Replay one recorded transaction action against State.
%% {callback, Callback, Action}: run the callback first, then recurse
%% on the wrapped action. {Method} is a bare AMQP method;
%% {Method, Props, BodyFragments} carries content.
perform_transaction_action({callback, Callback, Action}, State) ->
    perform_transaction_action(Action, Callback(State));
perform_transaction_action({Method}, State) ->
    send_method(Method, State);
perform_transaction_action({Method, Props, BodyFragments}, State) ->
    send_method(Method, Props, BodyFragments, State).
+
+%%--------------------------------------------------------------------
+%% Heartbeat Management
+%%--------------------------------------------------------------------
+
%% Negotiate STOMP 1.1+ heart-beating. Heartbeats is the raw value of
%% the 'heart-beat' header, "cx,cy": cx is the fastest the client can
%% send, cy the fastest it wants to receive (both in ms). Our send
%% timeout therefore derives from CY, our receive timeout from CX.
%% Returns the negotiated {SendMs, ReceiveMs} pair (to be echoed back
%% to the client) together with the unchanged State.
ensure_heartbeats(Heartbeats,
                  State = #state{start_heartbeat_fun = SHF,
                                 send_fun            = RawSendFun}) ->
    %% Crashes (surfacing as a frame error) unless the header is "N,M".
    [CX, CY] = [list_to_integer(X) ||
                   X <- re:split(Heartbeats, ",", [{return, list}])],

    %% A lone newline is the standard STOMP heartbeat frame.
    SendFun = fun() -> RawSendFun(sync, <<$\n>>) end,
    Pid = self(),
    ReceiveFun = fun() -> gen_server2:cast(Pid, client_timeout) end,

    {SendTimeout, ReceiveTimeout} =
        {millis_to_seconds(CY), millis_to_seconds(CX)},

    SHF(SendTimeout, SendFun, ReceiveTimeout, ReceiveFun),

    {{SendTimeout * 1000, ReceiveTimeout * 1000}, State}.
+
%% Convert a millisecond heartbeat interval to whole seconds.
%% Non-positive means "disabled" and maps to 0; a positive value below
%% one second rounds up to 1 (so a small interval does not turn
%% heartbeats off); anything else truncates.
millis_to_seconds(Millis) ->
    if Millis =< 0   -> 0;
       Millis < 1000 -> 1;
       true          -> Millis div 1000
    end.
+
+%%----------------------------------------------------------------------------
+%% Queue Setup
+%%----------------------------------------------------------------------------
+
%% Declare/verify the AMQP resources behind a STOMP destination.
%% A blank /queue destination is rejected outright.
ensure_endpoint(_Direction, {queue, []}, _Frame, _Channel, _State) ->
    {error, {invalid_destination, "Destination cannot be blank"}};

%% Subscription (source) side: a 'persistent: true' header requests a
%% durable queue whose stable name is derived from the subscription id
%% and destination, giving durable-subscription semantics across
%% reconnects.
ensure_endpoint(source, EndPoint, Frame, Channel, State) ->
    Params =
        case rabbit_stomp_frame:boolean_header(
               Frame, ?HEADER_PERSISTENT, false) of
            true ->
                [{subscription_queue_name_gen,
                  fun () ->
                          %% 'id' is mandatory for durable subscriptions;
                          %% crash (frame error) if missing.
                          {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
                          {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
                          list_to_binary(
                            rabbit_stomp_util:durable_subscription_queue(Name,
                                                                         Id))
                  end},
                 {durable, true}];
            false ->
                [{durable, false}]
        end,
    rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint, Params, State);

%% Publish (dest) side needs no extra parameters.
ensure_endpoint(Direction, Endpoint, _Frame, Channel, State) ->
    rabbit_routing_util:ensure_endpoint(Direction, Channel, Endpoint, State).
+
+%%----------------------------------------------------------------------------
+%% Success/error handling
+%%----------------------------------------------------------------------------
+
%% Success result with nothing to send back to the client.
ok(State) ->
    {ok, none, State}.

%% Success result carrying a frame to send back to the client.
ok(Command, Headers, BodyFragments, State) ->
    {ok, #stomp_frame{command     = Command,
                      headers     = Headers,
                      body_iolist = BodyFragments}, State}.

%% Translate an AMQP channel/connection death into a STOMP ERROR frame
%% and stop the processor, closing the connection.
amqp_death(ReplyCode, Explanation, State) ->
    ErrorName = amqp_connection:error_atom(ReplyCode),
    ErrorDesc = rabbit_misc:format("~s~n", [Explanation]),
    log_error(ErrorName, ErrorDesc, none),
    {stop, normal, close_connection(send_error(atom_to_list(ErrorName), ErrorDesc, State))}.

%% Client-visible error with a preformatted detail string.
error(Message, Detail, State) ->
    priv_error(Message, Detail, none, State).

%% Client-visible error with a format string and arguments.
error(Message, Format, Args, State) ->
    priv_error(Message, Format, Args, none, State).

%% As error/3,4 but also logs ServerPrivateDetail, which is kept out
%% of the frame sent to the client.
priv_error(Message, Detail, ServerPrivateDetail, State) ->
    log_error(Message, Detail, ServerPrivateDetail),
    {error, Message, Detail, State}.

priv_error(Message, Format, Args, ServerPrivateDetail, State) ->
    priv_error(Message, rabbit_misc:format(Format, Args), ServerPrivateDetail,
               State).

log_error(Message, Detail, ServerPrivateDetail) ->
    rabbit_log:error("STOMP error frame sent:~n"
                     "Message: ~p~n"
                     "Detail: ~p~n"
                     "Server private detail: ~p~n",
                     [Message, Detail, ServerPrivateDetail]).
+
+%%----------------------------------------------------------------------------
+%% Frame sending utilities
+%%----------------------------------------------------------------------------
%% Build a frame from its parts and hand it to the connection writer.
send_frame(Command, Headers, BodyFragments, State) ->
    Frame = #stomp_frame{command     = Command,
                         headers     = Headers,
                         body_iolist = BodyFragments},
    send_frame(Frame, State).

%% Serialize and transmit a frame asynchronously; State is unchanged.
send_frame(Frame, State = #state{send_fun = SendFun}) ->
    SendFun(async, rabbit_stomp_frame:serialize(Frame)),
    State.
+
%% Send a STOMP ERROR frame; Format/Args are expanded into the body.
send_error_frame(Message, ExtraHeaders, Format, Args, State) ->
    send_error_frame(Message, ExtraHeaders, rabbit_misc:format(Format, Args),
                     State).

%% Send a STOMP ERROR frame with Detail as its plain-text body. The
%% 'version' header advertises the server's supported versions, which
%% matters for errors sent before version negotiation completed.
send_error_frame(Message, ExtraHeaders, Detail, State) ->
    send_frame("ERROR", [{"message", Message},
                         {"content-type", "text/plain"},
                         {"version", string:join(?SUPPORTED_VERSIONS, ",")}] ++
                            ExtraHeaders,
               Detail, State).

send_error(Message, Detail, State) ->
    send_error_frame(Message, [], Detail, State).

send_error(Message, Format, Args, State) ->
    send_error(Message, rabbit_misc:format(Format, Args), State).
+
+%%----------------------------------------------------------------------------
+%% Skeleton gen_server2 callbacks
+%%----------------------------------------------------------------------------
%% This processor is driven by casts and info messages only; a
%% synchronous call is unexpected, so we deliberately do not reply
%% (the caller will time out).
handle_call(_Msg, _From, State) ->
    {noreply, State}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_reader).
+
+-export([start_link/3]).
+-export([init/3]).
+-export([conserve_resources/3]).
+
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-record(reader_state, {socket, parse_state, processor, state,
+ conserve_resources, recv_outstanding}).
+
+%%----------------------------------------------------------------------------
+
%% Spawn the reader process. It waits for a {go, Sock, SockTransform}
%% message (sent by rabbit_stomp_sup:start_client/3 once socket
%% ownership has been transferred) before touching the socket.
start_link(SupHelperPid, ProcessorPid, Configuration) ->
    {ok, proc_lib:spawn_link(?MODULE, init,
                             [SupHelperPid, ProcessorPid, Configuration])}.

%% Connection-scoped logging helper.
log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).

init(SupHelperPid, ProcessorPid, Configuration) ->
    Reply = go(SupHelperPid, ProcessorPid, Configuration),
    %% However we finished, give the processor a chance to flush
    %% outstanding frames before this process exits.
    rabbit_stomp_processor:flush_and_die(ProcessorPid),
    Reply.
+
%% Wait for the {go, Sock, SockTransform} handshake from the
%% supervisor, transform the socket (e.g. TLS upgrade), initialise the
%% processor and enter the read loop. All failure paths close the
%% socket promptly and exit 'normal' so the supervisor does not
%% restart a connection the peer has already given up on.
go(SupHelperPid, ProcessorPid, Configuration) ->
    %% Trapping exits lets mainloop observe a supervisor 'shutdown'.
    process_flag(trap_exit, true),
    receive
        {go, Sock0, SockTransform} ->
            case rabbit_net:connection_string(Sock0, inbound) of
                {ok, ConnStr} ->
                    case SockTransform(Sock0) of
                        {ok, Sock} ->

                            ProcInitArgs = processor_args(SupHelperPid,
                                                          Configuration,
                                                          Sock),
                            rabbit_stomp_processor:init_arg(ProcessorPid,
                                                            ProcInitArgs),
                            log(info, "accepting STOMP connection ~p (~s)~n",
                                [self(), ConnStr]),

                            ParseState = rabbit_stomp_frame:initial_state(),
                            try
                                mainloop(
                                  register_resource_alarm(
                                    #reader_state{socket             = Sock,
                                                  parse_state        = ParseState,
                                                  processor          = ProcessorPid,
                                                  state              = running,
                                                  conserve_resources = false,
                                                  recv_outstanding   = false})),
                                log(info, "closing STOMP connection ~p (~s)~n",
                                    [self(), ConnStr])
                            catch _:Ex ->
                                    log_network_error(ConnStr, Ex),
                                    rabbit_net:fast_close(Sock),
                                    exit(normal)
                            end,
                            done;
                        {error, enotconn} ->
                            %% Peer went away before we got going;
                            %% nothing worth logging.
                            rabbit_net:fast_close(Sock0),
                            exit(normal);
                        {error, Reason} ->
                            log_network_error(ConnStr, Reason),
                            rabbit_net:fast_close(Sock0),
                            exit(normal)
                    end
            end
    end.
+
%% Main read loop: (re)arm the socket (subject to flow control) and
%% wait for socket data, flow-control notifications or shutdown.
%% Returns when the connection closes or shutdown is requested; throws
%% {inet_error, Reason} on socket errors (caught in go/3).
mainloop(State0 = #reader_state{socket = Sock}) ->
    State = run_socket(control_throttle(State0)),
    receive
        {inet_async, Sock, _Ref, {ok, Data}} ->
            mainloop(process_received_bytes(
                       Data, State#reader_state{recv_outstanding = false}));
        {inet_async, _Sock, _Ref, {error, closed}} ->
            ok;
        {inet_async, _Sock, _Ref, {error, Reason}} ->
            throw({inet_error, Reason});
        {conserve_resources, Conserve} ->
            mainloop(State#reader_state{conserve_resources = Conserve});
        {bump_credit, Msg} ->
            credit_flow:handle_bump_msg(Msg),
            mainloop(State);
        {'EXIT', _From, shutdown} ->
            ok;
        Other ->
            %% Fix: the format string has two ~p placeholders but only
            %% one argument was supplied, so this log call crashed with
            %% badarg instead of reporting the stray message.
            log(warning, "STOMP connection ~p received "
                "an unexpected message ~p~n", [self(), Other]),
            ok
    end.
+
%% Feed received bytes through the frame parser, handing each complete
%% frame to the processor. Parser state is reset after every frame and
%% leftover bytes are reparsed immediately.
process_received_bytes([], State) ->
    State;
process_received_bytes(Bytes,
                       State = #reader_state{
                                  processor   = Processor,
                                  parse_state = ParseState,
                                  state       = S}) ->
    case rabbit_stomp_frame:parse(Bytes, ParseState) of
        {more, ParseState1} ->
            %% Partial frame: remember parser progress, await more data.
            State#reader_state{parse_state = ParseState1};
        {ok, Frame, Rest} ->
            rabbit_stomp_processor:process_frame(Processor, Frame),
            PS = rabbit_stomp_frame:initial_state(),
            process_received_bytes(Rest, State#reader_state{
                                           parse_state = PS,
                                           state       = next_state(S, Frame)})
    end.

%% rabbit_alarm callback (registered below): forwards resource alarm
%% set/clear notifications into this reader's mailbox.
conserve_resources(Pid, _Source, Conserve) ->
    Pid ! {conserve_resources, Conserve},
    ok.

register_resource_alarm(State) ->
    rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), State.
+
%% Flow-control state machine: running -> blocking when a resource
%% alarm is raised or credit flow blocks us; back to running (from
%% blocking or blocked) when the pressure clears.
control_throttle(State = #reader_state{state              = CS,
                                       conserve_resources = Mem}) ->
    case {CS, Mem orelse credit_flow:blocked()} of
        {running, true}   -> State#reader_state{state = blocking};
        {blocking, false} -> State#reader_state{state = running};
        {blocked, false}  -> State#reader_state{state = running};
        {_, _}            -> State
    end.

%% While 'blocking', the first SEND (publish) escalates to 'blocked':
%% non-publishing traffic keeps flowing until the client actually
%% tries to publish under alarm.
next_state(blocking, #stomp_frame{command = "SEND"}) ->
    blocked;
next_state(S, _) ->
    S.

%% Issue an asynchronous read unless we are blocked or a read is
%% already outstanding (at most one async recv in flight at a time).
run_socket(State = #reader_state{state = blocked}) ->
    State;
run_socket(State = #reader_state{recv_outstanding = true}) ->
    State;
run_socket(State = #reader_state{socket = Sock}) ->
    rabbit_net:async_recv(Sock, 0, infinity),
    State#reader_state{recv_outstanding = true}.
+
+%%----------------------------------------------------------------------------
+
%% Build the init-argument list handed to the processor: a send fun,
%% adapter info, a heartbeat starter, the TLS-derived login name (if
%% configured) and an address.
processor_args(SupPid, Configuration, Sock) ->
    SendFun = fun (sync, IoData) ->
                      %% no messages emitted
                      catch rabbit_net:send(Sock, IoData);
                  (async, IoData) ->
                      %% {inet_reply, _, _} will appear soon
                      %% We ignore certain errors here, as we will be
                      %% receiving an asynchronous notification of the
                      %% same (or a related) fault shortly anyway. See
                      %% bug 21365.
                      catch rabbit_net:port_command(Sock, IoData)
              end,

    StartHeartbeatFun =
        fun (SendTimeout, SendFin, ReceiveTimeout, ReceiveFun) ->
                rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
                                       SendFin, ReceiveTimeout, ReceiveFun)
        end,
    %% NOTE(review): rabbit_net:sockname/1 reports the *local* end of
    %% the socket, yet the result is bound to PeerAddr -- confirm
    %% whether peername/1 was intended here.
    {ok, {PeerAddr, _PeerPort}} = rabbit_net:sockname(Sock),
    [SendFun, adapter_info(Sock), StartHeartbeatFun,
     ssl_login_name(Sock, Configuration), PeerAddr].

adapter_info(Sock) ->
    amqp_connection:socket_adapter_info(Sock, {'STOMP', 0}).

%% Derive a login name from the client TLS certificate when
%% ssl_cert_login is enabled; 'none' whenever a usable name cannot be
%% extracted (no cert, plain TCP, unsafe/absent name in the cert).
ssl_login_name(_Sock, #stomp_configuration{ssl_cert_login = false}) ->
    none;
ssl_login_name(Sock, #stomp_configuration{ssl_cert_login = true}) ->
    case rabbit_net:peercert(Sock) of
        {ok, C}              -> case rabbit_ssl:peer_cert_auth_name(C) of
                                    unsafe    -> none;
                                    not_found -> none;
                                    Name      -> Name
                                end;
        {error, no_peercert} -> none;
        nossl                -> none
    end.
+
+%%----------------------------------------------------------------------------
+
%% Human-friendly logging for the common ways a connection dies during
%% the TLS upgrade; the final clause is the generic network fallback.
log_network_error(ConnStr, {ssl_upgrade_error,
                            {tls_alert, "handshake failure"}}) ->
    log(error, "STOMP detected TLS upgrade error on "
        "~p (~s): handshake failure~n", [self(), ConnStr]);

log_network_error(ConnStr, {ssl_upgrade_error,
                            {tls_alert, "unknown ca"}}) ->
    log(error, "STOMP detected TLS certificate "
        "verification error on "
        "~p (~s): alert 'unknown CA'~n", [self(), ConnStr]);

log_network_error(ConnStr, {ssl_upgrade_error, {tls_alert, Alert}}) ->
    log(error, "STOMP detected TLS upgrade error on "
        "~p (~s): alert ~s~n", [self(), ConnStr, Alert]);

log_network_error(ConnStr, {ssl_upgrade_error, closed}) ->
    log(error, "STOMP detected TLS upgrade error on "
        "~p (~s): connection closed~n", [self(), ConnStr]);

log_network_error(ConnStr, Ex) ->
    log(error, "STOMP detected network error on "
        "~p (~s):~n~p~n", [self(), ConnStr, Ex]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_sup).
+-behaviour(supervisor).
+
+-export([start_link/2, init/1]).
+
+-export([start_client/2, start_ssl_client/3]).
+
%% Top-level supervisor for the STOMP plugin: owns the client
%% supervisor tree plus one (TCP or SSL) listener per configured
%% address.
start_link(Listeners, Configuration) ->
    supervisor:start_link({local, ?MODULE}, ?MODULE,
                          [Listeners, Configuration]).

init([{Listeners, SslListeners}, Configuration]) ->
    {ok, SocketOpts} = application:get_env(rabbitmq_stomp, tcp_listen_options),

    %% Only resolve TLS options when TLS listeners are configured.
    SslOpts = case SslListeners of
                  [] -> none;
                  _  -> rabbit_networking:ensure_ssl()
              end,

    %% one_for_all: a crash of the client sup-sup or any listener
    %% restarts the whole STOMP subsystem.
    {ok, {{one_for_all, 10, 10},
          [{rabbit_stomp_client_sup_sup,
            {rabbit_client_sup, start_link,
             [{local, rabbit_stomp_client_sup_sup},
              {rabbit_stomp_client_sup, start_link, []}]},
            transient, infinity, supervisor, [rabbit_client_sup]} |
           listener_specs(fun tcp_listener_spec/1,
                          [SocketOpts, Configuration], Listeners) ++
               listener_specs(fun ssl_listener_spec/1,
                              [SocketOpts, SslOpts, Configuration], SslListeners)]}}.
+
%% Expand each configured listener into one child spec per resolved
%% address (a hostname may resolve to several addresses/families).
listener_specs(Fun, Args, Listeners) ->
    [Fun([Address | Args]) ||
        Listener <- Listeners,
        Address <- rabbit_networking:tcp_listener_addresses(Listener)].

tcp_listener_spec([Address, SocketOpts, Configuration]) ->
    rabbit_networking:tcp_listener_spec(
      rabbit_stomp_listener_sup, Address, SocketOpts,
      stomp, "STOMP TCP Listener",
      {?MODULE, start_client, [Configuration]}).

ssl_listener_spec([Address, SocketOpts, SslOpts, Configuration]) ->
    rabbit_networking:tcp_listener_spec(
      rabbit_stomp_listener_sup, Address, SocketOpts,
      'stomp/ssl', "STOMP SSL Listener",
      {?MODULE, start_ssl_client, [Configuration, SslOpts]}).
+
%% Accept callback for a new connection: start a reader under the
%% client supervisor, transfer socket ownership to it, and kick it off
%% with the {go, ...} handshake (see rabbit_stomp_reader).
start_client(Configuration, Sock, SockTransform) ->
    {ok, _Child, Reader} = supervisor:start_child(rabbit_stomp_client_sup_sup,
                                                  [Configuration]),
    ok = rabbit_net:controlling_process(Sock, Reader),
    Reader ! {go, Sock, SockTransform},

    %% see comment in rabbit_networking:start_client/2
    gen_event:which_handlers(error_logger),

    Reader.

%% Plain TCP: the socket needs no transformation.
start_client(Configuration, Sock) ->
    start_client(Configuration, Sock, fun (S) -> {ok, S} end).

%% TLS: the reader upgrades the socket via the transform fun.
start_ssl_client(Configuration, SslOpts, Sock) ->
    Transform = rabbit_networking:ssl_transform_fun(SslOpts),
    start_client(Configuration, Sock, Transform).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_util).
+
+-export([parse_message_id/1, durable_subscription_queue/2]).
+-export([longstr_field/2]).
+-export([ack_mode/1, consumer_tag_reply_to/1, consumer_tag/1, message_headers/1,
+ headers_post_process/1, headers/5, message_properties/1, tag_to_id/1,
+ msg_header_name/1, ack_header_name/1]).
+-export([negotiate_version/2]).
+-export([trim_headers/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-define(INTERNAL_TAG_PREFIX, "T_").
+-define(QUEUE_TAG_PREFIX, "Q_").
+
+%%--------------------------------------------------------------------
+%% Frame and Header Parsing
+%%--------------------------------------------------------------------
+
%% Consumer tag for a temporary reply-to queue subscription.
consumer_tag_reply_to(QueueId) ->
    internal_tag(?TEMP_QUEUE_ID_PREFIX ++ QueueId).

%% Derive a consumer tag (plus a human-readable description) for a
%% SUBSCRIBE frame: from the 'id' header when present (the temp-queue
%% prefix is reserved for internal use), otherwise from the
%% destination header.
consumer_tag(Frame) ->
    case rabbit_stomp_frame:header(Frame, ?HEADER_ID) of
        {ok, Id} ->
            case lists:prefix(?TEMP_QUEUE_ID_PREFIX, Id) of
                false -> {ok, internal_tag(Id), "id='" ++ Id ++ "'"};
                true  -> {error, invalid_prefix}
            end;
        not_found ->
            case rabbit_stomp_frame:header(Frame, ?HEADER_DESTINATION) of
                {ok, DestHdr} ->
                    {ok, queue_tag(DestHdr),
                     "destination='" ++ DestHdr ++ "'"};
                not_found ->
                    {error, missing_destination_header}
            end
    end.

%% Map the STOMP 'ack' header (default "auto") to {AckMode, IsMulti}.
%% IsMulti is true only for "client" mode, whose acks are cumulative;
%% any other header value crashes (surfacing as a frame error).
ack_mode(Frame) ->
    case rabbit_stomp_frame:header(Frame, ?HEADER_ACK, "auto") of
        "auto"              -> {auto, false};
        "client"            -> {client, true};
        "client-individual" -> {client, false}
    end.
+
%% Build the AMQP P_basic properties for an outgoing basic.publish
%% from a SEND frame's headers. Unmapped headers travel in the AMQP
%% headers table (see user_header/1); persistent:true becomes
%% delivery_mode 2.
message_properties(Frame = #stomp_frame{headers = Headers}) ->
    BinH = fun(K) -> rabbit_stomp_frame:binary_header(Frame, K, undefined) end,
    IntH = fun(K) -> rabbit_stomp_frame:integer_header(Frame, K, undefined) end,

    DeliveryMode = case rabbit_stomp_frame:boolean_header(
                          Frame, ?HEADER_PERSISTENT, false) of
                       true  -> 2;
                       false -> undefined
                   end,

    #'P_basic'{ content_type     = BinH(?HEADER_CONTENT_TYPE),
                content_encoding = BinH(?HEADER_CONTENT_ENCODING),
                headers          = [longstr_field(K, V) ||
                                       {K, V} <- Headers, user_header(K)],
                delivery_mode    = DeliveryMode,
                priority         = IntH(?HEADER_PRIORITY),
                correlation_id   = BinH(?HEADER_CORRELATION_ID),
                reply_to         = BinH(?HEADER_REPLY_TO),
                expiration       = BinH(?HEADER_EXPIRATION),
                message_id       = BinH(?HEADER_AMQP_MESSAGE_ID),
                timestamp        = IntH(?HEADER_TIMESTAMP),
                type             = BinH(?HEADER_TYPE),
                user_id          = BinH(?HEADER_USER_ID),
                app_id           = BinH(?HEADER_APP_ID) }.
+
%% Turn P_basic properties back into STOMP headers for a MESSAGE
%% frame: the well-known property fields first (via maybe_header/3),
%% then any representable entries from the AMQP headers table.
message_headers(Props = #'P_basic'{headers = Headers}) ->
    adhoc_convert_headers(
      Headers,
      lists:foldl(fun({Header, Index}, Acc) ->
                          maybe_header(Header, element(Index, Props), Acc)
                  end, [],
                  [{?HEADER_CONTENT_TYPE,     #'P_basic'.content_type},
                   {?HEADER_CONTENT_ENCODING, #'P_basic'.content_encoding},
                   {?HEADER_PERSISTENT,       #'P_basic'.delivery_mode},
                   {?HEADER_PRIORITY,         #'P_basic'.priority},
                   {?HEADER_CORRELATION_ID,   #'P_basic'.correlation_id},
                   {?HEADER_REPLY_TO,         #'P_basic'.reply_to},
                   {?HEADER_EXPIRATION,       #'P_basic'.expiration},
                   {?HEADER_AMQP_MESSAGE_ID,  #'P_basic'.message_id},
                   {?HEADER_TIMESTAMP,        #'P_basic'.timestamp},
                   {?HEADER_TYPE,             #'P_basic'.type},
                   {?HEADER_USER_ID,          #'P_basic'.user_id},
                   {?HEADER_APP_ID,           #'P_basic'.app_id}])).

%% Fold AMQP table entries into STOMP header form. Only longstr and
%% signedint entries are representable; everything else is dropped.
adhoc_convert_headers(undefined, Existing) ->
    Existing;
adhoc_convert_headers(Headers, Existing) ->
    lists:foldr(fun ({K, longstr, V}, Acc) ->
                        [{binary_to_list(K), binary_to_list(V)} | Acc];
                    ({K, signedint, V}, Acc) ->
                        [{binary_to_list(K), integer_to_list(V)} | Acc];
                    (_, Acc) ->
                        Acc
                end, Existing, Headers).
+
%% Synthesised (non-property) headers for a MESSAGE frame:
%% subscription (for id-based subscriptions), destination, message-id,
%% and -- for STOMP 1.2 cumulative-ack subscriptions -- an 'ack'
%% header carrying the same id.
headers_extra(SessionId, AckMode, Version,
              #'basic.deliver'{consumer_tag = ConsumerTag,
                               delivery_tag = DeliveryTag,
                               exchange     = ExchangeBin,
                               routing_key  = RoutingKeyBin}) ->
    case tag_to_id(ConsumerTag) of
        {ok, {internal, Id}} -> [{?HEADER_SUBSCRIPTION, Id}];
        _                    -> []
    end ++
        [{?HEADER_DESTINATION,
          format_destination(binary_to_list(ExchangeBin),
                             binary_to_list(RoutingKeyBin))},
         {?HEADER_MESSAGE_ID,
          create_message_id(ConsumerTag, SessionId, DeliveryTag)}] ++
        case AckMode == client andalso Version == "1.2" of
            true  -> [{?HEADER_ACK,
                       create_message_id(ConsumerTag, SessionId, DeliveryTag)}];
            false -> []
        end.

%% Prefix any reply-to header that does not already carry a known
%% destination prefix, so clients can address temp reply queues.
headers_post_process(Headers) ->
    Prefixes = rabbit_routing_util:dest_prefixes(),
    [case Header of
         {?HEADER_REPLY_TO, V} ->
             case lists:any(fun (P) -> lists:prefix(P, V) end, Prefixes) of
                 true  -> {?HEADER_REPLY_TO, V};
                 false -> {?HEADER_REPLY_TO, ?REPLY_QUEUE_PREFIX ++ V}
             end;
         {_, _} ->
             Header
     end || Header <- Headers].

%% All headers for an outgoing MESSAGE frame.
headers(SessionId, Delivery, Properties, AckMode, Version) ->
    headers_extra(SessionId, AckMode, Version, Delivery) ++
        headers_post_process(message_headers(Properties)).
+
%% Invert internal_tag/1 and queue_tag/1: recover the subscription
%% kind and id from a consumer tag.
tag_to_id(<<?INTERNAL_TAG_PREFIX, Id/binary>>) ->
    {ok, {internal, binary_to_list(Id)}};
tag_to_id(<<?QUEUE_TAG_PREFIX, Id/binary>>) ->
    {ok, {queue, binary_to_list(Id)}};
tag_to_id(Other) when is_binary(Other) ->
    {error, {unknown, binary_to_list(Other)}}.

%% True for headers that should travel in the AMQP headers table --
%% i.e. anything that is not one of the standard STOMP/AMQP-mapped
%% headers handled explicitly in message_properties/1.
user_header(Hdr)
  when Hdr =:= ?HEADER_CONTENT_TYPE orelse
       Hdr =:= ?HEADER_CONTENT_ENCODING orelse
       Hdr =:= ?HEADER_PERSISTENT orelse
       Hdr =:= ?HEADER_PRIORITY orelse
       Hdr =:= ?HEADER_CORRELATION_ID orelse
       Hdr =:= ?HEADER_REPLY_TO orelse
       Hdr =:= ?HEADER_EXPIRATION orelse
       Hdr =:= ?HEADER_AMQP_MESSAGE_ID orelse
       Hdr =:= ?HEADER_TIMESTAMP orelse
       Hdr =:= ?HEADER_TYPE orelse
       Hdr =:= ?HEADER_USER_ID orelse
       Hdr =:= ?HEADER_APP_ID orelse
       Hdr =:= ?HEADER_DESTINATION ->
    false;
user_header(_) ->
    true.

%% Parse a message-id built by create_message_id/3 back into
%% {ConsumerTag, SessionId, DeliveryTag}.
parse_message_id(MessageId) ->
    case split(MessageId, ?MESSAGE_ID_SEPARATOR) of
        [ConsumerTag, SessionId, DeliveryTag] ->
            {ok, {list_to_binary(ConsumerTag),
                  SessionId,
                  list_to_integer(DeliveryTag)}};
        _ ->
            {error, invalid_message_id}
    end.
+
%% Pick the highest protocol version supported by both sides.
%% Returns {ok, Version} or {error, no_common_version}.
negotiate_version(ClientVers, ServerVers) ->
    case [Ver || Ver <- ClientVers, lists:member(Ver, ServerVers)] of
        []             -> {error, no_common_version};
        [First | Rest] -> {ok, lists:foldl(fun max_version/2, First, Rest)}
    end.

%% Compare two dotted version strings component-wise.
max_version(V, V) ->
    V;
max_version(V1, V2) ->
    Components = fun (V) -> re:split(V, "\\.", [{return, list}]) end,
    find_max_version({V1, Components(V1)}, {V2, Components(V2)}).

%% Strip the common prefix, then decide on the first differing
%% component (numerically) or on length (longer wins).
find_max_version({V1, [Same | Rest1]}, {V2, [Same | Rest2]}) ->
    find_max_version({V1, Rest1}, {V2, Rest2});
find_max_version({V1, [X]}, {V2, [Y]}) ->
    case list_to_integer(X) >= list_to_integer(Y) of
        true  -> V1;
        false -> V2
    end;
find_max_version({_V1, []}, {V2, [_ | _]}) ->
    V2;
find_max_version({V1, [_ | _]}, {_V2, []}) ->
    V1.
+
+%% ---- Header processing helpers ----
+
%% AMQP table entry of type longstr from two plain strings.
longstr_field(K, V) ->
    {list_to_binary(K), longstr, list_to_binary(V)}.

%% Prepend a STOMP header for one P_basic field, skipping unset
%% fields. delivery_mode 2 surfaces as persistent:true; any other
%% delivery_mode (and any non-binary/non-integer value) is dropped by
%% the catch-all.
maybe_header(_Key, undefined, Acc) ->
    Acc;
maybe_header(?HEADER_PERSISTENT, 2, Acc) ->
    [{?HEADER_PERSISTENT, "true"} | Acc];
maybe_header(Key, Value, Acc) when is_binary(Value) ->
    [{Key, binary_to_list(Value)} | Acc];
maybe_header(Key, Value, Acc) when is_integer(Value) ->
    [{Key, integer_to_list(Value)} | Acc];
maybe_header(_Key, _Value, Acc) ->
    Acc.

%% message-id sent to the client: ConsumerTag / SessionId /
%% DeliveryTag joined by ?MESSAGE_ID_SEPARATOR (built as an iolist;
%% parsed back by parse_message_id/1).
create_message_id(ConsumerTag, SessionId, DeliveryTag) ->
    [ConsumerTag,
     ?MESSAGE_ID_SEPARATOR,
     SessionId,
     ?MESSAGE_ID_SEPARATOR,
     integer_to_list(DeliveryTag)].
+
%% STOMP 1.0 tolerated padding after the header colon; strip leading
%% spaces from every header value.
trim_headers(Frame = #stomp_frame{headers = Hdrs}) ->
    Frame#stomp_frame{headers = [{K, string:strip(V, left)} || {K, V} <- Hdrs]}.

%% Consumer tags are namespaced so tag_to_id/1 can distinguish
%% id-based subscriptions from destination-based ones.
internal_tag(Base) ->
    list_to_binary(?INTERNAL_TAG_PREFIX ++ Base).

queue_tag(Base) ->
    list_to_binary(?QUEUE_TAG_PREFIX ++ Base).

%% Header the client must echo in ACK frames, per protocol version.
ack_header_name("1.2") -> ?HEADER_ID;
ack_header_name("1.1") -> ?HEADER_MESSAGE_ID;
ack_header_name("1.0") -> ?HEADER_MESSAGE_ID.

%% Header on a MESSAGE frame carrying the ack-able id, per version.
msg_header_name("1.2") -> ?HEADER_ACK;
msg_header_name("1.1") -> ?HEADER_MESSAGE_ID;
msg_header_name("1.0") -> ?HEADER_MESSAGE_ID.
+
+%%--------------------------------------------------------------------
+%% Destination Formatting
+%%--------------------------------------------------------------------
+
%% Reconstruct a STOMP destination string from an AMQP exchange /
%% routing key pair, percent-escaping both components. The default
%% exchange maps to /queue, amq.topic to /topic, everything else to
%% /exchange.
format_destination("", RoutingKey) ->
    ?QUEUE_PREFIX ++ "/" ++ escape(RoutingKey);
format_destination("amq.topic", RoutingKey) ->
    ?TOPIC_PREFIX ++ "/" ++ escape(RoutingKey);
format_destination(Exchange, "") ->
    ?EXCHANGE_PREFIX ++ "/" ++ escape(Exchange);
format_destination(Exchange, RoutingKey) ->
    ?EXCHANGE_PREFIX ++ "/" ++ escape(Exchange) ++ "/" ++ escape(RoutingKey).

%%--------------------------------------------------------------------
%% Destination Parsing
%%--------------------------------------------------------------------

durable_subscription_queue(Destination, SubscriptionId) ->
    %% We need a queue name that a) can be derived from the
    %% Destination and SubscriptionId, and b) meets the constraints on
    %% AMQP queue names. It doesn't need to be secure; we use md5 here
    %% simply as a convenient means to bound the length.
    rabbit_guid:string(
      erlang:md5(term_to_binary({Destination, SubscriptionId})),
      "stomp.dsub").
+
+%% ---- Helpers ----
+
%% Split a string on an exact (possibly multi-character) separator.
%% Adjacent separators yield empty parts. Two quirks preserved from
%% the original contract: an empty subject yields [] (not [""]), and
%% an empty separator returns the subject unchanged (not in a list).
split([], _Splitter)     -> [];
split(Content, [])       -> Content;
split(Content, Splitter) -> split(Content, [], [], Splitter).

%% Both accumulators (current part, finished parts) are kept reversed
%% so each step is O(1); everything is re-reversed at the end.
split([], RPart, RParts, _Splitter) ->
    lists:reverse([lists:reverse(RPart) | RParts]);
split(Content = [Ch | Rest], RPart, RParts, Splitter) ->
    case take_prefix(Splitter, Content) of
        {ok, Remainder} ->
            %% Separator starts here: close off the current part.
            split(Remainder, [], [lists:reverse(RPart) | RParts], Splitter);
        not_found ->
            split(Rest, [Ch | RPart], RParts, Splitter)
    end.

%% Strip Prefix off the front of List, or report not_found.
take_prefix([Ch | Prefix], [Ch | List]) -> take_prefix(Prefix, List);
take_prefix([], List)                   -> {ok, List};
take_prefix(_Prefix, _List)             -> not_found.
+
%% Percent-escape '/', '%' and any byte outside printable ASCII so a
%% component can be embedded in a STOMP destination string.
escape(Str) -> escape(Str, []).

%% The accumulator is built reversed, so each escape sequence is
%% prepended reversed ("F2%" reads "%2F" after the final reverse).
escape([$/ | Rest], Acc) -> escape(Rest, "F2%" ++ Acc); %% $/ == '2F'x
escape([$% | Rest], Acc) -> escape(Rest, "52%" ++ Acc); %% $% == '25'x
escape([Ch | Rest], Acc) when Ch < 32 orelse Ch > 127 ->
    escape(Rest, revhex(Ch) ++ "%" ++ Acc);
escape([Ch | Rest], Acc) -> escape(Rest, [Ch | Acc]);
escape([], Acc)          -> lists:reverse(Acc).

%% Two hex digits of a byte, low nibble first (i.e. reversed).
revhex(Ch) -> hexdig(Ch) ++ hexdig(Ch bsr 4).

hexdig(Ch) -> erlang:integer_to_list(Ch band 15, 16).
--- /dev/null
%% Application resource file for the RabbitMQ STOMP adapter plugin.
{application, rabbitmq_stomp,
 [{description, "Embedded Rabbit Stomp Adapter"},
  %% %%VSN%% is substituted with the real version at build time.
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  {mod, {rabbit_stomp, []}},
  %% Defaults below can be overridden in the broker configuration.
  {env, [%% Credentials assumed when a CONNECT frame omits
         %% login/passcode (also used for implicit connects).
         {default_user, [{login, "guest"},
                         {passcode, "guest"}]},
         {default_vhost, <<"/">>},
         %% When true, authenticate via the client TLS certificate.
         {ssl_cert_login, false},
         %% When true, the first frame of a connection may implicitly
         %% open a session without an explicit CONNECT.
         {implicit_connect, false},
         %% 61613 is the IANA-registered STOMP port.
         {tcp_listeners, [61613]},
         {ssl_listeners, []},
         {tcp_listen_options, [binary,
                               {packet, raw},
                               {reuseaddr, true},
                               {backlog, 128},
                               {nodelay, true}]}]},
  {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
--- /dev/null
+import unittest
+import stomp
+import base
+import time
+
+class TestAck(base.BaseTest):
+
+ def test_ack_client(self):
+ d = "/queue/ack-test"
+
+ # subscribe and send message
+ self.listener.reset(2) ## expecting 2 messages
+ self.conn.subscribe(destination=d, ack='client',
+ headers={'prefetch-count': '10'})
+ self.conn.send("test1", destination=d)
+ self.conn.send("test2", destination=d)
+ self.assertTrue(self.listener.await(4), "initial message not received")
+ self.assertEquals(2, len(self.listener.messages))
+
+ # disconnect with no ack
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ listener2.reset(2)
+ conn2.set_listener('', listener2)
+ conn2.subscribe(destination=d, ack='client',
+ headers={'prefetch-count': '10'})
+ self.assertTrue(listener2.await(), "message not received again")
+ self.assertEquals(2, len(listener2.messages))
+
+ # now ack only the last message - expecting cumulative behaviour
+ mid = listener2.messages[1]['headers']['message-id']
+ conn2.ack({'message-id':mid})
+ finally:
+ conn2.stop()
+
+ # now reconnect again, shouldn't see the message
+ conn3 = self.create_connection()
+ try:
+ listener3 = base.WaitableListener()
+ conn3.set_listener('', listener3)
+ conn3.subscribe(destination=d)
+ self.assertFalse(listener3.await(3),
+ "unexpected message. ACK not working?")
+ finally:
+ conn3.stop()
+
+ def test_ack_client_individual(self):
+ d = "/queue/ack-test-individual"
+
+ # subscribe and send message
+ self.listener.reset(2) ## expecting 2 messages
+ self.conn.subscribe(destination=d, ack='client-individual',
+ headers={'prefetch-count': '10'})
+ self.conn.send("test1", destination=d)
+ self.conn.send("test2", destination=d)
+ self.assertTrue(self.listener.await(4), "Both initial messages not received")
+ self.assertEquals(2, len(self.listener.messages))
+
+ # disconnect without acks
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ listener2.reset(2) ## expect 2 messages
+ conn2.set_listener('', listener2)
+ conn2.subscribe(destination=d, ack='client-individual',
+ headers={'prefetch-count': '10'})
+ self.assertTrue(listener2.await(2.5), "Did not receive 2 messages")
+ self.assertEquals(2, len(listener2.messages), "Not exactly 2 messages received")
+
+ # now ack only the 'test2' message - expecting individual behaviour
+ nummsgs = len(listener2.messages)
+ mid = None
+ for ind in range(nummsgs):
+ if listener2.messages[ind]['message']=="test2":
+ mid = listener2.messages[ind]['headers']['message-id']
+ self.assertEquals(1, ind, 'Expecting test2 to be second message')
+ break
+ self.assertTrue(mid, "Did not find test2 message id.")
+ conn2.ack({'message-id':mid})
+ finally:
+ conn2.stop()
+
+ # now reconnect again, shouldn't see the message
+ conn3 = self.create_connection()
+ try:
+ listener3 = base.WaitableListener()
+ listener3.reset(2) ## expecting a single message, but wait for two
+ conn3.set_listener('', listener3)
+ conn3.subscribe(destination=d)
+ self.assertFalse(listener3.await(2.5),
+ "Expected to see only one message. ACK not working?")
+ self.assertEquals(1, len(listener3.messages), "Expecting exactly one message")
+ self.assertEquals("test1", listener3.messages[0]['message'], "Unexpected message remains")
+ finally:
+ conn3.stop()
+
+ def test_ack_client_tx(self):
+ d = "/queue/ack-test-tx"
+
+ # subscribe and send message
+ self.listener.reset()
+ self.conn.subscribe(destination=d, ack='client')
+ self.conn.send("test", destination=d)
+ self.assertTrue(self.listener.await(3), "initial message not received")
+ self.assertEquals(1, len(self.listener.messages))
+
+ # disconnect with no ack
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ tx = "abc"
+ listener2 = base.WaitableListener()
+ conn2.set_listener('', listener2)
+ conn2.begin(transaction=tx)
+ conn2.subscribe(destination=d, ack='client')
+ self.assertTrue(listener2.await(), "message not received again")
+ self.assertEquals(1, len(listener2.messages))
+
+ # now ack
+ mid = listener2.messages[0]['headers']['message-id']
+ conn2.ack({'message-id':mid, 'transaction':tx})
+
+ #now commit
+ conn2.commit(transaction=tx)
+ finally:
+ conn2.stop()
+
+ # now reconnect again, shouldn't see the message
+ conn3 = self.create_connection()
+ try:
+ listener3 = base.WaitableListener()
+ conn3.set_listener('', listener3)
+ conn3.subscribe(destination=d)
+ self.assertFalse(listener3.await(3),
+ "unexpected message. TX ACK not working?")
+ finally:
+ conn3.stop()
+
+    def test_topic_prefetch(self):
+        ''' With prefetch-count 5 on a client-ack'd topic subscription,
+        at most 5 unacked messages may be outstanding even though 10
+        are published. '''
+        d = "/topic/prefetch-test"
+
+        # subscribe and send message
+        self.listener.reset(6) ## expect 6 messages
+        self.conn.subscribe(destination=d, ack='client',
+                            headers={'prefetch-count': '5'})
+
+        for x in range(10):
+            self.conn.send("test" + str(x), destination=d)
+
+        # the sixth message must never arrive while the first five are unacked
+        self.assertFalse(self.listener.await(3),
+                         "Should not have been able to see 6 messages")
+        self.assertEquals(5, len(self.listener.messages))
+
+    def test_nack(self):
+        ''' NACK under client-individual ack mode requeues the message;
+        it is then redelivered and finally ACK'd. '''
+        d = "/queue/nack-test"
+
+        #subscribe and send
+        self.conn.subscribe(destination=d, ack='client-individual')
+        self.conn.send("nack-test", destination=d)
+
+        self.assertTrue(self.listener.await(), "Not received message")
+        message_id = self.listener.messages[0]['headers']['message-id']
+        self.listener.reset()
+
+        # NACK requeues -- expect a redelivery (with a fresh message-id)
+        self.conn.send_frame("NACK", {"message-id" : message_id})
+        self.assertTrue(self.listener.await(), "Not received message after NACK")
+        message_id = self.listener.messages[0]['headers']['message-id']
+        self.conn.ack({'message-id' : message_id})
+
+    def test_nack_multi(self):
+        ''' NACK in (cumulative) client ack mode: NACKing the second
+        message-id requeues both outstanding messages, which are then
+        redelivered and cumulatively ACK'd. '''
+        d = "/queue/nack-multi"
+
+        self.listener.reset(2)
+
+        #subscribe and send
+        self.conn.subscribe(destination=d, ack='client',
+                            headers = {'prefetch-count' : '10'})
+        self.conn.send("nack-test1", destination=d)
+        self.conn.send("nack-test2", destination=d)
+
+        self.assertTrue(self.listener.await(), "Not received messages")
+        # NACK the *second* message-id -- cumulative, covers both
+        message_id = self.listener.messages[1]['headers']['message-id']
+        self.listener.reset(2)
+
+        self.conn.send_frame("NACK", {"message-id" : message_id})
+        self.assertTrue(self.listener.await(), "Not received message again")
+        message_id = self.listener.messages[1]['headers']['message-id']
+        self.conn.ack({'message-id' : message_id})
--- /dev/null
+import unittest
+import stomp
+import sys
+import threading
+
+
+class BaseTest(unittest.TestCase):
+    ''' Shared fixture for the STOMP adapter tests: creates a default
+    guest/guest connection with an attached WaitableListener and
+    provides assertion helpers over the listener's recorded events. '''
+
+    def create_connection(self, version=None, heartbeat=None):
+        ''' Open and CONNECT a new stomp connection as guest/guest. '''
+        conn = stomp.Connection(user="guest", passcode="guest",
+                                version=version, heartbeat=heartbeat)
+        conn.start()
+        conn.connect()
+        return conn
+
+    def create_subscriber_connection(self, dest):
+        ''' Open a connection already subscribed to 'dest', waiting for
+        the subscription receipt. Returns (connection, listener) with
+        the listener reset. '''
+        conn = self.create_connection()
+        listener = WaitableListener()
+        conn.set_listener('', listener)
+        conn.subscribe(destination=dest, receipt="sub.receipt")
+        listener.await()
+        self.assertEquals(1, len(listener.receipts))
+        listener.reset()
+        return conn, listener
+
+    def setUp(self):
+        # default connection + listener used by most tests
+        self.conn = self.create_connection()
+        self.listener = WaitableListener()
+        self.conn.set_listener('', self.listener)
+
+    def tearDown(self):
+        if self.conn.is_connected():
+            self.conn.stop()
+
+    def simple_test_send_rec(self, dest, route = None):
+        ''' Subscribe to 'dest', send "foo" to it, and assert the
+        message comes back with the expected headers.
+        NOTE(review): 'route' is accepted but unused here -- confirm. '''
+        self.listener.reset()
+
+        self.conn.subscribe(destination=dest)
+        self.conn.send("foo", destination=dest)
+
+        self.assertTrue(self.listener.await(), "Timeout, no message received")
+
+        # assert no errors
+        if len(self.listener.errors) > 0:
+            self.fail(self.listener.errors[0]['message'])
+
+        # check header content
+        msg = self.listener.messages[0]
+        self.assertEquals("foo", msg['message'])
+        self.assertEquals(dest, msg['headers']['destination'])
+
+    def assertListener(self, errMsg, numMsgs=0, numErrs=0, numRcts=0, timeout=10):
+        ''' Assert the listener saw exactly the given number of
+        messages/errors/receipts within 'timeout' seconds. If nothing
+        is expected, assert the latch does NOT fire. '''
+        if numMsgs + numErrs + numRcts > 0:
+            self._assertTrue(self.listener.await(timeout), errMsg + " (#awaiting)")
+        else:
+            self._assertFalse(self.listener.await(timeout), errMsg + " (#awaiting)")
+        self._assertEquals(numMsgs, len(self.listener.messages), errMsg + " (#messages)")
+        self._assertEquals(numErrs, len(self.listener.errors), errMsg + " (#errors)")
+        self._assertEquals(numRcts, len(self.listener.receipts), errMsg + " (#receipts)")
+
+    def _assertTrue(self, bool, msg):
+        # like assertTrue, but dumps full listener state on failure
+        if not bool:
+            self.listener.print_state(msg, True)
+        self.assertTrue(bool, msg)
+
+    def _assertFalse(self, bool, msg):
+        # like assertFalse, but dumps full listener state on failure
+        if bool:
+            self.listener.print_state(msg, True)
+        self.assertFalse(bool, msg)
+
+    def _assertEquals(self, expected, actual, msg):
+        # like assertEquals, but dumps full listener state on failure
+        if expected != actual:
+            self.listener.print_state(msg, True)
+        self.assertEquals(expected, actual, msg)
+
+    def assertListenerAfter(self, verb, errMsg="", numMsgs=0, numErrs=0, numRcts=0, timeout=5):
+        ''' Reset the listener for the expected event count, run 'verb'
+        (a zero-argument callable), then assert the listener state. '''
+        num = numMsgs + numErrs + numRcts
+        self.listener.reset(num if num>0 else 1)
+        verb()
+        self.assertListener(errMsg=errMsg, numMsgs=numMsgs, numErrs=numErrs, numRcts=numRcts, timeout=timeout)
+
+class WaitableListener(object):
+
+ def __init__(self):
+ self.debug = False
+ if self.debug:
+ print '(listener) init'
+ self.messages = []
+ self.errors = []
+ self.receipts = []
+ self.latch = Latch(1)
+ self.msg_no = 0
+
+ def _next_msg_no(self):
+ self.msg_no += 1
+ return self.msg_no
+
+ def _append(self, array, msg, hdrs):
+ mno = self._next_msg_no()
+ array.append({'message' : msg, 'headers' : hdrs, 'msg_no' : mno})
+ self.latch.countdown()
+
+ def on_receipt(self, headers, message):
+ if self.debug:
+ print '(on_receipt) message:', message, 'headers:', headers
+ self._append(self.receipts, message, headers)
+
+ def on_error(self, headers, message):
+ if self.debug:
+ print '(on_error) message:', message, 'headers:', headers
+ self._append(self.errors, message, headers)
+
+ def on_message(self, headers, message):
+ if self.debug:
+ print '(on_message) message:', message, 'headers:', headers
+ self._append(self.messages, message, headers)
+
+ def reset(self, count=1):
+ if self.debug:
+ self.print_state('(reset listener--old state)')
+ self.messages = []
+ self.errors = []
+ self.receipts = []
+ self.latch = Latch(count)
+ self.msg_no = 0
+ if self.debug:
+ self.print_state('(reset listener--new state)')
+
+ def await(self, timeout=10):
+ return self.latch.await(timeout)
+
+ def print_state(self, hdr="", full=False):
+ print hdr,
+ print '#messages:', len(self.messages),
+ print '#errors:', len(self.errors),
+ print '#receipts:', len(self.receipts),
+ print 'Remaining count:', self.latch.get_count()
+ if full:
+ if len(self.messages) != 0: print 'Messages:', self.messages
+ if len(self.errors) != 0: print 'Messages:', self.errors
+ if len(self.receipts) != 0: print 'Messages:', self.receipts
+
+class Latch(object):
+
+ def __init__(self, count=1):
+ self.cond = threading.Condition()
+ self.cond.acquire()
+ self.count = count
+ self.cond.release()
+
+ def countdown(self):
+ self.cond.acquire()
+ if self.count > 0:
+ self.count -= 1
+ if self.count == 0:
+ self.cond.notify_all()
+ self.cond.release()
+
+ def await(self, timeout=None):
+ try:
+ self.cond.acquire()
+ if self.count == 0:
+ return True
+ else:
+ self.cond.wait(timeout)
+ return self.count == 0
+ finally:
+ self.cond.release()
+
+ def get_count(self):
+ try:
+ self.cond.acquire()
+ return self.count
+ finally:
+ self.cond.release()
--- /dev/null
+import unittest
+import stomp
+import base
+import test_util
+
+class TestConnectOptions(base.BaseTest):
+    ''' Tests for broker-side connect options (implicit connect and
+    default user), toggled via the test_util helpers. '''
+
+    def test_implicit_connect(self):
+        ''' Implicit connect with receipt on first command '''
+        self.conn.disconnect()
+        test_util.enable_implicit_connect()
+        listener = base.WaitableListener()
+        new_conn = stomp.Connection()
+        new_conn.set_listener('', listener)
+
+        new_conn.start() # not going to issue connect
+        # first frame is SUBSCRIBE; its receipt proves the broker
+        # connected us implicitly
+        new_conn.subscribe(destination="/topic/implicit", id='sub_implicit', receipt='implicit')
+
+        try:
+            self.assertTrue(listener.await(5))
+            self.assertEquals(1, len(listener.receipts),
+                              'Missing receipt. Likely not connected')
+            self.assertEquals('implicit', listener.receipts[0]['headers']['receipt-id'])
+        finally:
+            new_conn.disconnect()
+            test_util.disable_implicit_connect()
+
+    def test_default_user(self):
+        ''' Default user connection '''
+        self.conn.disconnect()
+        test_util.enable_default_user()
+        listener = base.WaitableListener()
+        new_conn = stomp.Connection()
+        new_conn.set_listener('', listener)
+        new_conn.start()
+        new_conn.connect()
+        try:
+            self.assertFalse(listener.await(3)) # no error back
+            self.assertTrue(new_conn.is_connected())
+        finally:
+            new_conn.disconnect()
+            test_util.disable_default_user()
--- /dev/null
+import unittest
+import stomp
+import base
+import time
+
+class TestExchange(base.BaseTest):
+    ''' Send/receive tests against /exchange destinations. '''
+
+
+    def test_amq_direct(self):
+        ''' Test basic send/receive for /exchange/amq.direct '''
+        self.__test_exchange_send_rec("amq.direct", "route")
+
+    def test_amq_topic(self):
+        ''' Test basic send/receive for /exchange/amq.topic '''
+        self.__test_exchange_send_rec("amq.topic", "route")
+
+    def test_amq_fanout(self):
+        ''' Test basic send/receive for /exchange/amq.fanout '''
+        self.__test_exchange_send_rec("amq.fanout", "route")
+
+    def test_amq_fanout_no_route(self):
+        ''' Test basic send/receive, /exchange/amq.direct, no routing key'''
+        self.__test_exchange_send_rec("amq.fanout")
+
+    def test_invalid_exchange(self):
+        ''' Test invalid exchange error '''
+        self.listener.reset(1)
+        self.conn.subscribe(destination="/exchange/does.not.exist")
+        self.assertListener("Expecting an error", numErrs=1)
+        err = self.listener.errors[0]
+        self.assertEquals("not_found", err['headers']['message'])
+        self.assertEquals(
+            "NOT_FOUND - no exchange 'does.not.exist' in vhost '/'\n",
+            err['message'])
+        time.sleep(1)
+        # the error is fatal -- connection must be closed by the broker
+        self.assertFalse(self.conn.is_connected())
+
+    def __test_exchange_send_rec(self, exchange, route = None):
+        ''' Build the destination string and run the shared
+        send/receive check. NOTE(review): amq.topic is mapped to the
+        /topic prefix rather than /exchange -- presumably deliberate;
+        confirm against the adapter's destination mapping. '''
+        if exchange != "amq.topic":
+            dest = "/exchange/" + exchange
+        else:
+            dest = "/topic"
+        if route != None:
+            dest += "/" + route
+
+        self.simple_test_send_rec(dest)
+
+class TestQueue(base.BaseTest):
+    ''' Send/receive, receipt and transaction tests against /queue
+    destinations. '''
+
+    def test_send_receive(self):
+        ''' Test basic send/receive for /queue '''
+        d = '/queue/test'
+        self.simple_test_send_rec(d)
+
+    def test_send_receive_in_other_conn(self):
+        ''' Test send in one connection, receive in another '''
+        d = '/queue/test2'
+
+        # send
+        self.conn.send("hello", destination=d)
+
+        # now receive
+        conn2 = self.create_connection()
+        try:
+            listener2 = base.WaitableListener()
+            conn2.set_listener('', listener2)
+
+            conn2.subscribe(destination=d)
+            self.assertTrue(listener2.await(10), "no receive")
+        finally:
+            conn2.stop()
+
+    def test_send_receive_in_other_conn_with_disconnect(self):
+        ''' Test send, disconnect, receive '''
+        d = '/queue/test3'
+
+        # send, using a receipt so we know the broker has the message
+        # before we disconnect
+        self.conn.send("hello thar", destination=d, receipt="foo")
+        self.listener.await(3)
+        self.conn.stop()
+
+        # now receive
+        conn2 = self.create_connection()
+        try:
+            listener2 = base.WaitableListener()
+            conn2.set_listener('', listener2)
+
+            conn2.subscribe(destination=d)
+            self.assertTrue(listener2.await(10), "no receive")
+        finally:
+            conn2.stop()
+
+
+    def test_multi_subscribers(self):
+        ''' Test multiple subscribers against a single /queue destination '''
+        d = '/queue/test-multi'
+
+        ## set up two subscribers
+        conn1, listener1 = self.create_subscriber_connection(d)
+        conn2, listener2 = self.create_subscriber_connection(d)
+
+        try:
+            ## now send
+            self.conn.send("test1", destination=d)
+            self.conn.send("test2", destination=d)
+
+            ## expect both consumers to get a message?
+            # queue semantics: each message goes to exactly one consumer
+            self.assertTrue(listener1.await(2))
+            self.assertEquals(1, len(listener1.messages),
+                              "unexpected message count")
+            self.assertTrue(listener2.await(2))
+            self.assertEquals(1, len(listener2.messages),
+                              "unexpected message count")
+        finally:
+            conn1.stop()
+            conn2.stop()
+
+    def test_send_with_receipt(self):
+        d = '/queue/test-receipt'
+        def noop(): pass
+        self.__test_send_receipt(d, noop, noop)
+
+    def test_send_with_receipt_tx(self):
+        d = '/queue/test-receipt-tx'
+        tx = 'receipt.tx'
+
+        def before():
+            self.conn.begin(transaction=tx)
+
+        def after():
+            # nothing visible before the commit
+            self.assertFalse(self.listener.await(1))
+            self.conn.commit(transaction=tx)
+
+        self.__test_send_receipt(d, before, after, {'transaction': tx})
+
+    def test_interleaved_receipt_no_receipt(self):
+        ''' Test i-leaved receipt/no receipt, no-r bracketed by rs '''
+
+        d = '/queue/ir'
+
+        # three messages and two receipts expected
+        self.listener.reset(5)
+
+        self.conn.subscribe(destination=d)
+        self.conn.send('first', destination=d, receipt='a')
+        self.conn.send('second', destination=d)
+        self.conn.send('third', destination=d, receipt='b')
+
+        self.assertListener("Missing messages/receipts", numMsgs=3, numRcts=2, timeout=3)
+
+        self.assertEquals(set(['a','b']), self.__gather_receipts())
+
+    def test_interleaved_receipt_no_receipt_tx(self):
+        ''' Test i-leaved receipt/no receipt, no-r bracketed by r+xactions '''
+
+        d = '/queue/ir'
+        tx = 'tx.ir'
+
+        # three messages and two receipts
+        self.listener.reset(5)
+
+        self.conn.subscribe(destination=d)
+        self.conn.begin(transaction=tx)
+
+        self.conn.send('first', destination=d, receipt='a', transaction=tx)
+        self.conn.send('second', destination=d, transaction=tx)
+        self.conn.send('third', destination=d, receipt='b', transaction=tx)
+        self.conn.commit(transaction=tx)
+
+        self.assertListener("Missing messages/receipts", numMsgs=3, numRcts=2, timeout=40)
+
+        expected = set(['a', 'b'])
+        missing = expected.difference(self.__gather_receipts())
+
+        self.assertEquals(set(), missing, "Missing receipts: " + str(missing))
+
+    def test_interleaved_receipt_no_receipt_inverse(self):
+        ''' Test i-leaved receipt/no receipt, r bracketed by no-rs '''
+
+        d = '/queue/ir'
+
+        # three messages and one receipt expected
+        self.listener.reset(4)
+
+        self.conn.subscribe(destination=d)
+        self.conn.send('first', destination=d)
+        self.conn.send('second', destination=d, receipt='a')
+        self.conn.send('third', destination=d)
+
+        self.assertListener("Missing messages/receipt", numMsgs=3, numRcts=1, timeout=3)
+
+        self.assertEquals(set(['a']), self.__gather_receipts())
+
+    def __test_send_receipt(self, destination, before, after, headers = {}):
+        ''' Send 'count' receipted messages bracketed by the before()/
+        after() hooks and assert every receipt comes back. '''
+        count = 50
+        self.listener.reset(count)
+
+        before()
+        expected_receipts = set()
+
+        for x in range(0, count):
+            receipt = "test" + str(x)
+            expected_receipts.add(receipt)
+            self.conn.send("test receipt", destination=destination,
+                           receipt=receipt, headers=headers)
+        after()
+
+        self.assertTrue(self.listener.await(5))
+
+        missing_receipts = expected_receipts.difference(
+            self.__gather_receipts())
+
+        self.assertEquals(set(), missing_receipts,
+                          "missing receipts: " + str(missing_receipts))
+
+    def __gather_receipts(self):
+        # collect the receipt-ids seen so far into a set
+        result = set()
+        for r in self.listener.receipts:
+            result.add(r['headers']['receipt-id'])
+        return result
+
+class TestTopic(base.BaseTest):
+    ''' Send/receive tests against /topic destinations (fan-out to all
+    subscribers). '''
+
+    def test_send_receive(self):
+        ''' Test basic send/receive for /topic '''
+        d = '/topic/test'
+        self.simple_test_send_rec(d)
+
+    def test_send_multiple(self):
+        ''' Test /topic with multiple consumers '''
+        d = '/topic/multiple'
+
+        ## set up two subscribers
+        conn1, listener1 = self.create_subscriber_connection(d)
+        conn2, listener2 = self.create_subscriber_connection(d)
+
+        try:
+            ## listeners are expecting 2 messages
+            listener1.reset(2)
+            listener2.reset(2)
+
+            ## now send
+            self.conn.send("test1", destination=d)
+            self.conn.send("test2", destination=d)
+
+            ## expect both consumers to get both messages
+            self.assertTrue(listener1.await(5))
+            self.assertEquals(2, len(listener1.messages),
+                              "unexpected message count")
+            self.assertTrue(listener2.await(5))
+            self.assertEquals(2, len(listener2.messages),
+                              "unexpected message count")
+        finally:
+            conn1.stop()
+            conn2.stop()
+
+class TestReplyQueue(base.BaseTest):
+    ''' Request/reply tests using temp-queue and permanent reply-to
+    destinations. '''
+
+    def test_reply_queue(self):
+        ''' Test with two separate clients. Client 1 sends
+        message to a known destination with a defined reply
+        queue. Client 2 receives on known destination and replies
+        on the reply destination. Client 1 gets the reply message'''
+
+        known = '/queue/known'
+        reply = '/temp-queue/0'
+
+        ## Client 1 uses pre-supplied connection and listener
+        ## Set up client 2
+        conn2, listener2 = self.create_subscriber_connection(known)
+
+        try:
+            self.conn.send("test", destination=known,
+                           headers = {"reply-to": reply})
+
+            self.assertTrue(listener2.await(5))
+            self.assertEquals(1, len(listener2.messages))
+
+            # the broker rewrites the temp-queue into a /reply-queue/ name
+            reply_to = listener2.messages[0]['headers']['reply-to']
+            self.assertTrue(reply_to.startswith('/reply-queue/'))
+
+            conn2.send("reply", destination=reply_to)
+            self.assertTrue(self.listener.await(5))
+            self.assertEquals("reply", self.listener.messages[0]['message'])
+        finally:
+            conn2.stop()
+
+    def test_reuse_reply_queue(self):
+        ''' Test re-use of reply-to queue '''
+
+        known2 = '/queue/known2'
+        known3 = '/queue/known3'
+        reply = '/temp-queue/foo'
+
+        def respond(cntn, listna):
+            # helper: wait for the request on (cntn, listna) and reply
+            # to its rewritten reply-to destination
+            self.assertTrue(listna.await(5))
+            self.assertEquals(1, len(listna.messages))
+            reply_to = listna.messages[0]['headers']['reply-to']
+            self.assertTrue(reply_to.startswith('/reply-queue/'))
+            cntn.send("reply", destination=reply_to)
+
+        ## Client 1 uses pre-supplied connection and listener
+        ## Set up clients 2 and 3
+        conn2, listener2 = self.create_subscriber_connection(known2)
+        conn3, listener3 = self.create_subscriber_connection(known3)
+        try:
+            self.listener.reset(2)
+            self.conn.send("test2", destination=known2,
+                           headers = {"reply-to": reply})
+            self.conn.send("test3", destination=known3,
+                           headers = {"reply-to": reply})
+            respond(conn2, listener2)
+            respond(conn3, listener3)
+
+            self.assertTrue(self.listener.await(5))
+            self.assertEquals(2, len(self.listener.messages))
+            self.assertEquals("reply", self.listener.messages[0]['message'])
+            self.assertEquals("reply", self.listener.messages[1]['message'])
+        finally:
+            conn2.stop()
+            conn3.stop()
+
+    def test_perm_reply_queue(self):
+        '''As test_reply_queue, but with a non-temp reply queue'''
+
+        known = '/queue/known'
+        reply = '/queue/reply'
+
+        ## Client 1 uses pre-supplied connection and listener
+        ## Set up client 2
+        conn1, listener1 = self.create_subscriber_connection(reply)
+        conn2, listener2 = self.create_subscriber_connection(known)
+
+        try:
+            conn1.send("test", destination=known,
+                       headers = {"reply-to": reply})
+
+            self.assertTrue(listener2.await(5))
+            self.assertEquals(1, len(listener2.messages))
+
+            # permanent reply queues are passed through unmodified
+            reply_to = listener2.messages[0]['headers']['reply-to']
+            self.assertTrue(reply_to == reply)
+
+            conn2.send("reply", destination=reply_to)
+            self.assertTrue(listener1.await(5))
+            self.assertEquals("reply", listener1.messages[0]['message'])
+        finally:
+            conn1.stop()
+            conn2.stop()
+
+class TestDurableSubscription(base.BaseTest):
+
+ ID = 'test.subscription'
+
+ def __subscribe(self, dest, conn=None, id=None):
+ if not conn:
+ conn = self.conn
+ if not id:
+ id = TestDurableSubscription.ID
+
+ conn.subscribe(destination=dest,
+ headers ={'persistent': 'true',
+ 'receipt': 1,
+ 'id': id})
+
+ def __assert_receipt(self, listener=None, pos=None):
+ if not listener:
+ listener = self.listener
+
+ self.assertTrue(listener.await(5))
+ self.assertEquals(1, len(self.listener.receipts))
+ if pos is not None:
+ self.assertEquals(pos, self.listener.receipts[0]['msg_no'])
+
+ def __assert_message(self, msg, listener=None, pos=None):
+ if not listener:
+ listener = self.listener
+
+ self.assertTrue(listener.await(5))
+ self.assertEquals(1, len(listener.messages))
+ self.assertEquals(msg, listener.messages[0]['message'])
+ if pos is not None:
+ self.assertEquals(pos, self.listener.messages[0]['msg_no'])
+
+ def test_durable_subscription(self):
+ d = '/topic/durable'
+
+ self.__subscribe(d)
+ self.__assert_receipt()
+
+ # send first message without unsubscribing
+ self.listener.reset(1)
+ self.conn.send("first", destination=d)
+ self.__assert_message("first")
+
+ # now unsubscribe (disconnect only)
+ self.conn.unsubscribe(id=TestDurableSubscription.ID)
+
+ # send again
+ self.listener.reset(2)
+ self.conn.send("second", destination=d)
+
+ # resubscribe and expect receipt
+ self.__subscribe(d)
+ self.__assert_receipt(pos=1)
+ # and message
+ self.__assert_message("second", pos=2)
+
+ # now unsubscribe (cancel)
+ self.conn.unsubscribe(id=TestDurableSubscription.ID,
+ headers={'persistent': 'true'})
+
+ # send again
+ self.listener.reset(1)
+ self.conn.send("third", destination=d)
+
+ # resubscribe and expect no message
+ self.__subscribe(d)
+ self.assertTrue(self.listener.await(3))
+ self.assertEquals(0, len(self.listener.messages))
+ self.assertEquals(1, len(self.listener.receipts))
+
+ def test_share_subscription(self):
+ d = '/topic/durable-shared'
+
+ conn2 = self.create_connection()
+ conn2.set_listener('', self.listener)
+
+ try:
+ self.__subscribe(d)
+ self.__assert_receipt()
+ self.listener.reset(1)
+ self.__subscribe(d, conn2)
+ self.__assert_receipt()
+
+ self.listener.reset(100)
+
+ # send 100 messages
+ for x in xrange(0, 100):
+ self.conn.send("msg" + str(x), destination=d)
+
+ self.assertTrue(self.listener.await(5))
+ self.assertEquals(100, len(self.listener.messages))
+ finally:
+ conn2.stop()
+
+ def test_separate_ids(self):
+ d = '/topic/durable-separate'
+
+ conn2 = self.create_connection()
+ listener2 = base.WaitableListener()
+ conn2.set_listener('', listener2)
+
+ try:
+ # ensure durable subscription exists for each ID
+ self.__subscribe(d)
+ self.__assert_receipt()
+ self.__subscribe(d, conn2, "other.id")
+ self.__assert_receipt(listener2)
+ self.conn.unsubscribe(id=TestDurableSubscription.ID)
+ conn2.unsubscribe(id="other.id")
+
+ self.listener.reset(101)
+ listener2.reset(101) ## 100 messages and 1 receipt
+
+ # send 100 messages
+ for x in xrange(0, 100):
+ self.conn.send("msg" + str(x), destination=d)
+
+ self.__subscribe(d)
+ self.__subscribe(d, conn2, "other.id")
+
+ for l in [self.listener, listener2]:
+ self.assertTrue(l.await(10))
+ self.assertEquals(100, len(l.messages))
+
+ finally:
+ conn2.stop()
+
+ def test_durable_subscribe_no_id(self):
+ d = '/topic/durable-invalid'
+
+ self.conn.subscribe(destination=d, headers={'persistent':'true'}),
+ self.listener.await(3)
+ self.assertEquals(1, len(self.listener.errors))
+ self.assertEquals("Missing Header", self.listener.errors[0]['headers']['message'])
+
+
--- /dev/null
+import unittest
+import stomp
+import base
+import time
+
+class TestErrors(base.BaseTest):
+
+ def test_invalid_queue_destination(self):
+ self.__test_invalid_destination("queue", "/bah/baz")
+
+ def test_invalid_empty_queue_destination(self):
+ self.__test_invalid_destination("queue", "")
+
+ def test_invalid_topic_destination(self):
+ self.__test_invalid_destination("topic", "/bah/baz")
+
+ def test_invalid_empty_topic_destination(self):
+ self.__test_invalid_destination("topic", "")
+
+ def test_invalid_exchange_destination(self):
+ self.__test_invalid_destination("exchange", "/bah/baz/boo")
+
+ def test_invalid_empty_exchange_destination(self):
+ self.__test_invalid_destination("exchange", "")
+
+ def test_invalid_default_exchange_destination(self):
+ self.__test_invalid_destination("exchange", "//foo")
+
+ def test_unknown_destination(self):
+ self.listener.reset()
+ self.conn.send(destination="/something/interesting")
+
+ self.assertTrue(self.listener.await())
+ self.assertEquals(1, len(self.listener.errors))
+
+ err = self.listener.errors[0]
+ self.assertEquals("Unknown destination", err['headers']['message'])
+
+ def test_send_missing_destination(self):
+ self.__test_missing_destination("SEND")
+
+ def test_send_missing_destination(self):
+ self.__test_missing_destination("SUBSCRIBE")
+
+ def __test_missing_destination(self, command):
+ self.listener.reset()
+ self.conn.send_frame(command)
+
+ self.assertTrue(self.listener.await())
+ self.assertEquals(1, len(self.listener.errors))
+
+ err = self.listener.errors[0]
+ self.assertEquals("Missing destination", err['headers']['message'])
+
+ def __test_invalid_destination(self, dtype, content):
+ self.listener.reset()
+ self.conn.send(destination="/" + dtype + content)
+
+ self.assertTrue(self.listener.await())
+ self.assertEquals(1, len(self.listener.errors))
+
+ err = self.listener.errors[0]
+ self.assertEquals("Invalid destination", err['headers']['message'])
+ self.assertEquals("'" + content + "' is not a valid " +
+ dtype + " destination\n",
+ err['message'])
+
--- /dev/null
+import unittest
+import stomp
+import base
+import time
+
+class TestLifecycle(base.BaseTest):
+    ''' Connection lifecycle tests: CONNECT/DISCONNECT variants,
+    UNSUBSCRIBE by destination and id, heart-beats and bad logins. '''
+
+    def test_unsubscribe_exchange_destination(self):
+        ''' Test UNSUBSCRIBE command with exchange'''
+        d = "/exchange/amq.fanout"
+        self.unsub_test(d, self.sub_and_send(d))
+
+    def test_unsubscribe_exchange_destination_with_receipt(self):
+        ''' Test receipted UNSUBSCRIBE command with exchange'''
+        d = "/exchange/amq.fanout"
+        self.unsub_test(d, self.sub_and_send(d, receipt="unsub.rct"), numRcts=1)
+
+    def test_unsubscribe_queue_destination(self):
+        ''' Test UNSUBSCRIBE command with queue'''
+        d = "/queue/unsub01"
+        self.unsub_test(d, self.sub_and_send(d))
+
+    def test_unsubscribe_queue_destination_with_receipt(self):
+        ''' Test receipted UNSUBSCRIBE command with queue'''
+        d = "/queue/unsub02"
+        self.unsub_test(d, self.sub_and_send(d, receipt="unsub.rct"), numRcts=1)
+
+    def test_unsubscribe_exchange_id(self):
+        ''' Test UNSUBSCRIBE command with exchange by id'''
+        d = "/exchange/amq.fanout"
+        self.unsub_test(d, self.sub_and_send(d, subid="exchid"))
+
+    def test_unsubscribe_exchange_id_with_receipt(self):
+        ''' Test receipted UNSUBSCRIBE command with exchange by id'''
+        d = "/exchange/amq.fanout"
+        self.unsub_test(d, self.sub_and_send(d, subid="exchid", receipt="unsub.rct"), numRcts=1)
+
+    def test_unsubscribe_queue_id(self):
+        ''' Test UNSUBSCRIBE command with queue by id'''
+        d = "/queue/unsub03"
+        self.unsub_test(d, self.sub_and_send(d, subid="queid"))
+
+    def test_unsubscribe_queue_id_with_receipt(self):
+        ''' Test receipted UNSUBSCRIBE command with queue by id'''
+        d = "/queue/unsub04"
+        self.unsub_test(d, self.sub_and_send(d, subid="queid", receipt="unsub.rct"), numRcts=1)
+
+    def test_connect_version_1_1(self):
+        ''' Test CONNECT with version 1.1'''
+        self.conn.disconnect()
+        new_conn = self.create_connection(version="1.1")
+        try:
+            self.assertTrue(new_conn.is_connected())
+        finally:
+            new_conn.disconnect()
+            self.assertFalse(new_conn.is_connected())
+
+    def test_heartbeat_disconnects_client(self):
+        ''' Test heart-beat disconnection'''
+        self.conn.disconnect()
+        # client promises to send a beat every 1.5s but never does;
+        # broker should eventually drop the connection
+        new_conn = self.create_connection(heartbeat="1500,0")
+        try:
+            self.assertTrue(new_conn.is_connected())
+            time.sleep(1)
+            self.assertTrue(new_conn.is_connected())
+            time.sleep(3)
+            self.assertFalse(new_conn.is_connected())
+        finally:
+            if new_conn.is_connected():
+                new_conn.disconnect()
+
+    def test_unsupported_version(self):
+        ''' Test unsupported version on CONNECT command'''
+        self.bad_connect(stomp.Connection(user="guest",
+                                          passcode="guest",
+                                          version="100.1"),
+                         "Supported versions are 1.0,1.1,1.2\n")
+
+    def test_bad_username(self):
+        ''' Test bad username'''
+        self.bad_connect(stomp.Connection(user="gust",
+                                          passcode="guest"),
+                         "Access refused for user 'gust'\n")
+
+    def test_bad_password(self):
+        ''' Test bad password'''
+        self.bad_connect(stomp.Connection(user="guest",
+                                          passcode="gust"),
+                         "Access refused for user 'guest'\n")
+
+    def test_bad_vhost(self):
+        ''' Test bad virtual host'''
+        self.bad_connect(stomp.Connection(user="guest",
+                                          passcode="guest",
+                                          virtual_host="//"),
+                         "Virtual host '//' access denied")
+
+    def bad_connect(self, new_conn, expected):
+        ''' Attempt to connect with 'new_conn' and assert the broker
+        answers with the 'expected' ERROR message. '''
+        self.conn.disconnect()
+        listener = base.WaitableListener()
+        new_conn.set_listener('', listener)
+        try:
+            new_conn.start()
+            new_conn.connect()
+            self.assertTrue(listener.await())
+            self.assertEquals(expected, listener.errors[0]['message'])
+        finally:
+            if new_conn.is_connected():
+                new_conn.disconnect()
+
+    def test_bad_header_on_send(self):
+        ''' Test disallowed header on SEND '''
+        self.listener.reset(1)
+        self.conn.send_frame("SEND", {"destination":"a", "message-id":"1"})
+        self.assertTrue(self.listener.await())
+        self.assertEquals(1, len(self.listener.errors))
+        errorReceived = self.listener.errors[0]
+        self.assertEquals("Invalid header", errorReceived['headers']['message'])
+        self.assertEquals("'message-id' is not allowed on 'SEND'.\n", errorReceived['message'])
+
+    def test_disconnect(self):
+        ''' Test DISCONNECT command'''
+        self.conn.disconnect()
+        self.assertFalse(self.conn.is_connected())
+
+    def test_disconnect_with_receipt(self):
+        ''' Test the DISCONNECT command with receipts '''
+        time.sleep(3)
+        self.listener.reset(1)
+        self.conn.send_frame("DISCONNECT", {"receipt": "test"})
+        self.assertTrue(self.listener.await())
+        self.assertEquals(1, len(self.listener.receipts))
+        receiptReceived = self.listener.receipts[0]['headers']['receipt-id']
+        self.assertEquals("test", receiptReceived
+                          , "Wrong receipt received: '" + receiptReceived + "'")
+
+    def unsub_test(self, dest, verbs, numRcts=0):
+        ''' Run (subscribe+send, unsubscribe) verb pair against 'dest'
+        and verify no further messages arrive after unsubscribing. '''
+        def afterfun():
+            self.conn.send("after-test", destination=dest)
+        subverb, unsubverb = verbs
+        self.assertListenerAfter(subverb, numMsgs=1,
+                                 errMsg="FAILED to subscribe and send")
+        self.assertListenerAfter(unsubverb, numRcts=numRcts,
+                                 errMsg="Incorrect responses from UNSUBSCRIBE")
+        self.assertListenerAfter(afterfun,
+                                 errMsg="Still receiving messages")
+
+    def sub_and_send(self, dest, subid="", receipt=""):
+        ''' Build the (subscribe+send, unsubscribe) closure pair,
+        keying the UNSUBSCRIBE on id and/or receipt as supplied. '''
+        def subfun():
+            if subid=="":
+                self.conn.subscribe(destination=dest)
+            else:
+                self.conn.subscribe(destination=dest, id=subid)
+            self.conn.send("test", destination=dest)
+        def unsubfun():
+            if subid=="" and receipt=="":
+                self.conn.unsubscribe(destination=dest)
+            elif receipt=="":
+                self.conn.unsubscribe(id=subid)
+            elif subid=="":
+                self.conn.unsubscribe(destination=dest, receipt=receipt)
+            else:
+                self.conn.unsubscribe(id=subid, receipt=receipt)
+        return subfun, unsubfun
--- /dev/null
+%% RabbitMQ STOMP adapter test configuration: declares a default user
+%% (guest/guest) and enables implicit connect, as exercised by the
+%% connect-options tests.
+[{rabbitmq_stomp, [{default_user, [{login, "guest"},
+                                   {passcode, "guest"}
+                                  ]},
+                   {implicit_connect, true}
+                  ]}
+].
--- /dev/null
+'''
+Few tests for a rabbitmq-stomp adaptor. They intend to increase code coverage
+of the erlang stomp code.
+'''
+import unittest
+import re
+import socket
+import functools
+import time
+import sys
+
+def connect(cnames):
+    ''' Decorator that creates stomp connections and issues CONNECT.
+    For each name in 'cnames' a raw TCP socket is opened to
+    (self.host, self.port), a CONNECT frame is sent, the CONNECTED
+    response is matched, and the socket is stored as an attribute of
+    that name on the test instance. All sockets are closed afterwards.
+    '''
+    cmd=('CONNECT\n'
+         'login:guest\n'
+         'passcode:guest\n'
+         '\n'
+         '\n\0')
+    resp = ('CONNECTED\n'
+            'session:(.*)\n'
+            'heart-beat:0,0\n'
+            'server:RabbitMQ/(.*)\n'
+            'version:1.0\n'
+            '\n\x00')
+    def w(m):
+        @functools.wraps(m)
+        def wrapper(self, *args, **kwargs):
+            for cname in cnames:
+                sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                # NOTE(review): settimeout takes seconds, so this is
+                # ~8.3 hours; presumably 30 was intended -- confirm
+                sd.settimeout(30000)
+                sd.connect((self.host, self.port))
+                sd.sendall(cmd)
+                self.match(resp, sd.recv(4096))
+                setattr(self, cname, sd)
+            # NOTE(review): if a later connect in the loop above fails,
+            # earlier sockets are never closed -- confirm acceptable
+            # for test code
+            try:
+                r = m(self, *args, **kwargs)
+            finally:
+                for cname in cnames:
+                    try:
+                        getattr(self, cname).close()
+                    except IOError:
+                        pass
+            return r
+        return wrapper
+    return w
+
+
+class TestParsing(unittest.TestCase):
+ host='127.0.0.1'
+ port=61613
+
+
+ def match(self, pattern, data):
+ ''' helper: try to match 'pattern' regexp with 'data' string.
+ Fail test if they don't match.
+ '''
+ matched = re.match(pattern, data)
+ if matched:
+ return matched.groups()
+ self.assertTrue(False, 'No match:\n%r\n%r' % (pattern, data) )
+
+    def recv_atleast(self, bufsize):
+        ''' Read from self.cd until 'bufsize' bytes have been received
+        (or the peer closes early) and return the concatenated data. '''
+        recvhead = []
+        rl = bufsize
+        while rl > 0:
+            buf = self.cd.recv(rl)
+            bl = len(buf)
+            if bl==0: break   # connection closed before bufsize bytes
+            recvhead.append( buf )
+            rl -= bl
+        return ''.join(recvhead)
+
+
+    @connect(['cd'])
+    def test_newline_after_nul(self):
+        ''' A newline after the frame-terminating NUL must be tolerated
+        by the parser. '''
+        self.cd.sendall('\n'
+                        'SUBSCRIBE\n'
+                        'destination:/exchange/amq.fanout\n'
+                        '\n\x00\n'
+                        'SEND\n'
+                        'content-type:text/plain\n'
+                        'destination:/exchange/amq.fanout\n\n'
+                        'hello\n\x00\n')
+        resp = ('MESSAGE\n'
+                'destination:/exchange/amq.fanout\n'
+                'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+                'content-type:text/plain\n'
+                'content-length:6\n'
+                '\n'
+                'hello\n\0')
+        self.match(resp, self.cd.recv(4096))
+
+    @connect(['cd'])
+    def test_send_without_content_type(self):
+        ''' SEND without a content-type header: the delivered MESSAGE
+        carries no content-type either. '''
+        self.cd.sendall('\n'
+                        'SUBSCRIBE\n'
+                        'destination:/exchange/amq.fanout\n'
+                        '\n\x00\n'
+                        'SEND\n'
+                        'destination:/exchange/amq.fanout\n\n'
+                        'hello\n\x00')
+        resp = ('MESSAGE\n'
+                'destination:/exchange/amq.fanout\n'
+                'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+                'content-length:6\n'
+                '\n'
+                'hello\n\0')
+        self.match(resp, self.cd.recv(4096))
+
+    @connect(['cd'])
+    def test_send_without_content_type_binary(self):
+        ''' Binary payload (including NUL bytes) with an explicit
+        content-length and no content-type must round-trip intact. '''
+        msg = u'\u0ca0\ufffd\x00\n\x01hello\x00'.encode('utf-8')
+        self.cd.sendall('\n'
+                        'SUBSCRIBE\n'
+                        'destination:/exchange/amq.fanout\n'
+                        '\n\x00\n'
+                        'SEND\n'
+                        'destination:/exchange/amq.fanout\n'
+                        'content-length:'+str(len(msg))+'\n\n'
+                        + msg + '\x00')
+        resp = ('MESSAGE\n'
+                'destination:/exchange/amq.fanout\n'
+                'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+                'content-length:'+str(len(msg))+'\n'
+                '\n'
+                + msg + '\0')
+        self.match(resp, self.cd.recv(4096))
+
+    @connect(['cd'])
+    def test_newline_after_nul_and_leading_nul(self):
+        ''' Both a trailing newline after NUL and a stray NUL before a
+        command must be tolerated by the parser. '''
+        self.cd.sendall('\n'
+                        '\x00SUBSCRIBE\n'
+                        'destination:/exchange/amq.fanout\n'
+                        '\n\x00\n'
+                        '\x00SEND\n'
+                        'destination:/exchange/amq.fanout\n'
+                        'content-type:text/plain\n'
+                        '\nhello\n\x00\n')
+        resp = ('MESSAGE\n'
+                'destination:/exchange/amq.fanout\n'
+                'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+                'content-type:text/plain\n'
+                'content-length:6\n'
+                '\n'
+                'hello\n\0')
+        self.match(resp, self.cd.recv(4096))
+
+ @connect(['cd'])
+ def test_bad_command(self):
+ ''' Trigger an error message. '''
+ # An unknown command must produce an ERROR frame naming the bad
+ # command and advertising the supported protocol versions.
+ self.cd.sendall('WRONGCOMMAND\n'
+ 'destination:a\n'
+ 'exchange:amq.fanout\n'
+ '\n\0')
+ resp = ('ERROR\n'
+ 'message:Bad command\n'
+ 'content-type:text/plain\n'
+ 'version:1.0,1.1,1.2\n'
+ 'content-length:43\n'
+ '\n'
+ 'Could not interpret command "WRONGCOMMAND"\n'
+ '\0')
+ self.match(resp, self.cd.recv(4096))
+
+ @connect(['sd', 'cd1', 'cd2'])
+ def test_broadcast(self):
+ ''' Single message should be delivered to two consumers:
+ amq.topic --routing_key--> first_queue --> first_connection
+ \--routing_key--> second_queue--> second_connection
+ '''
+ subscribe=( 'SUBSCRIBE\n'
+ 'id: XsKNhAf\n'
+ 'destination:/exchange/amq.topic/da9d4779\n'
+ '\n\0')
+ # Subscribe on both consumer connections.
+ for cd in [self.cd1, self.cd2]:
+ cd.sendall(subscribe)
+
+ # Give the broker a moment to establish both subscriptions before
+ # publishing; otherwise the message may be dropped.
+ time.sleep(0.1)
+
+ self.sd.sendall('SEND\n'
+ 'content-type:text/plain\n'
+ 'destination:/exchange/amq.topic/da9d4779\n'
+ '\n'
+ 'message'
+ '\n\0')
+
+ # Both consumers should receive the same MESSAGE frame;
+ # subscription and message-id are matched as regexp groups.
+ resp=('MESSAGE\n'
+ 'subscription:(.*)\n'
+ 'destination:/topic/da9d4779\n'
+ 'message-id:(.*)\n'
+ 'content-type:text/plain\n'
+ 'content-length:8\n'
+ '\n'
+ 'message'
+ '\n\x00')
+ for cd in [self.cd1, self.cd2]:
+ self.match(resp, cd.recv(4096))
+
+
+ @connect(['cd'])
+ def test_huge_message(self):
+ ''' Test sending/receiving huge (16MB) message. '''
+ subscribe=( 'SUBSCRIBE\n'
+ 'id: xxx\n'
+ 'destination:/exchange/amq.topic/test_huge_message\n'
+ '\n\0')
+ self.cd.sendall(subscribe)
+
+ message = 'x' * 1024*1024*16
+
+ self.cd.sendall('SEND\n'
+ 'destination:/exchange/amq.topic/test_huge_message\n'
+ 'content-type:text/plain\n'
+ '\n'
+ '%s'
+ '\0' % message)
+
+ # Only the first 8000 bytes of the body are embedded in the
+ # pattern; the rest is matched by the trailing (.*) group.
+ resp=('MESSAGE\n'
+ 'subscription:(.*)\n'
+ 'destination:/topic/test_huge_message\n'
+ 'message-id:(.*)\n'
+ 'content-type:text/plain\n'
+ 'content-length:%i\n'
+ '\n'
+ '%s(.*)'
+ % (len(message), message[:8000]) )
+
+ # Drain the socket until the NUL frame terminator arrives.
+ recv = []
+ s = 0
+ while len(recv) < 1 or recv[-1][-1] != '\0':
+ buf = self.cd.recv(4096*16)
+ s += len(buf)
+ recv.append( buf )
+ buf = ''.join(recv)
+
+ # Matching a regexp against the full 16MB body is way too
+ # expensive; check only the frame header plus the first 8KB.
+ self.match(resp, buf[:8192])
+ self.assertEqual(len(buf) > len(message), True)
+
+ @connect(['cd'])
+ def test_message_with_embedded_nulls(self):
+ ''' Test sending/receiving message with embedded nulls. '''
+ dest='destination:/exchange/amq.topic/test_embed_nulls_message\n'
+ resp_dest='destination:/topic/test_embed_nulls_message\n'
+ subscribe=( 'SUBSCRIBE\n'
+ 'id:xxx\n'
+ +dest+
+ '\n\0')
+ self.cd.sendall(subscribe)
+
+ # Build a body with NUL bytes scattered at assorted offsets; the
+ # explicit content-length header lets the parser read past them.
+ boilerplate = '0123456789'*1024 # large enough boilerplate
+ message = '01'
+ oldi = 2
+ for i in [5, 90, 256-1, 384-1, 512, 1024, 1024+256+64+32]:
+ message = message + '\0' + boilerplate[oldi+1:i]
+ oldi = i
+ msg_len = len(message)
+
+ self.cd.sendall('SEND\n'
+ +dest+
+ 'content-type:text/plain\n'
+ 'content-length:%i\n'
+ '\n'
+ '%s'
+ '\0' % (len(message), message) )
+
+ # Match only the header part; the (.*)$ group captures whatever
+ # prefix of the body happened to arrive in the same read.
+ headresp=('MESSAGE\n' # 8
+ 'subscription:(.*)\n' # 14 + subscription
+ +resp_dest+ # 44
+ 'message-id:(.*)\n' # 12 + message-id
+ 'content-type:text/plain\n' # 24
+ 'content-length:%i\n' # 16 + 4==len('1024')
+ '\n' # 1
+ '(.*)$' # prefix of body+null (potentially)
+ % len(message) )
+ # Estimated header size; parenthesised terms are the variable
+ # parts (subscription id, message-id, length digits, etc.).
+ headlen = 8 + 24 + 14 + (3) + 44 + 12 + (48) + 16 + (4) + 1 + (1)
+
+ headbuf = self.recv_atleast(headlen)
+ self.assertFalse(len(headbuf) == 0)
+
+ (sub, msg_id, bodyprefix) = self.match(headresp, headbuf)
+ bodyresp=( '%s\0' % message )
+ bodylen = len(bodyresp);
+
+ # Read the remainder of the body (beyond the prefix captured
+ # together with the headers) and verify an exact round-trip.
+ bodybuf = ''.join([bodyprefix,
+ self.recv_atleast(bodylen - len(bodyprefix))])
+
+ self.assertEqual(len(bodybuf), msg_len+1,
+ "body received not the same length as message sent")
+ self.assertEqual(bodybuf, bodyresp,
+ " body (...'%s')\nincorrectly returned as (...'%s')"
+ % (bodyresp[-10:], bodybuf[-10:]))
+
+ @connect(['cd'])
+ def test_message_in_packets(self):
+ ''' Test sending/receiving message in packets. '''
+ base_dest='topic/test_embed_nulls_message\n'
+ dest='destination:/exchange/amq.' + base_dest
+ resp_dest='destination:/'+ base_dest
+ subscribe=( 'SUBSCRIBE\n'
+ 'id:xxx\n'
+ +dest+
+ '\n\0')
+ self.cd.sendall(subscribe)
+
+ boilerplate = '0123456789'*1024 # large enough boilerplate
+
+ message = boilerplate[:1024 + 512 + 256 + 32]
+ msg_len = len(message)
+
+ msg_to_send = ('SEND\n'
+ +dest+
+ 'content-type:text/plain\n'
+ '\n'
+ '%s'
+ '\0' % (message) )
+ # Send the frame in small chunks with pauses in between, so the
+ # broker's parser must handle frames split across TCP packets.
+ # 191 is deliberately not a divisor of the frame length.
+ packet_size = 191
+ part_index = 0
+ msg_to_send_len = len(msg_to_send)
+ while part_index < msg_to_send_len:
+ part = msg_to_send[part_index:part_index+packet_size]
+ time.sleep(0.1)
+ self.cd.sendall(part)
+ part_index += packet_size
+
+ # Match only the header part; the (.*)$ group captures whatever
+ # prefix of the body arrived in the same read.
+ headresp=('MESSAGE\n' # 8
+ 'subscription:(.*)\n' # 14 + subscription
+ +resp_dest+ # 44
+ 'message-id:(.*)\n' # 12 + message-id
+ 'content-type:text/plain\n' # 24
+ 'content-length:%i\n' # 16 + 4==len('1024')
+ '\n' # 1
+ '(.*)$' # prefix of body+null (potentially)
+ % len(message) )
+ headlen = 8 + 24 + 14 + (3) + 44 + 12 + (48) + 16 + (4) + 1 + (1)
+
+ headbuf = self.recv_atleast(headlen)
+ self.assertFalse(len(headbuf) == 0)
+
+ (sub, msg_id, bodyprefix) = self.match(headresp, headbuf)
+ bodyresp=( '%s\0' % message )
+ bodylen = len(bodyresp);
+
+ # Read the rest of the body and verify an exact round-trip.
+ bodybuf = ''.join([bodyprefix,
+ self.recv_atleast(bodylen - len(bodyprefix))])
+
+ self.assertEqual(len(bodybuf), msg_len+1,
+ "body received not the same length as message sent")
+ self.assertEqual(bodybuf, bodyresp,
+ " body ('%s')\nincorrectly returned as ('%s')"
+ % (bodyresp, bodybuf))
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Integration tests exercising STOMP against server-named AMQP queues
+%% (the /amq/queue/... destination form). Each test is run once per
+%% supported STOMP protocol version.
+-module(rabbit_stomp_amqqueue_test).
+-export([all_tests/0]).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-define(QUEUE, <<"TestQueue">>).
+-define(DESTINATION, "/amq/queue/TestQueue").
+
+%% Run every test fun for every supported STOMP version; each test
+%% must return ok or the match here fails the suite.
+all_tests() ->
+ [[ok = run_test(TestFun, Version)
+ || TestFun <- [fun test_subscribe_error/3,
+ fun test_subscribe/3,
+ fun test_unsubscribe_ack/3,
+ fun test_subscribe_ack/3,
+ fun test_send/3,
+ fun test_delete_queue_subscribe/3,
+ fun test_temp_destination_queue/3,
+ fun test_temp_destination_in_send/3,
+ fun test_blank_destination_in_send/3]]
+ || Version <- ?SUPPORTED_VERSIONS],
+ ok.
+
+%% Set up a fresh direct AMQP connection/channel and STOMP client for
+%% each test; tear everything down afterwards regardless of outcome
+%% (the catch turns a test crash into a returned term).
+run_test(TestFun, Version) ->
+ {ok, Connection} = amqp_connection:start(#amqp_params_direct{}),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Client} = rabbit_stomp_client:connect(Version),
+
+ Result = (catch TestFun(Channel, Client, Version)),
+
+ rabbit_stomp_client:disconnect(Client),
+ amqp_channel:close(Channel),
+ amqp_connection:close(Connection),
+ Result.
+
+%% SUBSCRIBE to an /amq/queue destination whose queue does not exist
+%% must yield an ERROR frame with message "not_found".
+test_subscribe_error(_Channel, Client, _Version) ->
+ %% SUBSCRIBE to missing queue
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "not_found" = proplists:get_value("message", Hdrs),
+ ok.
+
+%% SUBSCRIBE to an existing queue, then publish via AMQP and expect
+%% the message to arrive as a STOMP MESSAGE frame.
+test_subscribe(Channel, Client, _Version) ->
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}, {"receipt", "foo"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from amqp
+ Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+
+ amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{},
+ payload = <<"hello">>}),
+
+ {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+ ok.
+
+%% ACKing a message after the subscription has been removed must fail:
+%% the server is expected to reply with an ERROR frame.
+test_unsubscribe_ack(Channel, Client, Version) ->
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION},
+ {"receipt", "rcpt1"},
+ {"ack", "client"},
+ {"id", "subscription-id"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from amqp
+ Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+
+ amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{},
+ payload = <<"hello">>}),
+
+ {ok, Client2, Hdrs1, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+
+ rabbit_stomp_client:send(
+ Client2, "UNSUBSCRIBE", [{"destination", ?DESTINATION},
+ {"id", "subscription-id"}]),
+
+ %% ACK using the version-specific ack header name/value; the
+ %% subscription is already gone, so this must be rejected.
+ rabbit_stomp_client:send(
+ Client2, "ACK", [{rabbit_stomp_util:ack_header_name(Version),
+ proplists:get_value(
+ rabbit_stomp_util:msg_header_name(Version), Hdrs1)},
+ {"receipt", "rcpt2"}]),
+
+ {ok, _Client3, Hdrs2, _Body2} = stomp_receive(Client2, "ERROR"),
+ ?assertEqual("Subscription not found",
+ proplists:get_value("message", Hdrs2)),
+ ok.
+
+%% Client-ack mode round trip: receive a MESSAGE, ACK it, and verify
+%% via basic.get that the queue is empty afterwards.
+test_subscribe_ack(Channel, Client, Version) ->
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION},
+ {"receipt", "foo"},
+ {"ack", "client"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from amqp
+ Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+
+ amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{},
+ payload = <<"hello">>}),
+
+ {ok, _Client2, Headers, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+ %% the 'ack' header must be present exactly when the version is 1.2
+ false = (Version == "1.2") xor proplists:is_defined(?HEADER_ACK, Headers),
+
+ MsgHeader = rabbit_stomp_util:msg_header_name(Version),
+ AckValue = proplists:get_value(MsgHeader, Headers),
+ AckHeader = rabbit_stomp_util:ack_header_name(Version),
+
+ rabbit_stomp_client:send(Client, "ACK", [{AckHeader, AckValue}]),
+ #'basic.get_empty'{} =
+ amqp_channel:call(Channel, #'basic.get'{queue = ?QUEUE}),
+ ok.
+
+%% SEND to an /amq/queue destination is delivered back to a STOMP
+%% subscriber on the same queue.
+test_send(Channel, Client, _Version) ->
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}, {"receipt", "foo"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from stomp
+ rabbit_stomp_client:send(
+ Client1, "SEND", [{"destination", ?DESTINATION}], ["hello"]),
+
+ {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+ ok.
+
+%% Deleting the underlying queue while a STOMP client is subscribed
+%% must produce an ERROR frame naming the subscription.
+test_delete_queue_subscribe(Channel, Client, _Version) ->
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}, {"receipt", "bah"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% delete queue while subscribed
+ #'queue.delete_ok'{} =
+ amqp_channel:call(Channel, #'queue.delete'{queue = ?QUEUE}),
+
+ {ok, _Client2, Headers, _} = stomp_receive(Client1, "ERROR"),
+
+ ?DESTINATION = proplists:get_value("subscription", Headers),
+
+ % server closes connection
+ ok.
+
+%% reply-to via /temp-queue: the broker should rewrite reply-to into a
+%% real (server-named) queue; replying to it via AMQP must route the
+%% "pong" back to the STOMP client as a MESSAGE.
+test_temp_destination_queue(Channel, Client, _Version) ->
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+ rabbit_stomp_client:send( Client, "SEND", [{"destination", ?DESTINATION},
+ {"reply-to", "/temp-queue/foo"}],
+ ["ping"]),
+ amqp_channel:call(Channel,#'basic.consume'{queue = ?QUEUE, no_ack = true}),
+ receive #'basic.consume_ok'{consumer_tag = _Tag} -> ok end,
+ receive {#'basic.deliver'{delivery_tag = _DTag},
+ #'amqp_msg'{payload = <<"ping">>,
+ props = #'P_basic'{reply_to = ReplyTo}}} -> ok
+ end,
+ ok = amqp_channel:call(Channel,
+ #'basic.publish'{routing_key = ReplyTo},
+ #amqp_msg{payload = <<"pong">>}),
+ {ok, _Client1, _, [<<"pong">>]} = stomp_receive(Client, "MESSAGE"),
+ ok.
+
+%% /temp-queue is only legal in reply-to, not as a SEND destination.
+test_temp_destination_in_send(_Channel, Client, _Version) ->
+ rabbit_stomp_client:send( Client, "SEND", [{"destination", "/temp-queue/foo"}],
+ ["poing"]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "Invalid destination" = proplists:get_value("message", Hdrs),
+ ok.
+
+%% An empty destination string must be rejected with an ERROR frame.
+test_blank_destination_in_send(_Channel, Client, _Version) ->
+ rabbit_stomp_client:send( Client, "SEND", [{"destination", ""}],
+ ["poing"]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "Invalid destination" = proplists:get_value("message", Hdrs),
+ ok.
+
+%% Receive the next frame and assert its command; returns the updated
+%% client state plus the frame's headers and body.
+stomp_receive(Client, Command) ->
+ {#stomp_frame{command = Command,
+ headers = Hdrs,
+ body_iolist = Body}, Client1} =
+ rabbit_stomp_client:recv(Client),
+ {ok, Client1, Hdrs, Body}.
+
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developers of the Original Code are Rabbit Technologies Ltd.
+%%
+%% Copyright (C) 2011 Rabbit Technologies Ltd.
+%%
+%% All Rights Reserved.
+%%
+%% Contributor(s): ______________________________________.
+%%
+
+%% The stupidest client imaginable, just for testing.
+
+-module(rabbit_stomp_client).
+
+-export([connect/0, connect/1, disconnect/1, send/2, send/3, send/4, recv/1]).
+
+-include("rabbit_stomp_frame.hrl").
+
+-define(TIMEOUT, 1000). % milliseconds
+
+%% Client state is {Socket, PendingFrames}: frames already parsed from
+%% the TCP stream but not yet handed to the caller.
+
+%% connect/0 negotiates no version; connect/1 sends accept-version.
+connect() -> connect0([]).
+connect(V) -> connect0([{"accept-version", V}]).
+
+%% Open a passive-mode TCP connection to the local broker, log in with
+%% the default guest credentials, and wait for the CONNECTED frame.
+connect0(Version) ->
+ {ok, Sock} = gen_tcp:connect(localhost, 61613, [{active, false}, binary]),
+ Client0 = recv_state(Sock),
+ send(Client0, "CONNECT", [{"login", "guest"},
+ {"passcode", "guest"} | Version]),
+ {#stomp_frame{command = "CONNECTED"}, Client1} = recv(Client0),
+ {ok, Client1}.
+
+%% Send DISCONNECT then close the socket; pending frames are dropped.
+disconnect(Client = {Sock, _}) ->
+ send(Client, "DISCONNECT"),
+ gen_tcp:close(Sock).
+
+%% send/2,3 are conveniences for headerless/bodyless frames.
+send(Client, Command) ->
+ send(Client, Command, []).
+
+send(Client, Command, Headers) ->
+ send(Client, Command, Headers, []).
+
+%% Serialize and transmit a single STOMP frame.
+send({Sock, _}, Command, Headers, Body) ->
+ Frame = rabbit_stomp_frame:serialize(
+ #stomp_frame{command = list_to_binary(Command),
+ headers = Headers,
+ body_iolist = Body}),
+ gen_tcp:send(Sock, Frame).
+
+recv_state(Sock) ->
+ {Sock, []}.
+
+%% Return the next frame: serve it from the buffer if one is pending,
+%% otherwise read and parse more data from the socket.
+recv({_Sock, []} = Client) ->
+ recv(Client, rabbit_stomp_frame:initial_state(), 0);
+recv({Sock, [Frame | Frames]}) ->
+ {Frame, {Sock, Frames}}.
+
+recv(Client = {Sock, _}, FrameState, Length) ->
+ {ok, Payload} = gen_tcp:recv(Sock, Length, ?TIMEOUT),
+ parse(Payload, Client, FrameState, Length).
+
+%% Feed received bytes to the frame parser. One TCP payload may hold
+%% several frames: completed frames are accumulated (in reverse) and
+%% buffered; a partial frame causes another socket read.
+parse(Payload, Client = {Sock, FramesRev}, FrameState, Length) ->
+ case rabbit_stomp_frame:parse(Payload, FrameState) of
+ {ok, Frame, <<>>} ->
+ recv({Sock, lists:reverse([Frame | FramesRev])});
+ {ok, Frame, Rest} ->
+ parse(Rest, {Sock, [Frame | FramesRev]},
+ rabbit_stomp_frame:initial_state(), Length);
+ {more, NewState} ->
+ recv(Client, NewState, 0)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developers of the Original Code are Rabbit Technologies Ltd.
+%%
+%% Copyright (C) 2011 Rabbit Technologies Ltd.
+%%
+%% All Rights Reserved.
+%%
+%% Contributor(s): ______________________________________.
+%%
+-module(rabbit_stomp_publish_test).
+
+-export([run/0]).
+
+-include("rabbit_stomp_frame.hrl").
+
+-define(DESTINATION, "/queue/test").
+
+-define(MICROS_PER_UPDATE, 5000000).
+-define(MICROS_PER_UPDATE_MSG, 100000).
+-define(MICROS_PER_SECOND, 1000000).
+
+%% A very simple publish-and-consume-as-fast-as-you-can test.
+
+%% Spawn a publisher and a consumer process over two STOMP
+%% connections, then sit in report/0 printing throughput. Counters are
+%% kept in the process dictionary of this (reporting) process.
+%% NOTE(review): erlang:now/0 is deprecated in modern OTP; newer code
+%% would use erlang:monotonic_time/1 -- left as-is here.
+run() ->
+ [put(K, 0) || K <- [sent, recd, last_sent, last_recd]],
+ put(last_ts, erlang:now()),
+ {ok, Pub} = rabbit_stomp_client:connect(),
+ {ok, Recv} = rabbit_stomp_client:connect(),
+ Self = self(),
+ spawn(fun() -> publish(Self, Pub, 0, erlang:now()) end),
+ rabbit_stomp_client:send(
+ Recv, "SUBSCRIBE", [{"destination", ?DESTINATION}]),
+ spawn(fun() -> recv(Self, Recv, 0, erlang:now()) end),
+ report().
+
+%% Collect {sent,N}/{recd,N} updates and print send/receive rates
+%% roughly every ?MICROS_PER_UPDATE. Loops forever.
+report() ->
+ receive
+ {sent, C} -> put(sent, C);
+ {recd, C} -> put(recd, C)
+ end,
+ Diff = timer:now_diff(erlang:now(), get(last_ts)),
+ case Diff > ?MICROS_PER_UPDATE of
+ true -> S = get(sent) - get(last_sent),
+ R = get(recd) - get(last_recd),
+ put(last_sent, get(sent)),
+ put(last_recd, get(recd)),
+ put(last_ts, erlang:now()),
+ io:format("Send ~p msg/s | Recv ~p msg/s~n",
+ [trunc(S * ?MICROS_PER_SECOND / Diff),
+ trunc(R * ?MICROS_PER_SECOND / Diff)]);
+ false -> ok
+ end,
+ report().
+
+%% Publish sequence numbers as fast as possible, notifying the owner
+%% of the running count about every ?MICROS_PER_UPDATE_MSG.
+publish(Owner, Client, Count, TS) ->
+ rabbit_stomp_client:send(
+ Client, "SEND", [{"destination", ?DESTINATION}],
+ [integer_to_list(Count)]),
+ Diff = timer:now_diff(erlang:now(), TS),
+ case Diff > ?MICROS_PER_UPDATE_MSG of
+ true -> Owner ! {sent, Count + 1},
+ publish(Owner, Client, Count + 1, erlang:now());
+ false -> publish(Owner, Client, Count + 1, TS)
+ end.
+
+%% Consume messages, asserting that each body equals the expected
+%% sequence number (i.e. ordered, lossless delivery), and report the
+%% running count to the owner periodically.
+recv(Owner, Client0, Count, TS) ->
+ {#stomp_frame{body_iolist = Body}, Client1} =
+ rabbit_stomp_client:recv(Client0),
+ BodyInt = list_to_integer(binary_to_list(iolist_to_binary(Body))),
+ Count = BodyInt,
+ Diff = timer:now_diff(erlang:now(), TS),
+ case Diff > ?MICROS_PER_UPDATE_MSG of
+ true -> Owner ! {recd, Count + 1},
+ recv(Owner, Client1, Count + 1, erlang:now());
+ false -> recv(Owner, Client1, Count + 1, TS)
+ end.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_test).
+-export([all_tests/0]).
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp_frame.hrl").
+-define(DESTINATION, "/queue/bulk-test").
+
+all_tests() ->
+ test_messages_not_dropped_on_disconnect(),
+ ok.
+
+%% Fire 1000 SENDs and disconnect immediately; after a settling pause
+%% the queue must contain all 1000 messages, i.e. a client disconnect
+%% must not discard frames that were already written to the socket.
+test_messages_not_dropped_on_disconnect() ->
+ {ok, Client} = rabbit_stomp_client:connect(),
+ [rabbit_stomp_client:send(
+ Client, "SEND", [{"destination", ?DESTINATION}],
+ [integer_to_list(Count)]) || Count <- lists:seq(1, 1000)],
+ rabbit_stomp_client:disconnect(Client),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"bulk-test">>),
+ %% allow the server time to process everything it received
+ timer:sleep(3000),
+ rabbit_amqqueue:with(
+ QName, fun(Q) ->
+ 1000 = pget(messages, rabbit_amqqueue:info(Q, [messages]))
+ end),
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% EUnit tests for the STOMP frame parser/serializer
+%% (rabbit_stomp_frame): terminators, content-length handling,
+%% resumable parsing, header edge cases and escaping.
+-module(rabbit_stomp_test_frame).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+%% A simple frame parses identically with LF or CRLF line endings.
+parse_simple_frame_test() ->
+ parse_simple_frame_gen("\n").
+
+parse_simple_frame_crlf_test() ->
+ parse_simple_frame_gen("\r\n").
+
+%% Build a frame with the given line terminator, parse it, and check
+%% command, every header and the body all round-trip.
+parse_simple_frame_gen(Term) ->
+ Headers = [{"header1", "value1"}, {"header2", "value2"}],
+ Content = frame_string("COMMAND",
+ Headers,
+ "Body Content",
+ Term),
+ {"COMMAND", Frame, _State} = parse_complete(Content),
+ [?assertEqual({ok, Value},
+ rabbit_stomp_frame:header(Frame, Key)) ||
+ {Key, Value} <- Headers],
+ #stomp_frame{body_iolist = Body} = Frame,
+ ?assertEqual(<<"Body Content">>, iolist_to_binary(Body)).
+
+%% With content-length set, a NUL inside the body must not terminate
+%% the frame.
+parse_simple_frame_with_null_test() ->
+ Headers = [{"header1", "value1"}, {"header2", "value2"},
+ {?HEADER_CONTENT_LENGTH, "12"}],
+ Content = frame_string("COMMAND",
+ Headers,
+ "Body\0Content"),
+ {"COMMAND", Frame, _State} = parse_complete(Content),
+ [?assertEqual({ok, Value},
+ rabbit_stomp_frame:header(Frame, Key)) ||
+ {Key, Value} <- Headers],
+ #stomp_frame{body_iolist = Body} = Frame,
+ ?assertEqual(<<"Body\0Content">>, iolist_to_binary(Body)).
+
+%% As above but with a ~10KB body full of NULs.
+parse_large_content_frame_with_nulls_test() ->
+ BodyContent = string:copies("012345678\0", 1024),
+ Headers = [{"header1", "value1"}, {"header2", "value2"},
+ {?HEADER_CONTENT_LENGTH, integer_to_list(string:len(BodyContent))}],
+ Content = frame_string("COMMAND",
+ Headers,
+ BodyContent),
+ {"COMMAND", Frame, _State} = parse_complete(Content),
+ [?assertEqual({ok, Value},
+ rabbit_stomp_frame:header(Frame, Key)) ||
+ {Key, Value} <- Headers],
+ #stomp_frame{body_iolist = Body} = Frame,
+ ?assertEqual(list_to_binary(BodyContent), iolist_to_binary(Body)).
+
+%% Frames with no headers and no body are legal.
+parse_command_only_test() ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("COMMAND\n\n\0").
+
+%% Leading NULs between frames are skipped.
+parse_ignore_empty_frames_test() ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\0\0COMMAND\n\n\0").
+
+%% A bare LF between frames is a heartbeat and is ignored.
+parse_heartbeat_interframe_test() ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\nCOMMAND\n\n\0").
+
+%% CRLF between frames is also tolerated.
+parse_crlf_interframe_test() ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\r\nCOMMAND\n\n\0").
+
+%% A CR not followed by LF is an error, wherever it occurs.
+parse_carriage_return_not_ignored_interframe_test() ->
+ {error, {unexpected_chars_between_frames, "\rC"}} = parse("\rCOMMAND\n\n\0").
+
+parse_carriage_return_mid_command_test() ->
+ {error, {unexpected_chars_in_command, "\rA"}} = parse("COMM\rAND\n\n\0").
+
+parse_carriage_return_end_command_test() ->
+ {error, {unexpected_chars_in_command, "\r\r"}} = parse("COMMAND\r\r\n\n\0").
+
+%% The parser state returned by {more, State} must allow resuming
+%% mid-command, mid-header-key, mid-header-value and mid-body.
+parse_resume_mid_command_test() ->
+ First = "COMM",
+ Second = "AND\n\n\0",
+ {more, Resume} = parse(First),
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse(Second, Resume).
+
+parse_resume_mid_header_key_test() ->
+ First = "COMMAND\nheade",
+ Second = "r1:value1\n\n\0",
+ {more, Resume} = parse(First),
+ {ok, Frame = #stomp_frame{command = "COMMAND"}, _Rest} =
+ parse(Second, Resume),
+ ?assertEqual({ok, "value1"},
+ rabbit_stomp_frame:header(Frame, "header1")).
+
+parse_resume_mid_header_val_test() ->
+ First = "COMMAND\nheader1:val",
+ Second = "ue1\n\n\0",
+ {more, Resume} = parse(First),
+ {ok, Frame = #stomp_frame{command = "COMMAND"}, _Rest} =
+ parse(Second, Resume),
+ ?assertEqual({ok, "value1"},
+ rabbit_stomp_frame:header(Frame, "header1")).
+
+parse_resume_mid_body_test() ->
+ First = "COMMAND\n\nABC",
+ Second = "DEF\0",
+ {more, Resume} = parse(First),
+ {ok, #stomp_frame{command = "COMMAND", body_iolist = Body}, _Rest} =
+ parse(Second, Resume),
+ ?assertEqual([<<"ABC">>, <<"DEF">>], Body).
+
+%% Header values are taken verbatim: surrounding spaces are preserved.
+parse_no_header_stripping_test() ->
+ Content = "COMMAND\nheader: foo \n\n\0",
+ {ok, Frame, _} = parse(Content),
+ {ok, Val} = rabbit_stomp_frame:header(Frame, "header"),
+ ?assertEqual(" foo ", Val).
+
+%% For repeated headers the first occurrence wins.
+parse_multiple_headers_test() ->
+ Content = "COMMAND\nheader:correct\nheader:incorrect\n\n\0",
+ {ok, Frame, _} = parse(Content),
+ {ok, Val} = rabbit_stomp_frame:header(Frame, "header"),
+ ?assertEqual("correct", Val).
+
+%% A header line without a colon is rejected.
+header_no_colon_test() ->
+ Content = "COMMAND\n"
+ "hdr1:val1\n"
+ "hdrerror\n"
+ "hdr2:val2\n"
+ "\n\0",
+ ?assertEqual(parse(Content), {error, {header_no_value, "hdrerror"}}).
+
+%% Escape sequences are decoded once in header names/values and never
+%% in the command.
+no_nested_escapes_test() ->
+ Content = "COM\\\\rAND\n" % no escapes
+ "hdr\\\\rname:" % one escape
+ "hdr\\\\rval\n\n\0", % one escape
+ {ok, Frame, _} = parse(Content),
+ ?assertEqual(Frame,
+ #stomp_frame{command = "COM\\\\rAND",
+ headers = [{"hdr\\rname", "hdr\\rval"}],
+ body_iolist = []}).
+
+header_name_with_cr_test() ->
+ Content = "COMMAND\nhead\rer:val\n\n\0",
+ {error, {unexpected_chars_in_header, "\re"}} = parse(Content).
+
+header_value_with_cr_test() ->
+ Content = "COMMAND\nheader:val\rue\n\n\0",
+ {error, {unexpected_chars_in_header, "\ru"}} = parse(Content).
+
+%% Only the first colon separates name from value; later colons are
+%% part of the value.
+header_value_with_colon_test() ->
+ Content = "COMMAND\nheader:val:ue\n\n\0",
+ {ok, Frame, _} = parse(Content),
+ ?assertEqual(Frame,
+ #stomp_frame{ command = "COMMAND",
+ headers = [{"header", "val:ue"}],
+ body_iolist = []}).
+
+%% parse then serialize must reproduce the escaped wire form exactly.
+headers_escaping_roundtrip_test() ->
+ Content = "COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0",
+ {ok, Frame, _} = parse(Content),
+ {ok, Val} = rabbit_stomp_frame:header(Frame, "head\r:\ner"),
+ ?assertEqual(":\n\r\\", Val),
+ Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame)),
+ ?assertEqual(Content, rabbit_misc:format("~s", [Serialized])).
+
+%% -- helpers ---------------------------------------------------------
+
+parse(Content) ->
+ parse(Content, rabbit_stomp_frame:initial_state()).
+parse(Content, State) ->
+ rabbit_stomp_frame:parse(list_to_binary(Content), State).
+
+%% Parse and assert completion, returning command/frame/parser state.
+parse_complete(Content) ->
+ {ok, Frame = #stomp_frame{command = Command}, State} = parse(Content),
+ {Command, Frame, State}.
+
+frame_string(Command, Headers, BodyContent) ->
+ frame_string(Command, Headers, BodyContent, "\n").
+
+%% Assemble a wire-format frame string with the given line terminator.
+frame_string(Command, Headers, BodyContent, Term) ->
+ HeaderString =
+ lists:flatten([Key ++ ":" ++ Value ++ Term || {Key, Value} <- Headers]),
+ Command ++ Term ++ HeaderString ++ Term ++ BodyContent ++ "\0".
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_stomp_test_util).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
+-include("rabbit_stomp_frame.hrl").
+
+%%--------------------------------------------------------------------
+%% Header Processing Tests
+%%--------------------------------------------------------------------
+
+%% longstr_field/2 wraps a name/value pair as an AMQP longstr field.
+longstr_field_test() ->
+ {<<"ABC">>, longstr, <<"DEF">>} =
+ rabbit_stomp_util:longstr_field("ABC", "DEF").
+
+%% STOMP headers map onto #'P_basic'{} fields; unknown headers become
+%% longstr entries in the AMQP headers table.
+message_properties_test() ->
+ Headers = [
+ {"content-type", "text/plain"},
+ {"content-encoding", "UTF-8"},
+ {"persistent", "true"},
+ {"priority", "1"},
+ {"correlation-id", "123"},
+ {"reply-to", "something"},
+ {"expiration", "my-expiration"},
+ {"amqp-message-id", "M123"},
+ {"timestamp", "123456"},
+ {"type", "freshly-squeezed"},
+ {"user-id", "joe"},
+ {"app-id", "joe's app"},
+ {"str", "foo"},
+ {"int", "123"}
+ ],
+
+ #'P_basic'{
+ content_type = <<"text/plain">>,
+ content_encoding = <<"UTF-8">>,
+ delivery_mode = 2,
+ priority = 1,
+ correlation_id = <<"123">>,
+ reply_to = <<"something">>,
+ expiration = <<"my-expiration">>,
+ message_id = <<"M123">>,
+ timestamp = 123456,
+ type = <<"freshly-squeezed">>,
+ user_id = <<"joe">>,
+ app_id = <<"joe's app">>,
+ headers = [{<<"str">>, longstr, <<"foo">>},
+ {<<"int">>, longstr, <<"123">>}]
+ } =
+ rabbit_stomp_util:message_properties(#stomp_frame{headers = Headers}).
+
+%% Reverse direction: #'P_basic'{} fields become STOMP headers. The
+%% lists:subtract assertion checks Headers is a subset of Expected,
+%% not strict equality.
+message_headers_test() ->
+ Properties = #'P_basic'{
+ headers = [{<<"str">>, longstr, <<"foo">>},
+ {<<"int">>, signedint, 123}],
+ content_type = <<"text/plain">>,
+ content_encoding = <<"UTF-8">>,
+ delivery_mode = 2,
+ priority = 1,
+ correlation_id = 123,
+ reply_to = <<"something">>,
+ message_id = <<"M123">>,
+ timestamp = 123456,
+ type = <<"freshly-squeezed">>,
+ user_id = <<"joe">>,
+ app_id = <<"joe's app">>},
+
+ Headers = rabbit_stomp_util:message_headers(Properties),
+
+ Expected = [
+ {"content-type", "text/plain"},
+ {"content-encoding", "UTF-8"},
+ {"persistent", "true"},
+ {"priority", "1"},
+ {"correlation-id", "123"},
+ {"reply-to", "something"},
+ {"expiration", "my-expiration"},
+ {"amqp-message-id", "M123"},
+ {"timestamp", "123456"},
+ {"type", "freshly-squeezed"},
+ {"user-id", "joe"},
+ {"app-id", "joe's app"},
+ {"str", "foo"},
+ {"int", "123"}
+ ],
+
+ [] = lists:subtract(Headers, Expected).
+
+%% Empty #'P_basic'{} must produce no headers outside the allowed set.
+%% NOTE(review): Delivery is bound but never used (compiler will warn
+%% about the unused variable).
+minimal_message_headers_with_no_custom_test() ->
+ Delivery = #'basic.deliver'{
+ consumer_tag = <<"Q_123">>,
+ delivery_tag = 123,
+ exchange = <<"">>,
+ routing_key = <<"foo">>},
+
+ Properties = #'P_basic'{},
+
+ Headers = rabbit_stomp_util:message_headers(Properties),
+ Expected = [
+ {"content-type", "text/plain"},
+ {"content-encoding", "UTF-8"},
+ {"amqp-message-id", "M123"}
+ ],
+
+ [] = lists:subtract(Headers, Expected).
+
+%% Post-processing rewrites a bare reply-to into a /reply-queue/
+%% destination; other headers pass through untouched.
+headers_post_process_test() ->
+ Headers = [{"header1", "1"},
+ {"header2", "12"},
+ {"reply-to", "something"}],
+ Expected = [{"header1", "1"},
+ {"header2", "12"},
+ {"reply-to", "/reply-queue/something"}],
+ [] = lists:subtract(
+ rabbit_stomp_util:headers_post_process(Headers), Expected).
+
+%% reply-to values already carrying a known destination prefix must be
+%% left unchanged.
+headers_post_process_noop_replyto_test() ->
+ [begin
+ Headers = [{"reply-to", Prefix ++ "/something"}],
+ Headers = rabbit_stomp_util:headers_post_process(Headers)
+ end || Prefix <- rabbit_routing_util:dest_prefixes()].
+
+%% With no reply-to present, post-processing is a no-op.
+headers_post_process_noop2_test() ->
+ Headers = [{"header1", "1"},
+ {"header2", "12"}],
+ Expected = [{"header1", "1"},
+ {"header2", "12"}],
+ [] = lists:subtract(
+ rabbit_stomp_util:headers_post_process(Headers), Expected).
+
+%% Version negotiation: no versions offered on either side fails.
+negotiate_version_both_empty_test() ->
+ {error, no_common_version} = rabbit_stomp_util:negotiate_version([],[]).
+
+%% Disjoint version sets fail to negotiate.
+negotiate_version_no_common_test() ->
+ {error, no_common_version} =
+ rabbit_stomp_util:negotiate_version(["1.2"],["1.3"]).
+
+%% A single shared version is selected.
+negotiate_version_simple_common_test() ->
+ {ok, "1.2"} =
+ rabbit_stomp_util:negotiate_version(["1.2"],["1.2"]).
+
+%% With several common versions the highest wins.
+negotiate_version_two_choice_common_test() ->
+ {ok, "1.3"} =
+ rabbit_stomp_util:negotiate_version(["1.2", "1.3"],["1.2", "1.3"]).
+
+%% Input ordering must not affect which common version is chosen.
+negotiate_version_two_choice_common_out_of_order_test() ->
+ {ok, "1.3"} =
+ rabbit_stomp_util:negotiate_version(["1.3", "1.2"],["1.2", "1.3"]).
+
+negotiate_version_two_choice_big_common_test() ->
+ {ok, "1.20.23"} =
+ rabbit_stomp_util:negotiate_version(["1.20.23", "1.30.456"],
+ ["1.20.23", "1.30.457"]).
+negotiate_version_choice_mismatched_length_test() ->
+ {ok, "1.2.3"} =
+ rabbit_stomp_util:negotiate_version(["1.2", "1.2.3"],
+ ["1.2.3", "1.2"]).
+negotiate_version_choice_duplicates_test() ->
+ {ok, "1.2"} =
+ rabbit_stomp_util:negotiate_version(["1.2", "1.2"],
+ ["1.2", "1.2"]).
+trim_headers_test() ->
+ #stomp_frame{headers = [{"one", "foo"}, {"two", "baz "}]} =
+ rabbit_stomp_util:trim_headers(
+ #stomp_frame{headers = [{"one", " foo"}, {"two", " baz "}]}).
+
+%%--------------------------------------------------------------------
+%% Frame Parsing Tests
+%%--------------------------------------------------------------------
+
%% Explicit "ack: auto" -- server acknowledges on delivery.
ack_mode_auto_test() ->
    Frame = #stomp_frame{headers = [{"ack", "auto"}]},
    {auto, _} = rabbit_stomp_util:ack_mode(Frame).

%% A frame with no "ack" header defaults to auto as well.
ack_mode_auto_default_test() ->
    Frame = #stomp_frame{headers = []},
    {auto, _} = rabbit_stomp_util:ack_mode(Frame).

%% "ack: client" -- client acknowledges; the boolean distinguishes it
%% from client-individual below (presumably a cumulative-ack flag;
%% confirm against ack_mode/1's implementation).
ack_mode_client_test() ->
    Frame = #stomp_frame{headers = [{"ack", "client"}]},
    {client, true} = rabbit_stomp_util:ack_mode(Frame).

%% "ack: client-individual" -- per-message client acknowledgement.
ack_mode_client_individual_test() ->
    Frame = #stomp_frame{headers = [{"ack", "client-individual"}]},
    {client, false} = rabbit_stomp_util:ack_mode(Frame).

%% A subscription with an explicit "id" header gets a "T_"-prefixed tag...
consumer_tag_id_test() ->
    Frame = #stomp_frame{headers = [{"id", "foo"}]},
    {ok, <<"T_foo">>, _} = rabbit_stomp_util:consumer_tag(Frame).

%% ...whereas a destination-derived tag gets a "Q_" prefix.
consumer_tag_destination_test() ->
    Frame = #stomp_frame{headers = [{"destination", "foo"}]},
    {ok, <<"Q_foo">>, _} = rabbit_stomp_util:consumer_tag(Frame).

%% Neither "id" nor "destination" present: tagged error, no crash.
consumer_tag_invalid_test() ->
    Frame = #stomp_frame{headers = []},
    {error, missing_destination_header} = rabbit_stomp_util:consumer_tag(Frame).
+
+%%--------------------------------------------------------------------
+%% Message ID Parsing Tests
+%%--------------------------------------------------------------------
+
%% Message ids are "ConsumerTag@@SessionId@@DeliveryTag"; the parser
%% returns the tag as a binary, the session as a string and the
%% delivery tag as an integer.
parse_valid_message_id_test() ->
    {ok, {<<"bar">>, "abc", 123}} =
        rabbit_stomp_util:parse_message_id("bar@@abc@@123").

%% Anything without the two "@@" separators is rejected.
parse_invalid_message_id_test() ->
    {error, invalid_message_id} =
        rabbit_stomp_util:parse_message_id("blah").
+
--- /dev/null
+import base
+import stomp
+import unittest
+import time
+
+class TestReliability(base.BaseTest):
+
+ def test_send_and_disconnect(self):
+ ''' Test close socket after send does not lose messages '''
+ d = "/queue/reliability"
+ pub_conn = self.create_connection()
+ try:
+ msg = "0" * (128)
+
+ count = 10000
+
+ listener = base.WaitableListener()
+ listener.reset(count)
+ self.conn.set_listener('', listener)
+ self.conn.subscribe(destination=d)
+
+ for x in range(0, count):
+ pub_conn.send(msg + str(x), destination=d)
+ time.sleep(2.0)
+ pub_conn.close_socket()
+
+ if listener.await(30):
+ self.assertEquals(count, len(listener.messages))
+ else:
+ listener.print_state("Final state of listener:")
+ self.fail("Did not receive %s messages in time" % count)
+ finally:
+ if pub_conn.is_connected():
+ pub_conn.disconnect()
--- /dev/null
%% Broker configuration for the STOMP SSL test suite: STOMP accepts TLS
%% connections on 61614 and logs clients in by certificate
%% (ssl_cert_login).  The %%CERTS_DIR%% placeholders are substituted
%% with the generated certificate directory before the broker starts.
[{rabbitmq_stomp, [{default_user, []},
                   {ssl_cert_login, true},
                   {ssl_listeners, [61614]}
                  ]},
 %% Client certificates are both requested and required
 %% (verify_peer + fail_if_no_peer_cert).
 {rabbit, [{ssl_options, [{cacertfile,"%%CERTS_DIR%%/testca/cacert.pem"},
                          {certfile,"%%CERTS_DIR%%/server/cert.pem"},
                          {keyfile,"%%CERTS_DIR%%/server/key.pem"},
                          {verify,verify_peer},
                          {fail_if_no_peer_cert,true}
                         ]}
          ]}
].
--- /dev/null
+import unittest
+import os
+
+import stomp
+import base
+
+ssl_key_file = os.path.abspath("test/certs/client/key.pem")
+ssl_cert_file = os.path.abspath("test/certs/client/cert.pem")
+ssl_ca_certs = os.path.abspath("test/certs/testca/cacert.pem")
+
+class TestSslClient(unittest.TestCase):
+
+ def __ssl_connect(self):
+ conn = stomp.Connection(user="guest", passcode="guest",
+ host_and_ports = [ ('localhost', 61614) ],
+ use_ssl = True, ssl_key_file = ssl_key_file,
+ ssl_cert_file = ssl_cert_file,
+ ssl_ca_certs = ssl_ca_certs)
+
+ conn.start()
+ conn.connect()
+ return conn
+
+ def __ssl_auth_connect(self):
+ conn = stomp.Connection(host_and_ports = [ ('localhost', 61614) ],
+ use_ssl = True, ssl_key_file = ssl_key_file,
+ ssl_cert_file = ssl_cert_file,
+ ssl_ca_certs = ssl_ca_certs)
+ conn.start()
+ conn.connect()
+ return conn
+
+ def test_ssl_connect(self):
+ conn = self.__ssl_connect()
+ conn.stop()
+
+ def test_ssl_auth_connect(self):
+ conn = self.__ssl_auth_connect()
+ conn.stop()
+
+ def test_ssl_send_receive(self):
+ conn = self.__ssl_connect()
+ self.__test_conn(conn)
+
+ def test_ssl_auth_send_receive(self):
+ conn = self.__ssl_auth_connect()
+ self.__test_conn(conn)
+
+ def __test_conn(self, conn):
+ try:
+ listener = base.WaitableListener()
+
+ conn.set_listener('', listener)
+
+ d = "/topic/ssl.test"
+ conn.subscribe(destination=d, receipt="sub")
+
+ self.assertTrue(listener.await(1))
+
+ self.assertEquals("sub",
+ listener.receipts[0]['headers']['receipt-id'])
+
+ listener.reset(1)
+ conn.send("Hello SSL!", destination=d)
+
+ self.assertTrue(listener.await())
+
+ self.assertEquals("Hello SSL!", listener.messages[0]['message'])
+ finally:
+ conn.disconnect()
--- /dev/null
#!/usr/bin/env python

import test_runner

if __name__ == '__main__':
    # Full STOMP adapter suite: protocol parsing plus all functional
    # tests (run against an already-started test broker).
    modules = ['parsing', 'destinations', 'lifecycle', 'transactions',
               'ack', 'errors', 'reliability']
    test_runner.run_unittests(modules)
+
--- /dev/null
#!/usr/bin/env python

import test_runner

if __name__ == '__main__':
    # Run only the CONNECT-option tests; separated from the main suite
    # because they reconfigure the broker (see test_util.switch_config).
    modules = ['connect_options']
    test_runner.run_unittests(modules)
+
--- /dev/null
+#!/usr/bin/env python
+
+import unittest
+import sys
+import os
+
+def add_deps_to_path():
+ deps_dir = os.path.realpath(os.path.join(__file__, "..", "..", "..", "deps"))
+ sys.path.append(os.path.join(deps_dir, "stomppy", "stomppy"))
+
+def run_unittests(modules):
+ add_deps_to_path()
+
+ suite = unittest.TestSuite()
+ for m in modules:
+ mod = __import__(m)
+ for name in dir(mod):
+ obj = getattr(mod, name)
+ if name.startswith("Test") and issubclass(obj, unittest.TestCase):
+ suite.addTest(unittest.TestLoader().loadTestsFromTestCase(obj))
+
+ ts = unittest.TextTestRunner().run(unittest.TestSuite(suite))
+ if ts.errors or ts.failures:
+ sys.exit(1)
+
--- /dev/null
#!/usr/bin/env python

import test_runner
import test_util

if __name__ == '__main__':
    # SSL suite: the certificate-mapped broker user has to exist before
    # the lifecycle tests attempt a certificate (EXTERNAL) login.
    modules = ['ssl_lifecycle']
    test_util.ensure_ssl_auth_user()
    test_runner.run_unittests(modules)
+
--- /dev/null
+import subprocess
+import socket
+import sys
+import os
+import os.path
+
def ensure_ssl_auth_user():
    """Reset the test broker and (re)create the passwordless user whose
    name matches the client certificate's DN, so certificate (EXTERNAL)
    authentication succeeds.

    NOTE(review): 'reset' wipes all broker state -- assumed safe because
    this only ever targets the dedicated 'rabbit-test' node.
    """
    user = 'O=client,CN=%s' % socket.gethostname()
    rabbitmqctl(['stop_app'])
    rabbitmqctl(['reset'])
    rabbitmqctl(['start_app'])
    rabbitmqctl(['add_user', user, 'foo'])
    # Clearing the password forbids password logins, leaving only the
    # certificate-based mechanism for this user.
    rabbitmqctl(['clear_password', user])
    rabbitmqctl(['set_permissions', user, '.*', '.*', '.*'])
+
def enable_implicit_connect():
    # Let clients send frames without a CONNECT, implicitly logging in
    # with the guest default user.
    switch_config(implicit_connect='true', default_user='[{login, "guest"}, {passcode, "guest"}]')

def disable_implicit_connect():
    switch_config(implicit_connect='false', default_user='[]')

def enable_default_user():
    # Credentials used when a CONNECT frame omits login/passcode.
    switch_config(default_user='[{login, "guest"}, {passcode, "guest"}]')

def disable_default_user():
    switch_config(default_user='[]')
+
def switch_config(implicit_connect='', default_user=''):
    """Restart the rabbitmq_stomp application with the given settings.

    Each argument is an Erlang term rendered as a string; an empty
    string leaves that setting untouched."""
    parts = ['application:stop(rabbitmq_stomp),']
    if implicit_connect:
        parts.append('application:set_env(rabbitmq_stomp,implicit_connect,'
                     + implicit_connect + '),')
    if default_user:
        parts.append('application:set_env(rabbitmq_stomp,default_user,'
                     + default_user + '),')
    parts.append('application:start(rabbitmq_stomp).')
    rabbitmqctl(['eval', ''.join(parts)])
+
def rabbitmqctl(args):
    """Run rabbitmqctl against the 'rabbit-test' node; raises
    CalledProcessError on a non-zero exit.

    The script path is derived from sys.argv[0] assuming the standard
    umbrella layout with rabbitmq-server checked out as a sibling of
    this plugin -- TODO confirm when invoked from another directory.
    """
    ctl = os.path.normpath(os.path.join(os.getcwd(), sys.argv[0], '../../../../rabbitmq-server/scripts/rabbitmqctl'))
    cmdline = [ctl, '-n', 'rabbit-test']
    cmdline.extend(args)
    subprocess.check_call(cmdline)
+
--- /dev/null
+import unittest
+import stomp
+import base
+import time
+
+class TestTransactions(base.BaseTest):
+
+ def test_tx_commit(self):
+ ''' Test TX with a COMMIT and ensure messages are delivered '''
+ d = "/exchange/amq.fanout"
+ tx = "test.tx"
+
+ self.listener.reset()
+ self.conn.subscribe(destination=d)
+ self.conn.begin(transaction=tx)
+ self.conn.send("hello!", destination=d, transaction=tx)
+ self.conn.send("again!", destination=d)
+
+ ## should see the second message
+ self.assertTrue(self.listener.await(3))
+ self.assertEquals(1, len(self.listener.messages))
+ self.assertEquals("again!", self.listener.messages[0]['message'])
+
+ ## now look for the first message
+ self.listener.reset()
+ self.conn.commit(transaction=tx)
+ self.assertTrue(self.listener.await(3))
+ self.assertEquals(1, len(self.listener.messages),
+ "Missing committed message")
+ self.assertEquals("hello!", self.listener.messages[0]['message'])
+
+ def test_tx_abort(self):
+ ''' Test TX with an ABORT and ensure messages are discarded '''
+ d = "/exchange/amq.fanout"
+ tx = "test.tx"
+
+ self.listener.reset()
+ self.conn.subscribe(destination=d)
+ self.conn.begin(transaction=tx)
+ self.conn.send("hello!", destination=d, transaction=tx)
+ self.conn.send("again!", destination=d)
+
+ ## should see the second message
+ self.assertTrue(self.listener.await(3))
+ self.assertEquals(1, len(self.listener.messages))
+ self.assertEquals("again!", self.listener.messages[0]['message'])
+
+ ## now look for the first message to be discarded
+ self.listener.reset()
+ self.conn.abort(transaction=tx)
+ self.assertFalse(self.listener.await(3))
+ self.assertEquals(0, len(self.listener.messages),
+ "Unexpected committed message")
+
--- /dev/null
# Test harness Makefile: drives the broker's Erlang tests, the Java
# client functional suite and the QPid conformance suite against two
# freshly started broker nodes ("rabbit" and secondary node "hare").
.PHONY: all full lite conformance16 update-qpid-testsuite run-qpid-testsuite \
	prepare restart-app stop-app start-app \
	start-secondary-app stop-secondary-app \
	restart-secondary-node cleanup force-snapshot \
	enable-ha disable-ha

include ../umbrella.mk

BROKER_DIR=../rabbitmq-server
TEST_DIR=../rabbitmq-java-client

# AMQP and SSL listener ports for the two test nodes.
TEST_RABBIT_PORT=5672
TEST_HARE_PORT=5673
TEST_RABBIT_SSL_PORT=5671
TEST_HARE_SSL_PORT=5670

COVER=true

ifeq ($(COVER), true)
COVER_START=start-cover
COVER_STOP=stop-cover
else
COVER_START=
COVER_STOP=
endif

# we actually want to test for ssl above 3.9 (eg >= 3.10), but this
# comparison is buggy because it doesn't believe 10 > 9, so it doesn't
# believe 3.10 > 3.9. As a result, we cheat, and use the erts version
# instead. SSL 3.10 came out with R13B, which included erts 5.7.1, so
# we require > 5.7.0.
SSL_VERIFY=$(shell if [ $$(erl -noshell -eval 'io:format(erlang:system_info(version)), halt().') \> "5.7.0" ]; then echo "true"; else echo "false"; fi)
ifeq (true,$(SSL_VERIFY))
SSL_VERIFY_OPTION :={verify,verify_peer},{fail_if_no_peer_cert,false}
else
SSL_VERIFY_OPTION :={verify_code,1}
endif
export SSL_CERTS_DIR := $(realpath certs)
export PASSWORD := test
RABBIT_BROKER_OPTIONS := "-rabbit ssl_listeners [{\\\"0.0.0.0\\\",$(TEST_RABBIT_SSL_PORT)}] -rabbit ssl_options [{cacertfile,\\\"$(SSL_CERTS_DIR)/testca/cacert.pem\\\"},{certfile,\\\"$(SSL_CERTS_DIR)/server/cert.pem\\\"},{keyfile,\\\"$(SSL_CERTS_DIR)/server/key.pem\\\"},$(SSL_VERIFY_OPTION)] -rabbit auth_mechanisms ['PLAIN','AMQPLAIN','EXTERNAL','RABBIT-CR-DEMO']"
HARE_BROKER_OPTIONS := "-rabbit ssl_listeners [{\\\"0.0.0.0\\\",$(TEST_HARE_SSL_PORT)}] -rabbit ssl_options [{cacertfile,\\\"$(SSL_CERTS_DIR)/testca/cacert.pem\\\"},{certfile,\\\"$(SSL_CERTS_DIR)/server/cert.pem\\\"},{keyfile,\\\"$(SSL_CERTS_DIR)/server/key.pem\\\"},$(SSL_VERIFY_OPTION)] -rabbit auth_mechanisms ['PLAIN','AMQPLAIN','EXTERNAL','RABBIT-CR-DEMO']"

TESTS_FAILED := echo '\n============'\
	'\nTESTS FAILED'\
	'\n============\n'

all: full test

# Runs every suite but keeps going after a failure, reporting the
# banner each time and only failing at the very end ($$OK).
full:
	OK=true && \
	$(MAKE) prepare && \
	{ $(MAKE) -C $(BROKER_DIR) run-tests || { OK=false; $(TESTS_FAILED); } } && \
	{ $(MAKE) run-qpid-testsuite || { OK=false; $(TESTS_FAILED); } } && \
	{ ( cd $(TEST_DIR) && ant test-suite ) || { OK=false; $(TESTS_FAILED); } } && \
	$(MAKE) cleanup && { $$OK || $(TESTS_FAILED); } && $$OK

# Erlang unit tests plus the Java functional tests only (no QPid suite).
lite:
	OK=true && \
	$(MAKE) prepare && \
	{ $(MAKE) -C $(BROKER_DIR) run-tests || OK=false; } && \
	{ ( cd $(TEST_DIR) && ant test-suite ) || OK=false; } && \
	$(MAKE) cleanup && $$OK

# NOTE(review): currently identical to 'lite' -- presumably intended to
# run a 1.6-conformance variant of the Java suite; confirm.
conformance16:
	OK=true && \
	$(MAKE) prepare && \
	{ $(MAKE) -C $(BROKER_DIR) run-tests || OK=false; } && \
	{ ( cd $(TEST_DIR) && ant test-suite ) || OK=false; } && \
	$(MAKE) cleanup && $$OK

qpid_testsuite:
	$(MAKE) update-qpid-testsuite

# Check out the pinned qpid python suite and apply our local patch
# (idempotently: -N skips already-applied hunks).
update-qpid-testsuite:
	svn co -r 906960 http://svn.apache.org/repos/asf/qpid/trunk/qpid/python qpid_testsuite
	# hg clone http://rabbit-hg.eng.vmware.com/mirrors/qpid_testsuite
	- patch -N -r - -p0 -d qpid_testsuite/ < qpid_patch

prepare-qpid-patch:
	cd qpid_testsuite && svn diff > ../qpid_patch && cd ..

# Run the 0-8 and 0-9 conformance suites, skipping known failures
# listed in rabbit_failing.txt.
run-qpid-testsuite: qpid_testsuite
	AMQP_SPEC=../rabbitmq-docs/specs/amqp0-8.xml qpid_testsuite/qpid-python-test -m tests_0-8 -I rabbit_failing.txt
	AMQP_SPEC=../rabbitmq-docs/specs/amqp0-9-1.xml qpid_testsuite/qpid-python-test -m tests_0-9 -I rabbit_failing.txt

clean:
	rm -rf qpid_testsuite

# Regenerate certificates, then (re)start both broker nodes from a
# clean database; coverage only instruments the main node.
prepare: create_ssl_certs
	$(MAKE) -C $(BROKER_DIR) \
		RABBITMQ_NODENAME=hare \
		RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 \
		RABBITMQ_NODE_PORT=${TEST_HARE_PORT} \
		RABBITMQ_SERVER_START_ARGS=$(HARE_BROKER_OPTIONS) \
		RABBITMQ_CONFIG_FILE=/does-not-exist \
		RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
		stop-node cleandb start-background-node
	$(MAKE) -C $(BROKER_DIR) \
		RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 \
		RABBITMQ_NODE_PORT=${TEST_RABBIT_PORT} \
		RABBITMQ_SERVER_START_ARGS=$(RABBIT_BROKER_OPTIONS) \
		RABBITMQ_CONFIG_FILE=/does-not-exist \
		RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
		stop-node cleandb start-background-node ${COVER_START} start-rabbit-on-node
	$(MAKE) -C $(BROKER_DIR) RABBITMQ_NODENAME=hare start-rabbit-on-node

start-app:
	$(MAKE) -C $(BROKER_DIR) \
		RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 \
		RABBITMQ_NODE_PORT=${TEST_RABBIT_PORT} \
		RABBITMQ_SERVER_START_ARGS=$(RABBIT_BROKER_OPTIONS) \
		RABBITMQ_CONFIG_FILE=/does-not-exist \
		RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
		start-rabbit-on-node

stop-app:
	$(MAKE) -C $(BROKER_DIR) stop-rabbit-on-node

restart-app: stop-app start-app

start-secondary-app:
	$(MAKE) -C $(BROKER_DIR) RABBITMQ_NODENAME=hare start-rabbit-on-node

stop-secondary-app:
	$(MAKE) -C $(BROKER_DIR) RABBITMQ_NODENAME=hare stop-rabbit-on-node

restart-secondary-node:
	$(MAKE) -C $(BROKER_DIR) \
		RABBITMQ_NODENAME=hare \
		RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 \
		RABBITMQ_NODE_PORT=${TEST_HARE_PORT} \
		RABBITMQ_SERVER_START_ARGS=$(HARE_BROKER_OPTIONS) \
		RABBITMQ_CONFIG_FILE=/does-not-exist \
		RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
		stop-node start-background-node
	$(MAKE) -C $(BROKER_DIR) RABBITMQ_NODENAME=hare start-rabbit-on-node

force-snapshot:
	$(MAKE) -C $(BROKER_DIR) force-snapshot

set-resource-alarm:
	$(MAKE) -C $(BROKER_DIR) set-resource-alarm SOURCE=$(SOURCE)

clear-resource-alarm:
	$(MAKE) -C $(BROKER_DIR) clear-resource-alarm SOURCE=$(SOURCE)

# Mirror every queue across all nodes via an HA policy.
enable-ha:
	$(BROKER_DIR)/scripts/rabbitmqctl set_policy HA \
		".*" '{"ha-mode": "all"}'

disable-ha:
	$(BROKER_DIR)/scripts/rabbitmqctl clear_policy HA

# Leading '-' so cleanup proceeds even if a node is already down.
cleanup:
	-$(MAKE) -C $(BROKER_DIR) \
		RABBITMQ_NODENAME=hare \
		RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 \
		RABBITMQ_NODE_PORT=${TEST_HARE_PORT} \
		RABBITMQ_SERVER_START_ARGS=$(HARE_BROKER_OPTIONS) \
		RABBITMQ_CONFIG_FILE=/does-not-exist \
		RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
		stop-rabbit-on-node stop-node
	-$(MAKE) -C $(BROKER_DIR) \
		RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 \
		RABBITMQ_NODE_PORT=${TEST_RABBIT_PORT} \
		RABBITMQ_SERVER_START_ARGS=$(RABBIT_BROKER_OPTIONS) \
		RABBITMQ_CONFIG_FILE=/does-not-exist \
		RABBITMQ_ENABLED_PLUGINS_FILE=/does-not-exist \
		stop-rabbit-on-node ${COVER_STOP} stop-node

create_ssl_certs:
	$(MAKE) -C certs DIR=$(SSL_CERTS_DIR) clean all
--- /dev/null
+Useful targets:
+
+$ make lite # runs the Erlang unit tests and the Java client / functional tests
+$ make full # runs both the above plus the QPid test suite
+$ make test # runs the Erlang multi-node integration tests
+$ make all # runs all of the above
+
+The multi-node tests take a long time, so you might want to run a subset:
+
+$ make test FILTER=dynamic_ha # <- run just one suite
+$ make test FILTER=dynamic_ha:change_policy # <- run just one test
+
The multi-node tests also default to coverage off; to turn it on:
+
+$ make test COVER=true
+
+This repository is not related to plugin tests; run "make test" in a
+plugin directory to test that plugin.
--- /dev/null
# Generates a throwaway test CA plus client and server certificates
# (PEM and PKCS#12) under $(DIR).  Requires PASSWORD for the p12 export.
OPENSSL=openssl

ifndef DIR
DIR := .
endif

# Fail fast with a readable message when PASSWORD is not exported.
ifdef PASSWORD
P12PASS := true
else
P12PASS := @echo No PASSWORD defined. && false
endif

# NOTE(review): '%/testca' and '%/clean' look like leftover pattern
# names; the real targets below are '$(DIR)/testca' and 'clean'.
.PRECIOUS: %/testca
.PHONY: %/clean target all p12pass

all: client server

client: p12pass
	echo $(DIR)
	$(MAKE) target DIR=$(DIR) TARGET=client EXTENSIONS=client_ca_extensions

server: p12pass
	$(MAKE) target DIR=$(DIR) TARGET=server EXTENSIONS=server_ca_extensions

p12pass:
	$(P12PASS)

# Create a key + CSR for $(TARGET), sign it with the test CA using the
# given x509 extensions, and export a password-protected PKCS#12 bundle.
# On any failure the half-built directory is removed.
target: $(DIR)/testca
	mkdir $(DIR)/$(TARGET)
	{ ( cd $(DIR)/$(TARGET) && \
	    openssl genrsa -out key.pem 2048 &&\
	    openssl req -new -key key.pem -out req.pem -outform PEM\
	        -subj /CN=$$(hostname)/O=$(TARGET)/L=$$$$/ -nodes &&\
	    cd ../testca && \
	    openssl ca -config openssl.cnf -in ../$(TARGET)/req.pem -out \
	        ../$(TARGET)/cert.pem -notext -batch -extensions \
	        $(EXTENSIONS) && \
	    cd ../$(TARGET) && \
	    openssl pkcs12 -export -out keycert.p12 -in cert.pem -inkey key.pem \
	        -passout pass:$(PASSWORD) ) || (rm -rf $(DIR)/$(TARGET) && false); }

# Bootstrap the CA directory layout (serial, index, certs/, private/)
# and self-sign the CA certificate; DER copy is produced for consumers
# that cannot read PEM.
$(DIR)/testca:
	mkdir $(DIR)/testca
	cp openssl.cnf $(DIR)/testca/openssl.cnf
	{ ( cd $(DIR)/testca && \
	    mkdir certs private && \
	    chmod 700 private && \
	    echo 01 > serial && \
	    touch index.txt && \
	    openssl req -x509 -config openssl.cnf -newkey rsa:2048 -days 365 \
	        -out cacert.pem -outform PEM -subj /CN=MyTestCA/L=$$$$/ -nodes && \
	    openssl x509 -in cacert.pem -out cacert.cer -outform DER ) \
	  || (rm -rf $@ && false); }

clean:
	rm -rf $(DIR)/testca
	rm -rf $(DIR)/server
	rm -rf $(DIR)/client
--- /dev/null
# OpenSSL configuration for the throwaway test CA created by the certs
# Makefile; paths are relative to the generated testca directory.
[ ca ]
default_ca = testca

[ testca ]
dir = .
certificate = $dir/cacert.pem
database = $dir/index.txt
new_certs_dir = $dir/certs
private_key = $dir/private/cakey.pem
serial = $dir/serial

default_crl_days = 7
default_days = 365
default_md = sha1

policy = testca_policy
x509_extensions = certificate_extensions

# Only the CN is mandatory in issued certificates.
[ testca_policy ]
commonName = supplied
stateOrProvinceName = optional
countryName = optional
emailAddress = optional
organizationName = optional
organizationalUnitName = optional
domainComponent = optional

[ certificate_extensions ]
basicConstraints = CA:false

[ req ]
default_bits = 2048
default_keyfile = ./private/cakey.pem
default_md = sha1
prompt = yes
distinguished_name = root_ca_distinguished_name
x509_extensions = root_ca_extensions

[ root_ca_distinguished_name ]
commonName = hostname

[ root_ca_extensions ]
basicConstraints = CA:true
keyUsage = keyCertSign, cRLSign

# extendedKeyUsage OIDs: 1.3.6.1.5.5.7.3.2 = TLS client auth,
# 1.3.6.1.5.5.7.3.1 = TLS server auth.
[ client_ca_extensions ]
basicConstraints = CA:false
keyUsage = digitalSignature
extendedKeyUsage = 1.3.6.1.5.5.7.3.2

[ server_ca_extensions ]
basicConstraints = CA:false
keyUsage = keyEncipherment
extendedKeyUsage = 1.3.6.1.5.5.7.3.1
--- /dev/null
# Package settings for the multi-node integration test runner.
DEPS:=rabbitmq-erlang-client
# FILTER selects which suite(s) rabbit_test_runner runs; COVER toggles
# coverage (off by default -- it slows the run down considerably).
FILTER:=all
COVER:=false
STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),none)

## Require R15B to compile inet_proxy_dist since it requires includes
## introduced there.
ifeq ($(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,9]), halt().'),true)
PACKAGE_ERLC_OPTS+=-Derlang_r15b_or_later
endif
--- /dev/null
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
import os

# Spec files live in the sibling rabbitmq-docs checkout rather than in
# the qpid tree this config originally shipped with.
AMQP_SPEC_DIR=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "../rabbitmq-docs/specs")
amqp_spec = os.path.join(AMQP_SPEC_DIR, "amqp.0-10-qpid-errata.xml")
amqp_spec_0_8 = os.path.join(AMQP_SPEC_DIR, "amqp0-8.xml")
amqp_spec_0_9 = os.path.join(AMQP_SPEC_DIR, "amqp0-9.xml")
# NOTE(review): deliberately overrides the amqp_spec assignment above
# with a file:// URL pointing at the plain 0-10 spec (matches qpid_patch).
amqp_spec = 'file://'+os.path.join(AMQP_SPEC_DIR, 'amqp.0-10.xml')
--- /dev/null
+Index: tests_0-8/basic.py
+===================================================================
+--- tests_0-8/basic.py (revision 906960)
++++ tests_0-8/basic.py (working copy)
+@@ -98,7 +98,7 @@
+ channel.basic_consume(queue="")
+ self.fail("Expected failure when consuming from unspecified queue")
+ except Closed, e:
+- self.assertConnectionException(530, e.args[0])
++ self.assertChannelException(404, e.args[0])
+
+ def test_consume_unique_consumers(self):
+ """
+Index: tests_0-8/exchange.py
+===================================================================
+--- tests_0-8/exchange.py (revision 906960)
++++ tests_0-8/exchange.py (working copy)
+@@ -138,8 +138,6 @@
+ # Test automatic binding by queue name.
+ self.queue_declare(queue="d")
+ self.assertPublishConsume(queue="d", routing_key="d")
+- # Test explicit bind to default queue
+- self.verifyDirectExchange("")
+
+
+ # TODO aconway 2006-09-27: Fill in empty tests:
+@@ -318,7 +316,7 @@
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
+ self.fail("Expected 530 for redeclaration of exchange with different type.")
+ except Closed, e:
+- self.assertConnectionException(530, e.args[0])
++ self.assertChannelException(406, e.args[0])
+ #cleanup
+ other = self.connect()
+ c2 = other.channel(1)
+Index: tests_0-8/queue.py
+===================================================================
+--- tests_0-8/queue.py (revision 906960)
++++ tests_0-8/queue.py (working copy)
+@@ -37,14 +37,10 @@
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("two"))
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("three"))
+
+- #check that the queue now reports 3 messages:
+- reply = channel.queue_declare(queue="test-queue")
+- self.assertEqual(3, reply.message_count)
+-
+ #now do the purge, then test that three messages are purged and the count drops to 0
+ reply = channel.queue_purge(queue="test-queue");
+ self.assertEqual(3, reply.message_count)
+- reply = channel.queue_declare(queue="test-queue")
++ reply = channel.queue_declare(queue="test-queue", exclusive=True)
+ self.assertEqual(0, reply.message_count)
+
+ #send a further message and consume it, ensuring that the other messages are really gone
+@@ -71,7 +67,7 @@
+ channel.queue_purge()
+ self.fail("Expected failure when purging unspecified queue")
+ except Closed, e:
+- self.assertConnectionException(530, e.args[0])
++ self.assertChannelException(404, e.args[0])
+
+ #cleanup
+ other = self.connect()
+@@ -174,11 +170,7 @@
+ #check attempted deletion of non-existant queue is handled correctly:
+ channel = self.client.channel(2)
+ channel.channel_open()
+- try:
+- channel.queue_delete(queue="i-dont-exist", if_empty="True")
+- self.fail("Expected delete of non-existant queue to fail")
+- except Closed, e:
+- self.assertChannelException(404, e.args[0])
++ channel.queue_delete(queue="i-dont-exist", if_empty="True")
+
+
+
+Index: qpid/codec.py
+===================================================================
+--- qpid/codec.py (revision 906960)
++++ qpid/codec.py (working copy)
+@@ -76,6 +76,7 @@
+ if not self.types:
+ self.typecode(ord('S'), "longstr")
+ self.typecode(ord('I'), "long")
++ self.typecode(ord('t'), "bool")
+
+ def typecode(self, code, type):
+ self.types[code] = type
+@@ -206,6 +207,22 @@
+ """
+ return self.unpack("!B")
+
++ def encode_bool(self, b):
++ """
++ encodes bool (8 bits) data 't' in network byte order
++ """
++
++ if ((b is not True) and (b is not False)):
++ raise ValueError('Valid range of bool is True or False')
++
++ self.pack("!B", int(b))
++
++ def decode_bool(self):
++ """
++ decodes a bool (8 bits) encoded in network byte order
++ """
++ return bool(self.unpack("!B"))
++
+ def encode_short(self, o):
+ """
+ encodes short (16 bits) data 'o' in network byte order
+Index: qpid/testlib.py
+===================================================================
+--- qpid/testlib.py (revision 906960)
++++ qpid/testlib.py (working copy)
+@@ -67,8 +67,7 @@
+
+ if not self.client.closed:
+ self.client.channel(0).connection_close(reply_code=200)
+- else:
+- self.client.close()
++ self.client.close()
+
+ def connect(self, host=None, port=None, user=None, password=None, tune_params=None):
+ """Create a new connction, return the Client object"""
+Index: qpid_config.py
+===================================================================
+--- qpid_config.py (revision 906960)
++++ qpid_config.py (working copy)
+@@ -19,7 +19,8 @@
+
+ import os
+
+-AMQP_SPEC_DIR=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "specs")
++AMQP_SPEC_DIR=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "../rabbitmq-docs/specs")
+ amqp_spec = os.path.join(AMQP_SPEC_DIR, "amqp.0-10-qpid-errata.xml")
+-amqp_spec_0_8 = os.path.join(AMQP_SPEC_DIR, "amqp.0-8.xml")
+-amqp_spec_0_9 = os.path.join(AMQP_SPEC_DIR, "amqp.0-9.xml")
++amqp_spec_0_8 = os.path.join(AMQP_SPEC_DIR, "amqp0-8.xml")
++amqp_spec_0_9 = os.path.join(AMQP_SPEC_DIR, "amqp0-9.xml")
++amqp_spec = 'file://'+os.path.join(AMQP_SPEC_DIR, 'amqp.0-10.xml')
--- /dev/null
+tests_0-8.basic.BasicTests.test_ack
+tests_0-8.basic.BasicTests.test_consume_no_local
+tests_0-8.basic.BasicTests.test_qos_prefetch_count
+tests_0-8.basic.BasicTests.test_qos_prefetch_size
+tests_0-8.broker.BrokerTests.test_basic_delivery_immediate
+tests_0-8.broker.BrokerTests.test_channel_flow
+tests_0-8.tx.TxTests.test_auto_rollback
+tests_0-8.tx.TxTests.test_rollback
+tests_0-9.query.*
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(inet_proxy_dist).
+
+%% A distribution plugin that uses the usual inet_tcp_dist but allows
+%% insertion of a proxy at the receiving end.
+
+%% inet_*_dist "behaviour"
+-export([listen/1, accept/1, accept_connection/5,
+ setup/5, close/1, select/1, is_node_name/1]).
+
+%% For copypasta from inet_tcp_dist
+-export([do_setup/6]).
+-import(error_logger,[error_msg/2]).
+
+-define(REAL, inet_tcp_dist).
+
+%%----------------------------------------------------------------------------
+
%% Plain delegation: everything except setup/5 behaves exactly like the
%% standard TCP distribution (?REAL = inet_tcp_dist).
listen(Name) -> ?REAL:listen(Name).
select(Node) -> ?REAL:select(Node).
accept(Listen) -> ?REAL:accept(Listen).
close(Socket) -> ?REAL:close(Socket).
is_node_name(Node) -> ?REAL:is_node_name(Node).

%% Inbound connections are untouched; only outbound setup is proxied.
accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
    ?REAL:accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime).
+
+%% This is copied from inet_tcp_dist, in order to change the
+%% output of erl_epmd:port_please/2.
+
+-ifdef(erlang_r15b_or_later).
+
+-include_lib("kernel/include/net_address.hrl").
+-include_lib("kernel/include/dist_util.hrl").
+
%% Entry point used by the net kernel to connect to Node.  Identical to
%% inet_tcp_dist:setup/5 except that it spawns our patched do_setup/6.
setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
    spawn_opt(?MODULE, do_setup,
              [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
              [link, {priority, max}]).
+
%% Copied verbatim from inet_tcp_dist:do_setup/6; the only changes sit
%% between the "Modification START/END" markers: when the remote
%% distribution port is in 25672..25699 and the proxy is enabled, we
%% connect to port + 10000 (where inet_tcp_proxy listens) instead of the
%% real port, and register the mapping with inet_tcp_proxy_manager.
%% Deliberately not restyled, so it stays diffable against the OTP
%% original.
do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) ->
    ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
    [Name, Address] = splitnode(Node, LongOrShortNames),
    case inet:getaddr(Address, inet) of
        {ok, Ip} ->
            Timer = dist_util:start_timer(SetupTime),
            %% Ask epmd for the remote node's distribution port.
            case erl_epmd:port_please(Name, Ip) of
                {port, TcpPort, Version} ->
                    ?trace("port_please(~p) -> version ~p~n",
                           [Node,Version]),
                    dist_util:reset_timer(Timer),
                    %% Modification START
                    ProxyPort = case TcpPort >= 25672 andalso TcpPort < 25700
                                    andalso inet_tcp_proxy:is_enabled() of
                                    true  -> TcpPort + 10000;
                                    false -> TcpPort
                                end,
                    case inet_tcp:connect(Ip, ProxyPort,
                                          [{active, false},
                                           {packet,2}]) of
                        {ok, Socket} ->
                            {ok, {_, SrcPort}} = inet:sockname(Socket),
                            ok = inet_tcp_proxy_manager:register(
                                   node(), Node, SrcPort, TcpPort, ProxyPort),
                            %% Modification END
                            %% From here down: stock OTP handshake setup.
                            HSData = #hs_data{
                              kernel_pid = Kernel,
                              other_node = Node,
                              this_node = MyNode,
                              socket = Socket,
                              timer = Timer,
                              this_flags = 0,
                              other_version = Version,
                              f_send = fun inet_tcp:send/2,
                              f_recv = fun inet_tcp:recv/3,
                              f_setopts_pre_nodeup =
                                  fun(S) ->
                                          inet:setopts
                                            (S,
                                             [{active, false},
                                              {packet, 4},
                                              nodelay()])
                                  end,
                              f_setopts_post_nodeup =
                                  fun(S) ->
                                          inet:setopts
                                            (S,
                                             [{active, true},
                                              {deliver, port},
                                              {packet, 4},
                                              nodelay()])
                                  end,
                              f_getll = fun inet:getll/1,
                              f_address =
                                  fun(_,_) ->
                                          #net_address{
                                            address = {Ip,TcpPort},
                                            host = Address,
                                            protocol = tcp,
                                            family = inet}
                                  end,
                              mf_tick = fun inet_tcp_dist:tick/1,
                              mf_getstat = fun inet_tcp_dist:getstat/1,
                              request_type = Type
                             },
                            dist_util:handshake_we_started(HSData);
                        R ->
                            io:format("~p failed! ~p~n", [node(), R]),
                            %% Other Node may have closed since
                            %% port_please !
                            ?trace("other node (~p) "
                                   "closed since port_please.~n",
                                   [Node]),
                            ?shutdown(Node)
                    end;
                _ ->
                    ?trace("port_please (~p) "
                           "failed.~n", [Node]),
                    ?shutdown(Node)
            end;
        _Other ->
            ?trace("inet_getaddr(~p) "
                   "failed (~p).~n", [Node,_Other]),
            ?shutdown(Node)
    end.
+
%% If Node is illegal terminate the connection setup!!
%% Splits 'name@host' into [Name, Host], validating that the host part
%% matches the running name mode (long vs short names).  Copied from
%% inet_tcp_dist; error strings kept byte-identical to the OTP original.
splitnode(Node, LongOrShortNames) ->
    case split_node(atom_to_list(Node), $@, []) of
        [Name|Tail] when Tail =/= [] ->
            Host = lists:append(Tail),
            case split_node(Host, $., []) of
                [_] when LongOrShortNames =:= longnames ->
                    %% longnames mode requires a dotted FQDN host part.
                    error_msg("** System running to use "
                              "fully qualified "
                              "hostnames **~n"
                              "** Hostname ~s is illegal **~n",
                              [Host]),
                    ?shutdown(Node);
                L when length(L) > 1, LongOrShortNames =:= shortnames ->
                    %% shortnames mode forbids dots in the host part.
                    error_msg("** System NOT running to use fully qualified "
                              "hostnames **~n"
                              "** Hostname ~s is illegal **~n",
                              [Host]),
                    ?shutdown(Node);
                _ ->
                    [Name, Host]
            end;
        [_] ->
            error_msg("** Nodename ~p illegal, no '@' character **~n",
                      [Node]),
            ?shutdown(Node);
        _ ->
            error_msg("** Nodename ~p illegal **~n", [Node]),
            ?shutdown(Node)
    end.
+
%% split_node(String, SepChar, Acc): split String on every occurrence of
%% SepChar, preserving empty segments (e.g. "a@@b" -> ["a", "", "b"]).
split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
split_node([], _, Ack) -> [lists:reverse(Ack)].
+
%% we may not always want the nodelay behaviour
%% for performance reasons
%%
%% Only an explicit {ok, false} for the kernel 'dist_nodelay' setting
%% disables TCP_NODELAY; anything else (unset, true, or an unexpected
%% value) keeps the historical default of {nodelay, true}.
nodelay() ->
    case application:get_env(kernel, dist_nodelay) of
        {ok, false} -> {nodelay, false};
        _           -> {nodelay, true}
    end.
+
+-else.
+
%% Pre-R15B runtimes lack the net_address/dist_util headers this module
%% needs, so the distribution plugin cannot work there: fail loudly
%% instead of misbehaving.
setup(_Node, _Type, _MyNode, _LongOrShortNames, _SetupTime) ->
    exit(erlang_r15b_required).

do_setup(_Kernel, _Node, _Type, _MyNode, _LongOrShortNames, _SetupTime) ->
    exit(erlang_r15b_required).
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(inet_tcp_proxy).
+
+%% A TCP proxy for insertion into the Erlang distribution mechanism,
+%% which allows us to simulate network partitions.
+
+-export([start/0, reconnect/1, is_enabled/0, allow/1, block/1]).
+
+-define(TABLE, ?MODULE).
+
+%% This can't start_link because there's no supervision hierarchy we
+%% can easily fit it into (we need to survive all application
+%% restarts). So we have to do some horrible error handling.
+
%% Start the proxy acceptor in a free-standing process (see the
%% comment above on why this is not a supervised start_link), wrapped
%% so that unexpected crashes are reported loudly.
start() ->
    spawn(error_handler(fun go/0)),
    ok.
+
%% Drop the distribution connection to each of the given nodes (the
%% local node is skipped) so that it is re-established through the
%% proxy on next use.
reconnect(Nodes) ->
    lists:foreach(fun (Node) when Node =/= node() ->
                          erlang:disconnect_node(Node);
                      (_) ->
                          ok
                  end, Nodes),
    ok.
+
%% The proxy is considered enabled iff its control ETS table exists
%% on this node.
is_enabled() ->
    lists:member(?TABLE, ets:all()).

%% Allow/block traffic to and from the given node. Blocking only
%% causes the proxy to buffer data (see run_loop/3); no connection is
%% torn down here.
allow(Node) -> ets:delete(?TABLE, Node).
block(Node) -> ets:insert(?TABLE, {Node, block}).
+
+%%----------------------------------------------------------------------------
+
%% Wrap Thunk as a process body: expected shutdown noise is swallowed,
%% anything else is printed and halts the whole VM so a broken proxy
%% cannot hang the test run silently.
error_handler(Thunk) ->
    fun () ->
            try
                Thunk()
            catch _:{{nodedown, _}, _} ->
                    %% The only other node we ever talk to is the test
                    %% runner; if that's down then the test is nearly
                    %% over; die quietly.
                    ok;
                  _:X ->
                    %% NOTE(review): erlang:get_stacktrace/0 is
                    %% deprecated/removed in modern OTP; fine for the
                    %% OTP versions this code targets.
                    io:format(user, "TCP proxy died with ~p~n At ~p~n",
                              [X, erlang:get_stacktrace()]),
                    erlang:halt(1)
            end
    end.
+
%% Create the control table and listen on the proxy port, which is
%% fixed at the node's distribution port + 10000.
go() ->
    ets:new(?TABLE, [public, named_table]),
    {ok, Port} = application:get_env(kernel, inet_dist_listen_min),
    ProxyPort = Port + 10000,
    {ok, Sock} = gen_tcp:listen(ProxyPort, [inet,
                                            {reuseaddr, true}]),
    accept_loop(Sock, Port).
+
%% Accept incoming proxy connections forever; each accepted socket is
%% handed to a fresh process which relays to the real distribution
%% port.
accept_loop(ListenSock, Port) ->
    {ok, Sock} = gen_tcp:accept(ListenSock),
    Proxy = spawn(error_handler(fun() -> run_it(Sock, Port) end)),
    ok = gen_tcp:controlling_process(Sock, Proxy),
    accept_loop(ListenSock, Port).
+
%% Bridge one proxied connection: look up which remote node owns the
%% source port (registered with inet_tcp_proxy_manager by the
%% connecting side), sanity-check that the registration was meant for
%% this node, connect to the local distribution port and relay.
run_it(SockIn, Port) ->
    case {inet:peername(SockIn), inet:sockname(SockIn)} of
        {{ok, {_Addr, SrcPort}}, {ok, {Addr, _OtherPort}}} ->
            {ok, Remote, This} = inet_tcp_proxy_manager:lookup(SrcPort),
            case node() of
                This -> ok;
                _    -> exit({not_me, node(), This})
            end,
            {ok, SockOut} = gen_tcp:connect(Addr, Port, [inet]),
            run_loop({SockIn, SockOut}, Remote, []);
        _ ->
            %% peername/sockname failed: peer already went away.
            ok
    end.
+
%% Shuttle data between the two sockets of a proxied connection.
%% While RemoteNode is blocked (see block/1), incoming chunks are
%% buffered (newest first) rather than forwarded; once unblocked, the
%% next chunk flushes the whole buffer in original order.
run_loop(Sockets, RemoteNode, Buf0) ->
    Block = [{RemoteNode, block}] =:= ets:lookup(?TABLE, RemoteNode),
    receive
        {tcp, Sock, Data} ->
            Buf = [Data | Buf0],
            case Block of
                false -> gen_tcp:send(other(Sock, Sockets), lists:reverse(Buf)),
                         run_loop(Sockets, RemoteNode, []);
                true  -> run_loop(Sockets, RemoteNode, Buf)
            end;
        {tcp_closed, Sock} ->
            %% One side closed: close the other; any buffered data is
            %% dropped (fine when simulating a partition).
            gen_tcp:close(other(Sock, Sockets));
        X ->
            exit({weirdness, X})
    end.
+
%% Given one socket of a connected pair, return the other one.
other(Sock, {Sock, Peer}) -> Peer;
other(Sock, {Peer, Sock}) -> Peer.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(inet_tcp_proxy_manager).
+
+%% The TCP proxies need to decide whether to block based on the node
+%% they're running on, and the node connecting to them. The trouble
+%% is, they don't have an easy way to determine the latter. Therefore
+%% when A connects to B we register the source port used by A here, so
+%% that B can later look it up and find out who A is without having to
+%% sniff the distribution protocol.
+%%
+%% That does unfortunately mean that we need a central control
+%% thing. We assume here it's running on the node called
+%% 'standalone_test' since that's where tests are orchestrated from.
+%%
+%% Yes, this leaks. For its intended lifecycle, that's fine.
+
+-behaviour(gen_server).
+
+-export([start_link/0, register/5, lookup/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(NODE, standalone_test).
+
+-record(state, {ports, pending}).
+
%% Start the manager. The double match on Node asserts that we are
%% running on the controller node (see module comment) and crashes
%% the caller otherwise.
start_link() ->
    Node = node(),
    Node = controller_node(),
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
%% Record that From connected to To using SrcPort. When the real port
%% and the proxy port coincide no proxy is in play, so there is
%% nothing to record.
register(_From, _To, _SrcPort, Port, Port) ->
    %% No proxy, don't register
    ok;
register(From, To, SrcPort, _Port, _ProxyPort) ->
    gen_server:call(name(), {register, From, To, SrcPort}, infinity).

%% Resolve a source port to {ok, FromNode, ToNode}; blocks until the
%% corresponding register/5 call has arrived.
lookup(SrcPort) ->
    gen_server:call(name(), {lookup, SrcPort}, infinity).
+
%% The node that hosts the (singleton) manager process.
controller_node() ->
    rabbit_nodes:make(atom_to_list(?NODE)).

%% Globally addressable name of the manager: locally registered on
%% the controller node.
name() ->
    {?MODULE, controller_node()}.
+
+%%----------------------------------------------------------------------------
+
%% ports: SrcPort -> {FromNode, ToNode}; pending: lookups that
%% arrived before their registration, parked as {SrcPort, From}.
init([]) ->
    {ok, #state{ports   = dict:new(),
                pending = []}}.
+
%% register: store the mapping and wake any lookup callers that
%% arrived for this source port before the registration did.
handle_call({register, FromNode, ToNode, SrcPort}, _From,
            State = #state{ports   = Ports,
                           pending = Pending}) ->
    {Notify, Pending2} =
        lists:partition(fun ({P, _}) -> P =:= SrcPort end, Pending),
    [gen_server:reply(From, {ok, FromNode, ToNode}) || {_, From} <- Notify],
    {reply, ok,
     State#state{ports   = dict:store(SrcPort, {FromNode, ToNode}, Ports),
                 pending = Pending2}};

%% lookup: answer immediately if registered, otherwise park the
%% caller (no reply yet) until the matching register arrives.
handle_call({lookup, SrcPort}, From,
            State = #state{ports = Ports, pending = Pending}) ->
    case dict:find(SrcPort, Ports) of
        {ok, {FromNode, ToNode}} ->
            {reply, {ok, FromNode, ToNode}, State};
        error ->
            {noreply, State#state{pending = [{SrcPort, From} | Pending]}}
    end;

handle_call(_Req, _From, State) ->
    {reply, unknown_request, State}.
+
%% Remaining gen_server callbacks: boilerplate, nothing to do.
handle_cast(_C, State) ->
    {noreply, State}.

handle_info(_I, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_, State, _) -> {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(rabbit_ha_test_consumer).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([await_response/1, create/5, start/6]).
+
%% Block until the consumer process reports its final result; return
%% ok on success or crash with the reported reason on failure.
await_response(ConsumerPid) ->
    Response = receive {ConsumerPid, R} -> R end,
    case Response of
        {error, Reason} -> erlang:error(Reason);
        ok              -> ok
    end.
+
%% Spawn a consumer on Queue and subscribe it. LowestSeen starts at
%% ExpectingMsgs + 1 because payloads count down from ExpectingMsgs
%% (see run/6).
create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) ->
    ConsumerPid = spawn_link(?MODULE, start,
                             [TestPid, Channel, Queue, CancelOnFailover,
                              ExpectingMsgs + 1, ExpectingMsgs]),
    amqp_channel:subscribe(
      Channel, consume_method(Queue, CancelOnFailover), ConsumerPid),
    ConsumerPid.
+
%% Entry point of the consumer process: log the parameters and enter
%% the receive loop.
start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
    error_logger:info_msg("consumer ~p on ~p awaiting ~w messages "
                          "(lowest seen = ~w, cancel-on-failover = ~w)~n",
                          [self(), Channel, MsgsToConsume, LowestSeen,
                           CancelOnFailover]),
    run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
%% Consumer receive loop: consume until MsgsToConsume distinct
%% messages have been seen, then report ok to the test process.
%% Payloads are integers counting down; LowestSeen is the smallest
%% payload consumed so far, so any payload >= LowestSeen is a
%% redelivery and must not be counted again.
%% (Fix: corrected "redeliverd" -> "redelivered" in the log message.)
run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) ->
    consumer_reply(TestPid, ok);
run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
    receive
        #'basic.consume_ok'{} ->
            run(TestPid, Channel, Queue,
                CancelOnFailover, LowestSeen, MsgsToConsume);
        {Delivery = #'basic.deliver'{ redelivered = Redelivered },
         #amqp_msg{payload = Payload}} ->
            MsgNum = list_to_integer(binary_to_list(Payload)),

            ack(Delivery, Channel),

            %% we can receive any message we've already seen and,
            %% because of the possibility of multiple requeuings, we
            %% might see these messages in any order. If we are seeing
            %% a message again, we don't decrement the MsgsToConsume
            %% counter.
            if
                MsgNum + 1 == LowestSeen ->
                    run(TestPid, Channel, Queue,
                        CancelOnFailover, MsgNum, MsgsToConsume - 1);
                MsgNum >= LowestSeen ->
                    error_logger:info_msg(
                      "consumer ~p on ~p ignoring redelivered msg ~p~n",
                      [self(), Channel, MsgNum]),
                    true = Redelivered, %% ASSERTION
                    run(TestPid, Channel, Queue,
                        CancelOnFailover, LowestSeen, MsgsToConsume);
                true ->
                    %% We received a message we haven't seen before,
                    %% but it is not the next message in the expected
                    %% sequence.
                    consumer_reply(TestPid,
                                   {error, {unexpected_message, MsgNum}})
            end;
        #'basic.cancel'{} when CancelOnFailover ->
            error_logger:info_msg("consumer ~p on ~p received basic.cancel: "
                                  "resubscribing to ~p on ~p~n",
                                  [self(), Channel, Queue, Channel]),
            resubscribe(TestPid, Channel, Queue, CancelOnFailover,
                        LowestSeen, MsgsToConsume);
        #'basic.cancel'{} ->
            exit(cancel_received_without_cancel_on_failover)
    end.
+
+%%
+%% Private API
+%%
+
%% Re-establish the subscription after a mirrored-queue failover
%% cancelled it, then resume consuming where we left off.
%% (Fix: corrected "re-subscripting" -> "re-subscribing" in the log
%% message.)
resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen,
            MsgsToConsume) ->
    amqp_channel:subscribe(
      Channel, consume_method(Queue, CancelOnFailover), self()),
    ok = receive #'basic.consume_ok'{} -> ok
         end,
    error_logger:info_msg("re-subscribing consumer ~p on ~p complete "
                          "(received basic.consume_ok)",
                          [self(), Channel]),
    start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
%% Build the basic.consume method; x-cancel-on-ha-failover makes the
%% broker send basic.cancel to the consumer on failover.
consume_method(Queue, CancelOnFailover) ->
    Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}],
    #'basic.consume'{queue     = Queue,
                     arguments = Args}.
+
%% Acknowledge a single delivery on the given channel.
ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) ->
    amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
    ok.
+
%% Report the final result to the coordinating test process, tagged
%% with our pid so await_response/1 can match it.
consumer_reply(TestPid, Reply) ->
    Message = {self(), Reply},
    TestPid ! Message.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(rabbit_ha_test_producer).
+
+-export([await_response/1, start/5, create/5]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
%% Block until the producer process reports its final result; exit on
%% anything other than success.
await_response(ProducerPid) ->
    error_logger:info_msg("waiting for producer pid ~p~n", [ProducerPid]),
    Result = receive {ProducerPid, R} -> R end,
    case Result of
        ok               -> ok;
        {error, _} = Err -> exit(Err);
        Other            -> exit({weird_response, Other})
    end.
+
%% Spawn a producer process and wait for it to confirm startup before
%% returning its pid.
create(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
    ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid,
                                              Confirm, MsgsToSend]),
    receive
        {ProducerPid, started} -> ProducerPid
    end.
+
%% Entry point of the producer process. When Confirm is true the
%% channel is put into confirm mode and outstanding publishes are
%% tracked in a gb_tree of seqno -> message number; otherwise the
%% confirm state is the atom 'none'.
start(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
    ConfirmState =
        case Confirm of
            true  -> amqp_channel:register_confirm_handler(Channel, self()),
                     #'confirm.select_ok'{} =
                         amqp_channel:call(Channel, #'confirm.select'{}),
                     gb_trees:empty();
            false -> none
        end,
    TestPid ! {self(), started},
    error_logger:info_msg("publishing ~w msgs on ~p~n", [MsgsToSend, Channel]),
    producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend).
+
+%%
+%% Private API
+%%
+
%% Publish MsgsToSend persistent messages (payload = remaining count,
%% counting down to 1). Without confirms, report ok as soon as the
%% last publish returns; with confirms, wait for all acks/nacks and
%% report accordingly.
producer(_Channel, _Queue, TestPid, none, 0) ->
    TestPid ! {self(), ok};
producer(Channel, _Queue, TestPid, ConfirmState, 0) ->
    error_logger:info_msg("awaiting confirms on channel ~p~n", [Channel]),
    Msg = case drain_confirms(no_nacks, ConfirmState) of
              no_nacks    -> ok;
              nacks       -> {error, received_nacks};
              {Nacks, CS} -> {error, {missing_confirms, Nacks,
                                      lists:sort(gb_trees:keys(CS))}}
          end,
    TestPid ! {self(), Msg};

producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend) ->
    Method = #'basic.publish'{exchange    = <<"">>,
                              routing_key = Queue,
                              mandatory   = false,
                              immediate   = false},

    ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend),

    %% delivery_mode = 2 makes the message persistent.
    amqp_channel:call(Channel, Method,
                      #amqp_msg{props = #'P_basic'{delivery_mode = 2},
                                payload = list_to_binary(
                                            integer_to_list(MsgsToSend))}),

    producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1).
+
%% In confirm mode, remember the publish seqno of the message about
%% to be sent; a no-op when confirms are disabled.
maybe_record_confirm(none, _, _) ->
    none;
maybe_record_confirm(ConfirmState, Channel, MsgsToSend) ->
    SeqNo = amqp_channel:next_publish_seqno(Channel),
    gb_trees:insert(SeqNo, MsgsToSend, ConfirmState).
+
%% Wait for acks/nacks until every outstanding publish is accounted
%% for. Returns the Nacks flag (no_nacks | nacks) when fully drained,
%% or {Nacks, RemainingState} if no confirm arrives within 60s.
drain_confirms(Nacks, ConfirmState) ->
    case gb_trees:is_empty(ConfirmState) of
        true  -> Nacks;
        false -> receive
                     #'basic.ack'{delivery_tag = DeliveryTag,
                                  multiple     = IsMulti} ->
                         drain_confirms(Nacks,
                                        delete_confirms(DeliveryTag, IsMulti,
                                                        ConfirmState));
                     #'basic.nack'{delivery_tag = DeliveryTag,
                                   multiple     = IsMulti} ->
                         %% Any nack flips the flag for the final
                         %% result.
                         drain_confirms(nacks,
                                        delete_confirms(DeliveryTag, IsMulti,
                                                        ConfirmState))
                 after
                     60000 -> {Nacks, ConfirmState}
                 end
    end.
+
%% Remove confirmed seqnos from the outstanding set: a "multiple"
%% ack/nack covers every tag up to and including DeliveryTag.
delete_confirms(DeliveryTag, true, ConfirmState) ->
    multi_confirm(DeliveryTag, ConfirmState);
delete_confirms(DeliveryTag, false, ConfirmState) ->
    gb_trees:delete(DeliveryTag, ConfirmState).
+
%% Drop every outstanding seqno that is =< DeliveryTag, relying on
%% gb_trees ordering to stop at the first larger key.
multi_confirm(DeliveryTag, ConfirmState) ->
    case gb_trees:is_empty(ConfirmState) of
        true ->
            ConfirmState;
        false ->
            {Smallest, _Val, Rest} = gb_trees:take_smallest(ConfirmState),
            case Smallest =< DeliveryTag of
                true  -> multi_confirm(DeliveryTag, Rest);
                false -> ConfirmState
            end
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(rabbit_test_configs).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([enable_plugins/1]).
+-export([cluster/2, cluster_ab/1, cluster_abc/1, start_ab/1, start_abc/1]).
+-export([start_connections/1, build_cluster/1]).
+-export([ha_policy_all/1, ha_policy_two_pos/1]).
+-export([start_nodes/2, start_nodes/3, add_to_cluster/2]).
+-export([stop_nodes/1, start_node/1, stop_node/1, kill_node/1, restart_node/1,
+ execute/1]).
+-export([cover_work_factor/2]).
+
+-import(rabbit_test_util, [set_ha_policy/3, set_ha_policy/4, a2b/1]).
+-import(rabbit_misc, [pget/2]).
+
+-define(INITIAL_KEYS, [cover, base, server, plugins]).
+-define(NON_RUNNING_KEYS, ?INITIAL_KEYS ++ [nodename, port]).
+
%% Convenience wrappers for the common two- and three-node setups.
cluster_ab(InitialCfg)  -> cluster(InitialCfg, [a, b]).
cluster_abc(InitialCfg) -> cluster(InitialCfg, [a, b, c]).
start_ab(InitialCfg)    -> start_nodes(InitialCfg, [a, b]).
start_abc(InitialCfg)   -> start_nodes(InitialCfg, [a, b, c]).
+
%% Start the named nodes, join them into one cluster, and open an
%% AMQP connection + channel to each.
cluster(InitialCfg, NodeNames) ->
    start_connections(build_cluster(start_nodes(InitialCfg, NodeNames))).
+
%% Start nodes with AMQP ports allocated sequentially from 5672.
start_nodes(InitialCfg, NodeNames) ->
    start_nodes(InitialCfg, NodeNames, 5672).
+
%% Start one broker per name, with consecutive ports from FirstPort.
%% InitialCfg0 may be a single proplist (shared by all nodes) or a
%% list of per-node proplists.
start_nodes(InitialCfg0, NodeNames, FirstPort) ->
    {ok, Already0} = net_adm:names(),
    Already = [list_to_atom(N) || {N, _P} <- Already0],
    [check_node_not_running(Node, Already) || Node <- NodeNames],
    Ports = lists:seq(FirstPort, length(NodeNames) + FirstPort - 1),
    %% A proplist (its head is a 2-tuple) is replicated per node;
    %% otherwise it is already a list of per-node configs.
    InitialCfgs = case InitialCfg0 of
                      [{_, _}|_] -> [InitialCfg0 || _ <- NodeNames];
                      _          -> InitialCfg0
                  end,
    Nodes = [[{nodename, N}, {port, P} | strip_non_initial(Cfg)]
             || {N, P, Cfg} <- lists:zip3(NodeNames, Ports, InitialCfgs)],
    [start_node(Node) || Node <- Nodes].
+
%% Refuse to start a node whose name is already registered with epmd.
check_node_not_running(Node, AlreadyRunning) ->
    IsRunning = lists:member(Node, AlreadyRunning),
    case IsRunning of
        true  -> exit({node_already_running, Node});
        false -> ok
    end.
+
%% Keep only the keys that survive across a fresh node start.
strip_non_initial(Cfg) ->
    [{K, V} || {K, V} <- Cfg, lists:member(K, ?INITIAL_KEYS)].

%% Keep only the keys that remain meaningful once a node is stopped.
strip_running(Cfg) ->
    [{K, V} || {K, V} <- Cfg, lists:member(K, ?NON_RUNNING_KEYS)].
+
%% Enable every plugin found in the configured plugins dir (a no-op
%% when the config has no plugins dir).
enable_plugins(Cfg) -> enable_plugins(pget(plugins, Cfg), pget(server, Cfg)).

enable_plugins(none, _Server) -> ok;
enable_plugins(Dir, Server) ->
    Env = plugins_env(Dir),
    %% "-m" lists plugin names only, one per line.
    R = execute(Env, Server ++ "/scripts/rabbitmq-plugins list -m"),
    Plugins = string:tokens(R, "\n"),
    [execute(Env, {Server ++ "/scripts/rabbitmq-plugins enable ~s", [Plugin]})
     || Plugin <- Plugins],
    ok.
+
%% Environment variables pointing the broker at the plugins dir; with
%% no dir, point the enabled-plugins file at a non-existent path so
%% nothing gets loaded.
plugins_env(none) ->
    [{"RABBITMQ_ENABLED_PLUGINS_FILE", "/does-not-exist"}];
plugins_env(Dir) ->
    [{"RABBITMQ_PLUGINS_DIR",          {"~s/plugins", [Dir]}},
     {"RABBITMQ_PLUGINS_EXPAND_DIR",   {"~s/expand", [Dir]}},
     {"RABBITMQ_ENABLED_PLUGINS_FILE", {"~s/enabled_plugins", [Dir]}}].
+
%% Start a single broker described by Cfg (nodename, port, base dir,
%% server dir, plugins). Blocks until "rabbitmqctl wait" reports the
%% node up, then returns Cfg augmented with the node name, pid file,
%% OS pid and the linked launcher process.
start_node(Cfg) ->
    Nodename = pget(nodename, Cfg),
    Port = pget(port, Cfg),
    Base = pget(base, Cfg),
    Server = pget(server, Cfg),
    PidFile = rabbit_misc:format("~s/~s.pid", [Base, Nodename]),
    Linked =
        execute_bg(
          [{"RABBITMQ_MNESIA_BASE", {"~s/rabbitmq-~s-mnesia", [Base,Nodename]}},
           {"RABBITMQ_LOG_BASE",    {"~s", [Base]}},
           {"RABBITMQ_NODENAME",    {"~s", [Nodename]}},
           {"RABBITMQ_NODE_PORT",   {"~B", [Port]}},
           {"RABBITMQ_PID_FILE",    PidFile},
           {"RABBITMQ_CONFIG_FILE", "/some/path/which/does/not/exist"},
           {"RABBITMQ_ALLOW_INPUT", "1"}, %% Needed to make it close on our exit
           %% Bit of a hack - only needed for mgmt tests.
           {"RABBITMQ_SERVER_START_ARGS",
            {"-rabbitmq_management listener [{port,1~B}]", [Port]}},
           {"RABBITMQ_SERVER_ERL_ARGS",
            %% Next two lines are defaults
            {"+K true +A30 +P 1048576 "
             "-kernel inet_default_connect_options [{nodelay,true}] "
             %% Some tests need to be able to make distribution unhappy
             "-pa ~s/../rabbitmq-test/ebin "
             "-proto_dist inet_proxy", [Server]}}
           | plugins_env(pget(plugins, Cfg))],
          Server ++ "/scripts/rabbitmq-server"),
    execute({Server ++ "/scripts/rabbitmqctl -n ~s wait ~s",
             [Nodename, PidFile]}),
    Node = rabbit_nodes:make(Nodename),
    OSPid = rpc:call(Node, os, getpid, []),
    %% The cover system thinks all nodes with the same name are the
    %% same node and will automatically re-establish cover as soon as
    %% we see them, so we only want to start cover once per node name
    %% for the entire test run.
    case {pget(cover, Cfg), lists:member(Node, cover:which_nodes())} of
        {true, false} -> cover:start([Node]);
        _             -> ok
    end,
    [{node,       Node},
     {pid_file,   PidFile},
     {os_pid,     OSPid},
     {linked_pid, Linked} | Cfg].
+
%% Join all nodes into one cluster rooted at the first node.
build_cluster([First | Rest]) ->
    add_to_cluster([First], Rest).

%% Join each new node to the first existing node's cluster.
add_to_cluster([First | _] = Existing, New) ->
    [cluster_with(First, Node) || Node <- New],
    Existing ++ New.
+
%% Join the node described by NewCfg to the cluster of the node in
%% Cfg, via the usual stop_app / join_cluster / start_app dance.
cluster_with(Cfg, NewCfg) ->
    Node = pget(node, Cfg),
    NewNodename = pget(nodename, NewCfg),
    Server = pget(server, Cfg),
    execute({Server ++ "/scripts/rabbitmqctl -n ~s stop_app",
             [NewNodename]}),
    execute({Server ++ "/scripts/rabbitmqctl -n ~s join_cluster ~s",
             [NewNodename, Node]}),
    execute({Server ++ "/scripts/rabbitmqctl -n ~s start_app",
             [NewNodename]}).
+
%% Mirror every queue across all cluster nodes.
ha_policy_all([Cfg | _] = Cfgs) ->
    set_ha_policy(Cfg, <<".*">>, <<"all">>),
    Cfgs.
+
%% Declare two-node mirroring policies: queues matching "ha.two." are
%% mirrored across the first two cluster nodes; "ha.auto." queues
%% additionally use automatic synchronisation.
%% (Fix: replaced a no-op identity list comprehension with a plain
%% lists:sublist/2 call.)
ha_policy_two_pos([Cfg | _] = Cfgs) ->
    Members = [a2b(pget(node, C)) || C <- Cfgs],
    TwoNodes = lists:sublist(Members, 2),
    set_ha_policy(Cfg, <<"^ha.two.">>, {<<"nodes">>, TwoNodes}, []),
    set_ha_policy(Cfg, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes},
                  [{<<"ha-sync-mode">>, <<"automatic">>}]),
    Cfgs.
+
%% Open an AMQP connection and channel to each node, storing both in
%% the node's config.
start_connections(Nodes) -> [start_connection(Node) || Node <- Nodes].

start_connection(Cfg) ->
    Port = pget(port, Cfg),
    {ok, Conn} = amqp_connection:start(#amqp_params_network{port = Port}),
    {ok, Ch} = amqp_connection:open_channel(Conn),
    [{connection, Conn}, {channel, Ch} | Cfg].
+
%% Stop brokers cleanly, flushing cover data first. Failures of
%% rabbitmqctl are swallowed (the node may already be down).
stop_nodes(Nodes) -> [stop_node(Node) || Node <- Nodes].

stop_node(Cfg) ->
    Server = pget(server, Cfg),
    maybe_flush_cover(Cfg),
    catch execute({Server ++ "/scripts/rabbitmqctl -n ~s stop ~s",
                   [pget(nodename, Cfg), pget(pid_file, Cfg)]}),
    strip_running(Cfg).
+
%% Kill the broker's OS process outright (SIGKILL); errors from kill
%% are swallowed since the process may already be gone.
kill_node(Cfg) ->
    maybe_flush_cover(Cfg),
    catch execute({"kill -9 ~s", [pget(os_pid, Cfg)]}),
    strip_running(Cfg).

%% Clean stop followed by a fresh start of the same node.
restart_node(Cfg) ->
    start_node(stop_node(Cfg)).
+
%% Flush cover data off the node before it goes away, when cover is
%% enabled for this run.
maybe_flush_cover(Cfg) ->
    case pget(cover, Cfg) of
        true  -> cover:flush(pget(node, Cfg));
        false -> ok
    end.
+
+%% Cover slows things down enough that if we are sending messages in
+%% bulk, we want to send fewer or we'll be here all day...
+cover_work_factor(Without, Cfg) ->
+ case pget(cover, Cfg) of
+ true -> trunc(Without * 0.1);
+ false -> Without
+ end.
+
+%%----------------------------------------------------------------------------
+
%% Run a shell command (a string or {Format, Args}) with the given
%% extra environment, returning captured stdout+stderr; exits with
%% {exit_status, X, Output} on failure (see port_receive_loop/2).
execute(Cmd) -> execute([], Cmd).

execute(Env0, Cmd0) ->
    Env = [{K, fmt(V)} || {K, V} <- Env0],
    Cmd = fmt(Cmd0),
    %% NOTE(review): Cmd is interpolated into a double-quoted sh -c
    %% string, so commands containing double quotes would break;
    %% callers only pass trusted, test-generated commands.
    Port = erlang:open_port(
             {spawn, "/usr/bin/env sh -c \"" ++ Cmd ++ "\""},
             [{env, Env}, exit_status,
              stderr_to_stdout, use_stdio]),
    port_receive_loop(Port, "").
+
%% Collect output from Port until the command exits. Exit status 0
%% succeeds; 137 is tolerated too, since it means killed with SIGKILL
%% which we do deliberately in some tests; anything else exits with
%% the captured output.
%% (Fix: output chunks are accumulated as an iolist and flattened
%% once on exit, instead of the quadratic "Stdout ++ Out" append.)
port_receive_loop(Port, Stdout) ->
    receive
        {Port, {exit_status, 0}}   -> lists:flatten(Stdout);
        {Port, {exit_status, 137}} -> lists:flatten(Stdout);
        {Port, {exit_status, X}}   -> exit({exit_status, X,
                                            lists:flatten(Stdout)});
        {Port, {data, Out}}        -> port_receive_loop(Port, [Stdout, Out])
    end.
+
%% Run a command in a background linked process. When the command
%% finishes, unlink from everything so the watcher's normal exit does
%% not take anyone down with it.
execute_bg(Env, Cmd) ->
    spawn_link(fun () ->
                       execute(Env, Cmd),
                       {links, Links} = process_info(self(), links),
                       [unlink(L) || L <- Links]
               end).
+
%% Render {Format, Args} via rabbit_misc:format/2; pass strings
%% through untouched.
fmt({Fmt, Args}) -> rabbit_misc:format(Fmt, Args);
fmt(Str)         -> Str.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_test_runner).
+
+-include_lib("kernel/include/file.hrl").
+
+-define(TIMEOUT, 600).
+
+-import(rabbit_misc, [pget/2]).
+
+-export([run_in_broker/2, run_multi/5]).
+
%% Run the "_test"-suffixed tests from Dir inside the current broker
%% VM under eunit.
run_in_broker(Dir, Filter) ->
    io:format("~nIn-broker tests~n================~n~n", []),
    eunit:test(make_tests_single(Dir, Filter, ?TIMEOUT), []).
+
%% Run the "_with"-suffixed multi-node tests from Dir, optionally
%% under cover; returns the eunit result.
run_multi(ServerDir, Dir, Filter, Cover, PluginsDir) ->
    io:format("~nMulti-node tests~n================~n~n", []),
    %% Umbrella does not give us -sname
    net_kernel:start([?MODULE, shortnames]),
    inets:start(), %% Used by HTTP tests
    error_logger:tty(false),
    case Cover of
        true  -> io:format("Cover compiling..."),
                 cover:start(),
                 ok = rabbit_misc:enable_cover(["../rabbitmq-server/"]),
                 io:format(" done.~n~n");
        false -> ok
    end,
    R = eunit:test(make_tests_multi(
                     ServerDir, Dir, Filter, Cover, PluginsDir, ?TIMEOUT), []),
    case Cover of
        true  -> io:format("~nCover reporting..."),
                 ok = rabbit_misc:report_cover(),
                 io:format(" done.~n~n");
        false -> ok
    end,
    R.
+
%% Build eunit test descriptors for all in-broker tests matching
%% Filter (functions ending in "_test").
make_tests_single(Dir, Filter, Timeout) ->
    {Filtered, AllCount, Width} = find_tests(Dir, Filter, "_test"),
    io:format("Running ~B of ~B tests; FILTER=~s~n~n",
              [length(Filtered), AllCount, Filter]),
    [make_test_single(M, FWith, F, ShowHeading, Timeout, Width)
     || {M, FWith, F, ShowHeading} <- annotate_show_heading(Filtered)].
+
%% Build eunit test descriptors for all multi-node tests matching
%% Filter (functions ending in "_with"), sharing one base config.
make_tests_multi(ServerDir, Dir, Filter, Cover, PluginsDir, Timeout) ->
    {Filtered, AllCount, Width} = find_tests(Dir, Filter, "_with"),
    io:format("Running ~B of ~B tests; FILTER=~s; COVER=~s~n~n",
              [length(Filtered), AllCount, Filter, Cover]),
    Cfg = [{cover,   Cover},
           {base,    basedir() ++ "/nodes"},
           {server,  ServerDir},
           {plugins, PluginsDir}],
    rabbit_test_configs:enable_plugins(Cfg),
    [make_test_multi(M, FWith, F, ShowHeading, Timeout, Width, Cfg)
     || {M, FWith, F, ShowHeading} <- annotate_show_heading(Filtered)].
+
%% Scan compiled modules in Dir for exported functions ending in
%% Suffix; return {MatchingTests, TotalCount, WidestNameLength} where
%% each test is {Module, ExportedName, NameWithoutSuffix}.
find_tests(Dir, Filter, Suffix) ->
    All = [{M, FWith, F} ||
              M <- modules(Dir),
              {FWith, _Arity} <- proplists:get_value(exports, M:module_info()),
              string:right(atom_to_list(FWith), length(Suffix)) =:= Suffix,
              F <- [truncate_function_name(FWith, length(Suffix))]],
    Filtered = [Test || {M, _FWith, F} = Test <- All,
                        should_run(M, F, Filter)],
    %% Width is used to align the per-test status output.
    Width = case Filtered of
                [] -> 0;
                _  -> lists:max([atom_length(F) || {_, _, F} <- Filtered])
            end,
    {Filtered, length(All), Width}.
+
%% One in-broker test: print progress around a direct call of the
%% test function, wrapped in an eunit timeout.
make_test_single(M, FWith, F, ShowHeading, Timeout, Width) ->
    {timeout,
     Timeout,
     fun () ->
             maybe_print_heading(M, ShowHeading),
             io:format(user, "~s [running]", [name(F, Width)]),
             M:FWith(),
             io:format(user, " [PASSED].~n", [])
     end}.
+
%% One multi-node test as an eunit setup fixture: the setup phase
%% builds the node topology declared by M:FWith() on top of
%% InitialCfg, the test phase runs M:F(Nodes), and the teardown stops
%% the nodes whatever happened.
make_test_multi(M, FWith, F, ShowHeading, Timeout, Width, InitialCfg) ->
    {setup,
     fun () ->
             maybe_print_heading(M, ShowHeading),
             io:format(user, "~s [setup]", [name(F, Width)]),
             setup_error_logger(M, F, basedir()),
             recursive_delete(pget(base, InitialCfg)),
             try
                 apply_config(M:FWith(), InitialCfg)
             catch
                 error:{Type, Error, Cfg, Stack} ->
                     case Cfg of
                         InitialCfg -> ok; %% [0]
                         _          -> rabbit_test_configs:stop_nodes(Cfg)
                     end,
                     exit({Type, Error, Stack})
             end
     end,
     fun (Nodes) ->
             rabbit_test_configs:stop_nodes(Nodes),
             %% Partition tests change this, let's revert
             net_kernel:set_net_ticktime(60, 1),
             io:format(user, ".~n", [])
     end,
     fun (Nodes) ->
             [{timeout,
               Timeout,
               fun () ->
                       %% Link to the node launcher processes so a
                       %% node death fails the test promptly.
                       [link(pget(linked_pid, N)) || N <- Nodes],
                       io:format(user, " [running]", []),
                       M:F(Nodes),
                       io:format(user, " [PASSED]", [])
               end}]
     end}.
%% [0] If we didn't get as far as starting any nodes then we only have
%% one proplist for initial config, not several per node. So avoid
%% trying to "stop" it - it won't work (and there's nothing to do
%% anyway).
+
%% Print an underlined module-name heading before the first test of
%% each module; do nothing otherwise.
maybe_print_heading(Module, true) ->
    Underline = string:chars($-, atom_length(Module)),
    io:format(user, "~n~s~n~s~n", [Module, Underline]);
maybe_print_heading(_Module, false) ->
    ok.
+
%% Fold a test's declared configuration steps over Cfg. A step may be
%% a list of steps, the name of a rabbit_test_configs function, or a
%% fun; failures are re-raised with the config at time of failure so
%% the caller can clean up started nodes.
apply_config(Things, Cfg) when is_list(Things) ->
    lists:foldl(fun apply_config/2, Cfg, Things);
apply_config(F, Cfg) when is_atom(F) ->
    apply_config(fun (C) -> rabbit_test_configs:F(C) end, Cfg);
apply_config(F, Cfg) when is_function(F) ->
    try
        F(Cfg)
    catch
        Type:Error -> erlang:error({Type, Error, Cfg, erlang:get_stacktrace()})
    end.
+
%% Tag each test with a flag saying whether it is the first test of
%% its module (and should therefore print a module heading).
annotate_show_heading(Tests) ->
    annotate_show_heading(Tests, undefined).

annotate_show_heading([], _PrevModule) ->
    [];
annotate_show_heading([{M, FWith, F} | Rest], PrevModule) ->
    [{M, FWith, F, M =/= PrevModule} | annotate_show_heading(Rest, M)].
+
%% Redirect error_logger output to a per-test log file under Base,
%% closing any previously open log file first.
setup_error_logger(M, F, Base) ->
    case error_logger_logfile_filename() of
        {error, no_log_file} -> ok;
        _                    -> ok = error_logger:logfile(close)
    end,
    FN = rabbit_misc:format("~s/~s:~s.log", [basedir(), M, F]),
    ensure_dir(Base),
    ok = error_logger:logfile({open, FN}).
+
%% Strip a Length-character suffix from a function name atom,
%% returning the shortened atom.
truncate_function_name(FWith, Length) ->
    Name = atom_to_list(FWith),
    list_to_atom(lists:sublist(Name, length(Name) - Length)).
+
%% A test runs when the filter is "all" or matches "Module:Function"
%% as a regular expression.
should_run(_M, _F, "all") -> true;
should_run(M, F, Filter)  -> MF = rabbit_misc:format("~s:~s", [M, F]),
                             case re:run(MF, Filter) of
                                 {match, _} -> true;
                                 nomatch    -> false
                             end.
+
%% Create Path as a directory if it does not exist; crash if the path
%% exists but is a regular file.
ensure_dir(Path) ->
    case file:read_file_info(Path) of
        {ok, #file_info{type=regular}}   -> exit({exists_as_file, Path});
        {ok, #file_info{type=directory}} -> ok;
        _                                -> file:make_dir(Path)
    end.
+
%% Return the module names of all .beam files directly inside RelDir.
modules(RelDir) ->
    {ok, Entries} = file:list_dir(RelDir),
    lists:append([case string:tokens(Entry, ".") of
                      [ModName, "beam"] -> [list_to_atom(ModName)];
                      _                 -> []
                  end || Entry <- Entries]).
+
%% Remove Dir and everything under it via the shell.
recursive_delete(Dir) ->
    rabbit_test_configs:execute({"rm -rf ~s", [Dir]}).
+
%% Render the test name followed by a colon, space-padded so all
%% names line up in a column Width characters wide.
name(F, Width) ->
    Label = atom_to_list(F),
    Label ++ ":" ++ string:chars($\s, Width - length(Label)).
+
%% Number of characters in an atom's textual name.
atom_length(A) -> length(atom_to_list(A)).

%% Scratch directory shared by all multi-node test runs.
basedir() -> "/tmp/rabbitmq-multi-node".
+
+%% reimplement error_logger:logfile(filename) only using
+%% gen_event:call/4 instead of gen_event:call/3 with our old friend
+%% the 5 second timeout. Grr.
+error_logger_logfile_filename() ->
+ case gen_event:call(
+ error_logger, error_logger_file_h, filename, infinity) of
+ {error,_} -> {error, no_log_file};
+ Val -> Val
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(rabbit_test_util).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-import(rabbit_misc, [pget/2]).
+
+-compile(export_all).
+
%% Install an HA policy named after its own pattern, applied to
%% queues; Extra key/values are appended to the policy definition.
set_ha_policy(Cfg, Pattern, Policy) ->
    set_ha_policy(Cfg, Pattern, Policy, []).

set_ha_policy(Cfg, Pattern, Policy, Extra) ->
    set_policy(Cfg, Pattern, Pattern, <<"queues">>, ha_policy(Policy) ++ Extra).
+
+ha_policy(<<"all">>) -> [{<<"ha-mode">>, <<"all">>}];
+ha_policy({Mode, Params}) -> [{<<"ha-mode">>, Mode},
+ {<<"ha-params">>, Params}].
+
%% Set/delete a policy in the default vhost via RPC to the node in
%% Cfg (priority 0).
set_policy(Cfg, Name, Pattern, ApplyTo, Definition) ->
    ok = rpc:call(pget(node, Cfg), rabbit_policy, set,
                  [<<"/">>, Name, Pattern, Definition, 0, ApplyTo]).

clear_policy(Cfg, Name) ->
    ok = rpc:call(pget(node, Cfg), rabbit_policy, delete, [<<"/">>, Name]).
+
%% Set/clear a runtime parameter in the default vhost via RPC to the
%% node in Cfg.
set_param(Cfg, Component, Name, Value) ->
    ok = rpc:call(pget(node, Cfg), rabbit_runtime_parameters, set,
                  [<<"/">>, Component, Name, Value, none]).

clear_param(Cfg, Component, Name) ->
    ok = rpc:call(pget(node, Cfg), rabbit_runtime_parameters, clear,
                  [<<"/">>, Component, Name]).
+
%% Run a rabbitmqctl-style command on the node in Cfg via RPC,
%% routing the command's informational output to error_logger.
control_action(Command, Cfg) ->
    control_action(Command, Cfg, [], []).

control_action(Command, Cfg, Args) ->
    control_action(Command, Cfg, Args, []).

control_action(Command, Cfg, Args, Opts) ->
    Node = pget(node, Cfg),
    rpc:call(Node, rabbit_control_main, action,
             [Command, Node, Args, Opts,
              fun (F, A) ->
                      error_logger:info_msg(F ++ "~n", A)
              end]).
+
%% Convenience wrappers around stop_app/start_app control actions.
restart_app(Cfg) ->
    stop_app(Cfg),
    start_app(Cfg).

stop_app(Cfg) ->
    control_action(stop_app, Cfg).

start_app(Cfg) ->
    control_action(start_app, Cfg).
+
%% Open a fresh AMQP connection and channel to the node in Cfg.
connect(Cfg) ->
    Port = pget(port, Cfg),
    {ok, Conn} = amqp_connection:start(#amqp_params_network{port = Port}),
    {ok, Ch} = amqp_connection:open_channel(Conn),
    {Conn, Ch}.
+
+%%----------------------------------------------------------------------------
+
%% Take a node down (cleanly or with SIGKILL) after an optional
%% delay, and wait until it stops answering pings.
kill_after(Time, Cfg, Method) ->
    timer:sleep(Time),
    kill(Cfg, Method).

kill(Cfg, Method) ->
    kill0(Cfg, Method),
    wait_down(pget(node, Cfg)).

kill0(Cfg, stop)    -> rabbit_test_configs:stop_node(Cfg);
kill0(Cfg, sigkill) -> rabbit_test_configs:kill_node(Cfg).
+
+wait_down(Node) ->
+ case net_adm:ping(Node) of
+ pong -> timer:sleep(25),
+ wait_down(Node);
+ pang -> ok
+ end.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+%%----------------------------------------------------------------------------
+
+%% Publish Count persistent messages (payloads "1".."Count") to the
+%% default exchange with routing key QName, then wait for confirms so
+%% the messages are known to have reached the queue.
+publish(Ch, QName, Count) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [amqp_channel:call(Ch,
+ #'basic.publish'{routing_key = QName},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = list_to_binary(integer_to_list(I))})
+ || I <- lists:seq(1, Count)],
+ amqp_channel:wait_for_confirms(Ch).
+
+%% Subscribe with no_ack and receive exactly Count messages whose
+%% payloads are "1".."Count" in order (exits 'timeout' if a message
+%% does not arrive within 500ms), then assert the queue is empty via a
+%% redeclare and cancel the consumer.
+consume(Ch, QName, Count) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName, no_ack = true},
+ self()),
+ CTag = receive #'basic.consume_ok'{consumer_tag = C} -> C end,
+ [begin
+ Exp = list_to_binary(integer_to_list(I)),
+ receive {#'basic.deliver'{consumer_tag = CTag},
+ #amqp_msg{payload = Exp}} ->
+ ok
+ after 500 ->
+ exit(timeout)
+ end
+ end|| I <- lists:seq(1, Count)],
+ %% message_count = 0 proves nothing was left over or lost mid-stream.
+ #'queue.declare_ok'{message_count = 0}
+ = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = true}),
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+ ok.
+
+%% basic.get Count messages from QName without acking; payloads are not
+%% checked, only that a message was returned each time.
+fetch(Ch, QName, Count) ->
+ [{#'basic.get_ok'{}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}) ||
+ _ <- lists:seq(1, Count)],
+ ok.
--- /dev/null
+%% OTP application resource for the test suite. No mod entry: nothing
+%% is started, this exists only so the code can be loaded as an
+%% application.
+{application, rabbitmq_test,
+ [
+ {description, ""},
+ {vsn, "1"},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {env, []}
+ ]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(clustering_management).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+%% Start three unclustered nodes, cluster Rabbit with Bunny, add Hare
+%% as a ram node, then take the cluster apart again via reset.
+join_and_part_cluster_with() -> start_abc.
+join_and_part_cluster(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+ assert_not_clustered(Rabbit),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Bunny),
+
+ stop_join_start(Rabbit, Bunny),
+ assert_clustered([Rabbit, Bunny]),
+
+ %% 'true' = join as a ram node, hence disc = [Bunny, Rabbit] below.
+ stop_join_start(Hare, Bunny, true),
+ assert_cluster_status(
+ {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]},
+ [Rabbit, Hare, Bunny]),
+
+ %% Allow clustering with already clustered node
+ ok = stop_app(Rabbit),
+ {ok, already_member} = join_cluster(Rabbit, Hare),
+ ok = start_app(Rabbit),
+
+ stop_reset_start(Rabbit),
+ assert_not_clustered(Rabbit),
+ assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]},
+ [Hare, Bunny]),
+
+ stop_reset_start(Hare),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Bunny).
+
+%% Exercise the failure modes of join_cluster / reset.
+join_cluster_bad_operations_with() -> start_abc.
+join_cluster_bad_operations(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Non-existent node
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> join_cluster(Rabbit, non@existant) end),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Trying to cluster with mnesia running
+ assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+ assert_not_clustered(Rabbit),
+
+ %% Trying to cluster the node with itself
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Do not let the node leave the cluster or reset if it's the only
+ %% ram node
+ stop_join_start(Hare, Rabbit, true),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+ ok = stop_app(Hare),
+ assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+ assert_failure(fun () -> reset(Rabbit) end),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+
+ %% Cannot start RAM-only node first
+ ok = stop_app(Rabbit),
+ ok = stop_app(Hare),
+ assert_failure(fun () -> start_app(Hare) end),
+ ok = start_app(Rabbit),
+ ok = start_app(Hare),
+ ok.
+
+%% This tests that the nodes in the cluster are notified immediately of a node
+%% join, and not just after the app is started.
+join_to_start_interval_with() -> start_abc.
+join_to_start_interval(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ ok = stop_app(Rabbit),
+ ok = join_cluster(Rabbit, Hare),
+ %% Hare already lists Rabbit even though Rabbit's app is stopped.
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ ok = start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]).
+
+%% forget_cluster_node: failure modes, removing a stopped node from an
+%% online node, and removing from an offline node with --offline.
+forget_cluster_node_with() -> start_abc.
+forget_cluster_node(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Trying to remove a node not in the cluster should fail
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ %% Trying to remove an online node should fail
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+ ok = stop_app(Rabbit),
+ %% We're passing the --offline flag, but Hare is online
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end),
+ %% Removing some non-existent node will fail
+ assert_failure(fun () -> forget_cluster_node(Hare, non@existant) end),
+ ok = forget_cluster_node(Hare, Rabbit),
+ assert_not_clustered(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit]),
+
+ %% Now we can't start Rabbit since it thinks that it's still in the cluster
+ %% with Hare, while Hare disagrees.
+ assert_failure(fun () -> start_app(Rabbit) end),
+
+ ok = reset(Rabbit),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Now we remove Rabbit from an offline node.
+ stop_join_start(Bunny, Hare),
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare, Bunny]),
+ ok = stop_app(Hare),
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ %% This is fine but we need the flag
+ assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end),
+ %% Hare was not the second-to-last to go down
+ ok = forget_cluster_node(Hare, Bunny, true),
+ ok = start_app(Hare),
+ ok = start_app(Rabbit),
+ %% Bunny still thinks it's clustered with Rabbit and Hare
+ assert_failure(fun () -> start_app(Bunny) end),
+ ok = reset(Bunny),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+ assert_clustered([Rabbit, Hare]).
+
+%% Forgetting a node must remove the durable queues that lived on it,
+%% so their names become declarable again on the remaining nodes.
+forget_cluster_node_removes_things_with() -> start_abc.
+forget_cluster_node_removes_things([RabbitCfg, HareCfg, _BunnyCfg] = Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+ stop_join_start(Rabbit, Hare),
+ {_RConn, RCh} = rabbit_test_util:connect(RabbitCfg),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(RCh, #'queue.declare'{queue = <<"test">>,
+ durable = true}),
+
+ ok = stop_app(Rabbit),
+
+ %% The queue's home node is down, so redeclaring elsewhere 404s.
+ {_HConn, HCh} = rabbit_test_util:connect(HareCfg),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch amqp_channel:call(HCh, #'queue.declare'{queue = <<"test">>,
+ durable = true})),
+
+ ok = forget_cluster_node(Hare, Rabbit),
+
+ %% With Rabbit forgotten, the queue is gone and can be redeclared.
+ {_HConn2, HCh2} = rabbit_test_util:connect(HareCfg),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(HCh2, #'queue.declare'{queue = <<"test">>,
+ durable = true}),
+ ok.
+
+%% change_cluster_node_type between ram and disc, plus its failure
+%% modes (unclustered node; last disc node).
+change_cluster_node_type_with() -> start_abc.
+change_cluster_node_type(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ %% Trying to change the ram node when not clustered should always fail
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end),
+ assert_failure(fun () -> change_cluster_node_type(Rabbit, disc) end),
+ ok = start_app(Rabbit),
+
+ ok = stop_app(Rabbit),
+ join_cluster(Rabbit, Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, ram),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, disc),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, ram),
+ ok = start_app(Rabbit),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]},
+ [Rabbit, Hare]),
+
+ %% Changing the last disc node to ram should fail (Rabbit is already
+ %% ram here, so Hare is the only disc node).
+ ok = stop_app(Hare),
+ assert_failure(fun () -> change_cluster_node_type(Hare, ram) end),
+ ok = start_app(Hare).
+
+%% Membership changes made while a peer is down must be picked up
+%% correctly when that peer comes back (for both disc and ram peers).
+change_cluster_when_node_offline_with() -> start_abc.
+change_cluster_when_node_offline(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Cluster the three nodes
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ stop_join_start(Bunny, Hare),
+ assert_clustered([Rabbit, Hare, Bunny]),
+
+ %% Bring down Rabbit, and remove Bunny from the cluster while
+ %% Rabbit is offline
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ ok = reset(Bunny),
+ assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]),
+ %% Rabbit missed the removal, so it still lists Bunny.
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]),
+
+ %% Bring Rabbit back up
+ ok = start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+
+ %% Now the same, but Rabbit is a RAM node, and we bring up Bunny
+ %% before
+ ok = stop_app(Rabbit),
+ ok = change_cluster_node_type(Rabbit, ram),
+ ok = start_app(Rabbit),
+ stop_join_start(Bunny, Hare),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]},
+ [Rabbit, Hare, Bunny]),
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ ok = reset(Bunny),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]},
+ [Rabbit]),
+ ok = start_app(Rabbit),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+ assert_not_clustered(Bunny).
+
+%% update_cluster_nodes lets a stopped node learn about membership
+%% changes it missed from a named discovery node.
+update_cluster_nodes_test_with() -> start_abc.
+update_cluster_nodes_test(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Mnesia is running...
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+
+ ok = stop_app(Rabbit),
+ ok = join_cluster(Rabbit, Hare),
+ ok = stop_app(Bunny),
+ ok = join_cluster(Bunny, Hare),
+ ok = start_app(Bunny),
+ stop_reset_start(Hare),
+ assert_failure(fun () -> start_app(Rabbit) end),
+ %% Bogus node
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existant) end),
+ %% Inconsistent node
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+ ok = update_cluster_nodes(Rabbit, Bunny),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Hare),
+ assert_clustered([Rabbit, Bunny]).
+
+%% Clustering driven by the {cluster_nodes, {Nodes, NodeType}} rabbit
+%% app config, including the legacy list-only format.
+erlang_config_with() -> start_abc.
+erlang_config(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[Rabbit], disc}]),
+ ok = start_app(Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ %% Same, but joining as a ram node.
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[Rabbit], ram}]),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+
+ %% We get a warning but we start anyway
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[non@existent], disc}]),
+ ok = start_app(Hare),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Rabbit),
+
+ %% If we use a legacy config file, it still works (and a warning is emitted)
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, [Rabbit]]),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]).
+
+%% force_reset wipes the local node even though its peers still
+%% remember it as a member.
+force_reset_test_with() -> start_abc.
+force_reset_test(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ stop_join_start(Rabbit, Hare),
+ stop_app(Rabbit),
+ force_reset(Rabbit),
+ %% Hare thinks that Rabbit is still clustered
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Hare]),
+ %% ...but it isn't
+ assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]),
+ %% We can rejoin Rabbit and Hare
+ update_cluster_nodes(Rabbit, Hare),
+ start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]).
+
+%% ----------------------------------------------------------------------------
+%% Internal utils
+
+%% Extract node names from a list of per-node config proplists.
+cluster_members(Nodes) -> [pget(node,Cfg) || Cfg <- Nodes].
+
+%% Assert that every node in Nodes reports the cluster status Status0
+%% ({All, Disc, Running}, compared order-insensitively), retrying for
+%% up to 10 seconds before erroring out.
+assert_cluster_status(Status0, Nodes) ->
+ Status = {AllNodes, _, _} = sort_cluster_status(Status0),
+ wait_for_cluster_status(Status, AllNodes, Nodes).
+
+wait_for_cluster_status(Status, AllNodes, Nodes) ->
+ %% Integer division: the retry budget is a whole number of attempts
+ %% (with `/` this was the float 100.0, which also leaked into the
+ %% max_tried field of the failure report).
+ Max = 10000 div ?LOOP_RECURSION_DELAY,
+ wait_for_cluster_status(0, Max, Status, AllNodes, Nodes).
+
+%% Retry loop: give up with an informative error once Max attempts are
+%% exhausted, otherwise re-check all nodes every ?LOOP_RECURSION_DELAY.
+wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max ->
+ error({cluster_status_max_tries_failed,
+ [{nodes, Nodes},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) ->
+ case lists:all(fun (Node) ->
+ verify_status_equal(Node, Status, AllNodes)
+ end, Nodes) of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes)
+ end.
+
+%% A node matches if its sorted status triple equals Status AND its own
+%% is_clustered flag agrees (clustered iff the cluster is more than
+%% just itself).
+verify_status_equal(Node, Status, AllNodes) ->
+ NodeStatus = sort_cluster_status(cluster_status(Node)),
+ (AllNodes =/= [Node]) =:= rpc:call(Node, rabbit_mnesia, is_clustered, [])
+ andalso NodeStatus =:= Status.
+
+%% {AllNodes, DiscNodes, RunningNodes} as reported by rabbit_mnesia.
+cluster_status(Node) ->
+ {rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]),
+ rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]),
+ rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])}.
+
+%% Normalise a {All, Disc, Running} status triple so that two statuses
+%% can be compared with =:= regardless of node ordering.
+sort_cluster_status(Status) ->
+ {All, Disc, Running} = Status,
+ [SortedAll, SortedDisc, SortedRunning] =
+ [lists:sort(L) || L <- [All, Disc, Running]],
+ {SortedAll, SortedDisc, SortedRunning}.
+
+%% A fully formed cluster: every node is a disc node and running.
+assert_clustered(Nodes) ->
+ Expected = {Nodes, Nodes, Nodes},
+ assert_cluster_status(Expected, Nodes).
+
+%% A standalone node: a cluster consisting of exactly itself.
+assert_not_clustered(Node) ->
+ Self = [Node],
+ assert_cluster_status({Self, Self, Self}, Self).
+
+%% Run Fun expecting it to fail: returns the failure reason, or exits
+%% with {expected_failure, Result} if it unexpectedly succeeds.
+%% NOTE(review): the old-style 'catch' is deliberate here - it turns
+%% local exits into {'EXIT', ...} terms and lets error-shaped return
+%% values ({error, _}, {badrpc, _}, ...) be matched uniformly.
+assert_failure(Fun) ->
+ case catch Fun() of
+ {error, Reason} -> Reason;
+ {badrpc, {'EXIT', Reason}} -> Reason;
+ {badrpc_multi, Reason, _Nodes} -> Reason;
+ Other -> exit({expected_failure, Other})
+ end.
+
+%% rabbitmqctl-style wrappers. NB: unlike rabbit_test_util, Node here
+%% is a node name, not a config proplist.
+stop_app(Node) ->
+ control_action(stop_app, Node).
+
+start_app(Node) ->
+ control_action(start_app, Node).
+
+join_cluster(Node, To) ->
+ join_cluster(Node, To, false).
+
+%% Ram = true joins as a ram node (--ram flag).
+join_cluster(Node, To, Ram) ->
+ control_action(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]).
+
+reset(Node) ->
+ control_action(reset, Node).
+
+force_reset(Node) ->
+ control_action(force_reset, Node).
+
+%% Ask Node to forget Removee; RemoveWhenOffline = true maps to the
+%% --offline flag (removal performed from a stopped node).
+forget_cluster_node(Node, Removee, RemoveWhenOffline) ->
+ control_action(forget_cluster_node, Node, [atom_to_list(Removee)],
+ [{"--offline", RemoveWhenOffline}]).
+
+forget_cluster_node(Node, Removee) ->
+ forget_cluster_node(Node, Removee, false).
+
+change_cluster_node_type(Node, Type) ->
+ control_action(change_cluster_node_type, Node, [atom_to_list(Type)]).
+
+update_cluster_nodes(Node, DiscoveryNode) ->
+ control_action(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]).
+
+%% Common stop-app / join / start-app sequence used by the tests.
+stop_join_start(Node, ClusterTo, Ram) ->
+ ok = stop_app(Node),
+ ok = join_cluster(Node, ClusterTo, Ram),
+ ok = start_app(Node).
+
+stop_join_start(Node, ClusterTo) ->
+ stop_join_start(Node, ClusterTo, false).
+
+stop_reset_start(Node) ->
+ ok = stop_app(Node),
+ ok = reset(Node),
+ ok = start_app(Node).
+
+%% Invoke rabbit_control_main on Node; output is printed on the caller.
+control_action(Command, Node) ->
+ control_action(Command, Node, [], []).
+
+control_action(Command, Node, Args) ->
+ control_action(Command, Node, Args, []).
+
+control_action(Command, Node, Args, Opts) ->
+ rpc:call(Node, rabbit_control_main, action,
+ [Command, Node, Args, Opts,
+ fun io:format/2]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(dynamic_ha).
+
+%% rabbit_tests:test_dynamic_mirroring() is a unit test which should
+%% test the logic of what all the policies decide to do, so we don't
+%% need to exhaustively test that here. What we need to test is that:
+%%
+%% * Going from non-mirrored to mirrored works and vice versa
+%% * Changing policy can add / remove mirrors and change the master
+%% * Adding a node will create a new mirror when there are not enough nodes
+%% for the policy
+%% * Removing a node will not create a new mirror even if the policy
+%% logic wants it (since this gives us a good way to lose messages
+%% on cluster shutdown, by repeated failover to new nodes)
+%%
+%% The first two are change_policy, the last two are change_cluster
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(QNAME, <<"ha.test">>).
+-define(POLICY, <<"^ha.test$">>). %% " emacs
+-define(VHOST, <<"/">>).
+
+-import(rabbit_test_util, [set_ha_policy/3, set_ha_policy/4,
+ clear_policy/2, a2b/1, publish/3, consume/3]).
+-import(rabbit_misc, [pget/2]).
+
+%% Going from non-mirrored to mirrored and back, and switching mirror
+%% sets, purely via policy changes.
+change_policy_with() -> cluster_abc.
+change_policy([CfgA, _CfgB, _CfgC] = Cfgs) ->
+ ACh = pget(channel, CfgA),
+ [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+
+ %% When we first declare a queue with no policy, it's not HA.
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ assert_slaves(A, ?QNAME, {A, ''}),
+
+ %% Give it policy "all", it becomes HA and gets all mirrors
+ set_ha_policy(CfgA, ?POLICY, <<"all">>),
+ assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+ %% Give it policy "nodes", it gets specific mirrors
+ set_ha_policy(CfgA, ?POLICY, {<<"nodes">>, [a2b(A), a2b(B)]}),
+ assert_slaves(A, ?QNAME, {A, [B]}),
+
+ %% Now explicitly change the mirrors
+ set_ha_policy(CfgA, ?POLICY, {<<"nodes">>, [a2b(A), a2b(C)]}),
+ assert_slaves(A, ?QNAME, {A, [C]}, [{A, [B, C]}]),
+
+ %% Clear the policy, and we go back to non-mirrored
+ clear_policy(CfgA, ?POLICY),
+ assert_slaves(A, ?QNAME, {A, ''}),
+
+ %% Test switching "away" from an unmirrored node
+ set_ha_policy(CfgA, ?POLICY, {<<"nodes">>, [a2b(B), a2b(C)]}),
+ assert_slaves(A, ?QNAME, {A, [B, C]}, [{A, [B]}, {A, [C]}]),
+
+ ok.
+
+%% Adding a node creates the missing mirror; removing one does not
+%% trigger re-placement (see the module comment for the rationale).
+change_cluster_with() -> cluster_abc.
+change_cluster([CfgA, _CfgB, _CfgC] = CfgsABC) ->
+ ACh = pget(channel, CfgA),
+ [A, B, C] = [pget(node, Cfg) || Cfg <- CfgsABC],
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ assert_slaves(A, ?QNAME, {A, ''}),
+
+ %% Give it policy exactly 4, it should mirror to all 3 nodes
+ set_ha_policy(CfgA, ?POLICY, {<<"exactly">>, 4}),
+ assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+ %% Add D and E, D joins in
+ [CfgD, CfgE] = CfgsDE = rabbit_test_configs:start_nodes(CfgA, [d, e], 5675),
+ D = pget(node, CfgD),
+ rabbit_test_configs:add_to_cluster(CfgsABC, CfgsDE),
+ assert_slaves(A, ?QNAME, {A, [B, C, D]}),
+
+ %% Remove D, E does not join in
+ rabbit_test_configs:stop_node(CfgD),
+ assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+ %% Clean up since we started this by hand
+ rabbit_test_configs:stop_node(CfgE),
+ ok.
+
+%% Race rapid policy flips against a stream of AMQP operations on the
+%% same queue; the worker signals 'done' when its 100 cycles finish.
+rapid_change_with() -> cluster_abc.
+rapid_change([CfgA, _CfgB, _CfgC]) ->
+ ACh = pget(channel, CfgA),
+ Self = self(),
+ spawn_link(
+ fun() ->
+ [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)],
+ Self ! done
+ end),
+ rapid_loop(CfgA),
+ ok.
+
+%% One declare / publish / consume / delete cycle on ?QNAME, checking
+%% the published payload round-trips.
+rapid_amqp_ops(Ch, I) ->
+ Payload = list_to_binary(integer_to_list(I)),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = ?QNAME},
+ #amqp_msg{payload = Payload}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = ?QNAME,
+ no_ack = true}, self()),
+ receive #'basic.consume_ok'{} -> ok
+ end,
+ receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ ok
+ end,
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}).
+
+%% Flip the HA policy on and off as fast as possible until the AMQP
+%% worker reports completion ('after 0' = non-blocking poll for done).
+rapid_loop(Cfg) ->
+ receive done ->
+ ok
+ after 0 ->
+ set_ha_policy(Cfg, ?POLICY, <<"all">>),
+ clear_policy(Cfg, ?POLICY),
+ rapid_loop(Cfg)
+ end.
+
+%% Vhost deletion needs to successfully tear down policies and queues
+%% with policies. At least smoke-test that it doesn't blow up.
+vhost_deletion_with() -> [cluster_ab, ha_policy_all].
+vhost_deletion([CfgA, _CfgB]) ->
+ ACh = pget(channel, CfgA),
+ Node = pget(node, CfgA),
+ %% Under the ha_policy_all config this queue should be mirrored;
+ %% deleting the vhost must still return ok.
+ amqp_channel:call(ACh, #'queue.declare'{queue = <<"test">>}),
+ ok = rpc:call(Node, rabbit_vhost, delete, [<<"/">>]),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% Assert the master node and slave nodes of QName as seen from
+%% RPCNode. The previously asserted state (stashed in the process
+%% dictionary) is implicitly allowed as an intermediate, since policy
+%% changes are applied asynchronously.
+assert_slaves(RPCNode, QName, Exp) ->
+ assert_slaves(RPCNode, QName, Exp, []).
+
+assert_slaves(RPCNode, QName, Exp, PermittedIntermediate) ->
+ assert_slaves0(RPCNode, QName, Exp,
+ [{get(previous_exp_m_node), get(previous_exp_s_nodes)} |
+ PermittedIntermediate]).
+
+%% Compare the actual master/slave placement of QName against the
+%% expectation ('' = unmirrored), tolerating any PermittedIntermediate
+%% state while the asynchronous change completes.
+assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate) ->
+ Q = find_queue(QName, RPCNode),
+ Pid = proplists:get_value(pid, Q),
+ SPids = proplists:get_value(slave_pids, Q),
+ ActMNode = node(Pid),
+ ActSNodes = case SPids of
+ '' -> '';
+ _ -> [node(SPid) || SPid <- SPids]
+ end,
+ case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of
+ false ->
+ %% It's an async change, so if nothing has changed let's
+ %% just wait - of course this means if something does not
+ %% change when expected then we time out the test which is
+ %% a bit tedious
+ case [found || {PermMNode, PermSNodes} <- PermittedIntermediate,
+ PermMNode =:= ActMNode,
+ equal_list(PermSNodes, ActSNodes)] of
+ [] -> ct:fail("Expected ~p / ~p, got ~p / ~p~nat ~p~n",
+ [ExpMNode, ExpSNodes, ActMNode, ActSNodes,
+ get_stacktrace()]);
+ _ -> timer:sleep(100),
+ assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes},
+ PermittedIntermediate)
+ end;
+ true ->
+ %% Remember this state: the next assertion implicitly allows
+ %% it as an intermediate.
+ put(previous_exp_m_node, ExpMNode),
+ put(previous_exp_s_nodes, ExpSNodes),
+ ok
+ end.
+
+%% Multiset equality over slave-node lists, where the atom '' denotes
+%% "no slaves at all" (as reported for unmirrored queues) and is equal
+%% only to itself.
+equal_list('', '') -> true;
+equal_list('', _) -> false;
+equal_list(_, '') -> false;
+equal_list(Expected, Actual) ->
+ lists:sort(Expected) =:= lists:sort(Actual).
+
+%% Fetch QName's info proplist from RPCNode, polling every 100ms until
+%% the queue appears.
+find_queue(QName, RPCNode) ->
+ Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity),
+ case find_queue0(QName, Qs) of
+ did_not_find_queue -> timer:sleep(100),
+ find_queue(QName, RPCNode);
+ Q -> Q
+ end.
+
+%% Select the single queue with a matching resource name, if present.
+find_queue0(QName, Qs) ->
+ case [Q || Q <- Qs, proplists:get_value(name, Q) =:=
+ rabbit_misc:r(?VHOST, queue, QName)] of
+ [R] -> R;
+ [] -> did_not_find_queue
+ end.
+
+%% Capture the current stack trace for failure diagnostics.
+%% NOTE(review): erlang:get_stacktrace/0 is deprecated from OTP 21 and
+%% removed in OTP 24; fine for the OTP versions this 2014-era suite
+%% targets, but needs the catch-clause stacktrace syntax on newer OTP.
+get_stacktrace() ->
+ try
+ throw(e)
+ catch
+ _:e ->
+ erlang:get_stacktrace()
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(eager_sync).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(QNAME, <<"ha.two.test">>).
+-define(QNAME_AUTO, <<"ha.auto.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+-import(rabbit_test_util, [a2b/1, publish/3, consume/3, fetch/3]).
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [cluster_abc, ha_policy_two_pos]).
+
+%% Without an explicit sync a restarted mirror loses messages; after
+%% rabbitmqctl sync_queue they survive restarts. Also covers the
+%% nothing-to-sync path and unacknowledged messages.
+eager_sync_with() -> ?CONFIG.
+eager_sync([A, B, C]) ->
+ %% Queue is on AB but not C.
+ ACh = pget(channel, A),
+ Ch = pget(channel, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Don't sync, lose messages
+ publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(A),
+ restart(B),
+ consume(Ch, ?QNAME, 0),
+
+ %% Sync, keep messages
+ publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(A),
+ ok = sync(C, ?QNAME),
+ restart(B),
+ consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% Check the no-need-to-sync path
+ publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ ok = sync(C, ?QNAME),
+ consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% keep unacknowledged messages
+ publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ fetch(Ch, ?QNAME, 2),
+ restart(A),
+ fetch(Ch, ?QNAME, 3),
+ sync(C, ?QNAME),
+ restart(B),
+ consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ ok.
+
+%% Cancelling when no sync is running is idempotent; the racy
+%% cancel-mid-sync scenario lives in eager_sync_cancel_test2/4.
+eager_sync_cancel_with() -> ?CONFIG.
+eager_sync_cancel([A, B, C]) ->
+ %% Queue is on AB but not C.
+ ACh = pget(channel, A),
+ Ch = pget(channel, C),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ eager_sync_cancel_test2(A, B, C, Ch).
+
+%% Start a sync and cancel it while still in progress. Both the
+%% wait-for-syncing step and the cancel itself race against sync
+%% completion; when either race is lost, purge and start over.
+eager_sync_cancel_test2(A, B, C, Ch) ->
+ %% Sync then cancel
+ publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(A),
+ spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end),
+ case wait_for_syncing(C, ?QNAME, 1) of
+ ok ->
+ case sync_cancel(C, ?QNAME) of
+ ok ->
+ wait_for_running(C, ?QNAME),
+ restart(B),
+ consume(Ch, ?QNAME, 0),
+
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ ok;
+ {ok, not_syncing} ->
+ %% Damn. Syncing finished between wait_for_syncing/3 and
+ %% sync_cancel/2 above. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(A, B, C, Ch)
+ end;
+ synced_already ->
+ %% Damn. Syncing finished before wait_for_syncing/3. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(A, B, C, Ch)
+ end.
+
+%% Automatic sync: ?QNAME_AUTO presumably matches a policy with
+%% ha-sync-mode=automatic in the ha_policy_two_pos config - TODO
+%% confirm in rabbit_test_configs. No explicit sync_queue is needed.
+eager_sync_auto_with() -> ?CONFIG.
+eager_sync_auto([A, B, C]) ->
+ ACh = pget(channel, A),
+ Ch = pget(channel, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO,
+ durable = true}),
+
+ %% Sync automatically, don't lose messages
+ publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+ restart(A),
+ wait_for_sync(C, ?QNAME_AUTO),
+ restart(B),
+ wait_for_sync(C, ?QNAME_AUTO),
+ consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+
+ ok.
+
+%% Changing the policy to ha-sync-mode=automatic must trigger a sync of
+%% an already-unsynced mirror.
+eager_sync_auto_on_policy_change_with() -> ?CONFIG.
+eager_sync_auto_on_policy_change([A, B, C]) ->
+ ACh = pget(channel, A),
+ Ch = pget(channel, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Sync automatically once the policy is changed to tell us to.
+ publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(A),
+ Params = [a2b(pget(node, Cfg)) || Cfg <- [A, B]],
+ rabbit_test_util:set_ha_policy(
+ A, <<"^ha.two.">>, {<<"nodes">>, Params},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(C, ?QNAME),
+
+ ok.
+
+%% Requeued (rejected) messages must survive a sync and mirror restart.
+eager_sync_requeue_with() -> ?CONFIG.
+eager_sync_requeue([A, B, C]) ->
+ %% Queue is on AB but not C.
+ ACh = pget(channel, A),
+ Ch = pget(channel, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ publish(Ch, ?QNAME, 2),
+ {#'basic.get_ok'{delivery_tag = TagA}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ {#'basic.get_ok'{delivery_tag = TagB}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ %% One requeued before the sync, one after.
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}),
+ restart(B),
+ ok = sync(C, ?QNAME),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}),
+ consume(Ch, ?QNAME, 2),
+
+ ok.
+
+restart(Cfg) -> rabbit_test_util:restart_app(Cfg).
+
+%% Kick off a sync and wait for the queue to report synchronised;
+%% passes through any non-ok result from sync_queue.
+sync(Cfg, QName) ->
+ case sync_nowait(Cfg, QName) of
+ ok -> wait_for_sync(Cfg, QName),
+ ok;
+ R -> R
+ end.
+
+sync_nowait(Cfg, QName) -> action(Cfg, sync_queue, QName).
+sync_cancel(Cfg, QName) -> action(Cfg, cancel_sync_queue, QName).
+
+wait_for_sync(Cfg, QName) ->
+ sync_detection:wait_for_sync_status(true, Cfg, QName).
+
+%% Run a rabbitmqctl action on QName in the default vhost.
+action(Cfg, Action, QName) ->
+ rabbit_test_util:control_action(
+ Action, Cfg, [binary_to_list(QName)], [{"-p", "/"}]).
+
+%% Look up the amqqueue record for QName on the node in Cfg.
+queue(Cfg, QName) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, QName),
+ {ok, Q} = rpc:call(pget(node, Cfg), rabbit_amqqueue, lookup, [QNameRes]),
+ Q.
+
+%% Poll until the queue reports a syncing state, or return
+%% 'synced_already' if it is already running with Target synchronised
+%% slaves (i.e. the sync won the race before we saw it start).
+wait_for_syncing(Cfg, QName, Target) ->
+ case state(Cfg, QName) of
+ {{syncing, _}, _} -> ok;
+ {running, Target} -> synced_already;
+ _ -> timer:sleep(100),
+ wait_for_syncing(Cfg, QName, Target)
+ end.
+
+%% Poll until the queue is back in the 'running' state.
+wait_for_running(Cfg, QName) ->
+ case state(Cfg, QName) of
+ {running, _} -> ok;
+ _ -> timer:sleep(100),
+ wait_for_running(Cfg, QName)
+ end.
+
+%% {State, NumSynchronisedSlaves}. NOTE(review): the match assumes
+%% rabbit_amqqueue:info/2 returns items in the requested order -
+%% confirm against its implementation.
+state(Cfg, QName) ->
+ [{state, State}, {synchronised_slave_pids, Pids}] =
+ rpc:call(pget(node, Cfg), rabbit_amqqueue, info,
+ [queue(Cfg, QName), [state, synchronised_slave_pids]]),
+ {State, length(Pids)}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(many_node_ha).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_test_util, [a2b/1]).
+-import(rabbit_misc, [pget/2]).
+
+%% Cluster a..f with ha-mode=all; produce via node F and consume via
+%% node E while killing the master and the first three slaves.
+kill_intermediate_with() ->
+ fun (Cfg) -> rabbit_test_configs:ha_policy_all(
+ rabbit_test_configs:cluster(Cfg, [a,b,c,d,e,f]))
+ end.
+kill_intermediate([CfgA, CfgB, CfgC, CfgD, CfgE, CfgF]) ->
+ Msgs = rabbit_test_configs:cover_work_factor(20000, CfgA),
+ MasterChannel = pget(channel, CfgA),
+ ConsumerChannel = pget(channel, CfgE),
+ ProducerChannel = pget(channel, CfgF),
+ Queue = <<"test">>,
+ amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% TODO: this seems *highly* timing dependent - the assumption being
+ %% that the kill will work quickly enough that there will still be
+ %% some messages in-flight that we *must* receive despite the intervening
+ %% node deaths. It would be nice if we could find a means to do this
+ %% in a way that is not actually timing dependent.
+
+ %% Worse still, it assumes that killing the master will cause a
+ %% failover to Slave1, and so on. Nope.
+
+ ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel,
+ Queue, self(), false, Msgs),
+
+ ProducerPid = rabbit_ha_test_producer:create(ProducerChannel,
+ Queue, self(), false, Msgs),
+
+ %% create a killer for the master and the first 3 slaves
+ [rabbit_test_util:kill_after(Time, Cfg, sigkill) ||
+ {Cfg, Time} <- [{CfgA, 50},
+ {CfgB, 50},
+ {CfgC, 100},
+ {CfgD, 100}]],
+
+ %% verify that the consumer got all msgs, or die, or time out
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ ok.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(partitions).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [start_abc, fun enable_dist_proxy/1,
+ build_cluster, short_ticktime(1), start_connections]).
+%% We set ticktime to 1s and setuptime is 7s so to make sure it
+%% passes...
+-define(DELAY, 8000).
+
+%% With the default 'ignore' mode, a B<->C partition is reported by B
+%% and C but nothing else happens; A (still connected to both) sees no
+%% partition at all.
+ignore_with() -> ?CONFIG.
+ignore(Cfgs) ->
+ [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+ block_unblock([{B, C}]),
+ timer:sleep(?DELAY),
+ [] = partitions(A),
+ [C] = partitions(B),
+ [B] = partitions(C),
+ ok.
+
+%% In pause_minority mode, A keeps running while it is still in the
+%% majority (one dead node out of three) but pauses once it finds
+%% itself in a minority (two dead nodes).
+pause_on_down_with() -> ?CONFIG.
+pause_on_down([CfgA, CfgB, CfgC] = Cfgs) ->
+ A = pget(node, CfgA),
+ set_mode(Cfgs, pause_minority),
+ true = is_running(A),
+
+ rabbit_test_util:kill(CfgB, sigkill),
+ timer:sleep(?DELAY),
+ true = is_running(A),
+
+ rabbit_test_util:kill(CfgC, sigkill),
+ await_running(A, false),
+ ok.
+
+%% Partition A away from B and C: A (minority) pauses, B and C stay
+%% up. After healing, all three run again and B reports no lingering
+%% partitions.
+pause_on_blocked_with() -> ?CONFIG.
+pause_on_blocked(Cfgs) ->
+ [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+ set_mode(Cfgs, pause_minority),
+ [(true = is_running(N)) || N <- [A, B, C]],
+ block([{A, B}, {A, C}]),
+ await_running(A, false),
+ [await_running(N, true) || N <- [B, C]],
+ unblock([{A, B}, {A, C}]),
+ [await_running(N, true) || N <- [A, B, C]],
+ Status = rpc:call(B, rabbit_mnesia, status, []),
+ [] = pget(partitions, Status),
+ ok.
+
+%% Make sure we do not confirm any messages after a partition has
+%% happened but before we pause, since any such confirmations would be
+%% lies.
+%%
+%% This test has to use an AB cluster (not ABC) since GM ends up
+%% taking longer to detect down slaves when there are more nodes and
+%% we close the window by mistake.
+%%
+%% In general there are quite a few ways to accidentally cause this
+%% test to pass since there are a lot of things in the broker that can
+%% suddenly take several seconds to time out when TCP connections
+%% won't establish.
+%% Run the false-promises scenario on an AB cluster with all queues
+%% mirrored. Note the long (10s) ticktime: we want the partition to go
++%% undetected for a while so confirms could, in principle, leak out.
+pause_false_promises_mirrored_with() ->
+ [start_ab, fun enable_dist_proxy/1,
+ build_cluster, short_ticktime(10), start_connections, ha_policy_all].
+
+pause_false_promises_mirrored(Cfgs) ->
+ pause_false_promises(Cfgs).
+
+%% Same scenario without mirroring.
+pause_false_promises_unmirrored_with() ->
+ [start_ab, fun enable_dist_proxy/1,
+ build_cluster, short_ticktime(10), start_connections].
+
+pause_false_promises_unmirrored(Cfgs) ->
+ pause_false_promises(Cfgs).
+
+%% Publish with confirms on A while partitioning A away from B; once A
+%% pauses (pause_minority), every confirm it issued must correspond to
+%% a message that actually survived on B — i.e. no "false promises".
+pause_false_promises([CfgA, CfgB | _] = Cfgs) ->
+ [A, B] = [pget(node, Cfg) || Cfg <- Cfgs],
+ set_mode([CfgA], pause_minority),
+ ChA = pget(channel, CfgA),
+ ChB = pget(channel, CfgB),
+ amqp_channel:call(ChB, #'queue.declare'{queue = <<"test">>,
+ durable = true}),
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(ChA, self()),
+
+ %% Cause a partition after 1s
+ Self = self(),
+ spawn_link(fun () ->
+ timer:sleep(1000),
+ %%io:format(user, "~p BLOCK~n", [calendar:local_time()]),
+ block([{A, B}]),
+ unlink(Self)
+ end),
+
+ %% Publish large no of messages, see how many we get confirmed
+ [amqp_channel:cast(ChA, #'basic.publish'{routing_key = <<"test">>},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 1}}) ||
+ _ <- lists:seq(1, 100000)],
+ %%io:format(user, "~p finish publish~n", [calendar:local_time()]),
+
+ %% Time for the partition to be detected. We don't put this sleep
+ %% in receive_acks since otherwise we'd have another similar sleep
+ %% at the end.
+ timer:sleep(30000),
+ Confirmed = receive_acks(0),
+ %%io:format(user, "~p got acks~n", [calendar:local_time()]),
+ await_running(A, false),
+ %%io:format(user, "~p A stopped~n", [calendar:local_time()]),
+
+ unblock([{A, B}]),
+ await_running(A, true),
+
+ %% But how many made it onto the rest of the cluster?
+ #'queue.declare_ok'{message_count = Survived} =
+ amqp_channel:call(ChB, #'queue.declare'{queue = <<"test">>,
+ durable = true}),
+ %%io:format(user, "~p queue declared~n", [calendar:local_time()]),
+ case Confirmed > Survived of
+ true -> ?debugVal({Confirmed, Survived});
+ false -> ok
+ end,
+ ?assert(Confirmed =< Survived),
+ ok.
+
+%% Drain basic.ack confirms from the mailbox; return the highest
+%% delivery tag seen (acks arrive in tag order, so the last tag equals
+%% the count of confirmed messages). Give up after ?DELAY of silence.
+receive_acks(Max) ->
+ receive
+ #'basic.ack'{delivery_tag = DTag} ->
+ receive_acks(DTag)
+ after ?DELAY ->
+ Max
+ end.
+
+%% With a short ticktime, listing queues that live on a partitioned
+%% node must fail promptly rather than waiting setuptime per queue.
+prompt_disconnect_detection_with() ->
+ [start_ab, fun enable_dist_proxy/1,
+ build_cluster, short_ticktime(1), start_connections].
+
+prompt_disconnect_detection([CfgA, CfgB]) ->
+ A = pget(node, CfgA),
+ B = pget(node, CfgB),
+ ChB = pget(channel, CfgB),
+ [amqp_channel:call(ChB, #'queue.declare'{}) || _ <- lists:seq(1, 100)],
+ block([{A, B}]),
+ timer:sleep(?DELAY),
+ %% We want to make sure we do not end up waiting for setuptime *
+ %% no of queues. Unfortunately that means we need a timeout...
+ [] = rpc(CfgA, rabbit_amqqueue, info_all, [<<"/">>], ?DELAY),
+ ok.
+
+%% In autoheal mode, every partition pattern (one pair, two pairs, all
+%% three pairs) must heal itself: all nodes come back running with no
+%% recorded partitions.
+autoheal_with() -> ?CONFIG.
+autoheal(Cfgs) ->
+ [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+ set_mode(Cfgs, autoheal),
+ Test = fun (Pairs) ->
+ block_unblock(Pairs),
+ [await_running(N, true) || N <- [A, B, C]],
+ [] = partitions(A),
+ [] = partitions(B),
+ [] = partitions(C)
+ end,
+ Test([{B, C}]),
+ Test([{A, C}, {B, C}]),
+ Test([{A, B}, {A, C}, {B, C}]),
+ ok.
+
+%% Apply the given cluster_partition_handling mode on every node.
+set_mode(Configs, Mode) ->
+ lists:map(
+ fun (Config) ->
+ set_env(Config, rabbit, cluster_partition_handling, Mode)
+ end, Configs).
+
+%% Set an application environment variable on the node described by Config.
+set_env(Config, App, Key, Value) ->
+ rpc(Config, application, set_env, [App, Key, Value]).
+
+%% Partition the given node pairs, leave the partition in place long
+%% enough (?DELAY) for it to be detected, then heal it.
+block_unblock(Pairs) ->
+ block(Pairs),
+ timer:sleep(?DELAY),
+ unblock(Pairs).
+
+%% Sever / restore the dist-proxy link for each {NodeA, NodeB} pair.
+block(Pairs) -> lists:map(fun ({X, Y}) -> block(X, Y) end, Pairs).
+unblock(Pairs) -> lists:map(fun ({X, Y}) -> allow(X, Y) end, Pairs).
+
+%% Which nodes does Node's rabbit_node_monitor consider partitioned?
+partitions(Node) ->
+ rpc:call(Node, rabbit_node_monitor, partitions, []).
+
+%% Block the proxied dist connection in both directions so neither
+%% side can reach the other.
+block(X, Y) ->
+ rpc:call(X, inet_tcp_proxy, block, [Y]),
+ rpc:call(Y, inet_tcp_proxy, block, [X]).
+
+%% Re-enable the proxied dist connection in both directions.
+allow(X, Y) ->
+ rpc:call(X, inet_tcp_proxy, allow, [Y]),
+ rpc:call(Y, inet_tcp_proxy, allow, [X]).
+
+%% Busy-wait until the node's running / listening state matches Bool.
+await_running(Node, Bool) -> await(Node, Bool, fun is_running/1).
+await_listening(Node, Bool) -> await(Node, Bool, fun is_listening/1).
+
+%% Poll Fun(Node) every 100ms until it returns Bool (no overall
+%% timeout; the test timetrap bounds us).
+await(Node, Bool, Fun) ->
+ Result = Fun(Node),
+ if Result =:= Bool -> ok;
+ true -> timer:sleep(100),
+ await(Node, Bool, Fun)
+ end.
+
+is_running(Node) -> rpc:call(Node, rabbit, is_running, []).
+
+%% A node is "listening" iff it reports at least one network listener.
+%% Anything that is not a list (e.g. {badrpc, _}) counts as false.
+is_listening(Node) ->
+ case rpc:call(Node, rabbit_networking, node_listeners, [Node]) of
+ Listeners when is_list(Listeners) -> Listeners =/= [];
+ _Error -> false
+ end.
+
+%% Route inter-node dist traffic through inet_tcp_proxy so tests can
+%% block/allow individual node pairs. Order matters: the manager must
+%% be up before the per-node proxies start and reconnect.
+enable_dist_proxy(Cfgs) ->
+ inet_tcp_proxy_manager:start_link(),
+ Nodes = [pget(node, Cfg) || Cfg <- Cfgs],
+ [ok = rpc:call(Node, inet_tcp_proxy, start, []) || Node <- Nodes],
+ [ok = rpc:call(Node, inet_tcp_proxy, reconnect, [Nodes]) || Node <- Nodes],
+ Cfgs.
+
+%% Config step: set net_ticktime on all test nodes (and on this test
+%% runner node) so partitions are detected within roughly Time seconds.
+short_ticktime(Time) ->
+ fun (Cfgs) ->
+ [rpc(Cfg, net_kernel, set_net_ticktime, [Time, 0]) || Cfg <- Cfgs],
+ net_kernel:set_net_ticktime(Time, 0),
+ Cfgs
+ end.
+
+%% rpc:call convenience wrappers keyed on a node config proplist;
+%% the /5 variant takes an explicit timeout in milliseconds.
+rpc(Config, Mod, Fun, Args) ->
+ rpc:call(pget(node, Config), Mod, Fun, Args).
+
+rpc(Config, Mod, Fun, Args, Timeout) ->
+ rpc:call(pget(node, Config), Mod, Fun, Args, Timeout).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(simple_ha).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_test_util, [set_ha_policy/3, a2b/1]).
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [cluster_abc, ha_policy_all]).
+
+rapid_redeclare_with() -> [cluster_ab, ha_policy_all].
+%% Declare and delete the same durable mirrored queue 20 times in
+%% quick succession; exercises queue-record cleanup races.
+rapid_redeclare([CfgA | _]) ->
+ Ch = pget(channel, CfgA),
+ Queue = <<"test">>,
+ lists:foreach(
+ fun (_) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = Queue})
+ end, lists:seq(1, 20)),
+ ok.
+
+%% All the scenarios below run on a 3-node mirrored cluster (?CONFIG).
+consume_survives_stop_with() -> ?CONFIG.
+consume_survives_sigkill_with() -> ?CONFIG.
+consume_survives_policy_with() -> ?CONFIG.
+auto_resume_with() -> ?CONFIG.
+auto_resume_no_ccn_client_with() -> ?CONFIG.
+
+%% Third argument: cancel-on-failover; fourth (where given): whether
+%% the client supports consumer_cancel_notify.
+consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true).
+consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true).
+consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true).
+auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false).
+auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false,
+ false).
+
+confirms_survive_stop_with() -> ?CONFIG.
+confirms_survive_sigkill_with() -> ?CONFIG.
+confirms_survive_policy_with() -> ?CONFIG.
+
+confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2).
+confirms_survive_sigkill(Cf) -> confirms_survive(Cf, fun sigkill/2).
+confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2).
+
+%%----------------------------------------------------------------------------
+
+%% Default to a client that supports consumer_cancel_notify.
+consume_survives(Nodes, DeathFun, CancelOnFailover) ->
+ consume_survives(Nodes, DeathFun, CancelOnFailover, true).
+
+%% Kill the master (via DeathFun) while a consumer on node B and a
+%% producer on node C are active; both must complete without loss.
+consume_survives([CfgA, CfgB, CfgC] = Nodes,
+ DeathFun, CancelOnFailover, CCNSupported) ->
+ Msgs = rabbit_test_configs:cover_work_factor(20000, CfgA),
+ Channel1 = pget(channel, CfgA),
+ Channel2 = pget(channel, CfgB),
+ Channel3 = pget(channel, CfgC),
+
+ %% declare the queue on the master, mirrored to the two slaves
+ Queue = <<"test">>,
+ amqp_channel:call(Channel1, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% start up a consumer
+ ConsCh = case CCNSupported of
+ true -> Channel2;
+ false -> open_incapable_channel(pget(port, CfgB))
+ end,
+ ConsumerPid = rabbit_ha_test_consumer:create(
+ ConsCh, Queue, self(), CancelOnFailover, Msgs),
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue,
+ self(), false, Msgs),
+ DeathFun(CfgA, Nodes),
+ %% verify that the consumer got all msgs, or die - the await_response
+ %% calls throw an exception if anything goes wrong....
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ok.
+
+%% Kill the master while a confirming producer on node B publishes to
+%% a durable mirrored queue; all confirms must remain valid.
+confirms_survive([CfgA, CfgB, _CfgC] = Nodes, DeathFun) ->
+ Msgs = rabbit_test_configs:cover_work_factor(20000, CfgA),
+ Node1Channel = pget(channel, CfgA),
+ Node2Channel = pget(channel, CfgB),
+
+ %% declare the queue on the master, mirrored to the two slaves
+ Queue = <<"test">>,
+ amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
+ auto_delete = false,
+ durable = true}),
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+ self(), true, Msgs),
+ DeathFun(CfgA, Nodes),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ok.
+
+%% Ways of "killing" the master: clean stop, SIGKILL, or a policy
+%% change that moves the mirrors to the other nodes.
+stop(Cfg, _Cfgs) -> rabbit_test_util:kill_after(50, Cfg, stop).
+sigkill(Cfg, _Cfgs) -> rabbit_test_util:kill_after(50, Cfg, sigkill).
+policy(Cfg, [_|T]) -> Nodes = [a2b(pget(node, C)) || C <- T],
+ set_ha_policy(Cfg, <<".*">>, {<<"nodes">>, Nodes}).
+
+%% Open a channel on a fresh connection that advertises an empty
+%% capabilities table (in particular, no consumer_cancel_notify).
+open_incapable_channel(NodePort) ->
+ Props = [{<<"capabilities">>, table, []}],
+ Params = #amqp_params_network{port = NodePort,
+ client_properties = Props},
+ {ok, Conn} = amqp_connection:start(Params),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Ch.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(sync_detection).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_test_util, [stop_app/1, start_app/1]).
+-import(rabbit_misc, [pget/2]).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+%% A restarted slave must be reported unsynced while the master holds
+%% messages (or pending acks) the slave never saw, and synced once
+%% those are consumed/acked. Statement order is load-bearing here.
+slave_synchronization_with() -> [cluster_ab, ha_policy_two_pos].
+slave_synchronization([Master, Slave]) ->
+ Channel = pget(channel, Master),
+ Queue = <<"ha.two.test">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% The comments on the right are the queue length and the pending acks on
+ %% the master.
+ stop_app(Slave),
+
+ %% We get and ack one message when the slave is down, and check that when we
+ %% start the slave it's not marked as synced until ack the message. We also
+ %% publish another message when the slave is up.
+ send_dummy_message(Channel, Queue), % 1 - 0
+ {#'basic.get_ok'{delivery_tag = Tag1}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+
+ start_app(Slave),
+
+ slave_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue), % 1 - 1
+ slave_unsynced(Master, Queue),
+
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0
+
+ slave_synced(Master, Queue),
+
+ %% We restart the slave and we send a message, so that the slave will only
+ %% have one of the messages.
+ stop_app(Slave),
+ start_app(Slave),
+
+ send_dummy_message(Channel, Queue), % 2 - 0
+
+ slave_unsynced(Master, Queue),
+
+ %% We reject the message that the slave doesn't have, and verify that it's
+ %% still unsynced
+ {#'basic.get_ok'{delivery_tag = Tag2}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ slave_unsynced(Master, Queue),
+ amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2,
+ requeue = true }), % 2 - 0
+ slave_unsynced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag3}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0
+ slave_synced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag4}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0
+ slave_synced(Master, Queue).
+
+%% Like slave_synchronization, but messages leave the queue by TTL
+%% expiry (dead-lettered to DLXQueue) instead of consumption; waiting
+%% on the DLX queue tells us when the master has dropped them.
+slave_synchronization_ttl_with() -> [cluster_abc, ha_policy_two_pos].
+slave_synchronization_ttl([Master, Slave, DLX]) ->
+ Channel = pget(channel, Master),
+ DLXChannel = pget(channel, DLX),
+
+ %% We declare a DLX queue to wait for messages to be TTL'ed
+ DLXQueue = <<"dlx-queue">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue,
+ auto_delete = false}),
+
+ TestMsgTTL = 5000,
+ Queue = <<"ha.two.test">>,
+ %% Sadly we need fairly high numbers for the TTL because starting/stopping
+ %% nodes takes a fair amount of time.
+ Args = [{<<"x-message-ttl">>, long, TestMsgTTL},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}],
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false,
+ arguments = Args}),
+
+ slave_synced(Master, Queue),
+
+ %% All unknown
+ stop_app(Slave),
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ start_app(Slave),
+ slave_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ slave_synced(Master, Queue),
+
+ %% 1 unknown, 1 known
+ stop_app(Slave),
+ send_dummy_message(Channel, Queue),
+ start_app(Slave),
+ slave_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue),
+ slave_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ slave_synced(Master, Queue),
+
+ %% %% both known
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ slave_synced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ slave_synced(Master, Queue),
+
+ ok.
+
+%% Publish a throwaway message straight to Queue via the default exchange.
+send_dummy_message(Channel, Queue) ->
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = <<"foo">>}).
+
+%% Despite the name, returns the *synchronised* slave pids of Queue as
+%% seen from Node. rabbit_amqqueue:info reports '' (the empty atom)
+%% rather than a list when there are none, hence the normalisation.
+slave_pids(Node, Queue) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, Queue)]),
+ SSP = synchronised_slave_pids,
+ [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
+ case Pids of
+ '' -> [];
+ _ -> Pids
+ end.
+
+%% The mnesia synchronization takes a while, but we don't want to wait
+%% for the test to fail, since the timetrap is quite high.
+wait_for_sync_status(Status, Cfg, Queue) ->
+ %% Integer division: `10000 / X' yields a float (e.g. 100.0), which
+ %% then leaks into the max_tried error report; `div' keeps Max an int.
+ Max = 10000 div ?LOOP_RECURSION_DELAY,
+ wait_for_sync_status(0, Max, Status, pget(node, Cfg), Queue).
+
+%% Poll until the queue's sync state matches Status, or error out
+%% after Max attempts. "Synced" here means exactly one synchronised
+%% slave — valid because the HA policy mirrors to two nodes in total.
+wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
+ error({sync_status_max_tries_failed,
+ [{queue, Queue},
+ {node, Node},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_sync_status(N, Max, Status, Node, Queue) ->
+ Synced = length(slave_pids(Node, Queue)) =:= 1,
+ case Synced =:= Status of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_sync_status(N + 1, Max, Status, Node, Queue)
+ end.
+
+%% Readability wrappers around wait_for_sync_status/3.
+slave_synced(Cfg, Queue) ->
+ wait_for_sync_status(true, Cfg, Queue).
+
+slave_unsynced(Cfg, Queue) ->
+ wait_for_sync_status(false, Cfg, Queue).
+
+%% Consume and ack exactly N messages from Queue, blocking until they
+%% arrive, then cancel the subscription.
+wait_for_messages(Queue, Channel, N) ->
+ Sub = #'basic.consume'{queue = Queue},
+ #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
+ receive
+ #'basic.consume_ok'{} -> ok
+ end,
+ lists:foreach(
+ fun (_) -> receive
+ {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = Tag})
+ end
+ end, lists:seq(1, N)),
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+An opinionated tracing plugin for RabbitMQ management. Build it like
+any other plugin. After installation you should see a "Tracing" tab in
+the management UI; its use should be self-explanatory.
+
+Configuration
+=============
+
+There is one configuration option:
+
+directory: This controls where the log files go. It defaults to
+"/var/tmp/rabbitmq-tracing".
+
+Performance
+===========
+
+On my workstation, rabbitmq-tracing can write about 2000 msg/s to a
+log file. You should be careful using rabbitmq-tracing if you think
+you're going to capture more messages than this. Of course, any
+messages that can't be logged are queued.
+
+The code to serve up the log files over HTTP is pretty dumb: it loads
+the whole log into memory. If you have large log files, you may wish
+to transfer them off the server in some other way.
+
+HTTP API
+========
+
+GET /api/traces
+GET /api/traces/<vhost>
+GET PUT DELETE /api/traces/<vhost>/<name>
+GET /api/trace-files
+GET DELETE /api/trace-files/<name> (GET returns the file as text/plain,
+ not JSON describing it.)
+
+Example for how to create a trace:
+
+$ curl -i -u guest:guest -H "content-type:application/json" -XPUT \
+ http://localhost:55672/api/traces/%2f/my-trace \
+ -d'{"format":"text","pattern":"#"}'
+
--- /dev/null
+# Umbrella package definition for the rabbitmq-tracing plugin.
+RELEASABLE:=true
+DEPS:=rabbitmq-management
+WITH_BROKER_TEST_COMMANDS:=eunit:test(rabbit_tracing_test,[verbose])
+
+# Ship everything under priv/ (the management UI assets) with the app.
+CONSTRUCT_APP_PREREQS:=$(shell find $(PACKAGE_DIR)/priv -type f)
+define construct_app_commands
+	cp -r $(PACKAGE_DIR)/priv $(APP_DIR)
+endef
--- /dev/null
+<!-- EJS template: lists running traces (left column) and trace log
+     files (right column) for the management UI "Tracing" tab. -->
+<h1>Traces</h1>
+<div class="section">
+ <h2>All traces</h2>
+ <div class="hider updatable">
+ <table class="two-col-layout">
+ <tr>
+ <td>
+ <h3>Currently running traces</h3>
+ <% if (traces.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+ <% if (vhosts_interesting) { %>
+ <th>Virtual host</th>
+ <% } %>
+ <th>Name</th>
+ <th>Pattern</th>
+ <th>Format</th>
+ <th>Rate</th>
+ <th>Queued</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < traces.length; i++) {
+ var trace = traces[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <% if (vhosts_interesting) { %>
+ <td><%= fmt_string(trace.vhost) %></td>
+ <% } %>
+ <td><%= fmt_string(trace.name) %></td>
+ <td><%= fmt_string(trace.pattern) %></td>
+ <td><%= fmt_string(trace.format) %></td>
+ <!-- trace.queue is absent when the tracer process failed to start -->
+ <% if (trace.queue) { %>
+ <td class="r">
+ <%= fmt_rate(trace.queue.message_stats, 'ack', false) %>
+ </td>
+ <td class="r">
+ <%= trace.queue.messages %>
+ <sub><%= link_trace_queue(trace) %></sub>
+ </td>
+ <% } else { %>
+ <td colspan="2">
+ <div class="status-red"><acronym title="The trace failed to start - check the server logs for details.">FAILED</acronym></div>
+ </td>
+ <% } %>
+ <td>
+ <form action="#/traces" method="delete">
+ <input type="hidden" name="vhost" value="<%= fmt_string(trace.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(trace.name) %>"/>
+ <input type="submit" value="Stop"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no traces running ...</p>
+ <% } %>
+ </td>
+ <td>
+ <h3>Trace log files</h3>
+ <% if (files.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+ <th>Name</th>
+ <th>Size</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < files.length; i++) {
+ var file = files[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_trace(file.name) %></td>
+ <td class="r"><%= fmt_bytes(file.size) %></td>
+ <td>
+ <form action="#/trace-files" method="delete" class="inline-form">
+ <input type="hidden" name="name" value="<%= fmt_string(file.name) %>"/>
+ <input type="submit" value="Delete" />
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no files ...</p>
+ <% } %>
+ </td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<!-- Form for starting a new trace; submits via the #/traces PUT route. -->
+<div class="section">
+ <h2>Add a new trace</h2>
+ <div class="hider">
+ <form action="#/traces" method="put">
+ <table class="form">
+<!-- Only show the vhost selector when there is more than one vhost. -->
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Format:</label></th>
+ <td>
+ <select name="format">
+ <option value="text">Text</option>
+ <option value="json">JSON</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Pattern:</label></th>
+ <td>
+ <input type="text" name="pattern" value="#"/>
+ <sub>Examples: #, publish.#, deliver.# #.amq.direct, #.myqueue</sub>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add trace"/>
+ </form>
+ </div>
+</div>
--- /dev/null
+// Register Sammy routes for the Tracing tab: list/show traces, and
+// create/delete traces and trace files via the management HTTP API.
+dispatcher_add(function(sammy) {
+ sammy.get('#/traces', function() {
+ render({'traces': '/traces',
+ 'vhosts': '/vhosts',
+ 'files': '/trace-files'},
+ 'traces', '#/traces');
+ });
+ sammy.get('#/traces/:vhost/:name', function() {
+ var path = '/traces/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
+ render({'trace': path},
+ 'trace', '#/traces');
+ });
+ sammy.put('#/traces', function() {
+ if (sync_put(this, '/traces/:vhost/:name'))
+ update();
+ // Returning false stops Sammy from also navigating.
+ return false;
+ });
+ sammy.del('#/traces', function() {
+ if (sync_delete(this, '/traces/:vhost/:name'))
+ partial_update();
+ return false;
+ });
+ sammy.del('#/trace-files', function() {
+ if (sync_delete(this, '/trace-files/:name'))
+ partial_update();
+ return false;
+ });
+});
+
+NAVIGATION['Admin'][0]['Tracing'] = ['#/traces', 'administrator'];
+
+function link_trace(name) {
+ return _link_to(fmt_escape_html(name), 'api/trace-files/' + esc(name));
+}
+
+function link_trace_queue(trace) {
+ return _link_to('(queue)', '#/queues/' + esc(trace.vhost) + '/' + esc(trace.queue.name));
+}
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_tracing_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Application callback: the plugin's only process tree is the
+%% top-level supervisor.
+start(_Type, _StartArgs) ->
+ rabbit_tracing_sup:start_link().
+
+stop(_State) ->
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_tracing_consumer).
+
+-behaviour(gen_server).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2, pget/3, table_lookup/2]).
+
+-record(state, {conn, ch, vhost, queue, file, filename, format}).
+-record(log_record, {timestamp, type, exchange, queue, node, routing_keys,
+ properties, payload}).
+
+-define(X, <<"amq.rabbitmq.trace">>).
+
+-export([start_link/1, info_all/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%% Start a tracer worker; Args is the trace definition proplist
+%% (name, vhost, pattern, format).
+start_link(Args) ->
+ gen_server:start_link(?MODULE, Args, []).
+
+%% Synchronously fetch management info about the tracer's queue.
+info_all(Pid) ->
+ gen_server:call(Pid, info_all, infinity).
+
+%%----------------------------------------------------------------------------
+
+%% Open a direct AMQP connection in the trace's vhost, bind an
+%% exclusive queue to amq.rabbitmq.trace with the requested pattern,
+%% subscribe to it, and open the log file. Order matters: the
+%% subscription must exist before we announce ourselves.
+init(Args) ->
+ process_flag(trap_exit, true),
+ Name = pget(name, Args),
+ VHost = pget(vhost, Args),
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_direct{virtual_host = VHost}),
+ link(Conn),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ link(Ch),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{durable = false,
+ exclusive = true}),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(
+ Ch, #'queue.bind'{exchange = ?X, queue = Q,
+ routing_key = pget(pattern, Args)}),
+ #'basic.qos_ok'{} =
+ amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 10}),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = false}, self()),
+ {ok, Dir} = application:get_env(directory),
+ Filename = Dir ++ "/" ++ binary_to_list(Name) ++ ".log",
+ case filelib:ensure_dir(Filename) of
+ ok ->
+ case file:open(Filename, [append]) of
+ {ok, F} ->
+ rabbit_tracing_traces:announce(VHost, Name, self()),
+ %% NOTE(review): list_to_atom on a value that arrives
+ %% via the HTTP API — unbounded atom creation from
+ %% untrusted input; consider binary_to_existing_atom
+ %% or matching against the known formats (text/json).
+ Format = list_to_atom(binary_to_list(pget(format, Args))),
+ rabbit_log:info("Tracer opened log file ~p with "
+ "format ~p~n", [Filename, Format]),
+ {ok, #state{conn = Conn, ch = Ch, vhost = VHost, queue = Q,
+ file = F, filename = Filename,
+ format = Format}};
+ {error, E} ->
+ {stop, {could_not_open, Filename, E}}
+ end;
+ {error, E} ->
+ {stop, {could_not_create_dir, Dir, E}}
+ end.
+
+%% info_all: return management stats for our trace queue, with pids
+%% stripped so the result is safe to serialise.
+handle_call(info_all, _From, State = #state{vhost = V, queue = Q}) ->
+ [QInfo] = rabbit_mgmt_db:augment_queues(
+ [rabbit_mgmt_wm_queue:queue(V, Q)],
+ rabbit_mgmt_util:no_range(), basic),
+ {reply, [{queue, rabbit_mgmt_format:strip_pids(QInfo)}], State};
+
+handle_call(_Req, _From, State) ->
+ {reply, unknown_request, State}.
+
+%% No casts are part of this server's protocol; ignore any we receive.
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%% Each delivery from the trace exchange is formatted, written to the
+%% log file, and only then acked (prefetch 10 bounds the inflight set).
+handle_info(Delivery = {#'basic.deliver'{delivery_tag = Seq}, #amqp_msg{}},
+ State = #state{ch = Ch, file = F, format = Format}) ->
+ Print = fun(Fmt, Args) -> io:format(F, Fmt, Args) end,
+ log(Format, Print, delivery_to_log_record(Delivery)),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = Seq}),
+ {noreply, State};
+
+handle_info(_I, State) ->
+ {noreply, State}.
+
+%% On clean shutdown, close channel/connection/file best-effort (the
+%% catches ensure one failure does not skip the rest).
+terminate(shutdown, #state{conn = Conn, ch = Ch,
+ file = F, filename = Filename}) ->
+ catch amqp_channel:close(Ch),
+ catch amqp_connection:close(Conn),
+ catch file:close(F),
+ rabbit_log:info("Tracer closed log file ~p~n", [Filename]),
+ ok;
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_, State, _) -> {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Convert a trace delivery into a #log_record{}. The routing key
+%% encodes the event type: "publish.<exchange>" or "deliver.<queue>";
+%% everything else about the original message rides in the headers.
+delivery_to_log_record({#'basic.deliver'{routing_key = Key},
+ #amqp_msg{props = #'P_basic'{headers = H},
+ payload = Payload}}) ->
+ {Type, Q} = case Key of
+ <<"publish.", _Rest/binary>> -> {published, none};
+ <<"deliver.", Rest/binary>> -> {received, Rest}
+ end,
+ {longstr, Node} = table_lookup(H, <<"node">>),
+ {longstr, X} = table_lookup(H, <<"exchange_name">>),
+ {array, Keys} = table_lookup(H, <<"routing_keys">>),
+ {table, Props} = table_lookup(H, <<"properties">>),
+ #log_record{timestamp = rabbit_mgmt_format:timestamp(os:timestamp()),
+ type = Type,
+ exchange = X,
+ queue = Q,
+ node = Node,
+ routing_keys = [K || {_, K} <- Keys],
+ properties = Props,
+ payload = Payload}.
+
+%% Render one log record via the print fun P. 'text' writes a
+%% human-readable banner block; 'json' writes one JSON object per
+%% line with the payload base64-encoded (it may be arbitrary bytes).
+log(text, P, Record) ->
+ P("~n~s~n", [string:copies("=", 80)]),
+ P("~s: ", [Record#log_record.timestamp]),
+ case Record#log_record.type of
+ published -> P("Message published~n~n", []);
+ received -> P("Message received~n~n", [])
+ end,
+ P("Node: ~s~n", [Record#log_record.node]),
+ P("Exchange: ~s~n", [Record#log_record.exchange]),
+ case Record#log_record.queue of
+ none -> ok;
+ Q -> P("Queue: ~s~n", [Q])
+ end,
+ P("Routing keys: ~p~n", [Record#log_record.routing_keys]),
+ P("Properties: ~p~n", [Record#log_record.properties]),
+ P("Payload: ~n~s~n", [Record#log_record.payload]);
+
+log(json, P, Record) ->
+ P("~s~n", [mochijson2:encode(
+ [{timestamp, Record#log_record.timestamp},
+ {type, Record#log_record.type},
+ {node, Record#log_record.node},
+ {exchange, Record#log_record.exchange},
+ {queue, Record#log_record.queue},
+ {routing_keys, Record#log_record.routing_keys},
+ {properties, rabbit_mgmt_format:amqp_table(
+ Record#log_record.properties)},
+ {payload, base64:encode(Record#log_record.payload)}])]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Supervisor wrapping a single trace consumer process.  One instance
+%% is started per running trace, as a dynamic child of
+%% rabbit_tracing_sup (see rabbit_tracing_sup:start_child/2).
+-module(rabbit_tracing_consumer_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/1]).
+-export([init/1]).
+
+%% Args is passed through unchanged to the rabbit_tracing_consumer
+%% child.  Anonymous (unregistered) supervisor: there may be many.
+start_link(Args) -> supervisor2:start_link(?MODULE, Args).
+
+%%----------------------------------------------------------------------------
+
+%% Single transient worker; allow at most 3 restarts in 10 seconds.
+init(Args) ->
+ {ok, {{one_for_one, 3, 10},
+ [{consumer, {rabbit_tracing_consumer, start_link, [Args]},
+ transient, ?MAX_WAIT, worker,
+ [rabbit_tracing_consumer]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Helpers for trace log files on disk, all living in the directory
+%% configured via the rabbitmq_tracing 'directory' application env.
+-module(rabbit_tracing_files).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([list/0, exists/1, delete/1, full_path/1]).
+
+%%--------------------------------------------------------------------
+
+%% List every file in the trace directory as a proplist of
+%% [{name, binary()}, {size, integer()}], creating the directory
+%% first if it does not yet exist.
+list() ->
+ {ok, Dir} = application:get_env(rabbitmq_tracing, directory),
+ %% filelib:ensure_dir/1 creates the *parents* of its argument,
+ %% hence the dummy "/a" suffix to make it create Dir itself.
+ ok = filelib:ensure_dir(Dir ++ "/a"),
+ {ok, Names} = file:list_dir(Dir),
+ [file_info(Name) || Name <- Names].
+
+exists(Name) ->
+ filelib:is_regular(full_path(Name)).
+
+delete(Name) ->
+ ok = file:delete(full_path(Name)).
+
+%% Resolve a (possibly binary) file name to a path inside the trace
+%% directory.  safe_relative_path/1 returns 'undefined' for unsafe
+%% names (e.g. ".." traversal), so HTTP callers cannot escape the
+%% configured directory; we exit rather than serve such a path.
+full_path(Name0) when is_binary(Name0) ->
+ full_path(binary_to_list(Name0));
+full_path(Name0) ->
+ {ok, Dir} = application:get_env(rabbitmq_tracing, directory),
+ case mochiweb_util:safe_relative_path(Name0) of
+ undefined -> exit(how_rude);
+ Name -> Dir ++ "/" ++ Name
+ end.
+
+%%--------------------------------------------------------------------
+
+%% Stat one file and return its name (as a binary) and size in bytes.
+file_info(Name) ->
+ {ok, Info} = file:read_file_info(full_path(Name)),
+ [{name, list_to_binary(Name)},
+ {size, Info#file_info.size}].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Management-plugin extension point: registers the tracing HTTP
+%% endpoints and the tracing.js UI extension with rabbitmq_management.
+-module(rabbit_tracing_mgmt).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+
+%% Webmachine dispatch table; the atoms 'vhost' and 'name' are path
+%% bindings later read via rabbit_mgmt_util:id/2 in the resources.
+dispatcher() -> [{["traces"], rabbit_tracing_wm_traces, []},
+ {["traces", vhost], rabbit_tracing_wm_traces, []},
+ {["traces", vhost, name], rabbit_tracing_wm_trace, []},
+ {["trace-files"], rabbit_tracing_wm_files, []},
+ {["trace-files", name], rabbit_tracing_wm_file, []}].
+
+web_ui() -> [{javascript, <<"tracing.js">>}].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Top-level supervisor for the tracing plugin.  Owns the singleton
+%% rabbit_tracing_traces registry plus one dynamically-added
+%% rabbit_tracing_consumer_sup child per running trace.
+-module(rabbit_tracing_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(SUPERVISOR, ?MODULE).
+
+-export([start_link/0, start_child/2, stop_child/1]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ supervisor:start_link({local, ?SUPERVISOR}, ?MODULE, []).
+
+%% Start a consumer supervisor for one trace.  Id is expected to be
+%% unique per trace ({VHost, Name} in practice); 'temporary' so that
+%% a crashed trace is not restarted behind the registry's back.
+start_child(Id, Args) ->
+ supervisor:start_child(
+ ?SUPERVISOR,
+ {Id, {rabbit_tracing_consumer_sup, start_link, [Args]},
+ temporary, ?MAX_WAIT, supervisor,
+ [rabbit_tracing_consumer_sup]}).
+
+%% Terminate and deregister a trace's consumer supervisor.  Both
+%% return values are deliberately ignored: stopping an already-dead
+%% child is not an error here.
+stop_child(Id) ->
+ supervisor:terminate_child(?SUPERVISOR, Id),
+ supervisor:delete_child(?SUPERVISOR, Id),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+init([]) -> {ok, {{one_for_one, 3, 10},
+ [{traces, {rabbit_tracing_traces, start_link, []},
+ transient, ?MAX_WAIT, worker,
+ [rabbit_tracing_traces]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Registry of configured traces, keyed by {VHost, Name} in a private
+%% ETS table owned by this gen_server.  Also toggles per-vhost rabbit
+%% tracing: rabbit_trace is started when the first trace for a vhost
+%% is created and stopped when the last one is removed.
+-module(rabbit_tracing_traces).
+
+-behaviour(gen_server).
+
+-import(rabbit_misc, [pget/2]).
+
+%% API
+-export([list/0, lookup/2, create/3, stop/2, announce/3]).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, { table }).
+
+%%--------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% All API calls are synchronous with no timeout: the server only
+%% does ETS work plus child start/stop, so calls are short-lived.
+list() ->
+ gen_server:call(?MODULE, list, infinity).
+
+lookup(VHost, Name) ->
+ gen_server:call(?MODULE, {lookup, VHost, Name}, infinity).
+
+create(VHost, Name, Trace) ->
+ gen_server:call(?MODULE, {create, VHost, Name, Trace}, infinity).
+
+stop(VHost, Name) ->
+ gen_server:call(?MODULE, {stop, VHost, Name}, infinity).
+
+%% Called by a consumer once started, so the registry can associate
+%% the live pid with the trace (used by augment/1 below).  Cast, as
+%% the consumer must not block on the registry.
+announce(VHost, Name, Pid) ->
+ gen_server:cast(?MODULE, {announce, {VHost, Name}, Pid}).
+
+%%--------------------------------------------------------------------
+
+init([]) ->
+ {ok, #state{table = ets:new(anon, [private])}}.
+
+handle_call(list, _From, State = #state{table = Table}) ->
+ {reply, [augment(Trace) || {_K, Trace} <- ets:tab2list(Table)], State};
+
+handle_call({lookup, VHost, Name}, _From, State = #state{table = Table}) ->
+ {reply, case ets:lookup(Table, {VHost, Name}) of
+ [] -> not_found;
+ [{_K, Trace}] -> augment(Trace)
+ end, State};
+
+%% Create: record the trace, enable vhost tracing if this is the
+%% first trace for the vhost, then start its consumer.  'Already' is
+%% computed before the insert so the new entry does not count.
+handle_call({create, VHost, Name, Trace0}, _From,
+ State = #state{table = Table}) ->
+ Already = vhost_tracing(VHost, Table),
+ Trace = pset(vhost, VHost, pset(name, Name, Trace0)),
+ true = ets:insert(Table, {{VHost, Name}, Trace}),
+ case Already of
+ true -> ok;
+ false -> rabbit_trace:start(VHost)
+ end,
+ {reply, rabbit_tracing_sup:start_child({VHost, Name}, Trace), State};
+
+%% Stop: remove the entry first, then disable vhost tracing if no
+%% other trace remains for the vhost, then stop the consumer.
+handle_call({stop, VHost, Name}, _From, State = #state{table = Table}) ->
+ true = ets:delete(Table, {VHost, Name}),
+ case vhost_tracing(VHost, Table) of
+ true -> ok;
+ false -> rabbit_trace:stop(VHost)
+ end,
+ rabbit_tracing_sup:stop_child({VHost, Name}),
+ {reply, ok, State};
+
+handle_call(_Req, _From, State) ->
+ {reply, unknown_request, State}.
+
+%% Ignore announcements for traces no longer (or never) registered.
+handle_cast({announce, Key, Pid}, State = #state{table = Table}) ->
+ case ets:lookup(Table, Key) of
+ [] -> ok;
+ [{_, Trace}] -> ets:insert(Table, {Key, pset(pid, Pid, Trace)})
+ end,
+ {noreply, State};
+
+handle_cast(_C, State) ->
+ {noreply, State}.
+
+handle_info(_I, State) ->
+ {noreply, State}.
+
+terminate(_, _) -> ok.
+
+code_change(_, State, _) -> {ok, State}.
+
+%%--------------------------------------------------------------------
+
+%% Insert-or-replace a key in a proplist.
+pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].
+
+%% True iff any trace exists for VHost.  Linear scan of the table;
+%% fine for the expected small number of traces.
+vhost_tracing(VHost, Table) ->
+ case [true || {{V, _}, _} <- ets:tab2list(Table), V =:= VHost] of
+ [] -> false;
+ _ -> true
+ end.
+
+%% Merge live consumer stats into the stored trace proplist when the
+%% consumer has announced its pid; strip the pid itself from output.
+augment(Trace) ->
+ Pid = pget(pid, Trace),
+ Trace1 = lists:keydelete(pid, 1, Trace),
+ case Pid of
+ undefined -> Trace1;
+ _ -> rabbit_tracing_consumer:info_all(Pid) ++ Trace1
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+
+%% Webmachine resource for one trace log file: GET serves the raw
+%% file contents as text/plain, DELETE removes it.  Admin-only.
+-module(rabbit_tracing_wm_file).
+
+-export([init/1, resource_exists/2, serve/2, content_types_provided/2,
+ is_authorized/2, allowed_methods/2, delete_resource/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"text/plain", serve}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'DELETE'], ReqData, Context}.
+
+%% 404 unless the named file exists in the trace directory.
+resource_exists(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ {rabbit_tracing_files:exists(Name), ReqData, Context}.
+
+%% Whole file is read into memory; trace logs are assumed modest in
+%% size.  full_path/1 has already rejected path traversal.
+serve(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ {ok, Content} = file:read_file(rabbit_tracing_files:full_path(Name)),
+ {Content, ReqData, Context}.
+
+delete_resource(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ ok = rabbit_tracing_files:delete(Name),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing all trace log files (GET /trace-files)
+%% as JSON.  Admin-only.
+-module(rabbit_tracing_wm_files).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(rabbit_tracing_files:list(), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+
+%% Webmachine resource for one trace (GET/PUT/DELETE
+%% /traces/<vhost>/<name>), backed by the rabbit_tracing_traces
+%% registry.  Admin-only.
+-module(rabbit_tracing_wm_trace).
+
+-export([init/1, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+
+-define(ERR, <<"Something went wrong trying to start the trace - check the "
+ "logs.">>).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{"application/json", accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case trace(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(trace(ReqData), ReqData, Context).
+
+%% PUT: create/start a trace.  The JSON body must contain at least a
+%% 'format' key; anything other than {ok, _} from create/3 becomes a
+%% 400 with the generic ?ERR message (details go to the broker log).
+accept_content(ReqData, Context) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> Name = rabbit_mgmt_util:id(name, ReqData),
+ rabbit_mgmt_util:with_decode(
+ [format], ReqData, Context,
+ fun([_], Trace) ->
+ case rabbit_tracing_traces:create(
+ VHost, Name, Trace) of
+ {ok, _} -> {true, ReqData, Context};
+ _ -> rabbit_mgmt_util:bad_request(
+ ?ERR, ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context) ->
+ VHost = rabbit_mgmt_util:vhost(ReqData),
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ ok = rabbit_tracing_traces:stop(VHost, Name),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Look up the trace named in the request; not_found if either the
+%% vhost or the trace does not exist.
+trace(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> rabbit_tracing_traces:lookup(
+ VHost, rabbit_mgmt_util:id(name, ReqData))
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Webmachine resource listing all configured traces (GET /traces and
+%% GET /traces/<vhost>) as JSON.  Admin-only.
+-module(rabbit_tracing_wm_traces).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(rabbit_tracing_traces:list(), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
--- /dev/null
+%% Application resource file for the rabbitmq_tracing plugin.
+%% 'directory' is where trace log files are written (see
+%% rabbit_tracing_files); %%VSN%% is substituted at build time.
+{application, rabbitmq_tracing,
+ [{description, "RabbitMQ message logging / tracing"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_tracing_app, []}},
+ {env, [{directory, "/var/tmp/rabbitmq-tracing"}]},
+ {applications, [kernel, stdlib, rabbit, rabbitmq_management]}]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% End-to-end EUnit test for the tracing plugin: drives the HTTP API
+%% to create a trace, publishes a message over AMQP, then checks the
+%% trace log file contents.  Requires a running broker with the
+%% management and tracing plugins enabled, reachable as guest/guest.
+-module(rabbit_tracing_test).
+
+-define(LOG_DIR, "/var/tmp/rabbitmq-tracing/").
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbitmq_management/include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+tracing_test() ->
+ %% Start from a clean slate: remove any leftover trace logs.
+ case filelib:is_dir(?LOG_DIR) of
+ true -> {ok, Files} = file:list_dir(?LOG_DIR),
+ [ok = file:delete(?LOG_DIR ++ F) || F <- Files];
+ _ -> ok
+ end,
+
+ [] = http_get("/traces/%2f/"),
+ [] = http_get("/trace-files/"),
+
+ Args = [{format, <<"json">>},
+ {pattern, <<"#">>}],
+ http_put("/traces/%2f/test", Args, ?NO_CONTENT),
+ assert_list([[{name, <<"test">>},
+ {format, <<"json">>},
+ {pattern, <<"#">>}]], http_get("/traces/%2f/")),
+ assert_item([{name, <<"test">>},
+ {format, <<"json">>},
+ {pattern, <<"#">>}], http_get("/traces/%2f/test")),
+
+ %% Publish one message that the "#" pattern should capture.
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:cast(Ch, #'basic.publish'{ exchange = <<"amq.topic">>,
+ routing_key = <<"key">> },
+ #amqp_msg{props = #'P_basic'{},
+ payload = <<"Hello world">>}),
+
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+
+ %% NOTE(review): fixed sleep to let the consumer flush the log;
+ %% potentially flaky on a slow machine.
+ timer:sleep(100),
+
+ http_delete("/traces/%2f/test", ?NO_CONTENT),
+ [] = http_get("/traces/%2f/"),
+ assert_list([[{name, <<"test.log">>}]], http_get("/trace-files/")),
+ %% This is a bit cheeky as the log is actually one JSON doc per
+ %% line and we assume here it's only one line
+ assert_item([{type, <<"published">>},
+ {exchange, <<"amq.topic">>},
+ {routing_keys, [<<"key">>]},
+ {payload, base64:encode(<<"Hello world">>)}],
+ http_get("/trace-files/test.log")),
+ http_delete("/trace-files/test.log", ?NO_CONTENT),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% Below is copypasta from rabbit_mgmt_test_http, it's not obvious how
+%% to share that given the build system.
+
+%% HTTP helpers: each performs a request as guest/guest (unless
+%% credentials are given), asserts the expected response code, and
+%% decodes a JSON body on 200.
+
+http_get(Path) ->
+ http_get(Path, ?OK).
+
+http_get(Path, CodeExp) ->
+ http_get(Path, "guest", "guest", CodeExp).
+
+http_get(Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(get, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_put(Path, List, CodeExp) ->
+ http_put_raw(Path, format_for_upload(List), CodeExp).
+
+http_put(Path, List, User, Pass, CodeExp) ->
+ http_put_raw(Path, format_for_upload(List), User, Pass, CodeExp).
+
+http_post(Path, List, CodeExp) ->
+ http_post_raw(Path, format_for_upload(List), CodeExp).
+
+http_post(Path, List, User, Pass, CodeExp) ->
+ http_post_raw(Path, format_for_upload(List), User, Pass, CodeExp).
+
+%% Encode a proplist as a JSON object body.
+format_for_upload(List) ->
+ iolist_to_binary(mochijson2:encode({struct, List})).
+
+http_put_raw(Path, Body, CodeExp) ->
+ http_upload_raw(put, Path, Body, "guest", "guest", CodeExp).
+
+http_put_raw(Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(put, Path, Body, User, Pass, CodeExp).
+
+http_post_raw(Path, Body, CodeExp) ->
+ http_upload_raw(post, Path, Body, "guest", "guest", CodeExp).
+
+http_post_raw(Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(post, Path, Body, User, Pass, CodeExp).
+
+http_upload_raw(Type, Path, Body, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Type, Path, [auth_header(User, Pass)], Body),
+ assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_delete(Path, CodeExp) ->
+ http_delete(Path, "guest", "guest", CodeExp).
+
+http_delete(Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(delete, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+assert_code(CodeExp, CodeAct, Type, Path, Body) ->
+ case CodeExp of
+ CodeAct -> ok;
+ _ -> throw({expected, CodeExp, got, CodeAct, type, Type,
+ path, Path, body, Body})
+ end.
+
+req(Type, Path, Headers) ->
+ httpc:request(Type, {?PREFIX ++ Path, Headers}, ?HTTPC_OPTS, []).
+
+req(Type, Path, Headers, Body) ->
+ httpc:request(Type, {?PREFIX ++ Path, Headers, "application/json", Body},
+ ?HTTPC_OPTS, []).
+
+%% Decode JSON bodies of 200 responses; otherwise return headers.
+decode(?OK, _Headers, ResBody) -> cleanup(mochijson2:decode(ResBody));
+decode(_, Headers, _ResBody) -> Headers.
+
+%% Recursively turn mochijson2 {struct, ...} terms into plain
+%% proplists with atom keys, for easy matching in assertions.
+cleanup(L) when is_list(L) ->
+ [cleanup(I) || I <- L];
+cleanup({struct, I}) ->
+ cleanup(I);
+cleanup({K, V}) when is_binary(K) ->
+ {list_to_atom(binary_to_list(K)), cleanup(V)};
+cleanup(I) ->
+ I.
+
+auth_header(Username, Password) ->
+ {"Authorization",
+ "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
+
+%%---------------------------------------------------------------------------
+
+%% Assert that Act contains exactly one match for each expected item
+%% and that lengths agree (i.e. a set-wise comparison).
+assert_list(Exp, Act) ->
+ case length(Exp) == length(Act) of
+ true -> ok;
+ false -> throw({expected, Exp, actual, Act})
+ end,
+ [case length(lists:filter(fun(ActI) -> test_item(ExpI, ActI) end, Act)) of
+ 1 -> ok;
+ N -> throw({found, N, ExpI, in, Act})
+ end || ExpI <- Exp].
+
+%% Assert that every {K, V} in Exp appears in Act (Act may have more).
+assert_item(Exp, Act) ->
+ case test_item0(Exp, Act) of
+ [] -> ok;
+ Or -> throw(Or)
+ end.
+
+test_item(Exp, Act) ->
+ case test_item0(Exp, Act) of
+ [] -> true;
+ _ -> false
+ end.
+
+%% Return the list of expected entries missing from Act (empty = ok).
+test_item0(Exp, Act) ->
+ [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
+ not lists:member(ExpI, Act)].
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is ______________________________________.
+
+ The Initial Developer of the Original Code is ________________________.
+ Portions created by ______________________ are Copyright (C) ______
+ _______________________. All Rights Reserved.
+
+ Contributor(s): ______________________________________.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the _____ license (the "[___] License"), in which case the
+ provisions of [______] License are applicable instead of those
+ above. If you wish to allow use of your version of this file only
+ under the terms of the [____] License and not to allow others to use
+ your version of this file under the MPL, indicate your decision by
+ deleting the provisions above and replace them with the notice and
+ other provisions required by the [___] License. If you do not delete
+ the provisions above, a recipient may use your version of this file
+ under either the MPL or the [___] License."
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
+
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+rabbitmq-web-dispatch
+---------------------
+
+rabbitmq-web-dispatch is a thin veneer around mochiweb that provides the
+ability for multiple applications to co-exist on mochiweb
+listeners. Applications can register static docroots or dynamic
+handlers to be executed, dispatched by URL path prefix.
+
+See http://www.rabbitmq.com/mochiweb.html for information on
+configuring web plugins.
+
+The most general registration procedure is
+`rabbit_web_dispatch:register_context_handler/5`. This takes a callback
+procedure of the form
+
+ loop(Request) ->
+ ...
+
+The module `rabbit_webmachine` provides a means of running more than
+one webmachine in a VM, and understands rabbitmq-web-dispatch contexts. To
+use it, supply a dispatch table term of the kind usually given to
+webmachine in the file `priv/dispatch.conf`.
+
+`setup/0` in the same module applies the global webmachine
+configuration this plugin relies on (it installs
+`webmachine_error_handler` as webmachine's error handler).
--- /dev/null
+DEPS:=mochiweb-wrapper webmachine-wrapper
+WITH_BROKER_TEST_COMMANDS:=rabbit_web_dispatch_test:test()
+STANDALONE_TEST_COMMANDS:=rabbit_web_dispatch_test_unit:test()
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch).
+
+-export([register_context_handler/5, register_static_context/6]).
+-export([register_port_redirect/4]).
+-export([unregister_context/1]).
+
+%% Handler Registration
+
+%% Registers a dynamic selector and handler combination, with a link
+%% to display in lists. Selector is a fun(Req) -> boolean() that
+%% decides whether Handler serves a given request; Link is a
+%% {Path, Description} pair shown by the listing fallback page.
+%% Delegates to the registry gen_server, which serialises all
+%% modifications to the per-listener dispatch table.
+register_handler(Name, Listener, Selector, Handler, Link) ->
+ rabbit_web_dispatch_registry:add(Name, Listener, Selector, Handler, Link).
+
+%% Methods for standard use cases
+
+%% Registers a dynamic handler under a fixed context path, with link
+%% to display in the global context. An empty Prefix matches every
+%% request (see context_selector/1). Returns {ok, Prefix} so callers
+%% can report the context they ended up with.
+register_context_handler(Name, Listener, Prefix, Handler, LinkText) ->
+ register_handler(
+ Name, Listener, context_selector(Prefix), Handler, {Prefix, LinkText}),
+ {ok, Prefix}.
+
+%% Convenience function registering a fully static context to serve
+%% content from a module-relative directory, with link to display in
+%% the global context. FSPath is resolved relative to the directory
+%% containing the ebin directory that holds Module's beam file (see
+%% static_context_handler/3). Returns {ok, Prefix}.
+register_static_context(Name, Listener, Prefix, Module, FSPath, LinkText) ->
+ register_handler(Name, Listener,
+ context_selector(Prefix),
+ static_context_handler(Prefix, Module, FSPath),
+ {Prefix, LinkText}),
+ {ok, Prefix}.
+
+%% A context which just redirects the request to a different port:
+%% replies 301 with the same scheme, host and path but RedirectPort.
+%% The host is taken from the request's Host header (port part
+%% stripped); if absent, the listening socket's local IP address is
+%% used instead.
+register_port_redirect(Name, Listener, Prefix, RedirectPort) ->
+ register_context_handler(
+ Name, Listener, Prefix,
+ fun (Req) ->
+ Host = case Req:get_header_value("host") of
+ undefined -> {ok, {IP, _Port}} = rabbit_net:sockname(
+ Req:get(socket)),
+ rabbit_misc:ntoa(IP);
+ Header -> hd(string:tokens(Header, ":"))
+ end,
+ URL = rabbit_misc:format(
+ "~s://~s:~B~s",
+ [Req:get(scheme), Host, RedirectPort, Req:get(raw_path)]),
+ Req:respond({301, [{"Location", URL}], ""})
+ end,
+ rabbit_misc:format("Redirect to port ~B", [RedirectPort])).
+
+%% Builds the selector fun for a context path. The empty prefix
+%% matches every request; otherwise a request matches when its raw
+%% path is exactly "/Prefix" or starts with "/Prefix/". Matching on
+%% the "/" boundary prevents "/foo" from capturing "/foobar".
+context_selector("") ->
+ fun(_Req) -> true end;
+context_selector(Prefix) ->
+ Prefix1 = "/" ++ Prefix,
+ fun(Req) ->
+ Path = Req:get(raw_path),
+ (Path == Prefix1) orelse (string:str(Path, Prefix1 ++ "/") == 1)
+ end.
+
+%% Produces a handler for use with register_handler that serves up
+%% static content from a directory specified relative to the directory
+%% containing the ebin directory containing the named module's beam
+%% file. Note: code:is_loaded/1 only returns {file, Path} when Module
+%% is already loaded, so callers must pass a loaded module.
+static_context_handler(Prefix, Module, FSPath) ->
+ {file, Here} = code:is_loaded(Module),
+ ModuleRoot = filename:dirname(filename:dirname(Here)),
+ LocalPath = filename:join(ModuleRoot, FSPath),
+ static_context_handler(Prefix, LocalPath).
+
+%% Produces a handler for use with register_handler that serves up
+%% static content from a specified directory. For a non-empty prefix,
+%% a request for the bare context path (no trailing slash) is
+%% redirected 301 to "/Prefix/" so relative links in served pages
+%% resolve correctly; anything below the prefix is served from
+%% LocalPath.
+static_context_handler("", LocalPath) ->
+ fun(Req) ->
+ "/" ++ Path = Req:get(raw_path),
+ serve_file(Req, Path, LocalPath)
+ end;
+static_context_handler(Prefix, LocalPath) ->
+ fun(Req) ->
+ "/" ++ Path = Req:get(raw_path),
+ case string:substr(Path, length(Prefix) + 1) of
+ "" -> Req:respond({301, [{"Location", "/" ++ Prefix ++ "/"}], ""});
+ "/" ++ P -> serve_file(Req, P, LocalPath)
+ end
+ end.
+
+%% Serve Path from the LocalPath docroot via mochiweb. Only GET and
+%% HEAD are meaningful for static content; any other method gets a
+%% 405 with an Allow header.
+serve_file(Req, Path, LocalPath) ->
+ case Req:get(method) of
+ Method when Method =:= 'GET'; Method =:= 'HEAD' ->
+ Req:serve_file(Path, LocalPath);
+ _ ->
+ Req:respond({405, [{"Allow", "GET, HEAD"}],
+ "Only GET or HEAD supported for static content"})
+ end.
+
+%% The opposite of all those register_* functions: removes the named
+%% registration from the registry (which stops the listener when its
+%% last context goes away).
+unregister_context(Name) ->
+ rabbit_web_dispatch_registry:remove(Name).
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_app).
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for rabbit_web_dispatch.
+%% Starts the top-level supervisor, which in turn starts the
+%% dispatch registry.
+start(_Type, _StartArgs) ->
+ rabbit_web_dispatch_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for rabbit_web_dispatch.
+%% Nothing to clean up here; the supervision tree is torn down by OTP.
+stop(_State) ->
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_registry).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([add/5, remove/1, set_fallback/2, lookup/2, list_all/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(ETS, rabbitmq_web_dispatch).
+
+%% This gen_server is merely to serialise modifications to the dispatch
+%% table for listeners.
+
+%% Start the registry singleton, registered locally under the module
+%% name so the API functions below can address it as ?MODULE.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% Register a named selector/handler pair on Listener (synchronous;
+%% infinite timeout since listener startup can be slow).
+add(Name, Listener, Selector, Handler, Link) ->
+ gen_server:call(?MODULE, {add, Name, Listener, Selector, Handler, Link},
+ infinity).
+
+%% Remove the registration called Name; stops its listener if this was
+%% the last context on it.
+remove(Name) ->
+ gen_server:call(?MODULE, {remove, Name}, infinity).
+
+%% Replace the handler used when no selector matches a request on
+%% Listener (by default the HTML context listing).
+set_fallback(Listener, FallbackHandler) ->
+ gen_server:call(?MODULE, {set_fallback, Listener, FallbackHandler},
+ infinity).
+
+%% Find the handler for Req on Listener. This is on the per-request
+%% hot path, so it reads the (public) ETS table directly rather than
+%% calling the gen_server. Returns {handler, H} (the fallback when no
+%% selector matches), {lookup_failure, Reason} if a selector fun
+%% crashed, or an {error, _} tuple from lookup_dispatch/1.
+%%
+%% NOTE(review): the old-style `catch` means a value *thrown* by a
+%% selector fun would be returned as if it were a handler — selectors
+%% are expected to return booleans or crash, not throw.
+lookup(Listener, Req) ->
+ case lookup_dispatch(Listener) of
+ {ok, {Selectors, Fallback}} ->
+ case catch match_request(Selectors, Req) of
+ {'EXIT', Reason} -> {lookup_failure, Reason};
+ no_handler -> {handler, Fallback};
+ Handler -> {handler, Handler}
+ end;
+ Err ->
+ Err
+ end.
+
+%% This is called in a somewhat obfuscated manner in
+%% rabbit_mgmt_external_stats:rabbit_web_dispatch_registry_list_all()
+%% Returns every registered context as [{Path, Description, Listener}]
+%% (see list/0).
+list_all() ->
+ gen_server:call(?MODULE, list_all, infinity).
+
+%% Callback Methods
+
+%% Create the dispatch table: a named, public ETS table owned by this
+%% process (public so lookup/2 can read it without a gen_server call).
+%% No other server state is needed, hence the undefined state term.
+init([]) ->
+ ?ETS = ets:new(?ETS, [named_table, public]),
+ {ok, undefined}.
+
+%% add: make sure a mochiweb listener exists for Listener (starting it
+%% with an empty selector list and the listing fallback if new), then
+%% upsert the registration under Name. If the port is already bound
+%% with a different listener configuration we exit — the two configs
+%% cannot coexist. `ignore` (port in use, ignore_in_use set) skips
+%% the table update entirely.
+handle_call({add, Name, Listener, Selector, Handler, Link = {_, Desc}}, _From,
+ undefined) ->
+ Continue = case rabbit_web_dispatch_sup:ensure_listener(Listener) of
+ new -> set_dispatch(
+ Listener, [],
+ listing_fallback_handler(Listener)),
+ true;
+ existing -> true;
+ ignore -> false
+ end,
+ case Continue of
+ true -> case lookup_dispatch(Listener) of
+ {ok, {Selectors, Fallback}} ->
+ Selector2 = lists:keystore(
+ Name, 1, Selectors,
+ {Name, Selector, Handler, Link}),
+ set_dispatch(Listener, Selector2, Fallback);
+ {error, {different, Desc2, Listener2}} ->
+ exit({incompatible_listeners,
+ {Desc, Listener}, {Desc2, Listener2}})
+ end;
+ false -> ok
+ end,
+ {reply, ok, undefined};
+
+%% remove: delete the registration and stop the listener when no
+%% contexts remain on it. Exits not_found (via listener_by_name/1) if
+%% Name is not registered anywhere.
+handle_call({remove, Name}, _From,
+ undefined) ->
+ Listener = listener_by_name(Name),
+ {ok, {Selectors, Fallback}} = lookup_dispatch(Listener),
+ Selectors1 = lists:keydelete(Name, 1, Selectors),
+ set_dispatch(Listener, Selectors1, Fallback),
+ case Selectors1 of
+ [] -> rabbit_web_dispatch_sup:stop_listener(Listener);
+ _ -> ok
+ end,
+ {reply, ok, undefined};
+
+%% set_fallback: swap the no-match handler, keeping the selectors.
+handle_call({set_fallback, Listener, FallbackHandler}, _From,
+ undefined) ->
+ {ok, {Selectors, _OldFallback}} = lookup_dispatch(Listener),
+ set_dispatch(Listener, Selectors, FallbackHandler),
+ {reply, ok, undefined};
+
+handle_call(list_all, _From, undefined) ->
+ {reply, list(), undefined};
+
+%% Unknown calls are a programming error: log and stop.
+handle_call(Req, _From, State) ->
+ error_logger:format("Unexpected call to ~p: ~p~n", [?MODULE, Req]),
+ {stop, unknown_request, State}.
+
+%% No casts or info messages are part of this server's protocol;
+%% drain them silently.
+handle_cast(_, State) ->
+ {noreply, State}.
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+%% Drop the dispatch table on shutdown; it is owned by this process
+%% and would die with it anyway.
+terminate(_, _) ->
+ true = ets:delete(?ETS),
+ ok.
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%---------------------------------------------------------------------------
+
+%% Internal Methods
+
+%% The table is keyed by port: one row {Port, Listener, Selectors,
+%% Fallback} per active listener.
+port(Listener) -> proplists:get_value(port, Listener).
+
+%% Fetch the {Selectors, Fallback} pair for Lsnr. The first clause
+%% reuses the bound variable Lsnr in the pattern, so it only matches
+%% when the stored listener config is exactly the one asked for; a row
+%% for the same port with a *different* config falls through to the
+%% second clause and yields {error, {different, ...}}.
+lookup_dispatch(Lsnr) ->
+ case ets:lookup(?ETS, port(Lsnr)) of
+ [{_, Lsnr, S, F}] -> {ok, {S, F}};
+ [{_, Lsnr2, S, _F}] -> {error, {different, first_desc(S), Lsnr2}};
+ [] -> {error, {no_record_for_listener, Lsnr}}
+ end.
+
+%% Description of the first registered context, used in the
+%% {different, ...} error above.
+first_desc([{_N, _S, _H, {_, Desc}} | _]) -> Desc.
+
+%% Insert-or-overwrite the row for Listener's port.
+set_dispatch(Listener, Selectors, Fallback) ->
+ ets:insert(?ETS, {port(Listener), Listener, Selectors, Fallback}).
+
+%% Walk the selector tuples in registration order, returning the
+%% handler of the first selector fun that accepts Req, or no_handler
+%% if none does. Selectors after the first match are never evaluated.
+match_request([{_Name, Selector, Handler, _Link} | Remaining], Req) ->
+    case Selector(Req) of
+        true  -> Handler;
+        false -> match_request(Remaining, Req)
+    end;
+match_request([], _Req) ->
+    no_handler.
+
+%% All contexts on all listeners, as [{Path, Description, Listener}].
+list() ->
+ [{Path, Desc, Listener} ||
+ {_P, Listener, Selectors, _F} <- ets:tab2list(?ETS),
+ {_N, _S, _H, {Path, Desc}} <- Selectors].
+
+%% The listener on which registration Name lives. Exits not_found if
+%% it is registered nowhere. NOTE(review): only exactly-one and zero
+%% matches are handled — a Name registered on two listeners would
+%% crash with case_clause here.
+listener_by_name(Name) ->
+ case [L || {_P, L, S, _F} <- ets:tab2list(?ETS), contains_name(Name, S)] of
+ [Listener] -> Listener;
+ [] -> exit({not_found, Name})
+ end.
+
+%% True iff one of the selector tuples carries Name as its
+%% registration key (first element). Uses exact (=:=) comparison,
+%% matching lists:member/2 semantics.
+contains_name(Name, Selectors) ->
+    lists:any(fun({N, _Selector, _Handler, _Link}) -> N =:= Name end,
+              Selectors).
+
+%% Contexts on one listener, as [{Path, Description}]; crashes
+%% (badmatch) if the listener has no dispatch record.
+list(Listener) ->
+ {ok, {Selectors, _Fallback}} = lookup_dispatch(Listener),
+ [{Path, Desc} || {_N, _S, _H, {Path, Desc}} <- Selectors].
+
+%%---------------------------------------------------------------------------
+
+%% The default fallback handler installed when a listener is first
+%% started: renders an HTML page listing every context registered on
+%% that listener, with links relativised against the requested path.
+listing_fallback_handler(Listener) ->
+ fun(Req) ->
+ HTMLPrefix =
+ "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\">"
+ "<head><title>RabbitMQ Web Server</title></head>"
+ "<body><h1>RabbitMQ Web Server</h1><p>Contexts available:</p><ul>",
+ HTMLSuffix = "</ul></body></html>",
+ {ReqPath, _, _} = mochiweb_util:urlsplit_path(Req:get(raw_path)),
+ List =
+ case list(Listener) of
+ [] ->
+ "<li>No contexts installed</li>";
+ Contexts ->
+ [handler_listing(Path, ReqPath, Desc)
+ || {Path, Desc} <- Contexts]
+ end,
+ Req:respond({200, [], HTMLPrefix ++ List ++ HTMLSuffix})
+ end.
+
+%% One <li> entry for the context listing: an anchor whose href is
+%% Path made relative to the path the client actually requested.
+handler_listing(Path, ReqPath, Desc) ->
+ io_lib:format(
+ "<li><a href=\"~s\">~s</a></li>",
+ [rabbit_web_dispatch_util:relativise(ReqPath, "/" ++ Path), Desc]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_sup).
+
+-behaviour(supervisor).
+
+-define(SUP, ?MODULE).
+
+%% External exports
+-export([start_link/0, ensure_listener/1, stop_listener/1]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor. Registered locally as ?SUP
+%% so listener children can be added/removed by name later.
+start_link() ->
+ supervisor:start_link({local, ?SUP}, ?MODULE, []).
+
+%% Start a mochiweb listener child for Listener if one is not already
+%% running on its port. Returns `new` (just started), `existing`
+%% (already running), or `ignore` (port in use and ignore_in_use set —
+%% see check_error/2); any other startup error exits. A listener
+%% proplist without a port yields {error, {no_port_given, Listener}}
+%% — note the registry's add clause does not handle that shape and
+%% will crash on it, which is the intended fail-fast for a
+%% misconfigured listener.
+ensure_listener(Listener) ->
+ case proplists:get_value(port, Listener) of
+ undefined ->
+ {error, {no_port_given, Listener}};
+ _ ->
+ Child = {{rabbit_web_dispatch_web, name(Listener)},
+ {mochiweb_http, start, [mochi_options(Listener)]},
+ transient, 5000, worker, dynamic},
+ case supervisor:start_child(?SUP, Child) of
+ {ok, _} -> new;
+ {error, {already_started, _}} -> existing;
+ {error, {E, _}} -> check_error(Listener, E)
+ end
+ end.
+
+%% Terminate and remove the mochiweb child for Listener. Both steps
+%% are asserted to return ok: stopping an unknown listener is a bug.
+stop_listener(Listener) ->
+ Name = name(Listener),
+ ok = supervisor:terminate_child(?SUP, {rabbit_web_dispatch_web, Name}),
+ ok = supervisor:delete_child(?SUP, {rabbit_web_dispatch_web, Name}).
+
+%% @spec init([[instance()]]) -> SupervisorTree
+%% @doc supervisor callback. Starts with just the registry; listener
+%% children are added dynamically via ensure_listener/1.
+init([]) ->
+ Registry = {rabbit_web_dispatch_registry,
+ {rabbit_web_dispatch_registry, start_link, []},
+ transient, 5000, worker, dynamic},
+ {ok, {{one_for_one, 10, 10}, [Registry]}}.
+
+%% ----------------------------------------------------------------------
+
+%% Translate our listener proplist into mochiweb_http:start/1 options:
+%% inject the child name and loop fun, strip the options mochiweb does
+%% not understand (name, ignore_in_use), and expand ssl settings.
+mochi_options(Listener) ->
+ [{name, name(Listener)},
+ {loop, loopfun(Listener)} |
+ easy_ssl(proplists:delete(
+ name, proplists:delete(ignore_in_use, Listener)))].
+
+%% Build the mochiweb loop fun for Listener: look the request up in
+%% the dispatch registry and invoke the matching handler.
+%%
+%% rabbit_web_dispatch_registry:lookup/2 returns {handler, H} on
+%% success, {lookup_failure, Reason} when a selector fun crashed, or
+%% {error, Reason} when the listener has no dispatch record. The
+%% previous version never matched the lookup_failure shape (causing a
+%% case_clause exit instead of a 500) and appended the non-string
+%% Reason term directly to a string; both failure paths now render the
+%% term with ~p and reply 500.
+loopfun(Listener) ->
+    fun (Req) ->
+            case rabbit_web_dispatch_registry:lookup(Listener, Req) of
+                no_handler ->
+                    %% Kept for safety; lookup/2 currently maps "no
+                    %% selector matched" to the fallback handler.
+                    Req:not_found();
+                {lookup_failure, Reason} ->
+                    Req:respond(
+                      {500, [],
+                       "Registry Error: " ++
+                           lists:flatten(io_lib:format("~p", [Reason]))});
+                {error, Reason} ->
+                    Req:respond(
+                      {500, [],
+                       "Registry Error: " ++
+                           lists:flatten(io_lib:format("~p", [Reason]))});
+                {handler, Handler} ->
+                    Handler(Req)
+            end
+    end.
+
+%% Unique child/process name per listener, derived from the port
+%% (e.g. rabbit_web_dispatch_sup_15672). Listeners are keyed by port
+%% throughout, so one atom per port is created; ports come from local
+%% configuration, not untrusted input.
+name(Listener) ->
+ Port = proplists:get_value(port, Listener),
+ list_to_atom(atom_to_list(?MODULE) ++ "_" ++ integer_to_list(Port)).
+
+%% If the listener asks for ssl but supplies no ssl_opts of its own,
+%% borrow the broker's configured ssl_options (rabbit application
+%% env), minus verify and fail_if_no_peer_cert — presumably because
+%% browsers generally have no client certificate to present (TODO
+%% confirm rationale). Otherwise the options pass through untouched.
+easy_ssl(Options) ->
+ case {proplists:get_value(ssl, Options),
+ proplists:get_value(ssl_opts, Options)} of
+ {true, undefined} ->
+ {ok, ServerOpts} = application:get_env(rabbit, ssl_options),
+ SSLOpts = [{K, V} ||
+ {K, V} <- ServerOpts,
+ not lists:member(K, [verify, fail_if_no_peer_cert])],
+ [{ssl_opts, SSLOpts}|Options];
+ _ ->
+ Options
+ end.
+
+%% Decide what a listener startup error means: eaddrinuse is
+%% tolerated (returning `ignore`) when the listener sets
+%% ignore_in_use, so several plugins can declare the same listener;
+%% everything else is fatal.
+check_error(Listener, Error) ->
+ Ignore = proplists:get_value(ignore_in_use, Listener, false),
+ case {Error, Ignore} of
+ {eaddrinuse, true} -> ignore;
+ _ -> exit({could_not_start_listener, Listener, Error})
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_util).
+
+-export([parse_auth_header/1]).
+-export([relativise/2, unrelativise/2]).
+
+%% Parse an HTTP basic-auth Authorization header value. Returns
+%% [Username, Password] as binaries on success, or the atom `invalid`
+%% for any non-Basic scheme or a decoded credential with no colon.
+%% The split happens at the *first* colon, so passwords may contain
+%% colons.
+parse_auth_header("Basic " ++ Base64) ->
+    Decoded = base64:mime_decode_to_string(Base64),
+    case string:chr(Decoded, $:) of
+        0 ->
+            invalid;
+        Colon ->
+            [list_to_binary(string:sub_string(Decoded, 1, Colon - 1)),
+             list_to_binary(string:sub_string(Decoded, Colon + 1))]
+    end;
+parse_auth_header(_) ->
+    invalid.
+
+%% Compute a relative path that gets from absolute path F to absolute
+%% path T (both must start with "/"), e.g. for emitting relative hrefs
+%% in the context listing.
+relativise("/" ++ F, "/" ++ T) ->
+ From = string:tokens(F, "/"),
+ To = string:tokens(T, "/"),
+ string:join(relativise0(From, To), "/").
+
+%% Segment-wise helper. Clause order matters: the single-remaining-
+%% From-segment case must be checked before the shared-prefix case so
+%% that the final From segment (the "file name") is not consumed when
+%% it coincides with the next To segment.
+relativise0([H], [H|_] = To) ->
+ To;
+relativise0([H|From], [H|To]) ->
+ relativise0(From, To);
+relativise0(From, []) ->
+ lists:duplicate(length(From), "..");
+relativise0([_|From], To) ->
+ lists:duplicate(length(From), "..") ++ To;
+relativise0([], To) ->
+ To.
+
+%% Resolve relative path T against path F: an absolute target passes
+%% through unchanged, "./" segments are dropped, each "../" strips one
+%% trailing segment from F (exits not_enough_to_strip if F runs out),
+%% and a bare name is appended to F's directory part (or returned
+%% as-is when F has no directory part).
+%%
+%% The first clause previously bound F without using it, producing an
+%% unused-variable compiler warning; it is now _F.
+unrelativise(_F, "/" ++ T) -> "/" ++ T;
+unrelativise(F, "./" ++ T) -> unrelativise(F, T);
+unrelativise(F, "../" ++ T) -> unrelativise(strip_tail(F), T);
+unrelativise(F, T) -> case string:str(F, "/") of
+                          0 -> T;
+                          _ -> strip_tail(F) ++ "/" ++ T
+                      end.
+
+%% Drop the last "/"-separated segment of S ("a/b/c" -> "a/b",
+%% "a" -> ""). An empty input means a "../" tried to climb above the
+%% starting path — fail loudly.
+strip_tail("") -> exit(not_enough_to_strip);
+strip_tail(S) -> case string:rstr(S, "/") of
+ 0 -> "";
+ I -> string:left(S, I - 1)
+ end.
--- /dev/null
+%% This file contains an adapted version of webmachine_mochiweb:loop/1
+%% from webmachine (revision 0c4b60ac68b4).
+
+%% All modifications are (C) 2011-2013 GoPivotal, Inc.
+
+-module(rabbit_webmachine).
+
+%% An alternative to webmachine_mochiweb, which places the dispatch
+%% table (among other things) into the application env, and thereby
+%% makes it impossible to run more than one instance of
+%% webmachine. Since rabbit_web_dispatch is all about multi-tenanting
+%% webapps, clearly this won't do for us.
+
+%% Instead of using webmachine_mochiweb:start/1 or
+%% webmachine_mochiweb:loop/1, construct a loop procedure using
+%% makeloop/1 and supply it as the argument to
+%% rabbit_web_dispatch:register_context_handler or to mochiweb_http:start.
+
+%% We hardwire the "error handler" and use a "logging module" if
+%% supplied.
+
+-export([makeloop/1, setup/0]).
+
%% Install webmachine's standard error handler in the webmachine
%% application environment. This is the one piece of global state we
%% still share with webmachine (see module comment above about
%% avoiding its app-env-based dispatch table).
setup() ->
    application:set_env(webmachine, error_handler, webmachine_error_handler).
+
%% Build a mochiweb loop fun that routes each incoming request through
%% the given webmachine dispatch table. The returned fun is suitable
%% as the handler argument to mochiweb_http:start or
%% rabbit_web_dispatch:register_context_handler (see module comment).
%%
%% Uses webmachine's parameterized-module call style throughout:
%% {webmachine_request, State} tuples are "instances" whose calls
%% thread the request state explicitly.
makeloop(Dispatch) ->
    fun (MochiReq) ->
        Req = webmachine:new_request(mochiweb, MochiReq),
        {Path, _} = Req:path(),
        {ReqData, _} = Req:get_reqdata(),
        %% webmachine_mochiweb:loop/1 uses dispatch/4 here;
        %% however, we don't need to dispatch by the host name.
        case webmachine_dispatcher:dispatch(Path, Dispatch, ReqData) of
            {no_dispatch_match, _Host, _PathElements} ->
                %% No route matched: render and send a 404, then log.
                {ErrorHTML, ReqState1} =
                    webmachine_error_handler:render_error(
                      404, Req, {none, none, []}),
                Req1 = {webmachine_request, ReqState1},
                {ok, ReqState2} = Req1:append_to_response_body(ErrorHTML),
                Req2 = {webmachine_request, ReqState2},
                {ok, ReqState3} = Req2:send_response(404),
                maybe_log_access(ReqState3);
            {Mod, ModOpts, HostTokens, Port, PathTokens, Bindings,
             AppRoot, StringPath} ->
                %% NOTE(review): the x,x,x,x arguments look like
                %% placeholders — the bootstrap instance appears to
                %% exist only so wrap/2 can build the real resource;
                %% confirm against the webmachine_resource source.
                BootstrapResource = webmachine_resource:new(x,x,x,x),
                {ok, Resource} = BootstrapResource:wrap(Mod, ModOpts),
                {ok, RS1} = Req:load_dispatch_data(Bindings, HostTokens, Port,
                                                   PathTokens,
                                                   AppRoot, StringPath),
                XReq1 = {webmachine_request, RS1},
                {ok, RS2} = XReq1:set_metadata('resource_module', Mod),
                try
                    webmachine_decision_core:handle_request(Resource, RS2)
                catch
                    %% Resource crashed: answer 500 rather than
                    %% dropping the connection, and still log access.
                    error:_ ->
                        FailReq = {webmachine_request, RS2},
                        {ok, RS3} = FailReq:send_response(500),
                        maybe_log_access(RS3)
                end
        end
    end.
+
%% Extract the access-log data from a finished request's state and
%% hand it to webmachine's access logger.
maybe_log_access(FinalState) ->
    Request = {webmachine_request, FinalState},
    {AccessData, _State} = Request:log_data(),
    webmachine_log:log_access(AccessData).
--- /dev/null
%% OTP application resource file for rabbitmq-web-dispatch.
%% The "%%VSN%%" placeholder is substituted with the real version by
%% the build, and mochiweb/webmachine must be started before this app.
{application, rabbitmq_web_dispatch,
 [{description, "RabbitMQ Web Dispatcher"},
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  {mod, {rabbit_web_dispatch_app, []}},
  {env, []},
  {applications, [kernel, stdlib, mochiweb, webmachine]}]}.
--- /dev/null
<html>
  <head>
    <!-- This title text is matched by query_static_resource_test in
         rabbit_web_dispatch_test; keep it in sync with the test. -->
    <title>RabbitMQ HTTP Server Test Page</title>
  </head>
  <body>
  </body>
</html>
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_test).
+
+-include_lib("eunit/include/eunit.hrl").
+
%% Register a static context serving priv/www on port 12345, then
%% fetch the test page over HTTP and check the known title string is
%% present in the response body. Requires the dispatcher machinery and
%% inets' httpc to be running.
query_static_resource_test() ->
    %% TODO this is a fairly rubbish test, but not as bad as it was
    rabbit_web_dispatch:register_static_context(test, [{port, 12345}],
                                                "rabbit_web_dispatch_test",
                                                ?MODULE, "priv/www", "Test"),
    {ok, {_Status, _Headers, Body}} =
        httpc:request("http://localhost:12345/rabbit_web_dispatch_test/index.html"),
    %% string:str/2 returns 0 when the substring is absent.
    ?assert(string:str(Body, "RabbitMQ HTTP Server Test Page") /= 0).
+
%% Adding the same named binding twice must leave exactly one "/foo"
%% entry in the registry listing.
add_idempotence_test() ->
    Handler = fun(_Req) -> ok end,
    Link = {"/foo", "Foo"},
    Opts = [{port, 12345}],
    rabbit_web_dispatch_registry:add(foo, Opts, Handler, Handler, Link),
    rabbit_web_dispatch_registry:add(foo, Opts, Handler, Handler, Link),
    Matches = [ok || {"/foo", _, _} <- rabbit_web_dispatch_registry:list_all()],
    ?assertEqual(1, length(Matches)),
    passed.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_test_unit).
+
+-include_lib("eunit/include/eunit.hrl").
+
%% Table-driven check of rabbit_web_dispatch_util:relativise/2:
%% each case is {ExpectedRelative, From, To}.
relativise_test() ->
    Rel = fun rabbit_web_dispatch_util:relativise/2,
    Cases = [{"baz",         "/foo/bar/bash", "/foo/bar/baz"},
             {"../bax/baz",  "/foo/bar/bash", "/foo/bax/baz"},
             {"../bax/baz",  "/bar/bash",     "/bax/baz"},
             {"..",          "/foo/bar/bash", "/foo/bar"},
             {"../..",       "/foo/bar/bash", "/foo"},
             {"bar/baz",     "/foo/bar",      "/foo/bar/baz"},
             {"foo",         "/",             "/foo"}],
    lists:foreach(fun({Expected, From, To}) ->
                          ?assertEqual(Expected, Rel(From, To))
                  end, Cases).
+
%% Table-driven check of rabbit_web_dispatch_util:unrelativise/2:
%% each case is {ExpectedPath, File, Relative}.
unrelativise_test() ->
    Un = fun rabbit_web_dispatch_util:unrelativise/2,
    Cases = [{"/foo/bar", "/foo/foo", "bar"},
             {"/foo/bar", "/foo/foo", "./bar"},
             {"bar",      "foo",      "bar"},
             {"/baz/bar", "/foo/foo", "../baz/bar"}],
    lists:foreach(fun({Expected, File, Relative}) ->
                          ?assertEqual(Expected, Un(File, Relative))
                  end, Cases).
--- /dev/null
+This package, the rabbitmq-web-stomp-examples, is licensed under the
+MPL. For the MPL, please see LICENSE-MPL-RabbitMQ.
+
+priv/stomp.js is a part of stomp-websocket project
+(https://github.com/jmesnil/stomp-websocket) and is released under
+APL2. For the license see LICENSE-APL2-Stomp-Websocket.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+# Delegate all build targets to the shared umbrella makefile used by the
+# RabbitMQ plugin repositories (build, test, packaging).
+include ../umbrella.mk
--- /dev/null
+
+RabbitMQ-Web-Stomp-Examples plugin
+==================================
+
+This project contains a few basic examples of RabbitMQ-Web-STOMP plugin
+usage.
+
+Once installed, the server will bind to port 15670 and serve a few
+static HTML files from there:
+
+ * http://127.0.0.1:15670/
+
+Installation
+------------
+
+Generic build instructions are at:
+
+ * http://www.rabbitmq.com/plugin-development.html
+
+Instructions on how to install a plugin into RabbitMQ broker:
+
+ * http://www.rabbitmq.com/plugins.html#installing-plugins
--- /dev/null
+# Mark this plugin as part of the public release artefacts.
+RELEASABLE:=true
+# Plugin dependencies resolved by the umbrella build system.
+DEPS:=rabbitmq-web-dispatch rabbitmq-web-stomp rabbitmq-server
+
+# Hook run while assembling the application archive: bundle the static
+# example pages (priv/) into the packaged application directory.
+define construct_app_commands
+ cp -r $(PACKAGE_DIR)/priv $(APP_DIR)
+endef
--- /dev/null
+<!doctype html>
+<html><head>
+ <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"></script>
+ <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+ <script src="stomp.js"></script>
+
+ <style>
+ #cnvs {
+ border: none;
+ -moz-border-radius: 4px;
+ cursor: url(pencil.cur),crosshair;
+ position: absolute;
+ overflow: hidden;
+ width: 100%;
+ height: 100%;
+ }
+ #cnvs:active {
+ cursor: url(pencil.cur),crosshair;
+ }
+ body {
+ overflow: hidden;
+ }
+ </style>
+ <title>RabbitMQ Web STOMP Examples: Bunny Drawing</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+ <h1><a href="index.html">RabbitMQ Web STOMP Examples</a> > Bunny Drawing</h1>
+ <canvas id="cnvs"></canvas>
+<script>
+var send; var draw;
+send = draw = function(){};
+
+var lines = [];
+
+var canvas = document.getElementById('cnvs');
+
+if (canvas.getContext) {
+ var ctx = canvas.getContext('2d');
+
+ var img = new Image();
+ img.onload = function() {
+ ctx.drawImage(img, 230, 160);
+ };
+ img.src = 'bunny.png';
+
+ draw = function(p) {
+ ctx.beginPath();
+ ctx.moveTo(p.x1, p.y1);
+ ctx.lineTo(p.x2, p.y2);
+ ctx.stroke();
+ ctx.drawImage(img, 230, 160);
+ };
+
+ var do_resize = function() {
+ canvas.width = window.innerWidth;
+ canvas.height = window.innerHeight;
+
+ ctx.font = "bold 20px sans-serif";
+ ctx.fillStyle = "#444";
+ ctx.fillText("Draw wings on the bunny!", 260, 100);
+ ctx.font = "normal 16px sans-serif";
+ ctx.fillStyle = "#888";
+ ctx.fillText("(For more fun open a second browser)", 255, 130);
+
+ ctx.drawImage(img, 230, 160);
+
+ ctx.strokeStyle = "#fa0";
+ ctx.lineWidth = "10";
+ ctx.lineCap = "round";
+
+ $.map(lines, function (p) {
+ draw(p);
+ });
+ };
+
+ $(window).resize(do_resize);
+ $(do_resize);
+
+
+ var pos = $('#cnvs').position();
+ var prev = null;
+ $('#cnvs').mousedown(function(evt) {
+ evt.preventDefault();
+ evt.stopPropagation();
+ $('#cnvs').bind('mousemove', function(e) {
+ var curr = {x:e.pageX-pos.left, y:e.pageY-pos.top};
+ if (!prev) {
+ prev = curr;
+ return;
+ }
+ if (Math.sqrt(Math.pow(prev.x - curr.x, 2) +
+ Math.pow(prev.y - curr.y, 2)) > 8) {
+ var p = {x1:prev.x, y1:prev.y, x2:curr.x, y2:curr.y}
+ lines.push(p);
+ draw(p);
+ send(JSON.stringify(p));
+ prev = curr;
+ }
+ });
+ });
+ $('html').mouseup(function() {
+ prev = null;
+ $('#cnvs').unbind('mousemove');
+ });
+}
+else {
+ document.write("Sorry - this demo requires a browser with canvas tag support.");
+}
+
+// Stomp.js boilerplate
+var ws = new SockJS('http://' + window.location.hostname + ':15674/stomp');
+var client = Stomp.over(ws);
+
+// SockJS does not support heart-beat: disable heart-beats
+client.heartbeat.outgoing = 0;
+client.heartbeat.incoming = 0;
+
+client.debug = function() {
+ if (window.console && console.log && console.log.apply) {
+ console.log.apply(console, arguments);
+ }
+};
+
+send = function(data) {
+ client.send('/topic/bunny', {}, data);
+};
+
+var on_connect = function(x) {
+ id = client.subscribe('/topic/bunny', function(d) {
+ var p = JSON.parse(d.body);
+ lines.push(p);
+ draw(p, true);
+ });
+};
+var on_error = function() {
+ console.log('error');
+};
+client.connect('guest', 'guest', on_connect, on_error, '/');
+
+</script>
+</body></html>
--- /dev/null
+<!DOCTYPE html>
+<html><head>
+ <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.2/jquery.min.js"></script>
+ <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+ <script src="stomp.js"></script>
+ <style>
+ .box {
+ width: 440px;
+ float: left;
+ margin: 0 20px 0 20px;
+ }
+
+ .box div, .box input {
+ border: 1px solid;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ width: 100%;
+ padding: 5px;
+ margin: 3px 0 10px 0;
+ }
+
+ .box div {
+ border-color: grey;
+ height: 300px;
+ overflow: auto;
+ }
+
+ div code {
+ display: block;
+ }
+
+ #first div code {
+ -moz-border-radius: 2px;
+ border-radius: 2px;
+ border: 1px solid #eee;
+ margin-bottom: 5px;
+ }
+
+ #second div {
+ font-size: 0.8em;
+ }
+ </style>
+ <title>RabbitMQ Web STOMP Examples : Echo Server</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+ <h1><a href="index.html">RabbitMQ Web STOMP Examples</a> > Echo Server</h1>
+
+ <div id="first" class="box">
+ <h2>Received</h2>
+ <div></div>
+ <form><input autocomplete="off" value="Type here..."></input></form>
+ </div>
+
+ <div id="second" class="box">
+ <h2>Logs</h2>
+ <div></div>
+ </div>
+
+ <script>
+ var has_had_focus = false;
+ var pipe = function(el_name, send) {
+ var div = $(el_name + ' div');
+ var inp = $(el_name + ' input');
+ var form = $(el_name + ' form');
+
+ var print = function(m, p) {
+ p = (p === undefined) ? '' : JSON.stringify(p);
+ div.append($("<code>").text(m + ' ' + p));
+ div.scrollTop(div.scrollTop() + 10000);
+ };
+
+ if (send) {
+ form.submit(function() {
+ send(inp.val());
+ inp.val('');
+ return false;
+ });
+ }
+ return print;
+ };
+
+ // Stomp.js boilerplate
+ var ws = new SockJS('http://' + window.location.hostname + ':15674/stomp');
+ var client = Stomp.over(ws);
+
+ // SockJS does not support heart-beat: disable heart-beats
+ client.heartbeat.outgoing = 0;
+ client.heartbeat.incoming = 0;
+ client.debug = pipe('#second');
+
+ var print_first = pipe('#first', function(data) {
+ client.send('/topic/test', {"content-type":"text/plain"}, data);
+ });
+ var on_connect = function(x) {
+ id = client.subscribe("/topic/test", function(d) {
+ print_first(d.body);
+ });
+ };
+ var on_error = function() {
+ console.log('error');
+ };
+ client.connect('guest', 'guest', on_connect, on_error, '/');
+
+ $('#first input').focus(function() {
+ if (!has_had_focus) {
+ has_had_focus = true;
+ $(this).val("");
+ }
+ });
+ </script>
+</body></html>
--- /dev/null
+<!doctype html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title>RabbitMQ Web STOMP Examples</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+ </head>
+ <body>
+ <h1>RabbitMQ Web STOMP Examples</h1>
+ <ul class="menu">
+ <li><a href="echo.html">Simple Echo Server</a></li>
+ <li><a href="bunny.html">Bunny Drawing</a></li>
+ <li><a href="temp-queue.html">Temporary Queue Example</a></li>
+ </ul>
+ </body>
+</html>
--- /dev/null
+/* Shared stylesheet for the RabbitMQ Web STOMP example pages. */
+
+body {
+ font-family: "Arial";
+ color: #444;
+}
+
+/* Orange headings, deliberately not bold. */
+h1, h2 {
+ color: #f60;
+ font-weight: normal;
+}
+
+h1 {
+ font-size: 1.5em;
+}
+
+h2 {
+ font-size: 1.2em;
+ margin: 0;
+}
+
+/* Links rendered as small rounded "buttons". */
+a {
+ color: #f60;
+ border: 1px solid #fda;
+ background: #fff0e0;
+ border-radius: 3px; -moz-border-radius: 3px;
+ padding: 2px;
+ text-decoration: none;
+ /* font-weight: bold; */
+}
+
+/* Index page menu: plain vertical list without bullets. */
+ul.menu {
+ list-style-type: none;
+ padding: 0;
+ margin: 0;
+}
+
+ul.menu li {
+ padding: 5px 0;
+}
--- /dev/null
+// Generated by CoffeeScript 1.6.3
+/*
+ Stomp Over WebSocket http://www.jmesnil.net/stomp-websocket/doc/ | Apache License V2.0
+*/
+
+
+// NOTE(review): vendored, machine-generated output of the CoffeeScript
+// compiler. Comments below were added for orientation only; to change
+// behaviour, edit the upstream CoffeeScript source and regenerate.
+(function() {
+ var Byte, Client, Frame, Stomp,
+ __hasProp = {}.hasOwnProperty;
+
+ // Control bytes of the STOMP wire protocol: LF separates header lines,
+ // NULL terminates a frame.
+ Byte = {
+ LF: '\x0A',
+ NULL: '\x00'
+ };
+
+ // A single STOMP frame: command, header map and (string) body.
+ Frame = (function() {
+ function Frame(command, headers, body) {
+ this.command = command;
+ this.headers = headers != null ? headers : {};
+ this.body = body != null ? body : '';
+ }
+
+ // Serialize this frame to its wire representation (without the
+ // trailing NULL; see Frame.marshall).
+ Frame.prototype.toString = function() {
+ var lines, name, value, _ref;
+ lines = [this.command];
+ _ref = this.headers;
+ for (name in _ref) {
+ if (!__hasProp.call(_ref, name)) continue;
+ value = _ref[name];
+ lines.push("" + name + ":" + value);
+ }
+ if (this.body) {
+ lines.push("content-length:" + ('' + this.body).length);
+ }
+ lines.push(Byte.LF + this.body);
+ return lines.join(Byte.LF);
+ };
+
+ // Parse one frame out of `data`: command line, header lines, then a
+ // body read per content-length when present, otherwise up to NULL.
+ Frame._unmarshallSingle = function(data) {
+ var body, chr, command, divider, headerLines, headers, i, idx, len, line, start, trim, _i, _j, _ref, _ref1;
+ divider = data.search(RegExp("" + Byte.LF + Byte.LF));
+ headerLines = data.substring(0, divider).split(Byte.LF);
+ command = headerLines.shift();
+ headers = {};
+ trim = function(str) {
+ return str.replace(/^\s+|\s+$/g, '');
+ };
+ line = idx = null;
+ for (i = _i = 0, _ref = headerLines.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? ++_i : --_i) {
+ line = headerLines[i];
+ idx = line.indexOf(':');
+ headers[trim(line.substring(0, idx))] = trim(line.substring(idx + 1));
+ }
+ body = '';
+ start = divider + 2;
+ if (headers['content-length']) {
+ len = parseInt(headers['content-length']);
+ body = ('' + data).substring(start, start + len);
+ } else {
+ chr = null;
+ for (i = _j = start, _ref1 = data.length; start <= _ref1 ? _j < _ref1 : _j > _ref1; i = start <= _ref1 ? ++_j : --_j) {
+ chr = data.charAt(i);
+ if (chr === Byte.NULL) {
+ break;
+ }
+ body += chr;
+ }
+ }
+ return new Frame(command, headers, body);
+ };
+
+ // Split a received chunk (which may carry several frames) on
+ // NULL(+LF) boundaries and parse each non-empty piece.
+ Frame.unmarshall = function(datas) {
+ var data;
+ return (function() {
+ var _i, _len, _ref, _results;
+ _ref = datas.split(RegExp("" + Byte.NULL + Byte.LF + "*"));
+ _results = [];
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ data = _ref[_i];
+ if ((data != null ? data.length : void 0) > 0) {
+ _results.push(Frame._unmarshallSingle(data));
+ }
+ }
+ return _results;
+ })();
+ };
+
+ // Build the NULL-terminated wire string for a frame.
+ Frame.marshall = function(command, headers, body) {
+ var frame;
+ frame = new Frame(command, headers, body);
+ return frame.toString() + Byte.NULL;
+ };
+
+ return Frame;
+
+ })();
+
+ // STOMP client driving a WebSocket-compatible transport (a native
+ // WebSocket or a SockJS connection passed to Stomp.over).
+ Client = (function() {
+ function Client(ws) {
+ this.ws = ws;
+ this.ws.binaryType = "arraybuffer";
+ this.counter = 0;
+ this.connected = false;
+ this.heartbeat = {
+ outgoing: 10000,
+ incoming: 10000
+ };
+ this.maxWebSocketFrameSize = 16 * 1024;
+ this.subscriptions = {};
+ }
+
+ // Default debug sink: the browser console when one exists. Callers
+ // may override (the examples do) or replace with a no-op to silence.
+ Client.prototype.debug = function(message) {
+ var _ref;
+ return typeof window !== "undefined" && window !== null ? (_ref = window.console) != null ? _ref.log(message) : void 0 : void 0;
+ };
+
+ // Marshal and send a frame, splitting it into chunks no larger than
+ // maxWebSocketFrameSize.
+ Client.prototype._transmit = function(command, headers, body) {
+ var out;
+ out = Frame.marshall(command, headers, body);
+ if (typeof this.debug === "function") {
+ this.debug(">>> " + out);
+ }
+ while (true) {
+ if (out.length > this.maxWebSocketFrameSize) {
+ this.ws.send(out.substring(0, this.maxWebSocketFrameSize));
+ out = out.substring(this.maxWebSocketFrameSize);
+ if (typeof this.debug === "function") {
+ this.debug("remaining = " + out.length);
+ }
+ } else {
+ return this.ws.send(out);
+ }
+ }
+ };
+
+ // Negotiate STOMP 1.1+ heart-beating from the CONNECTED headers:
+ // start a "pinger" interval sending LF, and a "ponger" interval that
+ // closes the socket when the server has gone silent for too long.
+ Client.prototype._setupHeartbeat = function(headers) {
+ var serverIncoming, serverOutgoing, ttl, v, _ref, _ref1,
+ _this = this;
+ if ((_ref = headers.version) !== Stomp.VERSIONS.V1_1 && _ref !== Stomp.VERSIONS.V1_2) {
+ return;
+ }
+ _ref1 = (function() {
+ var _i, _len, _ref1, _results;
+ _ref1 = headers['heart-beat'].split(",");
+ _results = [];
+ for (_i = 0, _len = _ref1.length; _i < _len; _i++) {
+ v = _ref1[_i];
+ _results.push(parseInt(v));
+ }
+ return _results;
+ })(), serverOutgoing = _ref1[0], serverIncoming = _ref1[1];
+ if (!(this.heartbeat.outgoing === 0 || serverIncoming === 0)) {
+ ttl = Math.max(this.heartbeat.outgoing, serverIncoming);
+ if (typeof this.debug === "function") {
+ this.debug("send PING every " + ttl + "ms");
+ }
+ this.pinger = typeof window !== "undefined" && window !== null ? window.setInterval(function() {
+ _this.ws.send(Byte.LF);
+ return typeof _this.debug === "function" ? _this.debug(">>> PING") : void 0;
+ }, ttl) : void 0;
+ }
+ if (!(this.heartbeat.incoming === 0 || serverOutgoing === 0)) {
+ ttl = Math.max(this.heartbeat.incoming, serverOutgoing);
+ if (typeof this.debug === "function") {
+ this.debug("check PONG every " + ttl + "ms");
+ }
+ return this.ponger = typeof window !== "undefined" && window !== null ? window.setInterval(function() {
+ var delta;
+ delta = Date.now() - _this.serverActivity;
+ if (delta > ttl * 2) {
+ if (typeof _this.debug === "function") {
+ _this.debug("did not receive server activity for the last " + delta + "ms");
+ }
+ return _this.ws.close();
+ }
+ }, ttl) : void 0;
+ }
+ };
+
+ // Open the STOMP session: installs onmessage/onclose/onopen handlers
+ // on the transport and sends the CONNECT frame once the socket opens.
+ // Incoming frames are dispatched by command to connectCallback,
+ // per-subscription callbacks, onreceipt, or errorCallback.
+ Client.prototype.connect = function(login, passcode, connectCallback, errorCallback, vhost) {
+ var _this = this;
+ this.connectCallback = connectCallback;
+ if (typeof this.debug === "function") {
+ this.debug("Opening Web Socket...");
+ }
+ this.ws.onmessage = function(evt) {
+ var arr, c, data, frame, onreceive, _i, _len, _ref, _results;
+ data = typeof ArrayBuffer !== 'undefined' && evt.data instanceof ArrayBuffer ? (arr = new Uint8Array(evt.data), typeof _this.debug === "function" ? _this.debug("--- got data length: " + arr.length) : void 0, ((function() {
+ var _i, _len, _results;
+ _results = [];
+ for (_i = 0, _len = arr.length; _i < _len; _i++) {
+ c = arr[_i];
+ _results.push(String.fromCharCode(c));
+ }
+ return _results;
+ })()).join('')) : evt.data;
+ _this.serverActivity = Date.now();
+ if (data === Byte.LF) {
+ if (typeof _this.debug === "function") {
+ _this.debug("<<< PONG");
+ }
+ return;
+ }
+ if (typeof _this.debug === "function") {
+ _this.debug("<<< " + data);
+ }
+ _ref = Frame.unmarshall(data);
+ _results = [];
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ frame = _ref[_i];
+ switch (frame.command) {
+ case "CONNECTED":
+ if (typeof _this.debug === "function") {
+ _this.debug("connected to server " + frame.headers.server);
+ }
+ _this.connected = true;
+ _this._setupHeartbeat(frame.headers);
+ _results.push(typeof _this.connectCallback === "function" ? _this.connectCallback(frame) : void 0);
+ break;
+ case "MESSAGE":
+ onreceive = _this.subscriptions[frame.headers.subscription] || _this.onreceive;
+ if (onreceive) {
+ _results.push(onreceive(frame));
+ } else {
+ _results.push(typeof _this.debug === "function" ? _this.debug("Unhandled received MESSAGE: " + frame) : void 0);
+ }
+ break;
+ case "RECEIPT":
+ _results.push(typeof _this.onreceipt === "function" ? _this.onreceipt(frame) : void 0);
+ break;
+ case "ERROR":
+ _results.push(typeof errorCallback === "function" ? errorCallback(frame) : void 0);
+ break;
+ default:
+ _results.push(typeof _this.debug === "function" ? _this.debug("Unhandled frame: " + frame) : void 0);
+ }
+ }
+ return _results;
+ };
+ this.ws.onclose = function() {
+ var msg;
+ msg = "Whoops! Lost connection to " + _this.ws.url;
+ if (typeof _this.debug === "function") {
+ _this.debug(msg);
+ }
+ _this._cleanUp();
+ return typeof errorCallback === "function" ? errorCallback(msg) : void 0;
+ };
+ return this.ws.onopen = function() {
+ var headers;
+ if (typeof _this.debug === "function") {
+ _this.debug('Web Socket Opened...');
+ }
+ headers = {
+ "accept-version": Stomp.VERSIONS.supportedVersions(),
+ "heart-beat": [_this.heartbeat.outgoing, _this.heartbeat.incoming].join(',')
+ };
+ if (vhost) {
+ headers.host = vhost;
+ }
+ if (login) {
+ headers.login = login;
+ }
+ if (passcode) {
+ headers.passcode = passcode;
+ }
+ return _this._transmit("CONNECT", headers);
+ };
+ };
+
+ // Send DISCONNECT, close the socket (suppressing the onclose error
+ // path first) and stop the heart-beat timers.
+ Client.prototype.disconnect = function(disconnectCallback) {
+ this._transmit("DISCONNECT");
+ this.ws.onclose = null;
+ this.ws.close();
+ this._cleanUp();
+ return typeof disconnectCallback === "function" ? disconnectCallback() : void 0;
+ };
+
+ // Reset connection state and cancel any heart-beat intervals.
+ Client.prototype._cleanUp = function() {
+ this.connected = false;
+ if (this.pinger) {
+ if (typeof window !== "undefined" && window !== null) {
+ window.clearInterval(this.pinger);
+ }
+ }
+ if (this.ponger) {
+ return typeof window !== "undefined" && window !== null ? window.clearInterval(this.ponger) : void 0;
+ }
+ };
+
+ // Send a SEND frame to `destination` with optional headers and body.
+ Client.prototype.send = function(destination, headers, body) {
+ if (headers == null) {
+ headers = {};
+ }
+ if (body == null) {
+ body = '';
+ }
+ headers.destination = destination;
+ return this._transmit("SEND", headers, body);
+ };
+
+ // Subscribe `callback` to `destination`; generates a subscription id
+ // when none is supplied and returns it (for unsubscribe).
+ Client.prototype.subscribe = function(destination, callback, headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ if (!headers.id) {
+ headers.id = "sub-" + this.counter++;
+ }
+ headers.destination = destination;
+ this.subscriptions[headers.id] = callback;
+ this._transmit("SUBSCRIBE", headers);
+ return headers.id;
+ };
+
+ // Drop the local callback and send UNSUBSCRIBE for `id`.
+ Client.prototype.unsubscribe = function(id) {
+ delete this.subscriptions[id];
+ return this._transmit("UNSUBSCRIBE", {
+ id: id
+ });
+ };
+
+ // Transaction control frames: BEGIN / COMMIT / ABORT.
+ Client.prototype.begin = function(transaction) {
+ return this._transmit("BEGIN", {
+ transaction: transaction
+ });
+ };
+
+ Client.prototype.commit = function(transaction) {
+ return this._transmit("COMMIT", {
+ transaction: transaction
+ });
+ };
+
+ Client.prototype.abort = function(transaction) {
+ return this._transmit("ABORT", {
+ transaction: transaction
+ });
+ };
+
+ // Acknowledge (ACK) a received message for a given subscription.
+ Client.prototype.ack = function(messageID, subscription, headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ headers["message-id"] = messageID;
+ headers.subscription = subscription;
+ return this._transmit("ACK", headers);
+ };
+
+ // Negatively acknowledge (NACK) a received message.
+ Client.prototype.nack = function(messageID, subscription, headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ headers["message-id"] = messageID;
+ headers.subscription = subscription;
+ return this._transmit("NACK", headers);
+ };
+
+ return Client;
+
+ })();
+
+ // Public entry points: Stomp.client(url) creates a WebSocket-backed
+ // client; Stomp.over(ws) wraps an existing socket-like object (SockJS).
+ Stomp = {
+ libVersion: "2.0.0-next",
+ VERSIONS: {
+ V1_0: '1.0',
+ V1_1: '1.1',
+ V1_2: '1.2',
+ supportedVersions: function() {
+ return '1.1,1.0';
+ }
+ },
+ client: function(url, protocols) {
+ var klass, ws;
+ if (protocols == null) {
+ protocols = ['v10.stomp', 'v11.stomp'];
+ }
+ klass = Stomp.WebSocketClass || WebSocket;
+ ws = new klass(url, protocols);
+ return new Client(ws);
+ },
+ over: function(ws) {
+ return new Client(ws);
+ },
+ Frame: Frame
+ };
+
+ // Export for browser, CommonJS (test harness) or web-worker contexts.
+ if (typeof window !== "undefined" && window !== null) {
+ window.Stomp = Stomp;
+ } else if (typeof exports !== "undefined" && exports !== null) {
+ exports.Stomp = Stomp;
+ Stomp.WebSocketClass = require('./test/server.mock.js').StompServerMock;
+ } else {
+ self.Stomp = Stomp;
+ }
+
+}).call(this);
--- /dev/null
+<!DOCTYPE html>
+<html><head>
+ <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.2/jquery.min.js"></script>
+ <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+ <script src="stomp.js"></script>
+ <style>
+ .box {
+ width: 440px;
+ float: left;
+ margin: 0 20px 0 20px;
+ }
+
+ .box div, .box input {
+ border: 1px solid;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ width: 100%;
+ padding: 5px;
+ margin: 3px 0 10px 0;
+ }
+
+ .box div {
+ border-color: grey;
+ height: 300px;
+ overflow: auto;
+ }
+
+ div code {
+ display: block;
+ }
+
+ #first div code {
+ -moz-border-radius: 2px;
+ border-radius: 2px;
+ border: 1px solid #eee;
+ margin-bottom: 5px;
+ }
+
+ #second div {
+ font-size: 0.8em;
+ }
+ </style>
+ <title>RabbitMQ Web STOMP Examples : Temporary Queue</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+ <h1><a href="index.html">RabbitMQ Web STOMP Examples</a> > Temporary Queue</h1>
+
+ <p>When you type text in the form's input, the application will send a message to the <code>/queue/test</code> destination
+ with the <code>reply-to</code> header set to <code>/temp-queue/foo</code>.</p>
+ <p>The STOMP client sets a default <code>onreceive</code> callback to receive messages from this temporary queue and display the message's text.</p>
+ <p>Finally, the client subscribes to the <code>/queue/test</code> destination. When it receives message from this destination, it reverses the message's
+ text and reply by sending the reversed text to the destination defined by the message's <code>reply-to</code> header.</p>
+
+ <div id="first" class="box">
+ <h2>Received</h2>
+ <div></div>
+ <form><input autocomplete="off" placeholder="Type here..."></input></form>
+ </div>
+
+ <div id="second" class="box">
+ <h2>Logs</h2>
+ <div></div>
+ </div>
+
+ <script>
+ var ws = new SockJS('http://' + window.location.hostname + ':15674/stomp');
+ var client = Stomp.over(ws);
+ // SockJS does not support heart-beat: disable heart-beats
+ client.heartbeat.incoming = 0;
+ client.heartbeat.outgoing = 0;
+
+ client.debug = function(e) {
+ $('#second div').append($("<code>").text(e));
+ };
+
+ // default receive callback to get message from temporary queues
+ client.onreceive = function(m) {
+ $('#first div').append($("<code>").text(m.body));
+ }
+
+ var on_connect = function(x) {
+ id = client.subscribe("/queue/test", function(m) {
+ // reply by sending the reversed text to the temp queue defined in the "reply-to" header
+ var reversedText = m.body.split("").reverse().join("");
+ client.send(m.headers['reply-to'], {"content-type":"text/plain"}, reversedText);
+ });
+ };
+ var on_error = function() {
+ console.log('error');
+ };
+ client.connect('guest', 'guest', on_connect, on_error, '/');
+
+ $('#first form').submit(function() {
+ var text = $('#first form input').val();
+ if (text) {
+ client.send('/queue/test', {'reply-to': '/temp-queue/foo'}, text);
+ $('#first form input').val("");
+ }
+ return false;
+ });
+ </script>
+</body></html>
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_examples_app).
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+-behaviour(supervisor).
+-export([init/1]).
+
+%% Application callback: register the static example pages (served from
+%% the plugin's priv/ directory) on the listener taken from the
+%% application environment, then start the childless supervisor this
+%% OTP application needs as its top-level process.
+start(_Type, _StartArgs) ->
+ {ok, Listener} = application:get_env(rabbitmq_web_stomp_examples, listener),
+ {ok, _} = rabbit_web_dispatch:register_static_context(
+ web_stomp_examples, Listener, "web-stomp-examples", ?MODULE,
+ "priv", "WEB-STOMP: examples"),
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% Application callback: tear down the context registered in start/2.
+stop(_State) ->
+ rabbit_web_dispatch:unregister_context(web_stomp_examples),
+ ok.
+
+%% Supervisor callback: no children; exists only to satisfy OTP.
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.
--- /dev/null
+%% OTP application resource file for the Web STOMP examples plugin.
+%% The "%%VSN%%" placeholder is substituted with the real version at
+%% build time; the listener env entry fixes the HTTP port the static
+%% example pages are served on.
+{application, rabbitmq_web_stomp_examples,
+ [{description, "Rabbit WEB-STOMP - examples"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_web_stomp_examples_app, []}},
+ {env, [{listener, [{port, 15670}]}]},
+ {applications, [kernel, stdlib, rabbitmq_web_dispatch, rabbitmq_web_stomp]}]}.
--- /dev/null
+This package, the rabbitmq-web-stomp, is licensed under the MPL. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
--- /dev/null
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patents Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is RabbitMQ.
+
+ The Initial Developer of the Original Code is GoPivotal, Inc.
+ Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.''
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+RabbitMQ-Web-Stomp plugin
+=========================
+
+This project is a simple bridge between the "RabbitMQ-stomp" plugin
+and SockJS.
+
+Once started, the plugin opens a SockJS endpoint on the prefix "/stomp"
+on port 15674. For example, a valid SockJS endpoint URL may look like:
+"http://127.0.0.1:15674/stomp".
+
+Once the server is started, you should be able to establish a SockJS
+connection to this URL. You will be able to communicate using the
+usual STOMP protocol over it. For example, a page using Jeff Mesnil's
+"stomp-websocket" project may look like this:
+
+
+ <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+ <script src="stomp.js"></script>
+ <script>
+ Stomp.WebSocketClass = SockJS;
+
+ var client = Stomp.client('http://127.0.0.1:15674/stomp');
+ var on_connect = function() {
+ console.log('connected');
+ };
+ var on_error = function() {
+ console.log('error');
+ };
+ client.connect('guest', 'guest', on_connect, on_error, '/');
+ [...]
+
+See the "RabbitMQ-Web-Stomp-examples" plugin for more details.
+
+
+Installation
+------------
+
+Generic build instructions are at:
+
+ * http://www.rabbitmq.com/plugin-development.html
+
+Instructions on how to install a plugin into RabbitMQ broker:
+
+ * http://www.rabbitmq.com/plugins.html#installing-plugins
+
--- /dev/null
+RELEASABLE:=true
+DEPS:=cowboy-wrapper sockjs-erlang-wrapper rabbitmq-stomp
+
+WITH_BROKER_TEST_COMMANDS:=rabbit_ws_test_all:all_tests()
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%%----------------------------------------------------------------------------
+
+%% application callback: set up the SockJS/Cowboy listeners, then start
+%% the top-level supervisor. init/0 must succeed before any client can
+%% connect, hence the assertive ok-match.
+-spec start(_, _) -> {ok, pid()}.
+start(_Type, _StartArgs) ->
+    ok = rabbit_ws_sockjs:init(),
+    rabbit_ws_sup:start_link().
+
+%% application callback: nothing to clean up here; listeners die with
+%% their owning processes.
+-spec stop(_) -> ok.
+stop(_State) -> ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_client).
+-behaviour(gen_server).
+
+-export([start_link/1]).
+-export([sockjs_msg/2, sockjs_closed/1]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2,
+ code_change/3, handle_cast/2]).
+
+-record(state, {conn, processor, parse_state}).
+
+%%----------------------------------------------------------------------------
+
+%% Start an (unregistered) client gen_server; Params is the
+%% {Processor, Conn} pair handed straight to init/1.
+start_link(Params) ->
+    gen_server:start_link(?MODULE, Params, []).
+
+%% Forward a chunk of data received on the SockJS connection to the
+%% client process, asynchronously.
+sockjs_msg(Pid, Data) ->
+    Msg = {sockjs_msg, Data},
+    gen_server:cast(Pid, Msg).
+
+%% Notify the client process that the SockJS connection has closed
+%% (handled in handle_cast/2 by stopping normally).
+sockjs_closed(Pid) ->
+    gen_server:cast(Pid, sockjs_closed).
+
+%%----------------------------------------------------------------------------
+
+%% gen_server callback: account for the connection with the file handle
+%% cache and set up a fresh STOMP frame parser state.
+init({Processor, Conn}) ->
+    %% Reserve a slot in the file handle cache for this connection.
+    ok = file_handle_cache:obtain(),
+    %% Trap exits so terminate/2 runs and can release the slot.
+    process_flag(trap_exit, true),
+    FreshParseState = rabbit_stomp_frame:initial_state(),
+    State = #state{conn        = Conn,
+                   processor   = Processor,
+                   parse_state = FreshParseState},
+    {ok, State}.
+
+%% Incoming SockJS data: run it through the frame parser, keeping the
+%% leftover parser state for the next chunk.
+handle_cast({sockjs_msg, Data}, State = #state{processor   = Processor,
+                                               parse_state = ParseState}) ->
+    NewParseState = process_received_bytes(Data, Processor, ParseState),
+    {noreply, State#state{parse_state = NewParseState}};
+%% The SockJS connection went away: shut down normally.
+handle_cast(sockjs_closed, State) ->
+    {stop, normal, State};
+%% No other casts are part of the protocol; crash loudly.
+handle_cast(Other, State) ->
+    {stop, {odd_cast, Other}, State}.
+
+%% TODO: a bit rubbish - after the preview release the credit_flow:send/1
+%% invocation in rabbit_stomp_processor:process_frame/2 should be made
+%% optional so we need not swallow credit messages here.
+handle_info({bump_credit, {_, _}}, State) ->
+    %% Credit-flow bookkeeping from the processor; nothing to do.
+    {noreply, State};
+%% Any other message is unexpected; crash loudly.
+handle_info(Other, State) ->
+    {stop, {odd_info, Other}, State}.
+
+%% No synchronous API is exposed; any call is a protocol violation.
+handle_call(Req, _From, State) ->
+    {stop, {odd_request, Req}, State}.
+
+%% Release the file-handle-cache slot, then shut down whichever side is
+%% still alive: 'normal' means SockJS closed first so we flush the STOMP
+%% processor; 'shutdown' means the STOMP side died so we close the SockJS
+%% connection. Any other reason falls through the case and crashes --
+%% NOTE(review): presumably intentional (let it crash); confirm.
+terminate(Reason, #state{conn = Conn, processor = Processor}) ->
+    ok = file_handle_cache:release(),
+    _ = case Reason of
+            normal -> % SockJS initiated exit
+                rabbit_stomp_processor:flush_and_die(Processor);
+            shutdown -> % STOMP died
+                Conn:close(1000, "STOMP died")
+        end,
+    ok.
+
+%% No state migration is needed across code upgrades.
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+
+%%----------------------------------------------------------------------------
+
+%% Feed Bytes through the STOMP frame parser, handing each complete frame
+%% to the processor; returns the parser state left after the final
+%% (possibly partial) frame, to be reused on the next chunk.
+process_received_bytes(Bytes, Processor, ParseState) ->
+    case rabbit_stomp_frame:parse(Bytes, ParseState) of
+        {ok, Frame, Rest} ->
+            rabbit_stomp_processor:process_frame(Processor, Frame),
+            %% Start a fresh parse for whatever follows the frame.
+            ParseState1 = rabbit_stomp_frame:initial_state(),
+            process_received_bytes(Rest, Processor, ParseState1);
+        {more, ParseState1} ->
+            ParseState1
+    end.
+
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_client_sup).
+-behaviour(supervisor2).
+
+-export([start_client/1]).
+-export([init/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbitmq_stomp/include/rabbit_stomp.hrl").
+
+%% --------------------------------------------------------------------------
+
+%% Build a per-connection supervision tree: a supervisor owning a STOMP
+%% processor and a rabbit_ws_client gen_server for the SockJS connection.
+start_client({Conn}) ->
+    {ok, Sup} = supervisor2:start_link(?MODULE, []),
+    {ok, ProcessorPid} = start_proc(Sup, Conn),
+    ClientSpec = client_spec(ProcessorPid, Conn),
+    {ok, ClientPid} = supervisor2:start_child(Sup, ClientSpec),
+    {ok, Sup, ClientPid}.
+
+%% Start a rabbit_stomp_processor under SupPid and initialise it with the
+%% connection's peer/socket metadata and a send function that writes to
+%% the SockJS connection. Returns {ok, ProcessorPid}.
+start_proc(SupPid, Conn) ->
+    StompConfig = #stomp_configuration{implicit_connect = false},
+
+    %% The processor pushes outbound data through this fun; the Sync flag
+    %% is ignored since SockJS sends are fire-and-forget.
+    SendFun = fun (_Sync, Data) ->
+                      Conn:send(Data),
+                      ok
+              end,
+    Info = Conn:info(),
+    {PeerAddr, PeerPort} = proplists:get_value(peername, Info),
+    {SockAddr, SockPort} = proplists:get_value(sockname, Info),
+    %% Human-readable connection name, e.g. "10.0.0.1:1234 -> 10.0.0.2:15674".
+    Name = rabbit_misc:format("~s:~b -> ~s:~b",
+                              [rabbit_misc:ntoa(PeerAddr), PeerPort,
+                               rabbit_misc:ntoa(SockAddr), SockPort]),
+    AdapterInfo = #amqp_adapter_info{protocol = {'Web STOMP', 0},
+                                     host = SockAddr,
+                                     port = SockPort,
+                                     peer_host = PeerAddr,
+                                     peer_port = PeerPort,
+                                     name = list_to_binary(Name),
+                                     additional_info = [{ssl, false}]},
+
+    {ok, Processor} =
+        supervisor2:start_child(
+          SupPid, {rabbit_stomp_processor,
+                   {rabbit_stomp_processor, start_link, [StompConfig]},
+                   intrinsic, ?MAX_WAIT, worker,
+                   [rabbit_stomp_processor]}),
+    %% NOTE(review): the fun (_,_,_,_) -> ok argument appears to be a
+    %% no-op callback and 'none' an absent option -- confirm against
+    %% rabbit_stomp_processor:init_arg/2.
+    rabbit_stomp_processor:init_arg(
+      Processor, [SendFun, AdapterInfo, fun (_, _, _, _) -> ok end, none,
+                  PeerAddr]),
+    {ok, Processor}.
+
+%% Child spec for the rabbit_ws_client gen_server that owns Conn.
+client_spec(Processor, Conn) ->
+    StartMFA = {rabbit_ws_client, start_link, [{Processor, Conn}]},
+    {rabbit_ws_client, StartMFA, intrinsic, ?MAX_WAIT, worker,
+     [rabbit_ws_client]}.
+
+%% one_for_all with zero allowed restarts: if either child dies, the
+%% whole connection tree is torn down.
+init(_Args) ->
+    Strategy = {one_for_all, 0, 1},
+    {ok, {Strategy, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_sockjs).
+
+-export([init/0]).
+
+-include_lib("rabbitmq_stomp/include/rabbit_stomp.hrl").
+
+
+%% --------------------------------------------------------------------------
+
-spec init() -> ok.
%% Start the Cowboy listeners serving the SockJS STOMP endpoint.
%% Plain HTTP is always started; HTTPS is started as well when the
%% `ssl_config' application environment key is non-empty.
init() ->
    %% Fix: the fallback used to be 55674 (the pre-3.0 port) while the
    %% .app file declares {port, 15674}; keep the two consistent.
    Port = get_env(port, 15674),
    SockjsOpts = get_env(sockjs_opts, []) ++ [{logger, fun logger/3}],

    SockjsState = sockjs_handler:init_state(
                    <<"/stomp">>, fun service_stomp/3, {}, SockjsOpts),
    VhostRoutes = [{[<<"stomp">>, '...'], sockjs_cowboy_handler, SockjsState}],
    Routes = [{'_', VhostRoutes}], % any vhost
    cowboy:start_listener(http, 100,
                          cowboy_tcp_transport, [{port, Port}],
                          cowboy_http_protocol, [{dispatch, Routes}]),
    rabbit_log:info("rabbit_web_stomp: listening for HTTP connections on ~s:~w~n",
                    ["0.0.0.0", Port]),
    case get_env(ssl_config, []) of
        [] ->
            ok;
        Conf ->
            rabbit_networking:ensure_ssl(),
            %% TLS listener port must be given inside ssl_config.
            TLSPort = proplists:get_value(port, Conf),
            cowboy:start_listener(https, 100,
                                  cowboy_ssl_transport, Conf,
                                  cowboy_http_protocol, [{dispatch, Routes}]),
            rabbit_log:info("rabbit_web_stomp: listening for HTTPS connections on ~s:~w~n",
                            ["0.0.0.0", TLSPort])
    end,
    ok.
+
%% Look Key up in the rabbitmq_web_stomp application environment,
%% falling back to Default when the key is unset.
get_env(Key, Default) ->
    case application:get_env(rabbitmq_web_stomp, Key) of
        {ok, Value} -> Value;
        undefined   -> Default
    end.
+
+
%% SockJS logger callback.  Deliberately a no-op (hands the request
%% back untouched) so per-request SockJS log lines are suppressed.
logger(_Service, Request, _Type) ->
    Request.
+
+%% --------------------------------------------------------------------------
+
%% SockJS service callback, invoked once per connection event.
%% The per-connection client pid is threaded through as the callback
%% state.

%% init: spawn a client (and its STOMP processor) under rabbit_ws_sup.
service_stomp(Conn, init, _State) ->
    {ok, _Sup, Pid} = rabbit_ws_sup:start_client({Conn}),
    {ok, Pid};

%% recv: forward raw data received from the browser to the client.
service_stomp(_Conn, {recv, Data}, Pid) ->
    rabbit_ws_client:sockjs_msg(Pid, Data),
    {ok, Pid};

%% closed: the SockJS connection went away; tell the client to stop.
service_stomp(_Conn, closed, Pid) ->
    rabbit_ws_client:sockjs_closed(Pid),
    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_sup).
+-behaviour(supervisor2).
+
+-export([start_link/0, init/1, start_client/1]).
+
+-define(SUP_NAME, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
-spec start_link() -> ignore | {'ok', pid()} | {'error', any()}.
%% Start and locally register the top-level web-STOMP connection
%% supervisor.
start_link() ->
    supervisor2:start_link({local, ?SUP_NAME}, ?MODULE, []).
+
%% supervisor2 callback: a simple_one_for_one supervisor whose
%% children are per-connection rabbit_ws_client_sup supervisors,
%% started on demand via start_client/1.  `temporary': connections are
%% never restarted by this level.
init([]) ->
    ChildSpec = {client,
                 {rabbit_ws_client_sup, start_client, []},
                 temporary, infinity, supervisor, [rabbit_ws_client_sup]},
    {ok, {{simple_one_for_one, 0, 1}, [ChildSpec]}}.
+
%% Start a new per-connection child under this supervisor; Params is
%% passed through to rabbit_ws_client_sup:start_client/1.
start_client(Params) ->
    supervisor2:start_child(?SUP_NAME, [Params]).
--- /dev/null
%% Application resource file for the RabbitMQ web-STOMP plugin.
{application, rabbitmq_web_stomp,
 [
  {description, "Rabbit WEB-STOMP - WebSockets to Stomp adapter"},
  %% Substituted with the real version at build time.
  {vsn, "%%VSN%%"},
  {modules, []},
  {registered, []},
  {mod, {rabbit_ws_app, []}},
  %% Listener defaults: plain HTTP on 15674; no HTTPS unless an
  %% ssl_config proplist is supplied.
  {env, [{port, 15674},
         {ssl_config, []}]},
  {applications, [kernel, stdlib, rabbit, rabbitmq_stomp, cowboy, sockjs]}
 ]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_test_all).
+
+-export([all_tests/0]).
+
%% Run the complete web-STOMP integration suite.  Each EUnit suite
%% must return ok; a failure makes the enclosing match crash, which
%% propagates to the caller.
all_tests() ->
    Suites = [rabbit_ws_test_raw_websocket,
              rabbit_ws_test_sockjs_websocket],
    [ok = eunit:test(Suite, [verbose]) || Suite <- Suites],
    ok.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_test_raw_websocket).
+
+-include_lib("eunit/include/eunit.hrl").
+
%% Opening and closing a raw websocket against /stomp/websocket must
%% succeed without exchanging any STOMP frames.
connection_test() ->
    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/websocket", self()),
    {ok, _} = rfc6455_client:open(WS),
    {close, _} = rfc6455_client:close(WS),
    ok.
+
+
%% Marshal a STOMP frame and send it over the raw websocket.
raw_send(WS, Command, Headers) ->
    raw_send(WS, Command, Headers, <<>>).
raw_send(WS, Command, Headers, Body) ->
    Frame = stomp:marshal(Command, Headers, Body),
    rfc6455_client:send(WS, Frame).

%% Receive one websocket frame and parse it as a STOMP frame,
%% returning {Command, Headers, Body}.  Crashes (badmatch) if the
%% connection is closed instead.
raw_recv(WS) ->
    {ok, P} = rfc6455_client:recv(WS),
    stomp:unmarshal(P).
+
+
%% Full publish/subscribe round trip over a raw websocket: connect,
%% subscribe to a fresh random topic, publish a payload containing an
%% embedded NUL byte (exercising content-length framing) and check the
%% same payload comes back on the subscription.
pubsub_test() ->
    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/websocket", self()),
    {ok, _} = rfc6455_client:open(WS),
    ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),

    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),

    %% Random suffix so concurrent runs do not share a topic.
    Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:rand_bytes(8))),

    ok = raw_send(WS, "SUBSCRIBE", [{"destination", Dst},
                                    {"id", "s0"}]),

    ok = raw_send(WS, "SEND", [{"destination", Dst},
                               {"content-length", "3"}], <<"a\x00a">>),

    {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv(WS),
    %% Dst is already bound: this match asserts the message arrived on
    %% the destination we subscribed to.
    Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),

    {close, _} = rfc6455_client:close(WS),
    ok.
+
+
%% After a clean DISCONNECT the server must close the websocket; 1005
%% is the pseudo-status used when the close frame carried no code.
disconnect_test() ->
    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/websocket", self()),
    {ok, _} = rfc6455_client:open(WS),
    ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),

    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),

    ok = raw_send(WS, "DISCONNECT", []),
    {close, {1005, _}} = rfc6455_client:recv(WS),

    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ws_test_sockjs_websocket).
+
+-include_lib("eunit/include/eunit.hrl").
+
%% Open a websocket through the SockJS transport path
%% (/stomp/<server>/<session>/websocket); a fresh session must first
%% emit the SockJS "o" (open) frame.
connection_test() ->
    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/0/0/websocket", self()),
    {ok, _} = rfc6455_client:open(WS),
    {ok, <<"o">>} = rfc6455_client:recv(WS),

    {close, _} = rfc6455_client:close(WS),
    ok.
+
+
%% Wrap a marshalled STOMP frame in a SockJS JSON array message and
%% send it over the websocket.
sjs_send(WS, Command, Headers) ->
    sjs_send(WS, Command, Headers, <<>>).
sjs_send(WS, Command, Headers, Body) ->
    StompFrame = stomp:marshal(Command, Headers, Body),
    SockJSFrame = sockjs_json:encode([StompFrame]),
    rfc6455_client:send(WS, SockJSFrame).

%% Receive one SockJS frame: "a[...]" carries an array with a single
%% marshalled STOMP frame; "c[code, reason]" announces session close.
sjs_recv(WS) ->
    {ok, P} = rfc6455_client:recv(WS),
    case P of
        <<"a", JsonArr/binary>> ->
            {ok, [StompFrame]} = sockjs_json:decode(JsonArr),
            {ok, stomp:unmarshal(StompFrame)};
        <<"c", JsonArr/binary>> ->
            {ok, CloseReason} = sockjs_json:decode(JsonArr),
            {close, CloseReason}
    end.
+
%% Same publish/subscribe round trip as the raw websocket suite, but
%% tunnelled through the SockJS framing (see sjs_send/sjs_recv).
pubsub_test() ->
    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/0/0/websocket", self()),
    {ok, _} = rfc6455_client:open(WS),
    {ok, <<"o">>} = rfc6455_client:recv(WS),

    ok = sjs_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),

    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),

    %% Random suffix so concurrent runs do not share a topic.
    Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:rand_bytes(8))),

    ok = sjs_send(WS, "SUBSCRIBE", [{"destination", Dst},
                                    {"id", "s0"}]),

    ok = sjs_send(WS, "SEND", [{"destination", Dst},
                               {"content-length", "3"}], <<"a\x00a">>),

    {ok, {<<"MESSAGE">>, H, <<"a\x00a">>}} = sjs_recv(WS),
    %% Dst is already bound: asserts delivery on the subscribed topic.
    Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),

    {close, _} = rfc6455_client:close(WS),

    ok.
+
+
%% A clean DISCONNECT must result in a SockJS close frame with status
%% 1000 (normal closure).
disconnect_test() ->
    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/0/0/websocket", self()),
    {ok, _} = rfc6455_client:open(WS),
    {ok, <<"o">>} = rfc6455_client:recv(WS),

    ok = sjs_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),

    ok = sjs_send(WS, "DISCONNECT", []),
    {close, [1000, _]} = sjs_recv(WS),

    ok.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rfc6455_client).
+
+-export([new/2, open/1, recv/1, send/2, close/1, close/2]).
+
+-record(state, {host, port, addr, path, ppid, socket, data, phase}).
+
+%% --------------------------------------------------------------------------
+
%% Create a websocket client process for the given URL.  Events are
%% delivered as messages to PPid, tagged with the returned pid; use
%% open/1, recv/1, send/2 and close/1,2 to drive the connection.
new(WsUrl, PPid) ->
    crypto:start(),
    %% Only plain ws:// URLs are supported (no wss://): anything else
    %% fails this match.
    "ws://" ++ Rest = WsUrl,
    [Addr, Path] = split("/", Rest, 1),
    %% Addr is "host" or "host:port"; default to port 80.
    [Host, MaybePort] = split(":", Addr, 1, empty),
    Port = case MaybePort of
               empty -> 80;
               V -> {I, ""} = string:to_integer(V), I
           end,
    State = #state{host = Host,
                   port = Port,
                   addr = Addr,
                   path = "/" ++ Path,
                   ppid = PPid},
    spawn(fun () ->
                  start_conn(State)
          end).
+
%% Block until the handshake completes: {ok, Opts} once the server's
%% HTTP response has arrived, or {close, Reason} if the connection
%% died first.  No timeout — blocks indefinitely.
open(WS) ->
    receive
        {rfc6455, open, WS, Opts} ->
            {ok, Opts};
        {rfc6455, close, WS, R} ->
            {close, R}
    end.

%% Block until the next data frame: {ok, Payload} or {close, Reason}.
recv(WS) ->
    receive
        {rfc6455, recv, WS, Payload} ->
            {ok, Payload};
        {rfc6455, close, WS, R} ->
            {close, R}
    end.

%% Queue IoData to be sent as a single text frame.  Asynchronous;
%% always returns ok.
send(WS, IoData) ->
    WS ! {send, IoData},
    ok.

%% Close with the default 1000 "normal closure" status.
close(WS) ->
    close(WS, {1000, ""}).

%% Initiate the close handshake and wait for the connection process to
%% confirm with {close, Reason}.
close(WS, WsReason) ->
    WS ! {close, WsReason},
    receive
        {rfc6455, close, WS, R} ->
            {close, R}
    end.
+
+
+%% --------------------------------------------------------------------------
+
%% Connect, send the RFC 6455 client handshake and enter the frame
%% loop.  Runs inside the client process spawned by new/2.
start_conn(State) ->
    {ok, Socket} = gen_tcp:connect(State#state.host, State#state.port,
                                   [binary,
                                    {packet, 0}]),
    %% 16 random bytes, base64-encoded, per RFC 6455 section 4.1.
    %% NOTE(review): crypto:rand_bytes/1 is deprecated/removed in
    %% modern OTP — fine for the OTP release this tree targets.
    Key = base64:encode_to_string(crypto:rand_bytes(16)),
    gen_tcp:send(Socket,
        "GET " ++ State#state.path ++ " HTTP/1.1\r\n" ++
        "Host: " ++ State#state.addr ++ "\r\n" ++
        "Upgrade: websocket\r\n" ++
        "Connection: Upgrade\r\n" ++
        "Sec-WebSocket-Key: " ++ Key ++ "\r\n" ++
        "Origin: null\r\n" ++
        "Sec-WebSocket-Version: 13\r\n\r\n"),

    loop(State#state{socket = Socket,
                     data = <<>>,
                     phase = opening}).
+
%% Consume buffered socket data according to the current phase.
%% While opening, look for the end of the server's HTTP handshake
%% response; once open (or closing), try to parse one websocket frame.
%% Returns the (possibly unchanged) state when more data is needed.
do_recv(State = #state{phase = opening, ppid = PPid, data = Data}) ->
    case split("\r\n\r\n", binary_to_list(Data), 1, empty) of
        [_Http, empty] -> State;  %% handshake response incomplete
        [Http, Data1] ->
            %% TODO: don't ignore http response data, verify key
            PPid ! {rfc6455, open, self(), [{http_response, Http}]},
            State#state{phase = open,
                        data = Data1}
    end;
do_recv(State = #state{phase = Phase, data = Data, socket = Socket, ppid = PPid})
  when Phase =:= open orelse Phase =:= closing ->
    %% Frame layout (RFC 6455 5.2):
    %% FIN:1 RSV:3 OPCODE:4 MASK:1 LEN:7 [16/64-bit ext len] PAYLOAD.
    R = case Data of
            <<F:1, _:3, O:4, 0:1, L:7, Payload:L/binary, Rest/binary>>
              when L < 126 ->
                {F, O, Payload, Rest};

            <<F:1, _:3, O:4, 0:1, 126:7, L2:16, Payload:L2/binary, Rest/binary>> ->
                {F, O, Payload, Rest};

            <<F:1, _:3, O:4, 0:1, 127:7, L2:64, Payload:L2/binary, Rest/binary>> ->
                {F, O, Payload, Rest};

            <<_:1, _:3, _:4, 1:1, _/binary>> ->
                %% According to RFC 6455 5.1 the server must not mask
                %% any frames.
                die(Socket, PPid, {1006, "Protocol error"}, normal);
            _ ->
                moredata
        end,
    case R of
        moredata ->
            State;
        _ -> do_recv2(State, R)
    end.
+
%% Act on one parsed frame {Fin, Opcode, Payload, Rest}.  Complete
%% text frames (opcode 1) are forwarded to the owner; close frames
%% (opcode 8) are echoed back unless we initiated the close, then the
%% process terminates.  Fragmented frames (FIN=0) and other opcodes
%% are treated as unknown and kill the connection.
do_recv2(State = #state{phase = Phase, socket = Socket, ppid = PPid}, R) ->
    case R of
        {1, 1, Payload, Rest} ->
            PPid ! {rfc6455, recv, self(), Payload},
            State#state{data = Rest};
        {1, 8, Payload, _Rest} ->
            %% Close payload: optional 16-bit status code + reason.
            WsReason = case Payload of
                           <<WC:16, WR/binary>> -> {WC, WR};
                           <<>> -> {1005, "No status received"}
                       end,
            case Phase of
                open -> %% peer-initiated close: echo the close frame
                    do_close(State, WsReason),
                    gen_tcp:close(Socket);
                closing -> %% reply to the close we sent earlier
                    ok
            end,
            die(Socket, PPid, WsReason, normal);
        {_, _, _, _Rest} ->
            %% (fixed: was `Rest2', unused — compiler warning)
            io:format("Unknown frame type~n"),
            die(Socket, PPid, {1006, "Unknown frame type"}, normal)
    end.
+
%% Build one masked client->server frame with the given FIN bit,
%% opcode and payload, choosing the 7/16/64-bit length encoding
%% required by the payload size (RFC 6455 section 5.2).
encode_frame(F, O, Payload) ->
    %% Fresh 4-byte mask per frame, as required for client frames.
    Mask = crypto:rand_bytes(4),
    MaskedPayload = apply_mask(Mask, iolist_to_binary(Payload)),

    L = byte_size(MaskedPayload),
    IoData = case L of
                 _ when L < 126 ->
                     [<<F:1, 0:3, O:4, 1:1, L:7>>, Mask, MaskedPayload];
                 _ when L < 65536 ->
                     [<<F:1, 0:3, O:4, 1:1, 126:7, L:16>>, Mask, MaskedPayload];
                 _ ->
                     [<<F:1, 0:3, O:4, 1:1, 127:7, L:64>>, Mask, MaskedPayload]
             end,
    iolist_to_binary(IoData).
+
%% Transmit Payload as a single complete text frame (FIN=1, opcode 1).
do_send(State = #state{socket = Socket}, Payload) ->
    gen_tcp:send(Socket, encode_frame(1, 1, Payload)),
    State.

%% Send a close frame (opcode 8) carrying Code and Reason and move to
%% the closing phase; actual teardown happens when the peer's close
%% frame arrives (see do_recv2/2).
do_close(State = #state{socket = Socket}, {Code, Reason}) ->
    Payload = iolist_to_binary([<<Code:16>>, Reason]),
    gen_tcp:send(Socket, encode_frame(1, 8, Payload)),
    State#state{phase = closing}.
+
+
%% Main receive loop of the connection process, interleaving socket
%% data with send/close requests from the owner.  Note: send/close
%% requests are only accepted while the phase is `open'; in other
%% phases they remain queued in the mailbox.
loop(State = #state{socket = Socket, ppid = PPid, data = Data,
                    phase = Phase}) ->
    receive
        {tcp, Socket, Bin} ->
            %% Append to the parse buffer and consume what we can.
            State1 = State#state{data = iolist_to_binary([Data, Bin])},
            loop(do_recv(State1));
        {send, Payload} when Phase == open ->
            loop(do_send(State, Payload));
        {tcp_closed, Socket} ->
            die(Socket, PPid, {1006, "Connection closed abnormally"}, normal);
        {close, WsReason} when Phase == open ->
            loop(do_close(State, WsReason))
    end.
+
+
%% Shut the socket down, notify the owner with the websocket close
%% reason, and terminate the connection process with exit Reason.
die(Socket, PPid, WsReason, Reason) ->
    gen_tcp:shutdown(Socket, read_write),
    PPid ! {rfc6455, close, self(), WsReason},
    exit(Reason).
+
+
+%% --------------------------------------------------------------------------
+
%% Split Str on SubStr at most Limit times, always producing exactly
%% Limit+1 elements: when SubStr is not found, Default fills the
%% remainder slot ("" unless given).
split(SubStr, Str, Limit) ->
    split(SubStr, Str, Limit, "").

split(SubStr, Str, Limit, Default) ->
    lists:reverse(split(SubStr, Str, Limit, [], Default)).

split(_SubStr, Str, 0, Acc, _Default) ->
    [Str | Acc];
split(SubStr, Str, Limit, Acc, Default) ->
    {Piece, Remainder} =
        case string:str(Str, SubStr) of
            0   -> {Str, Default};
            Pos -> {string:substr(Str, 1, Pos - 1),
                    string:substr(Str, Pos + length(SubStr))}
        end,
    split(SubStr, Remainder, Limit - 1, [Piece | Acc], Default).
+
+
%% XOR Data with the 4-byte Mask, repeating the mask as needed
%% (RFC 6455 section 5.3).  The operation is its own inverse.  An
%% integer mask is first packed into 4 bytes; the all-zero mask is a
%% no-op shortcut.
apply_mask(Mask, Data) when is_number(Mask) ->
    apply_mask(<<Mask:32>>, Data);
apply_mask(<<0:32>>, Data) ->
    Data;
apply_mask(Mask, Data) ->
    iolist_to_binary(lists:reverse(apply_mask2(Mask, Data, []))).

%% Word-at-a-time XOR; a trailing run of 1-3 bytes is XORed with the
%% leading bytes of the mask.  Chunks accumulate in reverse order.
apply_mask2(FullMask = <<MaskWord:32>>, <<Word:32, Rest/binary>>, Acc) ->
    Masked = Word bxor MaskWord,
    apply_mask2(FullMask, Rest, [<<Masked:32>> | Acc]);
apply_mask2(<<MaskPrefix:24, _:8>>, <<Tail:24>>, Acc) ->
    Masked = Tail bxor MaskPrefix,
    [<<Masked:24>> | Acc];
apply_mask2(<<MaskPrefix:16, _:16>>, <<Tail:16>>, Acc) ->
    Masked = Tail bxor MaskPrefix,
    [<<Masked:16>> | Acc];
apply_mask2(<<MaskPrefix:8, _:24>>, <<Tail:8>>, Acc) ->
    Masked = Tail bxor MaskPrefix,
    [<<Masked:8>> | Acc];
apply_mask2(_, <<>>, Acc) ->
    Acc.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(stomp).
+
+-export([marshal/2, marshal/3, unmarshal/1]).
+
+-export([list_to_hex/1]).
+
%% Serialise Command/Headers[/Body] into a single STOMP frame binary.
%% Note: the join below emits the separator *before* every segment, so
%% frames start with a leading "\n"; STOMP servers treat leading EOLs
%% between frames as heart-beats and skip them.
marshal(Command, Headers) ->
    marshal(Command, Headers, <<>>).
marshal(Command, Headers, Body) ->
    HeaderLines = [[Key, ":", Value] || {Key, Value} <- Headers],
    Segments = [Command | HeaderLines] ++ [["\n", Body]],
    iolist_to_binary([iolist_join(Segments, "\n"), "\x00"]).

%% Parse a frame binary into {Command, Headers, Body}, insisting on
%% the trailing NUL terminator.
unmarshal(Frame) ->
    [Head, Body] = binary:split(Frame, <<"\n\n">>),
    [Command | HeaderLines] = binary:split(Head, <<"\n">>, [global]),
    Headers = [list_to_tuple(binary:split(Line, <<":">>)) || Line <- HeaderLines],
    %% Split exactly at the last byte: it must be the NUL terminator,
    %% otherwise this match fails.
    BodySize = byte_size(Body),
    [Chopped, <<>>] = binary:split(Body, [<<0>>], [{scope, {BodySize - 1, 1}}]),
    {Command, Headers, Chopped}.

%% ----------

%% Join list elements with Separator.  Due to the accumulator order
%% the separator lands *before* each element (see marshal/3).
iolist_join(List, Separator) ->
    lists:reverse(iolist_join2(List, Separator, [])).

iolist_join2([], _Separator, Acc) ->
    Acc;
iolist_join2([Elem | Rest], Separator, Acc) ->
    iolist_join2(Rest, Separator, [Elem, Separator | Acc]).
+
+
%% Render a list of bytes as a lowercase hex string, two characters
%% per byte (zero padded).
list_to_hex(Bytes) ->
    lists:append([int_to_hex(Byte) || Byte <- Bytes]).

%% One byte -> two hex characters.
int_to_hex(N) when N < 256 ->
    [hex_digit(N div 16), hex_digit(N rem 16)].

hex_digit(N) when N < 10 ->
    $0 + N;
hex_digit(N) when N >= 10, N < 16 ->
    $a + (N - 10).
--- /dev/null
# This is a TEMPORARY umbrella makefile, that will likely not survive
# the repo split.

# Release version; overridden on the command line for real releases.
VERSION=0.0.0
VDIR=v$(VERSION)
# Mercurial tag derived from the version, e.g. rabbitmq_v0_0_0.
TAG=rabbitmq_$(subst .,_,$(VDIR))
BRANCH=default

# GnuPG identity used to sign release artifacts.
SIGNING_KEY=056E8E56
SIGNING_USER_EMAIL=info@rabbitmq.com
SIGNING_USER_ID=RabbitMQ Release Signing Key <info@rabbitmq.com>

# Misc options to pass to hg commands
HG_OPTS=

# Misc options to pass to ssh commands
SSH_OPTS=

# Output and scratch directories.
PACKAGES_DIR=packages
TMP_DIR=tmp

# Per-product destination directories under $(PACKAGES_DIR).
SERVER_PACKAGES_DIR=$(PACKAGES_DIR)/rabbitmq-server/$(VDIR)
MANPAGES_DIR=$(SERVER_PACKAGES_DIR)/man
JAVA_CLIENT_PACKAGES_DIR=$(PACKAGES_DIR)/rabbitmq-java-client/$(VDIR)
DOTNET_CLIENT_PACKAGES_DIR=$(PACKAGES_DIR)/rabbitmq-dotnet-client/$(VDIR)
ERLANG_CLIENT_PACKAGES_DIR=$(PACKAGES_DIR)/rabbitmq-erlang-client/$(VDIR)
PLUGINS_DIR=$(PACKAGES_DIR)/plugins/$(VDIR)
PLUGINS_SRC_DIR=$(TMP_DIR)/plugins-src/$(VDIR)
ABSOLUTE_PLUGINS_DIR=$(CURDIR)/$(PLUGINS_DIR)
ABSOLUTE_PLUGINS_SRC_DIR=$(CURDIR)/$(PLUGINS_SRC_DIR)

# Official builds must use exactly this Erlang/OTP release so the
# artifacts stay compatible with the oldest supported VM.
REQUIRED_EMULATOR_VERSION=R13B03
ACTUAL_EMULATOR_VERSION=$(shell erl -noshell -eval 'io:format("~s",[erlang:system_info(otp_release)]),init:stop().')
SKIP_EMULATOR_VERSION_CHECK=

REPOS:=rabbitmq-codegen rabbitmq-server rabbitmq-java-client rabbitmq-dotnet-client rabbitmq-test

# Base URL of the Mercurial repositories, derived from this checkout's
# default path when available.
HGREPOBASE:=$(shell dirname `hg paths default 2>/dev/null` 2>/dev/null)

ifeq ($(HGREPOBASE),)
HGREPOBASE=ssh://hg@hg.rabbitmq.com
endif
+
.PHONY: all
all:
	@echo Please choose a target from the Makefile.

# "dist" requires either an explicit opt-out of signing
# (UNOFFICIAL_RELEASE) or the location of the keyring (GNUPG_PATH).
.PHONY: dist
ifeq "$(UNOFFICIAL_RELEASE)$(GNUPG_PATH)" ""
dist:
	@echo "You must specify one of UNOFFICIAL_RELEASE (to true, if you don't want to sign packages) or GNUPG_PATH (to the location of the RabbitMQ keyring) when making dist."
	@false
else
dist: rabbitmq-server-artifacts
dist: rabbitmq-java-artifacts
ifeq ($(SKIP_DOTNET_CLIENT),)
dist: rabbitmq-dotnet-artifacts
endif
dist: rabbitmq-erlang-client-artifacts
dist: rabbitmq-plugins-srcdist
dist: rabbitmq-plugins-artifacts
endif
+
.PHONY: clean
clean: clean-packaging
	$(MAKE) -C . clean

# Fix: the `clean-packaging:' target line was missing — the recipe was
# attached directly to `.PHONY:', leaving `make clean' with no rule
# for its clean-packaging prerequisite.
.PHONY: clean-packaging
clean-packaging:
	rm -rf $(PACKAGES_DIR) $(TMP_DIR) .*.checkout
+
# Sanity-check the build host: correct Erlang/OTP release (unless
# skipped) and presence of the Debian packaging toolchain.
.PHONY: prepare
prepare:
ifeq "$(SKIP_EMULATOR_VERSION_CHECK)" ""
	@[ "$(REQUIRED_EMULATOR_VERSION)" = "$(ACTUAL_EMULATOR_VERSION)" ] || \
		(echo "You are trying to compile with the wrong Erlang/OTP release."; \
		echo "Please use emulator version $(REQUIRED_EMULATOR_VERSION)."; \
		echo "Or skip the version check by setting the variable SKIP_EMULATOR_VERSION_CHECK."; \
		[ -n "$(UNOFFICIAL_RELEASE)" ] )
endif
	@echo Checking the presence of the tools necessary to build a release on a Debian based OS.
	[ -f "/etc/debian_version" ] && dpkg -L cdbs elinks fakeroot findutils gnupg gzip perl python python-simplejson rpm rsync wget reprepro tar tofrodos zip python-pexpect openssl xmlto xsltproc git-core nsis > /dev/null || echo Not a Debian system
	@echo All required tools are installed, great!
+
# Remove all build products from the server repo and from every
# packaging flavour underneath it.
.PHONY: rabbitmq-server-clean
rabbitmq-server-clean:
	$(MAKE) -C rabbitmq-server distclean
	$(MAKE) -C rabbitmq-server/packaging/generic-unix clean
	$(MAKE) -C rabbitmq-server/packaging/windows clean
	$(MAKE) -C rabbitmq-server/packaging/windows-exe clean
	$(MAKE) -C rabbitmq-server/packaging/debs/Debian clean
	$(MAKE) -C rabbitmq-server/packaging/debs/apt-repository clean
	$(MAKE) -C rabbitmq-server/packaging/RPMS/Fedora clean
	$(MAKE) -C rabbitmq-server/packaging/macports clean

# Umbrella target: build every server package flavour plus the
# website manpages.  (macports is deliberately excluded; see below.)
.PHONY: rabbitmq-server-artifacts
rabbitmq-server-artifacts: rabbitmq-server-srcdist
rabbitmq-server-artifacts: rabbitmq-server-website-manpages
rabbitmq-server-artifacts: rabbitmq-server-generic-unix-packaging
rabbitmq-server-artifacts: rabbitmq-server-windows-packaging
rabbitmq-server-artifacts: rabbitmq-server-windows-exe-packaging
rabbitmq-server-artifacts: rabbitmq-server-debian-packaging
rabbitmq-server-artifacts: rabbitmq-server-rpm-packaging
+
# Source tarball/zip; plugins sources are bundled in, hence the
# dependency on rabbitmq-plugins-srcdist.
.PHONY: rabbitmq-server-srcdist
rabbitmq-server-srcdist: prepare rabbitmq-plugins-srcdist
	$(MAKE) -C rabbitmq-server srcdist VERSION=$(VERSION) PLUGINS_SRC_DIR=$(ABSOLUTE_PLUGINS_SRC_DIR)
	mkdir -p $(SERVER_PACKAGES_DIR)
	cp rabbitmq-server/dist/rabbitmq-server-*.tar.gz rabbitmq-server/dist/rabbitmq-server-*.zip $(SERVER_PACKAGES_DIR)

# Manpage XML sources for publication on the website.
.PHONY: rabbitmq-server-website-manpages
rabbitmq-server-website-manpages: rabbitmq-server-srcdist
	$(MAKE) -C rabbitmq-server docs_all VERSION=$(VERSION)
	mkdir -p $(MANPAGES_DIR)
	cp rabbitmq-server/docs/*.man.xml $(MANPAGES_DIR)

.PHONY: rabbitmq-server-generic-unix-packaging
rabbitmq-server-generic-unix-packaging: rabbitmq-server-srcdist
	$(MAKE) -C rabbitmq-server/packaging/generic-unix dist VERSION=$(VERSION)
	cp rabbitmq-server/packaging/generic-unix/rabbitmq-server-generic-unix-*.tar.gz $(SERVER_PACKAGES_DIR)

.PHONY: rabbitmq-server-mac-standalone-packaging
rabbitmq-server-mac-standalone-packaging: rabbitmq-server-srcdist
	$(MAKE) -C rabbitmq-server/packaging/standalone dist VERSION=$(VERSION) OS=mac
	cp rabbitmq-server/packaging/standalone/rabbitmq-server-mac-standalone-*.tar.gz $(SERVER_PACKAGES_DIR)

.PHONY: rabbitmq-server-windows-packaging
rabbitmq-server-windows-packaging: rabbitmq-server-srcdist
	$(MAKE) -C rabbitmq-server/packaging/windows dist VERSION=$(VERSION)
	cp rabbitmq-server/packaging/windows/rabbitmq-server-windows-*.zip $(SERVER_PACKAGES_DIR)

# NSIS installer, built on top of the Windows zip.
.PHONY: rabbitmq-server-windows-exe-packaging
rabbitmq-server-windows-exe-packaging: rabbitmq-server-windows-packaging
	$(MAKE) -C rabbitmq-server/packaging/windows-exe dist VERSION=$(VERSION)
	cp rabbitmq-server/packaging/windows-exe/rabbitmq-server-*.exe $(SERVER_PACKAGES_DIR)

# Debian packages plus a signed apt repository layout.
.PHONY: rabbitmq-server-debian-packaging
rabbitmq-server-debian-packaging: rabbitmq-server-srcdist
	$(MAKE) -C rabbitmq-server/packaging/debs/Debian package \
		UNOFFICIAL_RELEASE=$(UNOFFICIAL_RELEASE) \
		GNUPG_PATH=$(GNUPG_PATH) \
		VERSION=$(VERSION) \
		SIGNING_KEY_ID=$(SIGNING_KEY)
	cp rabbitmq-server/packaging/debs/Debian/rabbitmq-server*$(VERSION)*.deb $(SERVER_PACKAGES_DIR)
	cp rabbitmq-server/packaging/debs/Debian/rabbitmq-server*$(VERSION)*.diff.gz $(SERVER_PACKAGES_DIR)
	cp rabbitmq-server/packaging/debs/Debian/rabbitmq-server*$(VERSION)*.orig.tar.gz $(SERVER_PACKAGES_DIR)
	cp rabbitmq-server/packaging/debs/Debian/rabbitmq-server*$(VERSION)*.dsc $(SERVER_PACKAGES_DIR)
	cp rabbitmq-server/packaging/debs/Debian/rabbitmq-server*$(VERSION)*.changes $(SERVER_PACKAGES_DIR)
	$(MAKE) -C rabbitmq-server/packaging/debs/apt-repository all \
		UNOFFICIAL_RELEASE=$(UNOFFICIAL_RELEASE) \
		GNUPG_PATH=$(GNUPG_PATH) \
		SIGNING_USER_EMAIL=$(SIGNING_USER_EMAIL)
	cp -r rabbitmq-server/packaging/debs/apt-repository/debian $(PACKAGES_DIR)

# Build RPMs for both RPM dialects from the same spec directory.
.PHONY: rabbitmq-server-rpm-packaging
rabbitmq-server-rpm-packaging: rabbitmq-server-srcdist
	for distro in fedora suse ; do \
		$(MAKE) -C rabbitmq-server/packaging/RPMS/Fedora rpms VERSION=$(VERSION) RPM_OS=$$distro && \
		find rabbitmq-server/packaging/RPMS/Fedora -name "*.rpm" -exec cp '{}' $(SERVER_PACKAGES_DIR) ';' ; \
	done

# This target ssh's into the OSX host in order to finalize the
# macports repo, so it is not invoked by rabbitmq-server-artifacts.
# Note that the "clean" below is significant: Because the REAL_WEB_URL
# environment variable might change, we need to rebuild the macports
# artifacts at each deploy.
.PHONY: rabbitmq-server-macports-packaging
rabbitmq-server-macports-packaging:
	$(MAKE) -C rabbitmq-server/packaging/macports clean macports VERSION=$(VERSION)
	cp -r rabbitmq-server/packaging/macports/macports $(PACKAGES_DIR)
+
+
+.PHONY: rabbitmq-java-artifacts
+rabbitmq-java-artifacts: prepare
+ $(MAKE) -C rabbitmq-java-client dist VERSION=$(VERSION)
+ mkdir -p $(JAVA_CLIENT_PACKAGES_DIR)
+ cp rabbitmq-java-client/build/*.tar.gz $(JAVA_CLIENT_PACKAGES_DIR)
+ cp rabbitmq-java-client/build/*.zip $(JAVA_CLIENT_PACKAGES_DIR)
+ cd $(JAVA_CLIENT_PACKAGES_DIR); unzip -q rabbitmq-java-client-javadoc-$(VERSION).zip
+
+
+.PHONY: rabbitmq-dotnet-artifacts
+rabbitmq-dotnet-artifacts: prepare
+ $(MAKE) -C rabbitmq-dotnet-client dist RABBIT_VSN=$(VERSION)
+ mkdir -p $(DOTNET_CLIENT_PACKAGES_DIR)
+ cp -a rabbitmq-dotnet-client/release/* $(DOTNET_CLIENT_PACKAGES_DIR)
+
+
+.PHONY: rabbitmq-erlang-client-artifacts
+rabbitmq-erlang-client-artifacts: prepare
+ $(MAKE) -C rabbitmq-erlang-client distribution VERSION=$(VERSION)
+ mkdir -p $(ERLANG_CLIENT_PACKAGES_DIR)
+ cp rabbitmq-erlang-client/dist/*.ez $(ERLANG_CLIENT_PACKAGES_DIR)
+ cp rabbitmq-erlang-client/dist/*.tar.gz $(ERLANG_CLIENT_PACKAGES_DIR)
+ cp -r rabbitmq-erlang-client/doc/ $(ERLANG_CLIENT_PACKAGES_DIR)
+
+
+.PHONY: rabbitmq-plugins-artifacts
+rabbitmq-plugins-artifacts:
+ $(MAKE) -C . plugins-dist PLUGINS_DIST_DIR=$(ABSOLUTE_PLUGINS_DIR) VERSION=$(VERSION)
+
+.PHONY: rabbitmq-plugins-srcdist
+rabbitmq-plugins-srcdist:
+ $(MAKE) -C . plugins-srcdist PLUGINS_SRC_DIST_DIR=$(ABSOLUTE_PLUGINS_SRC_DIR) VERSION=$(VERSION)
+
# Sign every produced artifact with the release key: RPMs via
# rpm --addsign, everything else with a detached .asc signature.
# A no-op for unofficial (unsigned) releases.
.PHONY: sign-artifacts
ifneq "$(UNOFFICIAL_RELEASE)" ""
sign-artifacts:
	true
else
sign-artifacts:
	python util/nopassphrase.py \
		rpm --addsign \
		--define '_signature gpg' \
		--define '_gpg_path $(GNUPG_PATH)/.gnupg/' \
		--define '_gpg_name $(SIGNING_USER_ID)' \
		$(PACKAGES_DIR)/*/*/*.rpm
	for p in \
		$(SERVER_PACKAGES_DIR)/* \
		$(JAVA_CLIENT_PACKAGES_DIR)/* \
		$(ERLANG_CLIENT_PACKAGES_DIR)/* \
	; do \
		[ -f $$p ] && \
			HOME=$(GNUPG_PATH) gpg --default-key $(SIGNING_KEY) -abs -o $$p.asc $$p ; \
	done
endif
+
###########################################################################

# Where artifacts get rsynced to; overridden for real deployments.
DEPLOY_HOST=localhost
DEPLOY_PATH=/tmp/rabbitmq/extras/releases
DEPLOY_DEST=$(DEPLOY_HOST):$(DEPLOY_PATH)

RSYNC_CMD=rsync -rpl --delete-after

DEPLOYMENT_SUBDIRECTORIES=rabbitmq-server rabbitmq-java-client rabbitmq-dotnet-client rabbitmq-erlang-client

# Shared recipe body: create each product directory remotely, rsync
# the packages across, publish the debian repo, then refresh the
# "current" symlinks (server release and unpacked javadoc).
DEPLOY_RSYNC_CMDS=\
	set -x -e; \
	for subdirectory in $(DEPLOYMENT_SUBDIRECTORIES) ; do \
		ssh $(SSH_OPTS) $(DEPLOY_HOST) "(cd $(DEPLOY_PATH); if [ ! -d $$subdirectory ] ; then mkdir -p $$subdirectory; chmod g+w $$subdirectory; fi)"; \
		$(RSYNC_CMD) $(PACKAGES_DIR)/$$subdirectory/* \
			$(DEPLOY_DEST)/$$subdirectory ; \
	done; \
	for subdirectory in debian ; do \
		$(RSYNC_CMD) $(PACKAGES_DIR)/$$subdirectory \
			$(DEPLOY_DEST); \
	done; \
	unpacked_javadoc_dir=`(cd packages/rabbitmq-java-client; ls -td */rabbitmq-java-client-javadoc-*/ | head -1)`; \
	ssh $(SSH_OPTS) $(DEPLOY_HOST) "(cd $(DEPLOY_PATH)/rabbitmq-java-client; rm -f current-javadoc; ln -s $$unpacked_javadoc_dir current-javadoc)"; \
	ssh $(SSH_OPTS) $(DEPLOY_HOST) "(cd $(DEPLOY_PATH)/rabbitmq-server; rm -f current; ln -s $(VDIR) current)"; \
+
# Upload packages to DEPLOY_HOST.  Both flavours verify signatures
# first; deploy-live additionally pushes the Java client to Maven.
# Fix: these are actions, not files — declare them .PHONY so a
# same-named file in the working copy cannot shadow them.
.PHONY: deploy deploy-live fixup-permissions-for-deploy verify-signatures deploy-maven

deploy: verify-signatures fixup-permissions-for-deploy
	$(DEPLOY_RSYNC_CMDS)

deploy-live: verify-signatures deploy-maven fixup-permissions-for-deploy
	$(DEPLOY_RSYNC_CMDS)

# Group-writable files and setgid directories so other release
# managers can overwrite a deploy.
fixup-permissions-for-deploy:
	chmod -R g+w $(PACKAGES_DIR)
	chmod g+s `find $(PACKAGES_DIR) -type d`

# Verify every detached .asc signature against its artifact; fail if
# any one of them does not check out.
verify-signatures:
	for file in `find $(PACKAGES_DIR) -type f -name "*.asc"`; do \
		echo "Checking $$file" ; \
		if ! HOME=$(GNUPG_PATH) gpg --verify $$file `echo $$file | sed -e 's/\.asc$$//'`; then \
			bad_signature=1 ; \
		fi ; \
	done ; \
	[ -z "$$bad_signature" ]

deploy-maven: verify-signatures
	$(MAKE) -C rabbitmq-java-client stage-and-promote-maven-bundle SIGNING_KEY=$(SIGNING_KEY) VERSION=$(VERSION) GNUPG_PATH=$(GNUPG_PATH)
--- /dev/null
+diff --git a/src/mochijson2_fork.erl b/src/mochijson2_fork.erl
+index 355f068..a088d9d 100644
+--- a/src/mochijson2_fork.erl
++++ b/src/mochijson2_fork.erl
+@@ -91,17 +91,17 @@
+ -define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+--type(decoder_option() :: any()).
+--type(handler_option() :: any()).
+-
+--type(json_string() :: atom | binary()).
+--type(json_number() :: integer() | float()).
+--type(json_array() :: [json_term()]).
+--type(json_object() :: {struct, [{json_string(), json_term()}]}).
+--type(json_eep18_object() :: {[{json_string(), json_term()}]}).
+--type(json_iolist() :: {json, iolist()}).
+--type(json_term() :: json_string() | json_number() | json_array() |
+- json_object() | json_eep18_object() | json_iolist()).
++%% -type(decoder_option() :: any()).
++%% -type(handler_option() :: any()).
++
++%% -type(json_string() :: atom | binary()).
++%% -type(json_number() :: integer() | float()).
++%% -type(json_array() :: [json_term()]).
++%% -type(json_object() :: {struct, [{json_string(), json_term()}]}).
++%% -type(json_eep18_object() :: {[{json_string(), json_term()}]}).
++%% -type(json_iolist() :: {json, iolist()}).
++%% -type(json_term() :: json_string() | json_number() | json_array() |
++%% json_object() | json_eep18_object() | json_iolist()).
+
+ -record(encoder, {handler=null,
+ utf8=false}).
+@@ -112,27 +112,27 @@
+ column=1,
+ state=null}).
+
+--type(utf8_option() :: boolean()).
+--type(encoder_option() :: handler_option() | utf8_option()).
+--spec encoder([encoder_option()]) -> function().
++%% -type(utf8_option() :: boolean()).
++%% -type(encoder_option() :: handler_option() | utf8_option()).
++%% -spec encoder([encoder_option()]) -> function().
+ %% @doc Create an encoder/1 with the given options.
+ %% Emit unicode as utf8 (default - false)
+ encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+--spec encode(json_term()) -> iolist().
++%% -spec encode(json_term()) -> iolist().
+ %% @doc Encode the given as JSON to an iolist.
+ encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+--spec decoder([decoder_option()]) -> function().
++%% -spec decoder([decoder_option()]) -> function().
+ %% @doc Create a decoder/1 with the given options.
+ decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+--spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term().
++%% -spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term().
+ %% @doc Decode the given iolist to Erlang terms using the given object format
+ %% for decoding, where proplist returns JSON objects as [{binary(), json_term()}]
+ %% proplists, eep18 returns JSON objects as {[binary(), json_term()]}, and struct
+@@ -140,7 +140,7 @@ decoder(Options) ->
+ decode(S, Options) ->
+ json_decode(S, parse_decoder_options(Options, #decoder{})).
+
+--spec decode(iolist()) -> json_term().
++%% -spec decode(iolist()) -> json_term().
+ %% @doc Decode the given iolist to Erlang terms.
+ decode(S) ->
+ json_decode(S, #decoder{}).
+diff --git a/src/sockjs.erl b/src/sockjs.erl
+index 68163ca..98b1173 100644
+--- a/src/sockjs.erl
++++ b/src/sockjs.erl
+@@ -2,23 +2,23 @@
+
+ -export([send/2, close/1, close/3, info/1]).
+
+--type(conn() :: {sockjs_session, any()}).
++%% -type(conn() :: {sockjs_session, any()}).
+
+ %% Send data over a connection.
+--spec send(iodata(), conn()) -> ok.
++%% -spec send(iodata(), conn()) -> ok.
+ send(Data, Conn = {sockjs_session, _}) ->
+ sockjs_session:send(Data, Conn).
+
+ %% Initiate a close of a connection.
+--spec close(conn()) -> ok.
++%% -spec close(conn()) -> ok.
+ close(Conn) ->
+ close(1000, "Normal closure", Conn).
+
+--spec close(non_neg_integer(), string(), conn()) -> ok.
++%% -spec close(non_neg_integer(), string(), conn()) -> ok.
+ close(Code, Reason, Conn = {sockjs_session, _}) ->
+ sockjs_session:close(Code, Reason, Conn).
+
+--spec info(conn()) -> [{atom(), any()}].
++%% -spec info(conn()) -> [{atom(), any()}].
+ info(Conn = {sockjs_session, _}) ->
+ sockjs_session:info(Conn).
+
+diff --git a/src/sockjs_action.erl b/src/sockjs_action.erl
+index 3f13beb..4310963 100644
+--- a/src/sockjs_action.erl
++++ b/src/sockjs_action.erl
+@@ -45,17 +45,17 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec welcome_screen(req(), headers(), service()) -> req().
++%% -spec welcome_screen(req(), headers(), service()) -> req().
+ welcome_screen(Req, Headers, _Service) ->
+ H = [{"Content-Type", "text/plain; charset=UTF-8"}],
+ sockjs_http:reply(200, H ++ Headers,
+ "Welcome to SockJS!\n", Req).
+
+--spec options(req(), headers(), service()) -> req().
++%% -spec options(req(), headers(), service()) -> req().
+ options(Req, Headers, _Service) ->
+ sockjs_http:reply(204, Headers, "", Req).
+
+--spec iframe(req(), headers(), service()) -> req().
++%% -spec iframe(req(), headers(), service()) -> req().
+ iframe(Req, Headers, #service{sockjs_url = SockjsUrl}) ->
+ IFrame = io_lib:format(?IFRAME, [SockjsUrl]),
+ MD5 = "\"" ++ binary_to_list(base64:encode(erlang:md5(IFrame))) ++ "\"",
+@@ -68,7 +68,7 @@ iframe(Req, Headers, #service{sockjs_url = SockjsUrl}) ->
+ end.
+
+
+--spec info_test(req(), headers(), service()) -> req().
++%% -spec info_test(req(), headers(), service()) -> req().
+ info_test(Req, Headers, #service{websocket = Websocket,
+ cookie_needed = CookieNeeded}) ->
+ I = [{websocket, Websocket},
+@@ -81,12 +81,12 @@ info_test(Req, Headers, #service{websocket = Websocket,
+
+ %% --------------------------------------------------------------------------
+
+--spec xhr_polling(req(), headers(), service(), session()) -> req().
++%% -spec xhr_polling(req(), headers(), service(), session()) -> req().
+ xhr_polling(Req, Headers, Service, Session) ->
+ Req1 = chunk_start(Req, Headers),
+ reply_loop(Req1, Session, 1, fun fmt_xhr/1, Service).
+
+--spec xhr_streaming(req(), headers(), service(), session()) -> req().
++%% -spec xhr_streaming(req(), headers(), service(), session()) -> req().
+ xhr_streaming(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ Session) ->
+ Req1 = chunk_start(Req, Headers),
+@@ -96,7 +96,7 @@ xhr_streaming(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ fun fmt_xhr/1),
+ reply_loop(Req2, Session, ResponseLimit, fun fmt_xhr/1, Service).
+
+--spec eventsource(req(), headers(), service(), session()) -> req().
++%% -spec eventsource(req(), headers(), service(), session()) -> req().
+ eventsource(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ SessionId) ->
+ Req1 = chunk_start(Req, Headers, "text/event-stream; charset=UTF-8"),
+@@ -104,7 +104,7 @@ eventsource(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ reply_loop(Req2, SessionId, ResponseLimit, fun fmt_eventsource/1, Service).
+
+
+--spec htmlfile(req(), headers(), service(), session()) -> req().
++%% -spec htmlfile(req(), headers(), service(), session()) -> req().
+ htmlfile(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ SessionId) ->
+ S = fun (Req1, CB) ->
+@@ -119,7 +119,7 @@ htmlfile(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ end,
+ verify_callback(Req, S).
+
+--spec jsonp(req(), headers(), service(), session()) -> req().
++%% -spec jsonp(req(), headers(), service(), session()) -> req().
+ jsonp(Req, Headers, Service, SessionId) ->
+ S = fun (Req1, CB) ->
+ Req2 = chunk_start(Req1, Headers),
+@@ -139,7 +139,7 @@ verify_callback(Req, Success) ->
+
+ %% --------------------------------------------------------------------------
+
+--spec xhr_send(req(), headers(), service(), session()) -> req().
++%% -spec xhr_send(req(), headers(), service(), session()) -> req().
+ xhr_send(Req, Headers, _Service, Session) ->
+ {Body, Req1} = sockjs_http:body(Req),
+ case handle_recv(Req1, Body, Session) of
+@@ -150,7 +150,7 @@ xhr_send(Req, Headers, _Service, Session) ->
+ sockjs_http:reply(204, H ++ Headers, "", Req1)
+ end.
+
+--spec jsonp_send(req(), headers(), service(), session()) -> req().
++%% -spec jsonp_send(req(), headers(), service(), session()) -> req().
+ jsonp_send(Req, Headers, _Service, Session) ->
+ {Body, Req1} = sockjs_http:body_qs(Req),
+ case handle_recv(Req1, Body, Session) of
+@@ -236,21 +236,21 @@ chunk_end(Req) -> sockjs_http:chunk_end(Req).
+ chunk_end(Req, Body, Fmt) -> Req1 = chunk(Req, Body, Fmt),
+ chunk_end(Req1).
+
+--spec fmt_xhr(iodata()) -> iodata().
++%% -spec fmt_xhr(iodata()) -> iodata().
+ fmt_xhr(Body) -> [Body, "\n"].
+
+--spec fmt_eventsource(iodata()) -> iodata().
++%% -spec fmt_eventsource(iodata()) -> iodata().
+ fmt_eventsource(Body) ->
+ Escaped = sockjs_util:url_escape(binary_to_list(iolist_to_binary(Body)),
+ "%\r\n\0"), %% $% must be first!
+ [<<"data: ">>, Escaped, <<"\r\n\r\n">>].
+
+--spec fmt_htmlfile(iodata()) -> iodata().
++%% -spec fmt_htmlfile(iodata()) -> iodata().
+ fmt_htmlfile(Body) ->
+ Double = sockjs_json:encode(iolist_to_binary(Body)),
+ [<<"<script>\np(">>, Double, <<");\n</script>\r\n">>].
+
+--spec fmt_jsonp(iodata(), iodata()) -> iodata().
++%% -spec fmt_jsonp(iodata(), iodata()) -> iodata().
+ fmt_jsonp(Body, Callback) ->
+ %% Yes, JSONed twice, there isn't a a better way, we must pass
+ %% a string back, and the script, will be evaled() by the
+@@ -259,7 +259,7 @@ fmt_jsonp(Body, Callback) ->
+
+ %% --------------------------------------------------------------------------
+
+--spec websocket(req(), headers(), service()) -> req().
++%% -spec websocket(req(), headers(), service()) -> req().
+ websocket(Req, Headers, Service) ->
+ {_Any, Req1, {R1, R2}} = sockjs_handler:is_valid_ws(Service, Req),
+ case {R1, R2} of
+@@ -274,6 +274,6 @@ websocket(Req, Headers, Service) ->
+ "This WebSocket request can't be handled.", Req1)
+ end.
+
+--spec rawwebsocket(req(), headers(), service()) -> req().
++%% -spec rawwebsocket(req(), headers(), service()) -> req().
+ rawwebsocket(Req, Headers, Service) ->
+ websocket(Req, Headers, Service).
+diff --git a/src/sockjs_app.erl b/src/sockjs_app.erl
+index 1b8e77c..54aceb6 100644
+--- a/src/sockjs_app.erl
++++ b/src/sockjs_app.erl
+@@ -4,11 +4,11 @@
+
+ -export([start/2, stop/1]).
+
+--spec start(_, _) -> {ok, pid()}.
++%% -spec start(_, _) -> {ok, pid()}.
+ start(_StartType, _StartArgs) ->
+ sockjs_session:init(),
+ sockjs_session_sup:start_link().
+
+--spec stop(_) -> ok.
++%% -spec stop(_) -> ok.
+ stop(_State) ->
+ ok.
+diff --git a/src/sockjs_filters.erl b/src/sockjs_filters.erl
+index 15aa8e3..fba43cc 100644
+--- a/src/sockjs_filters.erl
++++ b/src/sockjs_filters.erl
+@@ -9,7 +9,7 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec cache_for(req(), headers()) -> {headers(), req()}.
++%% -spec cache_for(req(), headers()) -> {headers(), req()}.
+ cache_for(Req, Headers) ->
+ Expires = calendar:gregorian_seconds_to_datetime(
+ calendar:datetime_to_gregorian_seconds(
+@@ -18,7 +18,7 @@ cache_for(Req, Headers) ->
+ {"Expires", httpd_util:rfc1123_date(Expires)}],
+ {H ++ Headers, Req}.
+
+--spec h_sid(req(), headers()) -> {headers(), req()}.
++%% -spec h_sid(req(), headers()) -> {headers(), req()}.
+ h_sid(Req, Headers) ->
+ %% Some load balancers do sticky sessions, but only if there is
+ %% a JSESSIONID cookie. If this cookie isn't yet set, we shall
+@@ -31,12 +31,12 @@ h_sid(Req, Headers) ->
+ end,
+ {H ++ Headers, Req2}.
+
+--spec h_no_cache(req(), headers()) -> {headers(), req()}.
++%% -spec h_no_cache(req(), headers()) -> {headers(), req()}.
+ h_no_cache(Req, Headers) ->
+ H = [{"Cache-Control", "no-store, no-cache, must-revalidate, max-age=0"}],
+ {H ++ Headers, Req}.
+
+--spec xhr_cors(req(), headers()) -> {headers(), req()}.
++%% -spec xhr_cors(req(), headers()) -> {headers(), req()}.
+ xhr_cors(Req, Headers) ->
+ {OriginH, Req1} = sockjs_http:header('Origin', Req),
+ Origin = case OriginH of
+@@ -54,15 +54,15 @@ xhr_cors(Req, Headers) ->
+ {"Access-Control-Allow-Credentials", "true"}],
+ {H ++ AllowHeaders ++ Headers, Req2}.
+
+--spec xhr_options_post(req(), headers()) -> {headers(), req()}.
++%% -spec xhr_options_post(req(), headers()) -> {headers(), req()}.
+ xhr_options_post(Req, Headers) ->
+ xhr_options(Req, Headers, ["OPTIONS", "POST"]).
+
+--spec xhr_options_get(req(), headers()) -> {headers(), req()}.
++%% -spec xhr_options_get(req(), headers()) -> {headers(), req()}.
+ xhr_options_get(Req, Headers) ->
+ xhr_options(Req, Headers, ["OPTIONS", "GET"]).
+
+--spec xhr_options(req(), headers(), list(string())) -> {headers(), req()}.
++%% -spec xhr_options(req(), headers(), list(string())) -> {headers(), req()}.
+ xhr_options(Req, Headers, Methods) ->
+ H = [{"Access-Control-Allow-Methods", string:join(Methods, ", ")},
+ {"Access-Control-Max-Age", integer_to_list(?YEAR)}],
+diff --git a/src/sockjs_handler.erl b/src/sockjs_handler.erl
+index ebb3982..b706453 100644
+--- a/src/sockjs_handler.erl
++++ b/src/sockjs_handler.erl
+@@ -11,7 +11,7 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec init_state(binary(), callback(), any(), list(tuple())) -> service().
++%% -spec init_state(binary(), callback(), any(), list(tuple())) -> service().
+ init_state(Prefix, Callback, State, Options) ->
+ #service{prefix = binary_to_list(Prefix),
+ callback = Callback,
+@@ -34,7 +34,7 @@ init_state(Prefix, Callback, State, Options) ->
+
+ %% --------------------------------------------------------------------------
+
+--spec is_valid_ws(service(), req()) -> {boolean(), req(), tuple()}.
++%% -spec is_valid_ws(service(), req()) -> {boolean(), req(), tuple()}.
+ is_valid_ws(Service, Req) ->
+ case get_action(Service, Req) of
+ {{match, WS}, Req1} when WS =:= websocket orelse
+@@ -44,7 +44,7 @@ is_valid_ws(Service, Req) ->
+ {false, Req1, {}}
+ end.
+
+--spec valid_ws_request(service(), req()) -> {boolean(), req(), tuple()}.
++%% -spec valid_ws_request(service(), req()) -> {boolean(), req(), tuple()}.
+ valid_ws_request(_Service, Req) ->
+ {R1, Req1} = valid_ws_upgrade(Req),
+ {R2, Req2} = valid_ws_connection(Req1),
+@@ -73,7 +73,7 @@ valid_ws_connection(Req) ->
+ {lists:member("upgrade", Vs), Req2}
+ end.
+
+--spec get_action(service(), req()) -> {nomatch | {match, atom()}, req()}.
++%% -spec get_action(service(), req()) -> {nomatch | {match, atom()}, req()}.
+ get_action(Service, Req) ->
+ {Dispatch, Req1} = dispatch_req(Service, Req),
+ case Dispatch of
+@@ -93,20 +93,20 @@ strip_prefix(LongPath, Prefix) ->
+ end.
+
+
+--type(dispatch_result() ::
+- nomatch |
+- {match, {send | recv | none , atom(),
+- server(), session(), list(atom())}} |
+- {bad_method, list(atom())}).
++%% -type(dispatch_result() ::
++%% nomatch |
++%% {match, {send | recv | none , atom(),
++%% server(), session(), list(atom())}} |
++%% {bad_method, list(atom())}).
+
+--spec dispatch_req(service(), req()) -> {dispatch_result(), req()}.
++%% -spec dispatch_req(service(), req()) -> {dispatch_result(), req()}.
+ dispatch_req(#service{prefix = Prefix}, Req) ->
+ {Method, Req1} = sockjs_http:method(Req),
+ {LongPath, Req2} = sockjs_http:path(Req1),
+ {ok, PathRemainder} = strip_prefix(LongPath, Prefix),
+ {dispatch(Method, PathRemainder), Req2}.
+
+--spec dispatch(atom(), nonempty_string()) -> dispatch_result().
++%% -spec dispatch(atom(), nonempty_string()) -> dispatch_result().
+ dispatch(Method, Path) ->
+ lists:foldl(
+ fun ({Match, MethodFilters}, nomatch) ->
+@@ -163,7 +163,7 @@ re(Path, S) ->
+
+ %% --------------------------------------------------------------------------
+
+--spec handle_req(service(), req()) -> req().
++%% -spec handle_req(service(), req()) -> req().
+ handle_req(Service = #service{logger = Logger}, Req) ->
+ Req0 = Logger(Service, Req, http),
+
+@@ -202,14 +202,14 @@ handle({match, {Type, Action, _Server, Session, Filters}}, Service, Req) ->
+
+ %% --------------------------------------------------------------------------
+
+--spec default_logger(service(), req(), websocket | http) -> req().
++%% -spec default_logger(service(), req(), websocket | http) -> req().
+ default_logger(_Service, Req, _Type) ->
+ {LongPath, Req1} = sockjs_http:path(Req),
+ {Method, Req2} = sockjs_http:method(Req1),
+ io:format("~s ~s~n", [Method, LongPath]),
+ Req2.
+
+--spec extract_info(req()) -> {info(), req()}.
++%% -spec extract_info(req()) -> {info(), req()}.
+ extract_info(Req) ->
+ {Peer, Req0} = sockjs_http:peername(Req),
+ {Sock, Req1} = sockjs_http:sockname(Req0),
+diff --git a/src/sockjs_http.erl b/src/sockjs_http.erl
+index 9754119..5cdf431 100644
+--- a/src/sockjs_http.erl
++++ b/src/sockjs_http.erl
+@@ -8,22 +8,22 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec path(req()) -> {string(), req()}.
++%% -spec path(req()) -> {string(), req()}.
+ path({cowboy, Req}) -> {Path, Req1} = cowboy_http_req:raw_path(Req),
+ {binary_to_list(Path), {cowboy, Req1}}.
+
+--spec method(req()) -> {atom(), req()}.
++%% -spec method(req()) -> {atom(), req()}.
+ method({cowboy, Req}) -> {Method, Req1} = cowboy_http_req:method(Req),
+ case is_binary(Method) of
+ true -> {binary_to_atom(Method, utf8), {cowboy, Req1}};
+ false -> {Method, {cowboy, Req1}}
+ end.
+
+--spec body(req()) -> {binary(), req()}.
++%% -spec body(req()) -> {binary(), req()}.
+ body({cowboy, Req}) -> {ok, Body, Req1} = cowboy_http_req:body(Req),
+ {Body, {cowboy, Req1}}.
+
+--spec body_qs(req()) -> {binary(), req()}.
++%% -spec body_qs(req()) -> {binary(), req()}.
+ body_qs(Req) ->
+ {H, Req1} = header('Content-Type', Req),
+ case H of
+@@ -42,7 +42,7 @@ body_qs2({cowboy, Req}) ->
+ {V, {cowboy, Req1}}
+ end.
+
+--spec header(atom(), req()) -> {nonempty_string() | undefined, req()}.
++%% -spec header(atom(), req()) -> {nonempty_string() | undefined, req()}.
+ header(K, {cowboy, Req})->
+ {H, Req2} = cowboy_http_req:header(K, Req),
+ {V, Req3} = case H of
+@@ -55,7 +55,7 @@ header(K, {cowboy, Req})->
+ _ -> {binary_to_list(V), {cowboy, Req3}}
+ end.
+
+--spec jsessionid(req()) -> {nonempty_string() | undefined, req()}.
++%% -spec jsessionid(req()) -> {nonempty_string() | undefined, req()}.
+ jsessionid({cowboy, Req}) ->
+ {C, Req2} = cowboy_http_req:cookie(<<"JSESSIONID">>, Req),
+ case C of
+@@ -65,7 +65,7 @@ jsessionid({cowboy, Req}) ->
+ {undefined, {cowboy, Req2}}
+ end.
+
+--spec callback(req()) -> {nonempty_string() | undefined, req()}.
++%% -spec callback(req()) -> {nonempty_string() | undefined, req()}.
+ callback({cowboy, Req}) ->
+ {CB, Req1} = cowboy_http_req:qs_val(<<"c">>, Req),
+ case CB of
+@@ -73,12 +73,12 @@ callback({cowboy, Req}) ->
+ _ -> {binary_to_list(CB), {cowboy, Req1}}
+ end.
+
+--spec peername(req()) -> {{inet:ip_address(), non_neg_integer()}, req()}.
++%% -spec peername(req()) -> {{inet:ip_address(), non_neg_integer()}, req()}.
+ peername({cowboy, Req}) ->
+ {P, Req1} = cowboy_http_req:peer(Req),
+ {P, {cowboy, Req1}}.
+
+--spec sockname(req()) -> {{inet:ip_address(), non_neg_integer()}, req()}.
++%% -spec sockname(req()) -> {{inet:ip_address(), non_neg_integer()}, req()}.
+ sockname({cowboy, Req} = R) ->
+ {ok, _T, S} = cowboy_http_req:transport(Req),
+ %% Cowboy has peername(), but doesn't have sockname() equivalent.
+@@ -92,18 +92,18 @@ sockname({cowboy, Req} = R) ->
+
+ %% --------------------------------------------------------------------------
+
+--spec reply(non_neg_integer(), headers(), iodata(), req()) -> req().
++%% -spec reply(non_neg_integer(), headers(), iodata(), req()) -> req().
+ reply(Code, Headers, Body, {cowboy, Req}) ->
+ Body1 = iolist_to_binary(Body),
+ {ok, Req1} = cowboy_http_req:reply(Code, enbinary(Headers), Body1, Req),
+ {cowboy, Req1}.
+
+--spec chunk_start(non_neg_integer(), headers(), req()) -> req().
++%% -spec chunk_start(non_neg_integer(), headers(), req()) -> req().
+ chunk_start(Code, Headers, {cowboy, Req}) ->
+ {ok, Req1} = cowboy_http_req:chunked_reply(Code, enbinary(Headers), Req),
+ {cowboy, Req1}.
+
+--spec chunk(iodata(), req()) -> {ok | error, req()}.
++%% -spec chunk(iodata(), req()) -> {ok | error, req()}.
+ chunk(Chunk, {cowboy, Req} = R) ->
+ case cowboy_http_req:chunk(Chunk, Req) of
+ ok -> {ok, R};
+@@ -112,25 +112,25 @@ chunk(Chunk, {cowboy, Req} = R) ->
+ %% should catch tco socket closure before.
+ end.
+
+--spec chunk_end(req()) -> req().
++%% -spec chunk_end(req()) -> req().
+ chunk_end({cowboy, _Req} = R) -> R.
+
+ enbinary(L) -> [{list_to_binary(K), list_to_binary(V)} || {K, V} <- L].
+
+
+--spec hook_tcp_close(req()) -> req().
++%% -spec hook_tcp_close(req()) -> req().
+ hook_tcp_close(R = {cowboy, Req}) ->
+ {ok, T, S} = cowboy_http_req:transport(Req),
+ T:setopts(S,[{active,once}]),
+ R.
+
+--spec unhook_tcp_close(req()) -> req().
++%% -spec unhook_tcp_close(req()) -> req().
+ unhook_tcp_close(R = {cowboy, Req}) ->
+ {ok, T, S} = cowboy_http_req:transport(Req),
+ T:setopts(S,[{active,false}]),
+ R.
+
+--spec abruptly_kill(req()) -> req().
++%% -spec abruptly_kill(req()) -> req().
+ abruptly_kill(R = {cowboy, Req}) ->
+ {ok, T, S} = cowboy_http_req:transport(Req),
+ T:close(S),
+diff --git a/src/sockjs_internal.hrl b/src/sockjs_internal.hrl
+index 4f696d8..629b2fe 100644
+--- a/src/sockjs_internal.hrl
++++ b/src/sockjs_internal.hrl
+@@ -1,32 +1,32 @@
+
+--type(req() :: {cowboy, any()}).
++%% -type(req() :: {cowboy, any()}).
+
+--type(user_session() :: nonempty_string()).
+--type(emittable() :: init|closed|{recv, binary()}).
+--type(callback() :: fun((user_session(), emittable(), any()) -> ok)).
+--type(logger() :: fun((any(), req(), websocket|http) -> req())).
++%% -type(user_session() :: nonempty_string()).
++%% -type(emittable() :: init|closed|{recv, binary()}).
++%% -type(callback() :: fun((user_session(), emittable(), any()) -> ok)).
++%% -type(logger() :: fun((any(), req(), websocket|http) -> req())).
+
+--record(service, {prefix :: nonempty_string(),
+- callback :: callback(),
+- state :: any(),
+- sockjs_url :: nonempty_string(),
+- cookie_needed :: boolean(),
+- websocket :: boolean(),
+- disconnect_delay :: non_neg_integer(),
+- heartbeat_delay :: non_neg_integer(),
+- response_limit :: non_neg_integer(),
+- logger :: logger()
++-record(service, {prefix , %% nonempty_string(),
++ callback , %% callback()
++ state , %% any()
++ sockjs_url , %% nonempty_string()
++ cookie_needed , %% boolean()
++ websocket , %% boolean()
++ disconnect_delay , %% non_neg_integer()
++ heartbeat_delay , %% non_neg_integer()
++ response_limit , %% non_neg_integer()
++ logger %% logger()
+ }).
+
+--type(service() :: #service{}).
++%% -type(service() :: #service{}).
+
+--type(headers() :: list({nonempty_string(), nonempty_string()})).
+--type(server() :: nonempty_string()).
+--type(session() :: nonempty_string()).
++%% -type(headers() :: list({nonempty_string(), nonempty_string()})).
++%% -type(server() :: nonempty_string()).
++%% -type(session() :: nonempty_string()).
+
+--type(frame() :: {open, nil} |
+- {close, {non_neg_integer(), string()}} |
+- {data, list(iodata())} |
+- {heartbeat, nil} ).
++%% -type(frame() :: {open, nil} |
++%% {close, {non_neg_integer(), string()}} |
++%% {data, list(iodata())} |
++%% {heartbeat, nil} ).
+
+--type(info() :: [{atom(), any()}]).
++%% -type(info() :: [{atom(), any()}]).
+diff --git a/src/sockjs_json.erl b/src/sockjs_json.erl
+index e61f4b9..d3dae20 100644
+--- a/src/sockjs_json.erl
++++ b/src/sockjs_json.erl
+@@ -4,11 +4,11 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec encode(any()) -> iodata().
++%% -spec encode(any()) -> iodata().
+ encode(Thing) ->
+ mochijson2_fork:encode(Thing).
+
+--spec decode(iodata()) -> {ok, any()} | {error, any()}.
++%% -spec decode(iodata()) -> {ok, any()} | {error, any()}.
+ decode(Encoded) ->
+ try mochijson2_fork:decode(Encoded) of
+ V -> {ok, V}
+diff --git a/src/sockjs_session.erl b/src/sockjs_session.erl
+index 66c5df0..7e4ae00 100644
+--- a/src/sockjs_session.erl
++++ b/src/sockjs_session.erl
+@@ -11,39 +11,39 @@
+ handle_cast/2]).
+
+ -include("sockjs_internal.hrl").
+--type(handle() :: {?MODULE, {pid(), info()}}).
+-
+--record(session, {id :: session(),
+- outbound_queue = queue:new() :: queue(),
+- response_pid :: pid(),
+- disconnect_tref :: reference(),
+- disconnect_delay = 5000 :: non_neg_integer(),
+- heartbeat_tref :: reference() | triggered,
+- heartbeat_delay = 25000 :: non_neg_integer(),
+- ready_state = connecting :: connecting | open | closed,
+- close_msg :: {non_neg_integer(), string()},
++%% -type(handle() :: {?MODULE, {pid(), info()}}).
++
++-record(session, {id , %% session(),
++ outbound_queue = queue:new() , %% queue()
++ response_pid , %% pid()
++ disconnect_tref , %% reference()
++ disconnect_delay = 5000 , %% non_neg_integer()
++ heartbeat_tref , %% reference() | triggered
++ heartbeat_delay = 25000 , %% non_neg_integer()
++ ready_state = connecting , %% connecting | open | closed
++ close_msg , %% {non_neg_integer(), string()}
+ callback,
+ state,
+- handle :: handle()
++ handle %% handle()
+ }).
+ -define(ETS, sockjs_table).
+
+
+--type(session_or_undefined() :: session() | undefined).
+--type(session_or_pid() :: session() | pid()).
++%% -type(session_or_undefined() :: session() | undefined).
++%% -type(session_or_pid() :: session() | pid()).
+
+ %% --------------------------------------------------------------------------
+
+--spec init() -> ok.
++%% -spec init() -> ok.
+ init() ->
+ _ = ets:new(?ETS, [public, named_table]),
+ ok.
+
+--spec start_link(session_or_undefined(), service(), info()) -> {ok, pid()}.
++%% -spec start_link(session_or_undefined(), service(), info()) -> {ok, pid()}.
+ start_link(SessionId, Service, Info) ->
+ gen_server:start_link(?MODULE, {SessionId, Service, Info}, []).
+
+--spec maybe_create(session_or_undefined(), service(), info()) -> pid().
++%% -spec maybe_create(session_or_undefined(), service(), info()) -> pid().
+ maybe_create(SessionId, Service, Info) ->
+ case ets:lookup(?ETS, SessionId) of
+ [] -> {ok, SPid} = sockjs_session_sup:start_child(
+@@ -53,7 +53,7 @@ maybe_create(SessionId, Service, Info) ->
+ end.
+
+
+--spec received(list(iodata()), session_or_pid()) -> ok.
++%% -spec received(list(iodata()), session_or_pid()) -> ok.
+ received(Messages, SessionPid) when is_pid(SessionPid) ->
+ case gen_server:call(SessionPid, {received, Messages}, infinity) of
+ ok -> ok;
+@@ -63,27 +63,27 @@ received(Messages, SessionPid) when is_pid(SessionPid) ->
+ received(Messages, SessionId) ->
+ received(Messages, spid(SessionId)).
+
+--spec send(iodata(), handle()) -> ok.
++%% -spec send(iodata(), handle()) -> ok.
+ send(Data, {?MODULE, {SPid, _}}) ->
+ gen_server:cast(SPid, {send, Data}),
+ ok.
+
+--spec close(non_neg_integer(), string(), handle()) -> ok.
++%% -spec close(non_neg_integer(), string(), handle()) -> ok.
+ close(Code, Reason, {?MODULE, {SPid, _}}) ->
+ gen_server:cast(SPid, {close, Code, Reason}),
+ ok.
+
+--spec info(handle()) -> info().
++%% -spec info(handle()) -> info().
+ info({?MODULE, {_SPid, Info}}) ->
+ Info.
+
+--spec reply(session_or_pid()) ->
+- wait | session_in_use | {ok | close, frame()}.
++%% -spec reply(session_or_pid()) ->
++%% wait | session_in_use | {ok | close, frame()}.
+ reply(Session) ->
+ reply(Session, true).
+
+--spec reply(session_or_pid(), boolean()) ->
+- wait | session_in_use | {ok | close, frame()}.
++%% -spec reply(session_or_pid(), boolean()) ->
++%% wait | session_in_use | {ok | close, frame()}.
+ reply(SessionPid, Multiple) when is_pid(SessionPid) ->
+ gen_server:call(SessionPid, {reply, self(), Multiple}, infinity);
+ reply(SessionId, Multiple) ->
+@@ -154,7 +154,7 @@ unmark_waiting(RPid, State = #session{response_pid = Pid,
+ when Pid =/= undefined andalso Pid =/= RPid ->
+ State.
+
+--spec emit(emittable(), #session{}) -> #session{}.
++%% -spec emit(emittable(), #session{}) -> #session{}.
+ emit(What, State = #session{callback = Callback,
+ state = UserState,
+ handle = Handle}) ->
+@@ -175,7 +175,7 @@ emit(What, State = #session{callback = Callback,
+
+ %% --------------------------------------------------------------------------
+
+--spec init({session_or_undefined(), service(), info()}) -> {ok, #session{}}.
++%% -spec init({session_or_undefined(), service(), info()}) -> {ok, #session{}}.
+ init({SessionId, #service{callback = Callback,
+ state = UserState,
+ disconnect_delay = DisconnectDelay,
+diff --git a/src/sockjs_session_sup.erl b/src/sockjs_session_sup.erl
+index 4197ce3..71c7ff4 100644
+--- a/src/sockjs_session_sup.erl
++++ b/src/sockjs_session_sup.erl
+@@ -7,7 +7,7 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec start_link() -> ignore | {'ok', pid()} | {'error', any()}.
++%% -spec start_link() -> ignore | {'ok', pid()} | {'error', any()}.
+ start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+diff --git a/src/sockjs_util.erl b/src/sockjs_util.erl
+index be3f972..9b9969d 100644
+--- a/src/sockjs_util.erl
++++ b/src/sockjs_util.erl
+@@ -8,7 +8,7 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec rand32() -> non_neg_integer().
++%% -spec rand32() -> non_neg_integer().
+ rand32() ->
+ case get(random_seeded) of
+ undefined ->
+@@ -21,7 +21,7 @@ rand32() ->
+ random:uniform(erlang:trunc(math:pow(2,32)))-1.
+
+
+--spec encode_frame(frame()) -> iodata().
++%% -spec encode_frame(frame()) -> iodata().
+ encode_frame({open, nil}) ->
+ <<"o">>;
+ encode_frame({close, {Code, Reason}}) ->
+@@ -34,7 +34,7 @@ encode_frame({heartbeat, nil}) ->
+ <<"h">>.
+
+
+--spec url_escape(string(), string()) -> iolist().
++%% -spec url_escape(string(), string()) -> iolist().
+ url_escape(Str, Chars) ->
+ [case lists:member(Char, Chars) of
+ true -> hex(Char);
+diff --git a/src/sockjs_ws_handler.erl b/src/sockjs_ws_handler.erl
+index bcf463d..c011c89 100644
+--- a/src/sockjs_ws_handler.erl
++++ b/src/sockjs_ws_handler.erl
+@@ -6,7 +6,7 @@
+
+ %% --------------------------------------------------------------------------
+
+--spec received(websocket|rawwebsocket, pid(), binary()) -> ok | shutdown.
++%% -spec received(websocket|rawwebsocket, pid(), binary()) -> ok | shutdown.
+ %% Ignore empty
+ received(_RawWebsocket, _SessionPid, <<>>) ->
+ ok;
+@@ -30,7 +30,7 @@ session_received(Messages, SessionPid) ->
+ no_session -> shutdown
+ end.
+
+--spec reply(websocket|rawwebsocket, pid()) -> {close|open, binary()} | wait.
++%% -spec reply(websocket|rawwebsocket, pid()) -> {close|open, binary()} | wait.
+ reply(websocket, SessionPid) ->
+ case sockjs_session:reply(SessionPid) of
+ {W, Frame} when W =:= ok orelse W =:= close->
+@@ -52,7 +52,7 @@ reply(rawwebsocket, SessionPid) ->
+ wait
+ end.
+
+--spec close(websocket|rawwebsocket, pid()) -> ok.
++%% -spec close(websocket|rawwebsocket, pid()) -> ok.
+ close(_RawWebsocket, SessionPid) ->
+ SessionPid ! force_shutdown,
+ ok.
--- /dev/null
+diff --git a/src/sockjs_http.erl b/src/sockjs_http.erl
+index 5cdf431..837b64f 100644
+--- a/src/sockjs_http.erl
++++ b/src/sockjs_http.erl
+@@ -15,7 +15,7 @@ path({cowboy, Req}) -> {Path, Req1} = cowboy_http_req:raw_path(Req),
+ %% -spec method(req()) -> {atom(), req()}.
+ method({cowboy, Req}) -> {Method, Req1} = cowboy_http_req:method(Req),
+ case is_binary(Method) of
+- true -> {binary_to_atom(Method, utf8), {cowboy, Req1}};
++ true -> {list_to_atom(binary_to_list(Method)), {cowboy, Req1}};
+ false -> {Method, {cowboy, Req1}}
+ end.
+
+@@ -47,7 +47,7 @@ header(K, {cowboy, Req})->
+ {H, Req2} = cowboy_http_req:header(K, Req),
+ {V, Req3} = case H of
+ undefined ->
+- cowboy_http_req:header(atom_to_binary(K, utf8), Req2);
++ cowboy_http_req:header(list_to_binary(atom_to_list(K)), Req2);
+ _ -> {H, Req2}
+ end,
+ case V of
--- /dev/null
+diff --git a/src/pmod_pt.erl b/src/pmod_pt.erl
+new file mode 100644
+index 0000000..db21974
+--- /dev/null
++++ b/src/pmod_pt.erl
+@@ -0,0 +1,461 @@
++%%
++%% %CopyrightBegin%
++%%
++%% Copyright Ericsson AB 2013. All Rights Reserved.
++%%
++%% The contents of this file are subject to the Erlang Public License,
++%% Version 1.1, (the "License"); you may not use this file except in
++%% compliance with the License. You should have received a copy of the
++%% Erlang Public License along with this software. If not, it can be
++%% retrieved online at http://www.erlang.org/.
++%%
++%% Software distributed under the License is distributed on an "AS IS"
++%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
++%% the License for the specific language governing rights and limitations
++%% under the License.
++%%
++%% %CopyrightEnd%
++%%
++
++-module(pmod_pt).
++-export([parse_transform/2,
++ format_error/1]).
++
++%% Expand function definition forms of parameterized module.
++%% The code is based on the code in sys_expand_pmod which used to be
++%% included in the compiler, but details are different because
++%% sys_pre_expand has not been run. In particular:
++%%
++%% * Record definitions are still present and must be handled.
++%%
++%% * (Syntactic) local calls may actually be calls to an imported
++%% function or a BIF. It is a local call if and only if there
++%% is a definition for the function in the module.
++%%
++%% * When we introduce the module parameters and 'THIS' in each
++%% function, we must artificially use it to avoid a warning for
++%% unused variables.
++%%
++%% * On the other hand, we don't have to worry about module_info/0,1
++%% because they have not been added yet.
++
++-record(pmod, {parameters,
++ defined
++ }).
++
++parse_transform(Forms0, _Options) ->
++ put(?MODULE, []),
++ Forms = transform(Forms0),
++ case erase(?MODULE) of
++ [] ->
++ Forms;
++ [_|_]=Errors ->
++ File = get_file(Forms),
++ {error,[{File,Errors}],[]}
++ end.
++
++format_error(extends_self) ->
++ "cannot extend from self";
++format_error(define_instance) ->
++ "defining instance function not allowed in parameterized module".
++
++add_error(Line, Error) ->
++ put(?MODULE, get(?MODULE) ++ [{Line,?MODULE,Error}]).
++
++get_file([{attribute,_,file,{File,_}}|_]) -> File;
++get_file([_|T]) -> get_file(T).
++
++transform(Forms0) ->
++ Def = collect_defined(Forms0),
++ {Base,ModAs,Forms1} = attribs(Forms0, [], undefined, []),
++ {Mod,Ps0} = case ModAs of
++ {M0,P0} -> {M0,P0};
++ M0 -> {M0,undefined}
++ end,
++ Forms2 = case Ps0 of
++ undefined ->
++ Forms1;
++ _ ->
++ pmod_expand(Forms1, Mod, Base, Ps0, Def)
++ end,
++
++ %% Add new functions.
++ NewFs0 = maybe_extend(Base, Mod, Ps0),
++ NewExps = collect_defined(NewFs0),
++ Forms3 = add_attributes(Forms2, [{attribute,0,export,NewExps}]),
++ add_new_funcs(Forms3, NewFs0).
++
++pmod_expand(Forms0, Mod, Base, Ps0, Def) ->
++ Ps = if is_atom(Base) ->
++ ['BASE' | Ps0];
++ true ->
++ Ps0
++ end,
++ St0 = #pmod{parameters=Ps,defined=gb_sets:from_list(Def)},
++ {Forms1,_} = forms(Forms0, St0),
++ Forms2 = update_exps(Forms1),
++ Forms3 = update_forms(Forms2),
++ NewFs0 = add_instance(Mod, Ps, []),
++ NewFs = ensure_new(Base, Ps0, NewFs0),
++ Forms = add_new_funcs(Forms3, NewFs),
++ NewExps = collect_defined(NewFs),
++ add_attributes(Forms, [{attribute,0,export,NewExps}]).
++
++add_attributes([{attribute,_,module,_}=F|Fs], Attrs) ->
++ [F|Attrs++Fs];
++add_attributes([F|Fs], Attrs) ->
++ [F|add_attributes(Fs, Attrs)].
++
++add_new_funcs([{eof,_}|_]=Fs, NewFs) ->
++ NewFs ++ Fs;
++add_new_funcs([F|Fs], Es) ->
++ [F|add_new_funcs(Fs, Es)].
++
++maybe_extend([], _, _) ->
++ %% No 'extends' attribute.
++ [];
++maybe_extend(Base, _Mod, undefined) ->
++ %% There is an 'extends' attribute; the module is not parameterized.
++ Name = '$handle_undefined_function',
++ Args = [{var,0,'Func'},{var,0,'Args'}],
++ Body = [make_apply({atom,0,Base}, {var,0,'Func'}, {var,0,'Args'})],
++ F = {function,0,Name,2,[{clause,0,Args,[],Body}]},
++ [F];
++maybe_extend(Base, Mod, Ps) ->
++ %% There is an 'extends' attribute; the module is parameterized.
++ Name = '$handle_undefined_function',
++ Args = [{var,0,'Func'},{var,0,'Args'}],
++ DontCares = [{var,0,'_'} || _ <- Ps],
++ TuplePs = {tuple,0,[{atom,0,Mod},{var,0,'BaseVars'}|DontCares]},
++ G = [{call,0,{atom,0,is_atom},
++ [{call,0,{atom,0,element},
++ [{integer,0,1},{var,0,'BaseVars'}]}]}],
++ FixedArgs = make_lists_rev([{var,0,'Rs'},
++ {cons,0,{var,0,'BaseVars'},{nil,0}}]),
++ Body = [{'case',0,make_lists_rev([{var,0,'Args'}]),
++ [{clause,0,[{cons,0,TuplePs,{var,0,'Rs'}}],[G],
++ [make_apply({atom,0,Base}, {var,0,'Func'}, FixedArgs)]},
++ {clause,0,[{var,0,'_'}],[],
++ [make_apply({atom,0,Base}, {var,0,'Func'}, {var,0,'Args'})]}
++ ]}],
++ F = {function,0,Name,2,[{clause,0,Args,[],Body}]},
++ [F].
++
++make_apply(M, F, A) ->
++ {call,0,{remote,0,{atom,0,erlang},{atom,0,apply}},[M,F,A]}.
++
++make_lists_rev(As) ->
++ {call,0,{remote,0,{atom,0,lists},{atom,0,reverse}},As}.
++
++ensure_new(Base, Ps, Fs) ->
++ case has_new(Fs) of
++ true ->
++ Fs;
++ false ->
++ add_new(Base, Ps, Fs)
++ end.
++
++has_new([{function,_L,new,_A,_Cs} | _Fs]) ->
++ true;
++has_new([_ | Fs]) ->
++ has_new(Fs);
++has_new([]) ->
++ false.
++
++add_new(Base, Ps, Fs) ->
++ Vs = [{var,0,V} || V <- Ps],
++ As = if is_atom(Base) ->
++ [{call,0,{remote,0,{atom,0,Base},{atom,0,new}},Vs} | Vs];
++ true ->
++ Vs
++ end,
++ Body = [{call,0,{atom,0,instance},As}],
++ add_func(new, Vs, Body, Fs).
++
++add_instance(Mod, Ps, Fs) ->
++ Vs = [{var,0,V} || V <- Ps],
++ AbsMod = [{tuple,0,[{atom,0,Mod}|Vs]}],
++ add_func(instance, Vs, AbsMod, Fs).
++
++add_func(Name, Args, Body, Fs) ->
++ A = length(Args),
++ F = {function,0,Name,A,[{clause,0,Args,[],Body}]},
++ [F|Fs].
++
++collect_defined(Fs) ->
++ [{N,A} || {function,_,N,A,_} <- Fs].
++
++attribs([{attribute,Line,module,{Mod,_}=ModAs}|T], Base, _, Acc) ->
++ attribs(T, Base, ModAs, [{attribute,Line,module,Mod}|Acc]);
++attribs([{attribute,_,module,Mod}=H|T], Base, _, Acc) ->
++ attribs(T, Base, Mod, [H|Acc]);
++attribs([{attribute,Line,extends,Base}|T], Base0, Ps, Acc) when is_atom(Base) ->
++ Mod = case Ps of
++ {Mod0,_} -> Mod0;
++ Mod0 -> Mod0
++ end,
++ case Mod of
++ Base ->
++ add_error(Line, extends_self),
++ attribs(T, Base0, Ps, Acc);
++ _ ->
++ attribs(T, Base, Ps, Acc)
++ end;
++attribs([H|T], Base, Ps, Acc) ->
++ attribs(T, Base, Ps, [H|Acc]);
++attribs([], Base, Ps, Acc) ->
++ {Base,Ps,lists:reverse(Acc)}.
++
++%% This is extremely simplistic for now; all functions get an extra
++%% parameter, whether they need it or not, except for static functions.
++
++update_function_name({F,A}) when F =/= new ->
++ {F,A+1};
++update_function_name(E) ->
++ E.
++
++update_forms([{function,L,N,A,Cs}|Fs]) when N =/= new ->
++ [{function,L,N,A+1,Cs}|update_forms(Fs)];
++update_forms([F|Fs]) ->
++ [F|update_forms(Fs)];
++update_forms([]) ->
++ [].
++
++update_exps([{attribute,Line,export,Es0}|T]) ->
++ Es = [update_function_name(E) || E <- Es0],
++ [{attribute,Line,export,Es}|update_exps(T)];
++update_exps([H|T]) ->
++ [H|update_exps(T)];
++update_exps([]) ->
++ [].
++
++%% Process the program forms.
++
++forms([F0|Fs0],St0) ->
++ {F1,St1} = form(F0,St0),
++ {Fs1,St2} = forms(Fs0,St1),
++ {[F1|Fs1],St2};
++forms([], St0) ->
++ {[], St0}.
++
++%% Only function definitions are of interest here. State is not updated.
++form({function,Line,instance,_Arity,_Clauses}=F,St) ->
++ add_error(Line, define_instance),
++ {F,St};
++form({function,Line,Name0,Arity0,Clauses0},St) when Name0 =/= new ->
++ {Name,Arity,Clauses} = function(Name0, Arity0, Clauses0, St),
++ {{function,Line,Name,Arity,Clauses},St};
++%% Pass anything else through
++form(F,St) -> {F,St}.
++
++function(Name, Arity, Clauses0, St) ->
++ Clauses1 = clauses(Clauses0,St),
++ {Name,Arity,Clauses1}.
++
++clauses([C|Cs],#pmod{parameters=Ps}=St) ->
++ {clause,L,H,G,B0} = clause(C,St),
++ T = {tuple,L,[{var,L,V} || V <- ['_'|Ps]]},
++ B = [{match,L,{var,L,'_'},{var,L,V}} || V <- ['THIS'|Ps]] ++ B0,
++ [{clause,L,H++[{match,L,T,{var,L,'THIS'}}],G,B}|clauses(Cs,St)];
++clauses([],_St) -> [].
++
++clause({clause,Line,H,G,B0},St) ->
++ %% We never update H and G, so we will just copy them.
++ B1 = exprs(B0,St),
++ {clause,Line,H,G,B1}.
++
++pattern_grp([{bin_element,L1,E1,S1,T1} | Fs],St) ->
++ S2 = case S1 of
++ default ->
++ default;
++ _ ->
++ expr(S1,St)
++ end,
++ T2 = case T1 of
++ default ->
++ default;
++ _ ->
++ bit_types(T1)
++ end,
++ [{bin_element,L1,expr(E1,St),S2,T2} | pattern_grp(Fs,St)];
++pattern_grp([],_St) ->
++ [].
++
++bit_types([]) ->
++ [];
++bit_types([Atom | Rest]) when is_atom(Atom) ->
++ [Atom | bit_types(Rest)];
++bit_types([{Atom, Integer} | Rest]) when is_atom(Atom), is_integer(Integer) ->
++ [{Atom, Integer} | bit_types(Rest)].
++
++exprs([E0|Es],St) ->
++ E1 = expr(E0,St),
++ [E1|exprs(Es,St)];
++exprs([],_St) -> [].
++
++expr({var,_L,_V}=Var,_St) ->
++ Var;
++expr({integer,_Line,_I}=Integer,_St) -> Integer;
++expr({float,_Line,_F}=Float,_St) -> Float;
++expr({atom,_Line,_A}=Atom,_St) -> Atom;
++expr({string,_Line,_S}=String,_St) -> String;
++expr({char,_Line,_C}=Char,_St) -> Char;
++expr({nil,_Line}=Nil,_St) -> Nil;
++expr({cons,Line,H0,T0},St) ->
++ H1 = expr(H0,St),
++ T1 = expr(T0,St),
++ {cons,Line,H1,T1};
++expr({lc,Line,E0,Qs0},St) ->
++ Qs1 = lc_bc_quals(Qs0,St),
++ E1 = expr(E0,St),
++ {lc,Line,E1,Qs1};
++expr({bc,Line,E0,Qs0},St) ->
++ Qs1 = lc_bc_quals(Qs0,St),
++ E1 = expr(E0,St),
++ {bc,Line,E1,Qs1};
++expr({tuple,Line,Es0},St) ->
++ Es1 = expr_list(Es0,St),
++ {tuple,Line,Es1};
++expr({record,Line,Name,Is0},St) ->
++ Is = record_fields(Is0,St),
++ {record,Line,Name,Is};
++expr({record,Line,E0,Name,Is0},St) ->
++ E = expr(E0,St),
++ Is = record_fields(Is0,St),
++ {record,Line,E,Name,Is};
++expr({record_field,Line,E0,Name,Key},St) ->
++ E = expr(E0,St),
++ {record_field,Line,E,Name,Key};
++expr({block,Line,Es0},St) ->
++ Es1 = exprs(Es0,St),
++ {block,Line,Es1};
++expr({'if',Line,Cs0},St) ->
++ Cs1 = icr_clauses(Cs0,St),
++ {'if',Line,Cs1};
++expr({'case',Line,E0,Cs0},St) ->
++ E1 = expr(E0,St),
++ Cs1 = icr_clauses(Cs0,St),
++ {'case',Line,E1,Cs1};
++expr({'receive',Line,Cs0},St) ->
++ Cs1 = icr_clauses(Cs0,St),
++ {'receive',Line,Cs1};
++expr({'receive',Line,Cs0,To0,ToEs0},St) ->
++ To1 = expr(To0,St),
++ ToEs1 = exprs(ToEs0,St),
++ Cs1 = icr_clauses(Cs0,St),
++ {'receive',Line,Cs1,To1,ToEs1};
++expr({'try',Line,Es0,Scs0,Ccs0,As0},St) ->
++ Es1 = exprs(Es0,St),
++ Scs1 = icr_clauses(Scs0,St),
++ Ccs1 = icr_clauses(Ccs0,St),
++ As1 = exprs(As0,St),
++ {'try',Line,Es1,Scs1,Ccs1,As1};
++expr({'fun',_,{function,_,_,_}}=ExtFun,_St) ->
++ ExtFun;
++expr({'fun',Line,Body},St) ->
++ case Body of
++ {clauses,Cs0} ->
++ Cs1 = fun_clauses(Cs0,St),
++ {'fun',Line,{clauses,Cs1}};
++ {function,F,A} = Function ->
++ {F1,A1} = update_function_name({F,A}),
++ if A1 =:= A ->
++ {'fun',Line,Function};
++ true ->
++ %% Must rewrite local fun-name to a fun that does a
++ %% call with the extra THIS parameter.
++ As = make_vars(A, Line),
++ As1 = As ++ [{var,Line,'THIS'}],
++ Call = {call,Line,{atom,Line,F1},As1},
++ Cs = [{clause,Line,As,[],[Call]}],
++ {'fun',Line,{clauses,Cs}}
++ end;
++ {function,_M,_F,_A} = Fun4 -> %This is an error in lint!
++ {'fun',Line,Fun4}
++ end;
++expr({call,Lc,{atom,_,instance}=Name,As0},St) ->
++ %% All local functions 'instance(...)' are static by definition,
++ %% so they do not take a 'THIS' argument when called
++ As1 = expr_list(As0,St),
++ {call,Lc,Name,As1};
++expr({call,Lc,{atom,_,new}=Name,As0},St) ->
++ %% All local functions 'new(...)' are static by definition,
++ %% so they do not take a 'THIS' argument when called
++ As1 = expr_list(As0,St),
++ {call,Lc,Name,As1};
++expr({call,Lc,{atom,_Lf,F}=Atom,As0}, #pmod{defined=Def}=St) ->
++ As1 = expr_list(As0,St),
++ case gb_sets:is_member({F,length(As0)}, Def) of
++ false ->
++ %% BIF or imported function.
++ {call,Lc,Atom,As1};
++ true ->
++ %% Local function call - needs THIS parameter.
++ {call,Lc,Atom,As1 ++ [{var,0,'THIS'}]}
++ end;
++expr({call,Line,F0,As0},St) ->
++ %% Other function call
++ F1 = expr(F0,St),
++ As1 = expr_list(As0,St),
++ {call,Line,F1,As1};
++expr({'catch',Line,E0},St) ->
++ E1 = expr(E0,St),
++ {'catch',Line,E1};
++expr({match,Line,P,E0},St) ->
++ E1 = expr(E0,St),
++ {match,Line,P,E1};
++expr({bin,Line,Fs},St) ->
++ Fs2 = pattern_grp(Fs,St),
++ {bin,Line,Fs2};
++expr({op,Line,Op,A0},St) ->
++ A1 = expr(A0,St),
++ {op,Line,Op,A1};
++expr({op,Line,Op,L0,R0},St) ->
++ L1 = expr(L0,St),
++ R1 = expr(R0,St),
++ {op,Line,Op,L1,R1};
++%% The following are not allowed to occur anywhere!
++expr({remote,Line,M0,F0},St) ->
++ M1 = expr(M0,St),
++ F1 = expr(F0,St),
++ {remote,Line,M1,F1}.
++
++expr_list([E0|Es],St) ->
++ E1 = expr(E0,St),
++ [E1|expr_list(Es,St)];
++expr_list([],_St) -> [].
++
++record_fields([{record_field,L,K,E0}|T],St) ->
++ E = expr(E0,St),
++ [{record_field,L,K,E}|record_fields(T,St)];
++record_fields([],_) -> [].
++
++icr_clauses([C0|Cs],St) ->
++ C1 = clause(C0,St),
++ [C1|icr_clauses(Cs,St)];
++icr_clauses([],_St) -> [].
++
++lc_bc_quals([{generate,Line,P,E0}|Qs],St) ->
++ E1 = expr(E0,St),
++ [{generate,Line,P,E1}|lc_bc_quals(Qs,St)];
++lc_bc_quals([{b_generate,Line,P,E0}|Qs],St) ->
++ E1 = expr(E0,St),
++ [{b_generate,Line,P,E1}|lc_bc_quals(Qs,St)];
++lc_bc_quals([E0|Qs],St) ->
++ E1 = expr(E0,St),
++ [E1|lc_bc_quals(Qs,St)];
++lc_bc_quals([],_St) -> [].
++
++fun_clauses([C0|Cs],St) ->
++ C1 = clause(C0,St),
++ [C1|fun_clauses(Cs,St)];
++fun_clauses([],_St) -> [].
++
++make_vars(N, L) ->
++ make_vars(1, N, L).
++
++make_vars(N, M, L) when N =< M ->
++ V = list_to_atom("X"++integer_to_list(N)),
++ [{var,L,V} | make_vars(N + 1, M, L)];
++make_vars(_, _, _) ->
++ [].
+diff --git a/src/sockjs_multiplex_channel.erl b/src/sockjs_multiplex_channel.erl
+index cbb8274..5afcfa3 100644
+--- a/src/sockjs_multiplex_channel.erl
++++ b/src/sockjs_multiplex_channel.erl
+@@ -1,3 +1,5 @@
++-compile({parse_transform,pmod_pt}).
++
+ -module(sockjs_multiplex_channel, [Conn, Topic]).
+
+ -export([send/1, close/0, close/2, info/0]).
--- /dev/null
+diff --git a/src/sockjs_cowboy_handler.erl b/src/sockjs_cowboy_handler.erl
+index 3b1ffe3..d2f05ae 100644
+--- a/src/sockjs_cowboy_handler.erl
++++ b/src/sockjs_cowboy_handler.erl
+@@ -30,21 +30,35 @@ terminate(_Req, _Service) ->
+
+ %% --------------------------------------------------------------------------
+
+-websocket_init(_TransportName, Req, Service = #service{logger = Logger}) ->
+- Req0 = Logger(Service, {cowboy, Req}, websocket),
++websocket_init(_TransportName, Req,
++ Service = #service{logger = Logger,
++ subproto_pref = SubProtocolPref}) ->
++ Req3 = case cowboy_http_req:header(<<"Sec-Websocket-Protocol">>, Req) of
++ {undefined, Req1} ->
++ Req1;
++ {SubProtocols, Req1} ->
++ SelectedSubProtocol =
++ choose_subprotocol_bin(SubProtocols, SubProtocolPref),
++ {ok, Req2} = cowboy_http_req:set_resp_header(
++ <<"Sec-Websocket-Protocol">>,
++ SelectedSubProtocol, Req1),
++ Req2
++ end,
++
++ Req4 = Logger(Service, {cowboy, Req3}, websocket),
+
+ Service1 = Service#service{disconnect_delay = 5*60*1000},
+
+- {Info, Req1} = sockjs_handler:extract_info(Req0),
++ {Info, Req5} = sockjs_handler:extract_info(Req4),
+ SessionPid = sockjs_session:maybe_create(undefined, Service1, Info),
+- {RawWebsocket, {cowboy, Req3}} =
+- case sockjs_handler:get_action(Service, Req1) of
+- {{match, WS}, Req2} when WS =:= websocket orelse
++ {RawWebsocket, {cowboy, Req7}} =
++ case sockjs_handler:get_action(Service, Req5) of
++ {{match, WS}, Req6} when WS =:= websocket orelse
+ WS =:= rawwebsocket ->
+- {WS, Req2}
++ {WS, Req6}
+ end,
+ self() ! go,
+- {ok, Req3, {RawWebsocket, SessionPid}}.
++ {ok, Req7, {RawWebsocket, SessionPid}}.
+
+ websocket_handle({text, Data}, Req, {RawWebsocket, SessionPid} = S) ->
+ case sockjs_ws_handler:received(RawWebsocket, SessionPid, Data) of
+@@ -69,3 +83,15 @@ websocket_info(shutdown, Req, S) ->
+ websocket_terminate(_Reason, _Req, {RawWebsocket, SessionPid}) ->
+ sockjs_ws_handler:close(RawWebsocket, SessionPid),
+ ok.
++
++%% --------------------------------------------------------------------------
++
++choose_subprotocol_bin(SubProtocols, Pref) ->
++ choose_subprotocol(re:split(SubProtocols, ", *"), Pref).
++choose_subprotocol(SubProtocols, undefined) ->
++ erlang:hd(lists:reverse(lists:sort(SubProtocols)));
++choose_subprotocol(SubProtocols, Pref) ->
++ case lists:filter(fun (E) -> lists:member(E, SubProtocols) end, Pref) of
++ [Hd | _] -> Hd;
++ [] -> choose_subprotocol(SubProtocols, undefined)
++ end.
+diff --git a/src/sockjs_handler.erl b/src/sockjs_handler.erl
+index b706453..81d4ef7 100644
+--- a/src/sockjs_handler.erl
++++ b/src/sockjs_handler.erl
+@@ -29,7 +29,9 @@ init_state(Prefix, Callback, State, Options) ->
+ response_limit =
+ proplists:get_value(response_limit, Options, 128*1024),
+ logger =
+- proplists:get_value(logger, Options, fun default_logger/3)
++ proplists:get_value(logger, Options, fun default_logger/3),
++ subproto_pref =
++ proplists:get_value(subproto_pref, Options)
+ }.
+
+ %% --------------------------------------------------------------------------
+diff --git a/src/sockjs_internal.hrl b/src/sockjs_internal.hrl
+index 629b2fe..eed5597 100644
+--- a/src/sockjs_internal.hrl
++++ b/src/sockjs_internal.hrl
+@@ -15,7 +15,8 @@
+ disconnect_delay , %% non_neg_integer()
+ heartbeat_delay , %% non_neg_integer()
+ response_limit , %% non_neg_integer()
+- logger %% logger()
++ logger , %% logger()
++ subproto_pref %% [binary()]
+ }).
+
+ %% -type(service() :: #service{}).
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+#!/bin/sh
+# To update the patch run this script.
+cd sockjs-erlang-git/src
+git checkout *
+sed 's#^\(-type.*\)#%% \1#g' -i *
+sed 's#^\(-spec.*\)#%% \1#g' -i *
+sed 's#^\(-record.*\)::\(.*\)$#\1, %% \2#g' * -i
+sed 's#^\( .*\)::\(.*\),$#\1, %% \2#g' * -i
+sed 's#^\( .*\)::\(.*\)$#\1 %% \2#g' * -i
+git diff > ../../0000-remove-spec-patch.diff
--- /dev/null
+UPSTREAM_SHORT_HASH:=3132eb9
--- /dev/null
+APP_NAME:=sockjs
+DEPS:=cowboy-wrapper
+
+UPSTREAM_GIT:=https://github.com/rabbitmq/sockjs-erlang.git
+UPSTREAM_REVISION:=3132eb920aea9abd5c5e65349331c32d8cfa961e # 0.3.4
+RETAIN_ORIGINAL_VERSION:=true
+WRAPPER_PATCHES:=\
+ 0000-remove-spec-patch.diff \
+ 0001-a2b-b2a.diff \
+ 0002-parameterised-modules-r16a.diff \
+ 0003-websocket-subprotocol
+
+ORIGINAL_APP_FILE:=$(CLONE_DIR)/src/$(APP_NAME).app.src
+DO_NOT_GENERATE_APP_FILE=true
+
+ERLC_OPTS:=$(ERLC_OPTS) -D no_specs
+
+define construct_app_commands
+ cp $(CLONE_DIR)/LICENSE-* $(APP_DIR)
+ rm $(APP_DIR)/ebin/pmod_pt.beam
+endef
+
+define package_rules
+
+$(CLONE_DIR)/ebin/sockjs_multiplex_channel.beam: $(CLONE_DIR)/ebin/pmod_pt.beam
+
+endef
--- /dev/null
+All code is released under the MIT license (see LICENSE-MIT-SockJS)
+with the exception of following files:
+
+ * src/mochijson2_fork.erl and src/mochinum_fork.erl, which are forked
+ from Mochiweb project (https://github.com/mochi/mochiweb) and
+ covered by LICENSE-MIT-Mochiweb.
+
+ * rebar, which is a compiled binary from Rebar project
+ (https://github.com/basho/rebar) and covered by LICENSE-APL2-Rebar.
+
--- /dev/null
+0.3.4
+=====
+
+ * #41 - fix a traceback when websocket is too slow or too
+ busy to promptly process incoming and outgoing data
+ * #37 - make porting to new cowboy a tiny bit easier
+ (allow 'method' to be a binary)
+
+
+0.3.3
+=====
+
+ * sockjs/sockjs-protocol#56 Fix for iOS 6 caching POSTs
+
+
+0.3.0
+=====
+
+ * Fixed {odd_info, heartbeat_triggered} exception (Isaev Ivan)
+ * Changes to pass sockjs-protocol-0.3
+ * Fixed sockname badmatch (Egobrain)
+ * Updated README
+ * Introduced parametrized module API (to get multiplexer working).
+ * Introduced Multiplexer example.
+ * Fixed invalid catch in sockjs_json:decode (Isaev Ivan)
+ * Bumped Cowboy version.
+ * Specs were moved around to make R13 happy
+ * Dropped misultin support.
+
+
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
--- /dev/null
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+Copyright (C) 2011 VMware, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
--- /dev/null
+REBAR=./rebar
+
+.PHONY: all clean distclean
+all: deps
+ $(REBAR) compile
+
+deps:
+ $(REBAR) get-deps
+
+clean::
+ $(REBAR) clean
+ rm -rf priv/www
+
+distclean::
+ rm -rf deps priv ebin
+
+
+# **** serve ****
+
+.PHONY: serve
+SERVE_SCRIPT=./examples/cowboy_test_server.erl
+serve:
+ @if [ -e .pidfile.pid ]; then \
+ kill `cat .pidfile.pid`; \
+ rm .pidfile.pid; \
+ fi
+
+ @while [ 1 ]; do \
+ $(REBAR) compile && ( \
+ echo " [*] Running erlang"; \
+ $(SERVE_SCRIPT) & \
+ SRVPID=$$!; \
+ echo $$SRVPID > .pidfile.pid; \
+ echo " [*] Pid: $$SRVPID"; \
+ ); \
+ inotifywait -r -q -e modify src/*erl examples/*erl src/*hrl; \
+ test -e .pidfile.pid && kill `cat .pidfile.pid`; \
+ rm -f .pidfile.pid; \
+ sleep 0.1; \
+ done
+
+
+# **** dialyzer ****
+
+.dialyzer_generic.plt:
+ dialyzer \
+ --build_plt \
+ --output_plt .dialyzer_generic.plt \
+ --apps erts kernel stdlib compiler sasl os_mon mnesia \
+ tools public_key crypto ssl
+
+.dialyzer_sockjs.plt: .dialyzer_generic.plt
+ dialyzer \
+ --no_native \
+ --add_to_plt \
+ --plt .dialyzer_generic.plt \
+ --output_plt .dialyzer_sockjs.plt -r deps/*/ebin
+
+distclean::
+ rm -f .dialyzer_sockjs.plt
+
+dialyze: .dialyzer_sockjs.plt
+ @dialyzer \
+ --plt .dialyzer_sockjs.plt \
+ --no_native \
+ --fullpath \
+ -Wrace_conditions \
+ -Werror_handling \
+ -Wunmatched_returns \
+ ebin
+
+.PHONY: xref
+xref:
+ $(REBAR) xref | egrep -v unused
+
+
+# **** release ****
+# 1. Commit
+# 2. Bump version in "src/sockjs.app.src"
+# 3. git tag -s "vx.y.z" -m "Release vx.y.z"
--- /dev/null
+SockJS family:
+
+ * [SockJS-client](https://github.com/sockjs/sockjs-client) JavaScript client library
+ * [SockJS-node](https://github.com/sockjs/sockjs-node) Node.js server
+ * [SockJS-erlang](https://github.com/sockjs/sockjs-erlang) Erlang server
+
+
+SockJS-erlang server
+====================
+
+[SockJS](http://sockjs.org) server written in Erlang. Can run with
+[Cowboy](https://github.com/extend/cowboy) http server. SockJS-erlang
+is at its core web-framework agnostic (up to version
+[v0.2.1](https://github.com/sockjs/sockjs-erlang/tree/v0.2.1 ) we also
+supported
+[Misultin](https://github.com/ostinelli/misultin)). SockJS-erlang is
+compatible with
+[SockJS client version 0.3](http://sockjs.github.com/sockjs-protocol/sockjs-protocol-0.3.html). See
+https://github.com/sockjs/sockjs-client for more information on
+SockJS.
+
+
+Show me the code!
+-----------------
+
+A simplistic echo SockJS server using Cowboy may look more or less
+like this:
+
+```erlang
+main(_) ->
+ application:start(sockjs),
+ application:start(cowboy),
+
+ SockjsState = sockjs_handler:init_state(
+ <<"/echo">>, fun service_echo/3, state, []),
+
+ Routes = [{'_', [{[<<"echo">>, '...'],
+ sockjs_cowboy_handler, SockjsState}]}],
+
+ cowboy:start_listener(http, 100,
+ cowboy_tcp_transport, [{port, 8081}],
+ cowboy_http_protocol, [{dispatch, Routes}]),
+ receive
+ _ -> ok
+ end.
+
+service_echo(_Conn, init, state) -> {ok, state};
+service_echo(Conn, {recv, Data}, state) -> Conn:send(Data);
+service_echo(_Conn, closed, state) -> {ok, state}.
+```
+
+Dig into the `examples` directory to get working code:
+
+ * https://github.com/sockjs/sockjs-erlang/examples/cowboy_echo.erl
+
+
+How to run the examples?
+------------------------
+
+You may need a recent version of Erlang/OTP, at least R14B is recommended.
+
+To run Cowboy example:
+
+ cd sockjs-erlang
+ ./rebar get-deps
+ ./rebar compile
+ ./examples/cowboy_echo.erl
+
+This will start a simple `/echo` SockJS server on
+`http://localhost:8081`. Open this link in a browser and play
+around.
+
+
+SockJS-erlang API
+-----------------
+
+Except for the web framework-specific API's, SockJS-erlang is rather
+simple. It has just a couple of methods:
+
+ * **sockjs_handler:init_state(prefix, callback, state, options) -> service()**
+
+ Initializes the state of a SockJS service (ie: a thing you can
+ access from the browser, it has an url and a code on the server
+ side). `prefix` is a binary that must exactly match the url prefix
+ of the service; for example, if the service will be listening on
+ '/echo', this parameter must be set to `<<"/echo">>`. `callback`
+ function will be called when a new SockJS connection is
+ established, data received or a connection is closed. The value of
+ `state` will be passed to the callback and preserved if returned
+ value has changed. Options is a proplist that can contain
+ following tuples:
+
+ * `{sockjs_url, string()}` - Transports which don't support
+ cross-domain communication natively ('eventsource' to name one)
+ use an iframe trick. A simple page is served from the SockJS
+ server (using its foreign domain) and is placed in an invisible
+ iframe. Code run from this iframe doesn't need to worry about
+ cross-domain issues, as it's being run from domain local to the
+ SockJS server. This iframe also does need to load SockJS
+ javascript client library, and this option lets you specify its
+ url (if you're unsure, point it to <a
+ href="http://cdn.sockjs.org/sockjs-0.2.min.js"> the latest
+ minified SockJS client release</a>, this is the default).
+ * `{websocket, boolean()}` - are native websockets enabled? This
+ can be useful when your load balancer doesn't support them.
+ * `{cookie_needed, boolean()}` - is your load balancer relying on
+ cookies to get sticky sessions working?
+ * `{heartbeat_delay, integer()}` - how often to send heartbeat
+ packets (in ms).
+ * `{disconnect_delay, integer()}` - how long to hold session state
+ after the client was last connected (in ms).
+ * `{response_limit, integer()}` - the maximum size of a single
+ http streaming response (in bytes).
+ * `{logger, fun/3}` - a function called on every request, used
+ to print request to the logs (or on the screen by default).
+
+ For more explanation, please do take a look at
+ [SockJS-node readme](https://github.com/sockjs/sockjs-node/blob/master/README.md).
+
+ * **Connection:send(payload) -> ok**
+
+ Send data over an active SockJS connection. Payload should be of
+ iodata() type. Messages sent after connection gets closed will be
+ lost.
+
+ * **Connection:close(code, reason) -> ok**
+
+ Close an active SockJS connection with code and reason. If code
+ and reason are skipped, the defaults are used.
+
+ * **Connection:info() -> proplist()**
+
+ Sometimes you may want to know more about the underlying
+ connection. This method returns a proplist with few attributes
+ extracted from the first HTTP/websocket request that was coming
+ to this connection. You should see:
+
+ * peername - ip address and port of the remote host
+ * sockname - ip address and port of the local endpoint
+ * path - the path used by the request that started the connection
+ * headers - a set of headers extracted from the request that
+ may be handy (don't expect to retrieve Cookie header).
+
+
+The framework-specific calls are more problematic. Instead of trying
+to explain how to use them, please take a look at the examples.
+
+ * **type(req() :: {cowboy, request()})**
+ * **sockjs_handler:handle_req(service(), req()) -> req()**
+ * **sockjs_handler:handle_ws(service(), req()) -> req()**
+
+
+Stability
+---------
+
+SockJS-erlang is quite new, but should be reasonably stable. The Cowboy backend passes all the
+[SockJS-protocol tests](https://github.com/sockjs/sockjs-protocol).
+
+Deployment and load balancing
+-----------------------------
+
+SockJS servers should work well behind many load balancer setups, but
+it sometimes requires some additional tweaks. For more details, please
+do take a look at the 'Deployment' section in
+[SockJS-node readme](https://github.com/sockjs/sockjs-node/blob/master/README.md).
+
+
+Development and testing
+-----------------------
+
+You need [rebar](https://github.com/basho/rebar)
+([instructions](https://github.com/basho/rebar/wiki/Building-rebar)).
+Due to a bug in rebar config handling you need a reasonably recent
+version - newer than late Oct 2011. Alternatively, SockJS-erlang is
+bundled with a recent rebar binary.
+
+SockJS-erlang contains a `test_server`, a simple server used for
+testing.
+
+To run Cowboy test_server:
+
+ cd sockjs-erlang
+ ./rebar get-deps
+ ./rebar compile
+ ./examples/cowboy_test_server.erl
+
+That should start test_server on port 8081. Currently, there are two
+separate test suites using test_server.
+
+### SockJS-protocol Python tests
+
+Once test_server is listening on `http://localhost:8081` you may test it
+using SockJS-protocol:
+
+ cd sockjs-protocol
+ make test_deps
+ ./venv/bin/python sockjs-protocol-dev.py
+
+For details see
+[SockJS-protocol README](https://github.com/sockjs/sockjs-protocol#readme).
+
+### SockJS-client QUnit tests
+
+You need to start a second web server (by default listening on 8080)
+that is serving various static html and javascript files:
+
+ cd sockjs-client
+ make test
+
+At that point you should have two web servers running: sockjs-erlang on
+8081 and sockjs-client on 8080. When you open the browser on
+[http://localhost:8080/](http://localhost:8080/) you should be able
+to run the QUnit tests against your sockjs-erlang server.
+
+For details see
+[SockJS-client README](https://github.com/sockjs/sockjs-client#readme).
+
+Additionally, if you're doing more serious development consider using
+`make serve`, which will automatically restart the server when you
+modify the source code.
--- /dev/null
+#!/usr/bin/env escript
+%%! -smp disable +A1 +K true -pa ebin deps/cowboy/ebin -input
+-module(cowboy_echo).
+-mode(compile).
+
+-export([main/1]).
+
+%% Cowboy callbacks
+-export([init/3, handle/2, terminate/2]).
+
+
+%% Escript entry point: starts the sockjs and cowboy applications,
+%% mounts the /echo SockJS service, registers this module as the
+%% fallback HTTP handler, and then blocks forever so the VM keeps
+%% serving requests.
+main(_) ->
+ Port = 8081,
+ application:start(sockjs),
+ application:start(cowboy),
+
+ %% Service state: URL prefix, per-event callback and initial state.
+ SockjsState = sockjs_handler:init_state(
+ <<"/echo">>, fun service_echo/3, state, []),
+
+ %% SockJS traffic goes to /echo/...; anything else falls through to
+ %% this module's handle/2 (which serves the demo page).
+ VhostRoutes = [{[<<"echo">>, '...'], sockjs_cowboy_handler, SockjsState},
+ {'_', ?MODULE, []}],
+ Routes = [{'_', VhostRoutes}], % any vhost
+
+ io:format(" [*] Running at http://localhost:~p~n", [Port]),
+ cowboy:start_listener(http, 100,
+ cowboy_tcp_transport, [{port, Port}],
+ cowboy_http_protocol, [{dispatch, Routes}]),
+ %% No message is ever sent here; the receive only keeps the escript
+ %% process alive.
+ receive
+ _ -> ok
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% Cowboy HTTP handler callback: accept any transport for plain HTTP
+%% and carry an empty handler state.
+init({_Any, http}, Req, []) ->
+ {ok, Req, []}.
+
+%% Serve the static demo page for every non-SockJS request. The path
+%% is relative to the current working directory, so the script must be
+%% run from the repository root (see the README); if the file is
+%% missing, the badmatch crash is the error report.
+handle(Req, State) ->
+ {ok, Data} = file:read_file("./examples/echo.html"),
+ {ok, Req1} = cowboy_http_req:reply(200, [{<<"Content-Type">>, "text/html"}],
+ Data, Req),
+ {ok, Req1, State}.
+
+%% Cowboy handler teardown: nothing to clean up.
+terminate(_Req, _State) ->
+ ok.
+
+%% --------------------------------------------------------------------------
+
+%% SockJS service callback: echo every received frame back to the same
+%% connection. NOTE(review): the recv clause returns the result of
+%% Conn:send/1 rather than {ok, state} — presumably accepted by the
+%% sockjs callback contract; confirm against sockjs_handler.
+service_echo(_Conn, init, state) -> {ok, state};
+service_echo(Conn, {recv, Data}, state) -> Conn:send(Data);
+service_echo(_Conn, closed, state) -> {ok, state}.
--- /dev/null
+#!/usr/bin/env escript
+%%! -smp disable +A1 +K true -pa ebin deps/cowboy/ebin -input
+-module(cowboy_test_server).
+-mode(compile).
+
+-export([main/1]).
+
+%% Cowboy callbacks
+-export([init/3, handle/2, terminate/2]).
+
+
+%% Escript entry point for the protocol test server: mounts one SockJS
+%% service per behaviour exercised by the sockjs-protocol test suite
+%% (echo, close, amplify, broadcast, plus echo variants with websockets
+%% disabled and with cookie_needed set), then blocks forever.
+main(_) ->
+ Port = 8081,
+ application:start(sockjs),
+ application:start(cowboy),
+
+ %% /echo additionally caps a single streaming response at 4096 bytes.
+ StateEcho = sockjs_handler:init_state(
+ <<"/echo">>, fun service_echo/3, state,
+ [{response_limit, 4096}]),
+ StateClose = sockjs_handler:init_state(
+ <<"/close">>, fun service_close/3, state, []),
+ StateAmplify = sockjs_handler:init_state(
+ <<"/amplify">>, fun service_amplify/3, state, []),
+ StateBroadcast = sockjs_handler:init_state(
+ <<"/broadcast">>, fun service_broadcast/3, state, []),
+ StateDWSEcho = sockjs_handler:init_state(
+ <<"/disabled_websocket_echo">>, fun service_echo/3, state,
+ [{websocket, false}]),
+ StateCNEcho = sockjs_handler:init_state(
+ <<"/cookie_needed_echo">>, fun service_echo/3, state,
+ [{cookie_needed, true}]),
+
+ %% One dispatch entry per service; unmatched paths fall through to
+ %% this module's handle/2 (a 404 fallback).
+ VRoutes = [{[<<"echo">>, '...'], sockjs_cowboy_handler, StateEcho},
+ {[<<"close">>, '...'], sockjs_cowboy_handler, StateClose},
+ {[<<"amplify">>, '...'], sockjs_cowboy_handler, StateAmplify},
+ {[<<"broadcast">>, '...'], sockjs_cowboy_handler, StateBroadcast},
+ {[<<"disabled_websocket_echo">>, '...'], sockjs_cowboy_handler,
+ StateDWSEcho},
+ {[<<"cookie_needed_echo">>, '...'], sockjs_cowboy_handler,
+ StateCNEcho},
+ {'_', ?MODULE, []}],
+ Routes = [{'_', VRoutes}], % any vhost
+
+ io:format(" [*] Running at http://localhost:~p~n", [Port]),
+ cowboy:start_listener(http, 100,
+ cowboy_tcp_transport, [{port, Port}],
+ cowboy_http_protocol, [{dispatch, Routes}]),
+ %% Block forever to keep the escript process (and Cowboy) alive.
+ receive
+ _ -> ok
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% Cowboy HTTP handler callback: accept any transport for plain HTTP
+%% and carry an empty handler state.
+init({_Any, http}, Req, []) ->
+ {ok, Req, []}.
+
+%% Fallback for every request no SockJS route matched: plain 404.
+handle(Req, State) ->
+ {ok, Req2} = cowboy_http_req:reply(404, [],
+ <<"404 - Nothing here (via sockjs-erlang fallback)\n">>, Req),
+ {ok, Req2, State}.
+
+%% Cowboy handler teardown: nothing to clean up.
+terminate(_Req, _State) ->
+ ok.
+
+%% --------------------------------------------------------------------------
+
+%% Echo service: send every received frame straight back.
+service_echo(_Conn, init, state) -> {ok, state};
+service_echo(Conn, {recv, Data}, state) -> Conn:send(Data);
+service_echo(_Conn, closed, state) -> {ok, state}.
+
+%% Close service: immediately close every connection with code 3000,
+%% regardless of which event (init/recv/closed) triggered the callback.
+service_close(Conn, _, _State) ->
+ Conn:close(3000, "Go away!").
+
+%% Amplify service: the client sends a decimal number N; reply with
+%% 2^N 'x' characters. N is clamped to 1..18 (anything outside that
+%% range becomes 1), bounding the reply at 2^18 = 256 KiB.
+service_amplify(Conn, {recv, Data}, _State) ->
+ N0 = list_to_integer(binary_to_list(Data)),
+ N = if N0 > 0 andalso N0 < 19 -> N0;
+ true -> 1
+ end,
+ Conn:send(list_to_binary(
+ string:copies("x", round(math:pow(2, N)))));
+%% Ignore init/closed events.
+service_amplify(_Conn, _, _State) ->
+ ok.
+
+%% Broadcast service: every frame received from any client is relayed
+%% to all currently-connected clients. Connections are tracked in a
+%% public named ets table keyed by the Conn handle.
+%% NOTE(review): create-if-absent via ets:info/2 is racy if two
+%% connections init concurrently (ets:new would badarg on the second);
+%% acceptable for a test server.
+service_broadcast(Conn, init, _State) ->
+ case ets:info(broadcast_table, memory) of
+ undefined ->
+ ets:new(broadcast_table, [public, named_table]);
+ _Any ->
+ ok
+ end,
+ true = ets:insert(broadcast_table, {Conn}),
+ ok;
+service_broadcast(Conn, closed, _State) ->
+ true = ets:delete_object(broadcast_table, {Conn}),
+ ok;
+%% Fan the frame out to every registered connection (sender included).
+service_broadcast(_Conn, {recv, Data}, _State) ->
+ ets:foldl(fun({Conn1}, _Acc) -> Conn1:send(Data) end,
+ [], broadcast_table),
+ ok.
--- /dev/null
+<!doctype html>
+<html><head>
+ <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.2/jquery.min.js">
+ </script>
+ <script src="http://cdn.sockjs.org/sockjs-0.2.min.js">
+ </script>
+ <style>
+ .box {
+ border: 1px dashed black;
+ border-radius: 4px;
+ -moz-border-radius: 4px;
+ width: 400px;
+ display: block;
+ height: 300px;
+ float: left;
+ }
+ #output {
+ border-color: grey;
+ overflow:auto;
+ }
+ #input {
+ vertical-align: text-top;
+ -moz-outline-style: none;
+ outline-style: none;
+ outline-width: 0px;
+ outline-color: -moz-use-text-color;
+ }
+ body {
+ background-color: #F0F0F0;
+ }
+ </style>
+</head><body lang="en">
+ <h2>SockJS-erlang Echo example</h2>
+ <form id="form">
+ <input id="input" autocomplete="off" class="box"
+ value="type something here" />
+ </form>
+ <div id="output" class="box"></div>
+ <script>
+ // Append a line to the output box and keep it scrolled to the bottom.
+ function log(m) {
+ $('#output').append($("<code>").text(m));
+ $('#output').append($("<br>"));
+ $('#output').scrollTop($('#output').scrollTop()+10000);
+ }
+
+ // Open a SockJS connection to the /echo service and log lifecycle
+ // events and incoming messages.
+ var sockjs_url = '/echo';
+ var sockjs = new SockJS(sockjs_url);
+ sockjs.onopen = function() {
+ log(' [*] Connected (using: '+sockjs.protocol+')');
+ };
+ sockjs.onclose = function(e) {
+ log(' [*] Disconnected ('+e.status + ' ' + e.reason+ ')');
+ };
+ sockjs.onmessage = function(e) {
+ log(' [ ] received: ' + JSON.stringify(e.data));
+ };
+
+ // On form submit, send the input box contents over the connection
+ // (or log an error when it is not open) and suppress page reload.
+ $('#input').focus();
+ $('#form').submit(function() {
+ var val = $('#input').val();
+ $('#input').val('');
+ var l = ' [ ] sending: ' + JSON.stringify(val);
+ if (sockjs.readyState !== SockJS.OPEN) {
+ l += ' (error, connection not established)';
+ } else {
+ sockjs.send(val);
+ }
+ log(l);
+ return false;
+ });
+ </script>
+</body></html>
--- /dev/null
+#!/usr/bin/env escript
+%%! -smp disable +A1 +K true -pa ebin deps/cowboy/ebin -input
+-module(cowboy_multiplex).
+-mode(compile).
+
+-export([main/1]).
+
+%% Cowboy callbacks
+-export([init/3, handle/2, terminate/2]).
+
+%% Escript entry point: starts sockjs and cowboy, mounts a single
+%% /multiplex SockJS service that multiplexes three named channels
+%% (ann, bob, carl) over one connection via sockjs_multiplex, and
+%% blocks forever.
+main(_) ->
+ Port = 8081,
+ application:start(sockjs),
+ application:start(cowboy),
+
+ %% One {ChannelName, Callback, State} entry per multiplexed channel.
+ MultiplexState = sockjs_multiplex:init_state(
+ [{"ann", fun service_ann/3, []},
+ {"bob", fun service_bob/3, []},
+ {"carl", fun service_carl/3, []}]),
+
+ SockjsState = sockjs_handler:init_state(
+ <<"/multiplex">>, sockjs_multiplex, MultiplexState, []),
+
+ %% Non-SockJS paths fall through to this module's handle/2, which
+ %% serves the static demo assets.
+ VhostRoutes = [{[<<"multiplex">>, '...'], sockjs_cowboy_handler, SockjsState},
+ {'_', ?MODULE, []}],
+ Routes = [{'_', VhostRoutes}], % any vhost
+
+ io:format(" [*] Running at http://localhost:~p~n", [Port]),
+ cowboy:start_listener(http, 100,
+ cowboy_tcp_transport, [{port, Port}],
+ cowboy_http_protocol, [{dispatch, Routes}]),
+ %% Block forever to keep the escript process alive.
+ receive
+ _ -> ok
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% Cowboy HTTP handler callback: accept any transport for plain HTTP
+%% and carry an empty handler state.
+init({_Any, http}, Req, []) ->
+ {ok, Req, []}.
+
+%% Static-file fallback: serve the multiplex demo's client script and
+%% index page; anything else gets a 404. Paths are relative to the
+%% current working directory (run from the repository root).
+handle(Req, State) ->
+ {Path, Req1} = cowboy_http_req:path(Req),
+ {ok, Req2} = case Path of
+ [<<"multiplex.js">>] ->
+ {ok, Data} = file:read_file("./examples/multiplex/multiplex.js"),
+ cowboy_http_req:reply(200, [{<<"Content-Type">>, "application/javascript"}],
+ Data, Req1);
+ [] ->
+ {ok, Data} = file:read_file("./examples/multiplex/index.html"),
+ cowboy_http_req:reply(200, [{<<"Content-Type">>, "text/html"}],
+ Data, Req1);
+ _ ->
+ cowboy_http_req:reply(404, [],
+ <<"404 - Nothing here\n">>, Req1)
+ end,
+ {ok, Req2, State}.
+
+%% Cowboy handler teardown: nothing to clean up.
+terminate(_Req, _State) ->
+ ok.
+
+%% --------------------------------------------------------------------------
+
+%% "ann" channel: greet on connect, acknowledge every received frame.
+service_ann(Conn, init, State) ->
+ Conn:send("Ann says hi!"),
+ {ok, State};
+service_ann(Conn, {recv, Data}, State) ->
+ Conn:send(["Ann nods: ", Data]),
+ {ok, State};
+service_ann(_Conn, closed, State) ->
+ {ok, State}.
+
+%% "bob" channel: same shape as ann with different canned replies.
+service_bob(Conn, init, State) ->
+ Conn:send("Bob doesn't agree."),
+ {ok, State};
+service_bob(Conn, {recv, Data}, State) ->
+ Conn:send(["Bob says no to: ", Data]),
+ {ok, State};
+service_bob(_Conn, closed, State) ->
+ {ok, State}.
+
+%% "carl" channel: say goodbye and close the channel right away;
+%% ignore everything after init.
+service_carl(Conn, init, State) ->
+ Conn:send("Carl says goodbye!"),
+ Conn:close(),
+ {ok, State};
+service_carl(_Conn, _, State) ->
+ {ok, State}.
--- /dev/null
+<!doctype html>
+<html><head>
+ <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"></script>
+ <script src="http://cdn.sockjs.org/sockjs-0.2.min.js"></script>
+ <script src="multiplex.js"></script>
+ <style>
+ .box {
+ width: 300px;
+ float: left;
+ margin: 0 20px 0 20px;
+ }
+ .box div, .box input {
+ border: 1px solid;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ width: 100%;
+ padding: 0px;
+ margin: 5px;
+ }
+ .box div {
+ border-color: grey;
+ height: 300px;
+ overflow: auto;
+ }
+ .box input {
+ height: 30px;
+ }
+ h1 {
+ margin-left: 75px;
+ }
+ body {
+ background-color: #F0F0F0;
+ font-family: "Arial";
+ }
+ </style>
+</head><body lang="en">
+ <h1>SockJS Multiplex example</h1>
+
+ <div id="first" class="box">
+ <div></div>
+ <form><input autocomplete="off" value="Type here..."></form>
+ </div>
+
+ <div id="second" class="box">
+ <div></div>
+ <form><input autocomplete="off"></form>
+ </div>
+
+ <div id="third" class="box">
+ <div></div>
+ <form><input autocomplete="off"></form>
+ </div>
+
+ <script>
+ // Pipe - convenience wrapper to present data received from an
+ // object supporting WebSocket API in an html element. And the other
+ // direction: data typed into an input box shall be sent back.
+ var pipe = function(ws, el_name) {
+ var div = $(el_name + ' div');
+ var inp = $(el_name + ' input');
+ var form = $(el_name + ' form');
+
+ // Append a line to the channel's box and keep it scrolled down.
+ var print = function(m, p) {
+ p = (p === undefined) ? '' : JSON.stringify(p);
+ div.append($("<code>").text(m + ' ' + p));
+ div.append($("<br>"));
+ div.scrollTop(div.scrollTop() + 10000);
+ };
+
+ ws.onopen = function() {print('[*] open', ws.protocol);};
+ ws.onmessage = function(e) {print('[.] message', e.data);};
+ ws.onclose = function() {print('[*] close');};
+
+ // Send the input box contents on submit; suppress page reload.
+ form.submit(function() {
+ print('[ ] sending', inp.val());
+ ws.send(inp.val());
+ inp.val('');
+ return false;
+ });
+ };
+
+ // One physical SockJS connection, multiplexed into three logical
+ // channels; each channel is wired to its own box on the page.
+ var sockjs_url = '/multiplex';
+ var sockjs = new SockJS(sockjs_url);
+
+ var multiplexer = new MultiplexedWebSocket(sockjs);
+ var ann = multiplexer.channel('ann');
+ var bob = multiplexer.channel('bob');
+ var carl = multiplexer.channel('carl');
+
+ pipe(ann, '#first');
+ pipe(bob, '#second');
+ pipe(carl, '#third');
+
+ $('#first input').focus();
+ </script>
+</body></html>
--- /dev/null
+// ****
+
+var DumbEventTarget = function() {
+ this._listeners = {};
+};
+DumbEventTarget.prototype._ensure = function(type) {
+ if(!(type in this._listeners)) this._listeners[type] = [];
+};
+DumbEventTarget.prototype.addEventListener = function(type, listener) {
+ this._ensure(type);
+ this._listeners[type].push(listener);
+};
+DumbEventTarget.prototype.emit = function(type) {
+ this._ensure(type);
+ var args = Array.prototype.slice.call(arguments, 1);
+ if(this['on' + type]) this['on' + type].apply(this, args);
+ for(var i=0; i < this._listeners[type].length; i++) {
+ this._listeners[type][i].apply(this, args);
+ }
+};
+
+
+// ****
+
+// Demultiplexer over a single WebSocket-like connection. Frames are
+// "type,name,payload"; split(',') followed by two shift()s and a
+// join() (default ',' separator) re-assembles a payload that itself
+// contains commas. Frames for unknown channel names are dropped.
+var MultiplexedWebSocket = function(ws) {
+ var that = this;
+ this.ws = ws;
+ this.channels = {};
+ this.ws.addEventListener('message', function(e) {
+ var t = e.data.split(',');
+ var type = t.shift(), name = t.shift(), payload = t.join();
+ if(!(name in that.channels)) {
+ return;
+ }
+ var sub = that.channels[name];
+
+ switch(type) {
+ case 'uns':
+ // Server-side unsubscribe: drop the channel and close it.
+ delete that.channels[name];
+ sub.emit('close', {});
+ break;
+ case 'msg':
+ sub.emit('message', {data: payload});
+ break
+ }
+ });
+};
+// Create (and register) a logical channel. escape() sanitizes the
+// name so it cannot contain the ',' frame separator.
+MultiplexedWebSocket.prototype.channel = function(raw_name) {
+ return this.channels[escape(raw_name)] =
+ new Channel(this.ws, escape(raw_name), this.channels);
+};
+
+
+// One logical channel on a multiplexed connection. Sends a
+// "sub,<name>" frame once the underlying socket is open: immediately
+// (deferred via setTimeout so handlers can attach first) when the
+// socket is already past CONNECTING (readyState > 0), otherwise on
+// its 'open' event.
+var Channel = function(ws, name, channels) {
+ DumbEventTarget.call(this);
+ var that = this;
+ this.ws = ws;
+ this.name = name;
+ this.channels = channels;
+ var onopen = function() {
+ that.ws.send('sub,' + that.name);
+ that.emit('open');
+ };
+ if(ws.readyState > 0) {
+ setTimeout(onopen, 0);
+ } else {
+ this.ws.addEventListener('open', onopen);
+ }
+};
+// Inherit emit/addEventListener; per-instance listener state is set
+// up by the DumbEventTarget.call(this) above.
+Channel.prototype = new DumbEventTarget()
+
+Channel.prototype.send = function(data) {
+ this.ws.send('msg,' + this.name + ',' + data);
+};
+// Unsubscribe: tell the server, deregister locally, and emit 'close'
+// asynchronously so it mirrors a server-initiated close.
+Channel.prototype.close = function() {
+ var that = this;
+ this.ws.send('uns,' + this.name);
+ delete this.channels[this.name];
+ setTimeout(function(){that.emit('close', {})},0);
+};
--- /dev/null
+%% -*- erlang -*-
+%% This is the default `rebar.config` for SockJS-erlang.
+%%
+
+%% Compiler options applied to all modules in this application.
+{erl_opts, [
+ %% fail_on_warning,
+ bin_opt_info,
+ warn_missing_spec,
+ debug_info,
+ warn_export_all
+]}.
+
+%% Cowboy is pinned to an exact commit for reproducible builds.
+{deps, [
+ {cowboy, ".*",
+ {git, "git://github.com/extend/cowboy.git", "4fb2a6face6e7d6ff1dd34a02c3bd8b63d972624"}}
+ ]}.
--- /dev/null
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% Changes specific to SockJS: support for handling \xFFFE and \xFFFF
+%% characters. The usual xmerl_ucs:to_utf8 doesn't work for those (in
+%% fact these characters aren't valid unicode characters). But we can
+%% support them, why not:
+%%
+%% diff --git a/src/mochijson2_fork.erl b/src/mochijson2_fork.erl
+%% index ddd62c7..8c26fc6 100644
+%% --- a/src/mochijson2_fork.erl
+%% +++ b/src/mochijson2_fork.erl
+%% @@ -458,7 +458,14 @@ tokenize_string(B, S=#decoder{offset=O}, Acc) ->
+%% Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+%% tokenize_string(B, ?ADV_COL(S, 12), Acc1);
+%% true ->
+%% - Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
+%% + R = if C < 16#FFFE ->
+%% + xmerl_ucs:to_utf8(C);
+%% + true ->
+%% + [16#E0 + (C bsr 12),
+%% + 128+((C bsr 6) band 16#3F),
+%% + 128+(C band 16#3F)]
+%% + end,
+%% + Acc1 = lists:reverse(R, Acc),
+%% tokenize_string(B, ?ADV_COL(S, 6), Acc1)
+%% end;
+%% <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+%%
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
+%% with binaries as strings, arrays as lists (without an {array, _})
+%% wrapper and it only knows how to decode UTF-8 (and ASCII).
+%%
+%% JSON terms are decoded as follows (javascript -> erlang):
+%% <ul>
+%% <li>{"key": "value"} ->
+%% {struct, [{<<"key">>, <<"value">>}]}</li>
+%% <li>["array", 123, 12.34, true, false, null] ->
+%% [<<"array">>, 123, 12.34, true, false, null]
+%% </li>
+%% </ul>
+%% <ul>
+%% <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
+%% <li>Objects decode to {struct, PropList}</li>
+%% <li>Numbers decode to integer or float</li>
+%% <li>true, false, null decode to their respective terms.</li>
+%% </ul>
+%% The encoder will accept the same format that the decoder will produce,
+%% but will also allow additional cases for leniency:
+%% <ul>
+%% <li>atoms other than true, false, null will be considered UTF-8
+%% strings (even as a proplist key)
+%% </li>
+%% <li>{json, IoList} will insert IoList directly into the output
+%% with no validation
+%% </li>
+%% <li>{array, Array} will be encoded as Array
+%% (legacy mochijson style)
+%% </li>
+%% <li>A non-empty raw proplist will be encoded as an object as long
+%% as the first pair does not have an atom key of json, struct,
+%% or array
+%% </li>
+%% </ul>
+
+-module(mochijson2_fork).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1, decode/2]).
+
+%% This is a macro to placate syntax highlighters..
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
+ column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
+ column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
+ column=1,
+ line=1+S#decoder.line}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% -type(decoder_option() :: any()).
+%% -type(handler_option() :: any()).
+
+%% -type(json_string() :: atom | binary()).
+%% -type(json_number() :: integer() | float()).
+%% -type(json_array() :: [json_term()]).
+%% -type(json_object() :: {struct, [{json_string(), json_term()}]}).
+%% -type(json_eep18_object() :: {[{json_string(), json_term()}]}).
+%% -type(json_iolist() :: {json, iolist()}).
+%% -type(json_term() :: json_string() | json_number() | json_array() |
+%% json_object() | json_eep18_object() | json_iolist()).
+
+-record(encoder, {handler=null,
+ utf8=false}).
+
+-record(decoder, {object_hook=null,
+ offset=0,
+ line=1,
+ column=1,
+ state=null}).
+
+%% -type(utf8_option() :: boolean()).
+%% -type(encoder_option() :: handler_option() | utf8_option()).
+%% -spec encoder([encoder_option()]) -> function().
+%% @doc Create an encoder/1 with the given options.
+%% Emit unicode as utf8 (default - false)
+encoder(Options) ->
+ %% Parse the options once and close over the resulting record so
+ %% the returned fun can be reused for many terms.
+ EncState = parse_encoder_options(Options, #encoder{}),
+ fun (Term) -> json_encode(Term, EncState) end.
+
+%% -spec encode(json_term()) -> iolist().
+%% @doc Encode the given as JSON to an iolist.
+encode(Term) ->
+ %% One-shot encode using the default encoder options.
+ json_encode(Term, #encoder{}).
+
+%% -spec decoder([decoder_option()]) -> function().
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ %% Parse the options once and close over the resulting record so
+ %% the returned fun can be reused for many inputs.
+ DecState = parse_decoder_options(Options, #decoder{}),
+ fun (Input) -> json_decode(Input, DecState) end.
+
+%% -spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term().
+%% @doc Decode the given iolist to Erlang terms using the given object format
+%% for decoding, where proplist returns JSON objects as [{binary(), json_term()}]
+%% proplists, eep18 returns JSON objects as {[binary(), json_term()]}, and struct
+%% returns them as-is.
+decode(Input, Options) ->
+ %% Decode with explicit options, e.g. {format, proplist|eep18|struct}.
+ DecState = parse_decoder_options(Options, #decoder{}),
+ json_decode(Input, DecState).
+
+%% -spec decode(iolist()) -> json_term().
+%% @doc Decode the given iolist to Erlang terms.
+decode(Input) ->
+ %% Decode with the default decoder options.
+ json_decode(Input, #decoder{}).
+
+%% Internal API
+
+%% Fold the option proplist into an #encoder{} record. There is no
+%% catch-all clause: an unknown option crashes with function_clause.
+parse_encoder_options([], Acc) ->
+ Acc;
+parse_encoder_options([{handler, Fun} | Opts], Acc) ->
+ parse_encoder_options(Opts, Acc#encoder{handler=Fun});
+parse_encoder_options([{utf8, Flag} | Opts], Acc) ->
+ parse_encoder_options(Opts, Acc#encoder{utf8=Flag}).
+
+%% Fold the option proplist into a #decoder{} record. Both the
+%% object_hook fun and the format atom land in the same object_hook
+%% field; make_object/2 later dispatches on which one it holds.
+%% No catch-all: an unknown option crashes with function_clause.
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook});
+parse_decoder_options([{format, Format} | Rest], State)
+ when Format =:= struct orelse Format =:= eep18 orelse Format =:= proplist ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Format}).
+
+%% Encode an Erlang term to a JSON iolist. Clause order is load-bearing:
+%% the raw-proplist clause must run before the generic list clause so a
+%% non-empty [{K,V}|_] list encodes as an object (unless K is one of the
+%% reserved atoms struct/array/json), and tuples never reach the
+%% is_list clauses, so {struct,_}/{array,_}/{json,_} stay reachable.
+json_encode(true, _State) ->
+ <<"true">>;
+json_encode(false, _State) ->
+ <<"false">>;
+json_encode(null, _State) ->
+ <<"null">>;
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum_fork:digits(F);
+json_encode(S, State) when is_binary(S); is_atom(S) ->
+ json_encode_string(S, State);
+%% Raw proplist -> object, unless the first key is a reserved tag atom.
+json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
+ K =/= array andalso
+ K =/= json) ->
+ json_encode_proplist(Props, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+%% eep18-style {Proplist} object.
+json_encode({Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode({}, State) ->
+ json_encode_proplist([], State);
+json_encode(Array, State) when is_list(Array) ->
+ json_encode_array(Array, State);
+%% Legacy mochijson {array, List} wrapper.
+json_encode({array, Array}, State) when is_list(Array) ->
+ json_encode_array(Array, State);
+%% {json, IoList} is inserted verbatim, with no validation.
+json_encode({json, IoList}, _State) ->
+ IoList;
+%% Unknown term: exit unless a user handler was configured, in which
+%% case re-encode whatever the handler maps the term to.
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+%% Encode a list as a JSON array iolist. The fold builds the output in
+%% reverse, prefixing each element with a comma on top of the "[" seed;
+%% the final [$, | Acc1] match strips the one extra comma before the
+%% closing bracket is added and the whole thing is reversed.
+json_encode_array([], _State) ->
+ <<"[]">>;
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+%% Encode a [{Key, Value}] proplist as a JSON object iolist. Same
+%% reversed-accumulator trick as json_encode_array/2: note the fold
+%% pushes VS before KS because the list is reversed at the end.
+json_encode_proplist([], _State) ->
+ <<"{}">>;
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = json_encode_string(K, State),
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+%% Encode atoms, binaries, integers and char lists as a quoted JSON
+%% string iolist. "Safe" inputs (printable ASCII, no quote/backslash/
+%% control chars) are emitted verbatim between quotes; anything else
+%% goes through the slow escaping path on a decoded code-point list.
+json_encode_string(A, State) when is_atom(A) ->
+ L = atom_to_list(A),
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
+ end;
+json_encode_string(B, State) when is_binary(B) ->
+ case json_bin_is_safe(B) of
+ true ->
+ [?Q, B, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
+ end;
+%% Integer keys (e.g. from proplists) become quoted decimal strings.
+json_encode_string(I, _State) when is_integer(I) ->
+ [?Q, integer_to_list(I), ?Q];
+json_encode_string(L, State) when is_list(L) ->
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(L, State, [?Q])
+ end.
+
+%% True when a code-point list can be copied into JSON output verbatim:
+%% only printable ASCII, with no quote, backslash or control characters.
+%% Anything >= 16#7f (or outside Unicode range) reports unsafe so it is
+%% routed through the escaping path.
+json_string_is_safe([]) ->
+ true;
+json_string_is_safe([C | Rest]) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+ false;
+ C when C < 16#7f ->
+ json_string_is_safe(Rest);
+ _ ->
+ false
+ end.
+
+%% Byte-wise variant of json_string_is_safe/1 for binaries. Every byte
+%% is 0..255, so the two guarded clauses cover all cases and no
+%% catch-all is needed; bytes >= 16#7f (i.e. any UTF-8 multibyte
+%% sequence) report unsafe.
+json_bin_is_safe(<<>>) ->
+ true;
+json_bin_is_safe(<<C, Rest/binary>>) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f ->
+ false;
+ C when C < 16#7f ->
+ json_bin_is_safe(Rest)
+ end.
+
+%% Slow path: escape a list of Unicode code points into a quoted JSON
+%% string, building the output reversed in Acc (seeded with the opening
+%% quote by the caller). Named escapes for the JSON specials; \uXXXX
+%% for other control chars; non-ASCII is emitted as raw UTF-8 when the
+%% utf8 option is set, otherwise as \uXXXX (surrogate pairs via unihex).
+json_encode_string_unicode([], _State, Acc) ->
+ lists:reverse([$\" | Acc]);
+json_encode_string_unicode([C | Cs], State, Acc) ->
+ Acc1 = case C of
+ ?Q ->
+ [?Q, $\\ | Acc];
+ %% Escaping solidus is only useful when trying to protect
+ %% against "</script>" injection attacks which are only
+ %% possible when JSON is inserted into a HTML document
+ %% in-line. mochijson2 does not protect you from this, so
+ %% if you do insert directly into HTML then you need to
+ %% uncomment the following case or escape the output of encode.
+ %%
+ %% $/ ->
+ %% [$/, $\\ | Acc];
+ %%
+ $\\ ->
+ [$\\, $\\ | Acc];
+ $\b ->
+ [$b, $\\ | Acc];
+ $\f ->
+ [$f, $\\ | Acc];
+ $\n ->
+ [$n, $\\ | Acc];
+ $\r ->
+ [$r, $\\ | Acc];
+ $\t ->
+ [$t, $\\ | Acc];
+ C when C >= 0, C < $\s ->
+ [unihex(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
+ [xmerl_ucs:to_utf8(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
+ [unihex(C) | Acc];
+ C when C < 16#7f ->
+ [C | Acc];
+ %% Out-of-range code point (> 16#10FFFF or negative).
+ _ ->
+ exit({json_encode, {bad_char, C}})
+ end,
+ json_encode_string_unicode(Cs, State, Acc1).
+
+%% Map a nibble (0..15) to its lowercase hexadecimal character.
+hexdigit(N) when N >= 0, N =< 9 ->
+ N + $0;
+hexdigit(N) when N =< 15 ->
+ N + $a - 10.
+
+%% JSON \uXXXX escape for one code point. BMP code points become a
+%% single escape (the 16-bit value is split into four nibbles);
+%% astral code points (>= 16#10000) are encoded as a UTF-16 surrogate
+%% pair, i.e. two consecutive \uXXXX escapes.
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+%% Decode exactly one JSON term. Lists/iolists are normalized to a
+%% binary first. After the term, the remaining input must tokenize to
+%% eof in trim state (i.e. only trailing whitespace) — trailing junk
+%% fails the {eof, _} match and crashes.
+json_decode(L, S) when is_list(L) ->
+ json_decode(iolist_to_binary(L), S);
+json_decode(B, S) ->
+ {Res, S1} = decode1(B, S),
+ {eof, _} = tokenize(B, S1#decoder{state=trim}),
+ Res.
+
+%% Decode a single value starting at the current offset: a constant
+%% (string/number/true/false/null) is returned directly, while '[' and
+%% '{' dispatch to the array/object sub-decoders.
+decode1(B, S=#decoder{state=null}) ->
+ case tokenize(B, S#decoder{state=any}) of
+ {{const, C}, S1} ->
+ {C, S1};
+ {start_array, S1} ->
+ decode_array(B, S1);
+ {start_object, S1} ->
+ decode_object(B, S1)
+ end.
+
+%% Post-process a decoded {struct, Proplist} object according to the
+%% object_hook field: null/struct leave it as-is, eep18 rewraps it as
+%% {Proplist}, proplist unwraps to the bare proplist, and any other
+%% value is treated as a user hook fun and applied.
+make_object(V, #decoder{object_hook=N}) when N =:= null orelse N =:= struct ->
+ V;
+make_object({struct, P}, #decoder{object_hook=eep18}) ->
+ {P};
+make_object({struct, P}, #decoder{object_hook=proplist}) ->
+ P;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+%% Decode an object body; the opening '{' has already been consumed.
+decode_object(Bin, Decoder) ->
+ decode_object(Bin, Decoder#decoder{state=key}, []).
+
+%% Object decode loop, alternating between two states: `key` expects a
+%% member key (or '}' — this also admits a trailing comma before '}'),
+%% `comma` expects ',' or '}'. Members accumulate reversed in Acc and
+%% are reversed and passed through make_object/2 on '}'.
+decode_object(B, S=#decoder{state=key}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {{const, K}, S1} ->
+ %% key ':' value, then continue looking for ',' or '}'.
+ {colon, S2} = tokenize(B, S1),
+ {V, S3} = decode1(B, S2#decoder{state=null}),
+ decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_object(B, S1#decoder{state=key}, Acc)
+ end.
+
+%% Decode an array body; the opening '[' has already been consumed.
+decode_array(Bin, Decoder) ->
+ decode_array(Bin, Decoder#decoder{state=any}, []).
+
+%% Array decode loop, alternating between two states: `any` expects an
+%% element (or ']' — this also admits a trailing comma before ']'),
+%% `comma` expects ',' or ']'. Elements accumulate reversed in Acc and
+%% are reversed on ']'.
+decode_array(B, S=#decoder{state=any}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {start_array, S1} ->
+ {Array, S2} = decode_array(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, S1} ->
+ {Array, S2} = decode_object(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, S1} ->
+ decode_array(B, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_array(B, S1#decoder{state=any}, Acc)
+ end.
+
+%% Tokenize a JSON string starting just after the opening quote. The
+%% fast scan finds the closing quote; an escape-free string is returned
+%% as a zero-copy sub-binary. On the first backslash the already
+%% scanned prefix is reversed into a char list and the slow, escape
+%% aware tokenize_string/3 takes over.
+tokenize_string(B, S=#decoder{offset=O}) ->
+ case tokenize_string_fast(B, O) of
+ {escape, O1} ->
+ Length = O1 - O,
+ S1 = ?ADV_COL(S, Length),
+ <<_:O/binary, Head:Length/binary, _/binary>> = B,
+ tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
+ O1 ->
+ %% O1 is the offset of the closing quote; skip past it (+1).
+ Length = O1 - O,
+ <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
+ {{const, String}, ?ADV_COL(S, Length + 1)}
+ end.
+
+%% Scan forward from offset O, validating UTF-8 as it goes, until the
+%% closing quote (returns its offset) or a backslash (returns
+%% {escape, Offset} so the slow path can take over). Byte patterns
+%% follow the UTF-8 well-formedness table for 1- to 4-byte sequences;
+%% any other byte sequence throws invalid_utf8.
+tokenize_string_fast(B, O) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ O;
+ <<_:O/binary, $\\, _/binary>> ->
+ {escape, O};
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string_fast(B, 1 + O);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string_fast(B, 2 + O);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string_fast(B, 3 + O);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string_fast(B, 4 + O);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+%% Slow path for strings containing escapes. Acc holds the decoded
+%% bytes in reverse; on the closing quote the accumulator is reversed
+%% and flattened into a binary. Handles the standard JSON escapes,
+%% \uXXXX (including UTF-16 surrogate pairs), and raw UTF-8 sequences
+%% validated the same way as tokenize_string_fast/2.
+%% NOTE(review): the surrogate branch pattern-matches "\\u" out of
+%% Rest, so a lone high surrogate not followed by a \u escape would
+%% crash with badmatch rather than throw a tagged error — presumably
+%% acceptable here, but worth confirming.
+tokenize_string(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
+ <<_:O/binary, "\\\"", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
+ <<_:O/binary, "\\\\", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
+ <<_:O/binary, "\\/", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
+ <<_:O/binary, "\\b", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
+ <<_:O/binary, "\\f", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
+ <<_:O/binary, "\\n", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
+ <<_:O/binary, "\\r", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
+ <<_:O/binary, "\\t", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
+ <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
+ C = erlang:list_to_integer([C3, C2, C1, C0], 16),
+ if C > 16#D7FF, C < 16#DC00 ->
+ %% coalesce UTF-16 surrogate pair
+ <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
+ D = erlang:list_to_integer([D3,D2,D1,D0], 16),
+ [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
+ D:16/big-unsigned-integer>>),
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+ %% 12 = both \uXXXX escapes
+ tokenize_string(B, ?ADV_COL(S, 12), Acc1);
+ true ->
+ %% Non-surrogate BMP code point; 16#FFFE/16#FFFF are
+ %% encoded manually because xmerl_ucs rejects them.
+ R = if C < 16#FFFE ->
+ xmerl_ucs:to_utf8(C);
+ true ->
+ [16#E0 + (C bsr 12),
+ 128+((C bsr 6) band 16#3F),
+ 128+(C band 16#3F)]
+ end,
+ Acc1 = lists:reverse(R, Acc),
+ tokenize_string(B, ?ADV_COL(S, 6), Acc1)
+ end;
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+%% Tokenize a JSON number starting at the decoder offset. The helper
+%% returns the digits as a string tagged int or float; convert here.
+tokenize_number(B, S) ->
+ case tokenize_number(B, sign, S, []) of
+ {{int, Int}, S1} ->
+ {{const, list_to_integer(Int)}, S1};
+ {{float, Float}, S1} ->
+ {{const, list_to_float(Float)}, S1}
+ end.
+
+%% Hand-rolled DFA for the JSON number grammar; one clause per state:
+%% sign -> int -> int1 -> frac -> frac1 -> esign -> eint -> eint1.
+%% Digits accumulate reversed in Acc. States with no catch-all clause
+%% (int, eint) crash on malformed input, which surfaces as a decode
+%% error. Note the frac state inserts ".0" before a bare exponent
+%% ("1e2" -> "1.0e2") so list_to_float/1 accepts the result.
+tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
+ case B of
+ <<_:O/binary, $-, _/binary>> ->
+ tokenize_number(B, int, ?INC_COL(S), [$-]);
+ _ ->
+ tokenize_number(B, int, S, [])
+ end;
+tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $0, _/binary>> ->
+ tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
+ <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, frac, S, Acc)
+ end;
+tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
+ tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+ _ ->
+ {{int, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
+ tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, eint, S, Acc)
+ end;
+tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end.
+
+%% Produce the next token at the decoder offset: skips whitespace,
+%% then returns a punctuation atom, {const, Value} for literals,
+%% strings and numbers, or eof at end of input. The eof clause asserts
+%% state =:= trim so running out of input mid-value fails loudly.
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize(B, ?INC_CHAR(S, C));
+ <<_:O/binary, "{", _/binary>> ->
+ {start_object, ?INC_COL(S)};
+ <<_:O/binary, "}", _/binary>> ->
+ {end_object, ?INC_COL(S)};
+ <<_:O/binary, "[", _/binary>> ->
+ {start_array, ?INC_COL(S)};
+ <<_:O/binary, "]", _/binary>> ->
+ {end_array, ?INC_COL(S)};
+ <<_:O/binary, ",", _/binary>> ->
+ {comma, ?INC_COL(S)};
+ <<_:O/binary, ":", _/binary>> ->
+ {colon, ?INC_COL(S)};
+ <<_:O/binary, "null", _/binary>> ->
+ {{const, null}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "true", _/binary>> ->
+ {{const, true}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "false", _/binary>> ->
+ {{const, false}, ?ADV_COL(S, 5)};
+ <<_:O/binary, "\"", _/binary>> ->
+ tokenize_string(B, ?INC_COL(S));
+ <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
+ orelse C =:= $- ->
+ tokenize_number(B, S);
+ <<_:O/binary>> ->
+ trim = S#decoder.state,
+ {eof, S}
+ end.
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+%% Empty object in the struct representation.
+obj_new() ->
+ {struct, []}.
+
+%% Assert-style predicate: every key must be a binary; a non-binary
+%% key makes the fun crash (function_clause), failing lists:all/2.
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_binary(K) -> true end,
+ lists:all(F, Props).
+
+%% Build a struct object and assert it is well-formed.
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ ?assert(is_obj(Obj)),
+ Obj.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv(L1, L2) when is_list(L1), is_list(L2) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
+equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare list elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+%% Spot checks: float/int mix in one array, and a UTF-16 surrogate
+%% pair escape decoding to the 4-byte UTF-8 sequence for U+1D715.
+decode_test() ->
+ [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
+ <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
+
+%% Run every {ErlangTerm, JsonText} pair in the utf8 test vector.
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+%% For each pair, check decode(J) matches E and that a full
+%% encode/decode round trip of E is equivalent to E.
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+%% Table of {expected Erlang term, JSON source text} pairs covering
+%% numbers, strings, escapes, and nested objects/arrays.
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, %% text representation may truncate, trail zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {<<"foo">>, "\"foo\""},
+ {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
+ {<<"">>, "\"\""},
+ {<<"\n\n\n">>, "\"\\n\\n\\n\""},
+ {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {[], "[]"},
+ {[[]], "[[]]"},
+ {[1, <<"foo">>], "[1,\"foo\"]"},
+
+ %% json array in a json object
+ {obj_from_list([{<<"foo">>, [123]}]),
+ "{\"foo\":[123]}"},
+
+ %% json object in a json object
+ {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ %% fold evaluation order
+ {obj_from_list([{<<"foo">>, []},
+ {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
+ {<<"alice">>, <<"bob">>}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ %% json object in a json array
+ {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+%% test utf8 encoding
+encoder_utf8_test() ->
+ %% safe conversion case (default): non-ASCII escaped as \uXXXX
+ [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
+ encode(<<1,"\321\202\320\265\321\201\321\202">>),
+
+ %% raw utf8 output (optional): bytes passed through unescaped
+ Enc = mochijson2:encoder([{utf8, true}]),
+ [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
+ Enc(<<1,"\321\202\320\265\321\201\321\202">>).
+
+%% Valid UTF-8 must round-trip through decode; invalid UTF-8 must
+%% throw invalid_utf8 from decode and crash encode.
+input_validation_test() ->
+ Good = [
+ {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
+ {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
+ {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
+ ],
+ lists:foreach(fun({CodePoint, UTF8}) ->
+ Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
+ Expect = decode(UTF8)
+ end, Good),
+
+ Bad = [
+ %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
+ <<?Q, 16#80, ?Q>>,
+ %% missing continuations, last byte in each should be 80-BF
+ <<?Q, 16#C2, 16#7F, ?Q>>,
+ <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
+ <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
+ %% we don't support code points > 10FFFF per RFC 3629
+ <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
+ %% escape characters trigger a different code path
+ <<?Q, $\\, $\n, 16#80, ?Q>>
+ ],
+ lists:foreach(
+ fun(X) ->
+ ok = try decode(X) catch invalid_utf8 -> ok end,
+ %% could be {ucs,{bad_utf8_character_code}} or
+ %% {json_encode,{bad_char,_}}
+ {'EXIT', _} = (catch encode(X))
+ end, Bad).
+
+%% {json, IoList} terms are emitted verbatim by the encoder.
+inline_json_test() ->
+ ?assertEqual(<<"\"iodata iodata\"">>,
+ iolist_to_binary(
+ encode({json, [<<"\"iodata">>, " iodata\""]}))),
+ ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
+ decode(
+ encode({struct,
+ [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
+ ok.
+
+%% Non-BMP code point U+1D120 must encode as a surrogate-pair escape
+%% and round-trip back to the original UTF-8.
+big_unicode_test() ->
+ UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(UTF8Seq))),
+ ?assertEqual(
+ UTF8Seq,
+ decode(iolist_to_binary(encode(UTF8Seq)))),
+ ok.
+
+%% decoder/1 returns a fun; object_hook rewrites decoded objects.
+custom_decoder_test() ->
+ ?assertEqual(
+ {struct, [{<<"key">>, <<"value">>}]},
+ (decoder([]))("{\"key\": \"value\"}")),
+ F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
+ ?assertEqual(
+ win,
+ (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
+ ok.
+
+%% true/false/null are JSON-native atoms; any other atom encodes as
+%% its name in a JSON string.
+atom_test() ->
+ %% JSON native atoms
+ [begin
+ ?assertEqual(A, decode(atom_to_list(A))),
+ ?assertEqual(iolist_to_binary(atom_to_list(A)),
+ iolist_to_binary(encode(A)))
+ end || A <- [true, false, null]],
+ %% Atom to string
+ ?assertEqual(
+ <<"\"foo\"">>,
+ iolist_to_binary(encode(foo))),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
+ ok.
+
+key_encode_test() ->
+ %% Some forms are accepted as keys that would not be strings in other
+ %% cases: atoms, strings, integers, and proplists without the struct tag.
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{foo, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{"foo", 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode([{foo, 1}]))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode([{<<"foo">>, 1}]))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode([{"foo", 1}]))),
+ ?assertEqual(
+ <<"{\"\\ud834\\udd20\":1}">>,
+ iolist_to_binary(
+ encode({struct, [{[16#0001d120], 1}]}))),
+ ?assertEqual(
+ <<"{\"1\":1}">>,
+ iolist_to_binary(encode({struct, [{1, 1}]}))),
+ ok.
+
+%% Characters that require escaping must be reported unsafe by both
+%% the string and binary safety checks, yet still round-trip.
+unsafe_chars_test() ->
+ Chars = "\"\\\b\f\n\r\t",
+ [begin
+ ?assertEqual(false, json_string_is_safe([C])),
+ ?assertEqual(false, json_bin_is_safe(<<C>>)),
+ ?assertEqual(<<C>>, decode(encode(<<C>>)))
+ end || C <- Chars],
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#0001d120])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
+ ?assertEqual(
+ [16#0001d120],
+ xmerl_ucs:from_utf8(
+ binary_to_list(
+ decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#110000])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
+ %% solidus can be escaped but isn't unsafe by default
+ ?assertEqual(
+ <<"/">>,
+ decode(<<"\"\\/\"">>)),
+ ok.
+
+int_test() ->
+ ?assertEqual(0, decode("0")),
+ ?assertEqual(1, decode("1")),
+ ?assertEqual(11, decode("11")),
+ ok.
+
+%% Bignums beyond 32/64-bit range must encode without loss.
+large_int_test() ->
+ ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
+ ?assertEqual(<<"2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(2147483649214748364921474836492147483649))),
+ ok.
+
+float_test() ->
+ ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
+ ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
+ ok.
+
+%% Unencodable terms exit with {json_encode, {bad_term, _}} unless a
+%% custom handler is installed via encoder/1.
+handler_test() ->
+ ?assertEqual(
+ {'EXIT',{json_encode,{bad_term,{x,y}}}},
+ catch encode({x,y})),
+ F = fun ({x,y}) -> [] end,
+ ?assertEqual(
+ <<"[]">>,
+ iolist_to_binary((encoder([{handler, F}]))({x, y}))),
+ ok.
+
+%% All three empty-object input forms encode to "{}".
+encode_empty_test_() ->
+ [{A, ?_assertEqual(<<"{}">>, iolist_to_binary(encode(B)))}
+ || {A, B} <- [{"eep18 {}", {}},
+ {"eep18 {[]}", {[]}},
+ {"{struct, []}", {struct, []}}]].
+
+%% Round trip through each decode output format re-encodes identically.
+encode_test_() ->
+ P = [{<<"k">>, <<"v">>}],
+ JSON = iolist_to_binary(encode({struct, P})),
+ [{atom_to_list(F),
+ ?_assertEqual(JSON, iolist_to_binary(encode(decode(JSON, [{format, F}]))))}
+ || F <- [struct, eep18, proplist]].
+
+%% The {format, F} decode option selects the object representation.
+format_test_() ->
+ P = [{<<"k">>, <<"v">>}],
+ JSON = iolist_to_binary(encode({struct, P})),
+ [{atom_to_list(F),
+ ?_assertEqual(A, decode(JSON, [{format, F}]))}
+ || {F, A} <- [{struct, {struct, P}},
+ {eep18, {P}},
+ {proplist, P}]].
+
+-endif.
--- /dev/null
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Useful numeric algorithms for floats that cover some deficiencies
+%% in the math module. More interesting is digits/1, which implements
+%% the algorithm from:
+%% http://www.cs.indiana.edu/~burger/fp/index.html
+%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+%% Design and Implementation.
+
+-module(mochinum_fork).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+
+%% IEEE 754 Float exponent bias
+-define(FLOAT_BIAS, 1022).
+-define(MIN_EXP, -1074).
+-define(BIG_POW, 4503599627370496).
+
+%% External API
+
+%% @spec digits(number()) -> string()
+%% @doc Returns a string that accurately represents the given integer or float
+%% using a conservative amount of digits. Great for generating
+%% human-readable output, or compact ASCII serializations for floats.
+digits(N) when is_integer(N) ->
+ integer_to_list(N);
+digits(0.0) ->
+ "0.0";
+digits(Float) ->
+ %% Shortest-round-trip digits via the Burger-Dybvig algorithm:
+ %% digits1/3 yields [DecimalExponent | Digits]; the sign is handled
+ %% here because the core works on the magnitude only.
+ {Frac1, Exp1} = frexp_int(Float),
+ [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
+ {Place, Digits} = transform_digits(Place0, Digits0),
+ R = insert_decimal(Place, Digits),
+ case Float < 0 of
+ true ->
+ [$- | R];
+ _ ->
+ R
+ end.
+
+%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
+%% @doc Return the fractional and exponent part of an IEEE 754 double,
+%% equivalent to the libc function of the same name.
+%% F = Frac * pow(2, Exp).
+frexp(F) ->
+ frexp1(unpack(F)).
+
+%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+%% @doc Moderately efficient way to exponentiate integers.
+%% int_pow(10, 2) = 100.
+%% Only defined for N >= 0 (no clause for negative exponents).
+int_pow(_X, 0) ->
+ 1;
+int_pow(X, N) when N > 0 ->
+ int_pow(X, N, 1).
+
+%% @spec int_ceil(F::float()) -> integer()
+%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+%% F when F == trunc(F);
+%% trunc(F) when F < 0;
+%% trunc(F) + 1 when F > 0.
+int_ceil(X) ->
+ T = trunc(X),
+ case (X - T) of
+ Pos when Pos > 0 -> T + 1;
+ _ -> T
+ end.
+
+
+%% Internal API
+
+%% Exponentiation by squaring; R is the running product accumulator.
+int_pow(X, N, R) when N < 2 ->
+ R * X;
+int_pow(X, N, R) ->
+ int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+
+%% Place the decimal point in digit string S given the decimal
+%% exponent Place (digits before the point). Falls back to e-notation
+%% when plain notation would need 6+ padding zeroes either side.
+insert_decimal(0, S) ->
+ "0." ++ S;
+insert_decimal(Place, S) when Place > 0 ->
+ L = length(S),
+ case Place - L of
+ 0 ->
+ S ++ ".0";
+ N when N < 0 ->
+ {S0, S1} = lists:split(L + N, S),
+ S0 ++ "." ++ S1;
+ N when N < 6 ->
+ %% More places than digits
+ S ++ lists:duplicate(N, $0) ++ ".0";
+ _ ->
+ insert_decimal_exp(Place, S)
+ end;
+insert_decimal(Place, S) when Place > -6 ->
+ "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+insert_decimal(Place, S) ->
+ insert_decimal_exp(Place, S).
+
+%% Scientific notation: first digit, '.', remaining digits (or "0"),
+%% then e+/e- and the exponent (Place - 1 because one digit precedes
+%% the point).
+insert_decimal_exp(Place, S) ->
+ [C | S0] = S,
+ S1 = case S0 of
+ [] ->
+ "0";
+ _ ->
+ S0
+ end,
+ Exp = case Place < 0 of
+ true ->
+ "e-";
+ false ->
+ "e+"
+ end,
+ [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+
+
+%% Core of the Burger-Dybvig free-format algorithm. Sets up the exact
+%% rational R/S and the boundary distances MPlus/MMinus as integers.
+%% Round ("round to even" allowed) is true when the mantissa is even.
+%% The ?BIG_POW comparison detects a mantissa at a power-of-two
+%% boundary, where the gap to the next-lower float is half-sized.
+digits1(Float, Exp, Frac) ->
+ Round = ((Frac band 1) =:= 0),
+ case Exp >= 0 of
+ true ->
+ BExp = 1 bsl Exp,
+ case (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * BExp * 2), 2, BExp, BExp,
+ Round, Round, Float);
+ false ->
+ scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+ Round, Round, Float)
+ end;
+ false ->
+ case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+ Round, Round, Float);
+ false ->
+ scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+ Round, Round, Float)
+ end
+ end.
+
+%% Estimate the decimal exponent via log10 (with a small fudge so we
+%% never overestimate), then scale R/S so the first generated digit
+%% lands in [0,10). fixup/7 corrects an off-by-one estimate.
+scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+ Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+ %% Note that the scheme implementation uses a 326 element look-up table
+ %% for int_pow(10, N) where we do not.
+ case Est >= 0 of
+ true ->
+ fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+ LowOk, HighOk);
+ false ->
+ Scale = int_pow(10, -Est),
+ fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+ LowOk, HighOk)
+ end.
+
+%% If the estimate K was one too low (value would emit a leading 10),
+%% bump K; otherwise shift R into range and start generating. Returns
+%% [DecimalExponent | Digits].
+fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+ TooLow = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TooLow of
+ true ->
+ [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+ false ->
+ [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+ end.
+
+%% Emit decimal digits until the remainder is within the rounding
+%% boundaries (TC1: low side, TC2: high side); the last digit is
+%% rounded toward whichever boundary the remainder is nearer.
+generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+ D = R0 div S,
+ R = R0 rem S,
+ TC1 = case LowOk of
+ true ->
+ R =< MMinus;
+ false ->
+ R < MMinus
+ end,
+ TC2 = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TC1 of
+ false ->
+ case TC2 of
+ false ->
+ [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+ LowOk, HighOk)];
+ true ->
+ [D + 1]
+ end;
+ true ->
+ case TC2 of
+ false ->
+ [D];
+ true ->
+ case R * 2 < S of
+ true ->
+ [D];
+ false ->
+ [D + 1]
+ end
+ end
+ end.
+
+%% Split an IEEE 754 double into its raw sign, biased exponent, and
+%% 52-bit fraction fields.
+unpack(Float) ->
+ <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+ {Sign, Exp, Frac}.
+
+%% frexp/1 worker on unpacked fields: zero, denormal (exponent field
+%% 0: normalize the fraction manually), and normal cases.
+frexp1({_Sign, 0, 0}) ->
+ {0.0, 0};
+frexp1({Sign, 0, Frac}) ->
+ Exp = log2floor(Frac),
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+ {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+frexp1({Sign, Exp, Frac}) ->
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+ {Frac1, Exp - ?FLOAT_BIAS}.
+
+%% Position of the highest set bit plus one (floor(log2(Int)) + 1 for
+%% Int > 0; returns 0 for 0).
+log2floor(Int) ->
+ log2floor(Int, 0).
+
+log2floor(0, N) ->
+ N;
+log2floor(Int, N) ->
+ log2floor(Int bsr 1, 1 + N).
+
+%% Drop leading zero digits and convert the rest to ASCII characters.
+transform_digits(Place, [0 | Rest]) ->
+ transform_digits(Place, Rest);
+transform_digits(Place, Digits) ->
+ {Place, [$0 + D || D <- Digits]}.
+
+%% Like frexp/1 but keeps the mantissa as an integer (with the hidden
+%% bit restored for normals), as required by digits1/3.
+frexp_int(F) ->
+ case unpack(F) of
+ {_Sign, 0, Frac} ->
+ {Frac, ?MIN_EXP};
+ {_Sign, Exp, Frac} ->
+ {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Ceiling behavior around zero, positives, and negatives.
+int_ceil_test() ->
+ ?assertEqual(1, int_ceil(0.0001)),
+ ?assertEqual(0, int_ceil(0.0)),
+ ?assertEqual(1, int_ceil(0.99)),
+ ?assertEqual(1, int_ceil(1.0)),
+ ?assertEqual(-1, int_ceil(-1.5)),
+ ?assertEqual(-2, int_ceil(-2.0)),
+ ok.
+
+%% Integer exponentiation for base/exponent edge cases.
+int_pow_test() ->
+ ?assertEqual(1, int_pow(1, 1)),
+ ?assertEqual(1, int_pow(1, 0)),
+ ?assertEqual(1, int_pow(10, 0)),
+ ?assertEqual(10, int_pow(10, 1)),
+ ?assertEqual(100, int_pow(10, 2)),
+ ?assertEqual(1000, int_pow(10, 3)),
+ ok.
+
+%% digits/1 must produce shortest round-trip representations across
+%% normals, denormals, and the extremes of the double range; each
+%% string must list_to_float/1 back to the identical bits.
+digits_test() ->
+ ?assertEqual("0",
+ digits(0)),
+ ?assertEqual("0.0",
+ digits(0.0)),
+ ?assertEqual("1.0",
+ digits(1.0)),
+ ?assertEqual("-1.0",
+ digits(-1.0)),
+ ?assertEqual("0.1",
+ digits(0.1)),
+ ?assertEqual("0.01",
+ digits(0.01)),
+ ?assertEqual("0.001",
+ digits(0.001)),
+ ?assertEqual("1.0e+6",
+ digits(1000000.0)),
+ ?assertEqual("0.5",
+ digits(0.5)),
+ ?assertEqual("4503599627370496.0",
+ digits(4503599627370496.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324 =:= 5.0e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual("5.0e-324",
+ digits(SmallDenorm)),
+ ?assertEqual(SmallDenorm,
+ list_to_float(digits(SmallDenorm))),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual("2.225073858507201e-308",
+ digits(BigDenorm)),
+ ?assertEqual(BigDenorm,
+ list_to_float(digits(BigDenorm))),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual("2.2250738585072014e-308",
+ digits(SmallNorm)),
+ ?assertEqual(SmallNorm,
+ list_to_float(digits(SmallNorm))),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual("1.7976931348623157e+308",
+ digits(LargeNorm)),
+ ?assertEqual(LargeNorm,
+ list_to_float(digits(LargeNorm))),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual("5.0e-324",
+ digits(math:pow(2, -1074))),
+ ok.
+
+%% frexp/1 invariants: F =:= Frac * 2^Exp with |Frac| in [0.5, 1),
+%% checked at zero, +/-1, denormal and normal extremes.
+frexp_test() ->
+ %% zero
+ ?assertEqual({0.0, 0}, frexp(0.0)),
+ %% one
+ ?assertEqual({0.5, 1}, frexp(1.0)),
+ %% negative one
+ ?assertEqual({-0.5, 1}, frexp(-1.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999978, -1022},
+ frexp(BigDenorm)),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999989, 1024},
+ frexp(LargeNorm)),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual(
+ {0.5, -1073},
+ frexp(math:pow(2, -1074))),
+ ok.
--- /dev/null
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2013. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(pmod_pt).
+-export([parse_transform/2,
+ format_error/1]).
+
+%% Expand function definition forms of parameterized module.
+%% The code is based on the code in sys_expand_pmod which used to be
+%% included in the compiler, but details are different because
+%% sys_pre_expand has not been run. In particular:
+%%
+%% * Record definitions are still present and must be handled.
+%%
+%% * (Syntactic) local calls may actually be calls to an imported
+%% function or a BIF. It is a local call if and only if there
+%% is a definition for the function in the module.
+%%
+%% * When we introduce the module parameters and 'THIS' in each
+%% function, we must artificially use it to avoid a warning for
+%% unused variables.
+%%
+%% * On the other hand, we don't have to worry about module_info/0,1
+%% because they have not been added yet.
+
+-record(pmod, {parameters,
+ defined
+ }).
+
+%% Entry point called by the compiler. Errors collected during the
+%% transform are stashed in the process dictionary under ?MODULE;
+%% if any were recorded, return a compiler error tuple instead of
+%% the transformed forms.
+parse_transform(Forms0, _Options) ->
+ put(?MODULE, []),
+ Forms = transform(Forms0),
+ case erase(?MODULE) of
+ [] ->
+ Forms;
+ [_|_]=Errors ->
+ File = get_file(Forms),
+ {error,[{File,Errors}],[]}
+ end.
+
+%% Render an error reason recorded by add_error/2 as a string.
+format_error(extends_self) ->
+ "cannot extend from self";
+format_error(define_instance) ->
+ "defining instance function not allowed in parameterized module".
+
+%% Append a compiler error for Line to the accumulated error list.
+add_error(Line, Error) ->
+ put(?MODULE, get(?MODULE) ++ [{Line,?MODULE,Error}]).
+
+%% First file attribute in the forms gives the source file name.
+get_file([{attribute,_,file,{File,_}}|_]) -> File;
+get_file([_|T]) -> get_file(T).
+
+%% Main driver: gather defined functions, pull out the module name,
+%% its parameters (if the module attribute was {Mod, Params}) and any
+%% base module from an 'extends' attribute, then expand parameterized
+%% functions and append/export the generated functions.
+transform(Forms0) ->
+ Def = collect_defined(Forms0),
+ {Base,ModAs,Forms1} = attribs(Forms0, [], undefined, []),
+ {Mod,Ps0} = case ModAs of
+ {M0,P0} -> {M0,P0};
+ M0 -> {M0,undefined}
+ end,
+ Forms2 = case Ps0 of
+ undefined ->
+ Forms1;
+ _ ->
+ pmod_expand(Forms1, Mod, Base, Ps0, Def)
+ end,
+
+ %% Add new functions.
+ NewFs0 = maybe_extend(Base, Mod, Ps0),
+ NewExps = collect_defined(NewFs0),
+ Forms3 = add_attributes(Forms2, [{attribute,0,export,NewExps}]),
+ add_new_funcs(Forms3, NewFs0).
+
+%% Expand a parameterized module: rewrite every function to take the
+%% module tuple as an extra trailing argument, bump exported arities,
+%% and synthesize instance/N (and new/N when absent). An extending
+%% module gets an implicit 'BASE' parameter.
+pmod_expand(Forms0, Mod, Base, Ps0, Def) ->
+ Ps = if is_atom(Base) ->
+ ['BASE' | Ps0];
+ true ->
+ Ps0
+ end,
+ St0 = #pmod{parameters=Ps,defined=gb_sets:from_list(Def)},
+ {Forms1,_} = forms(Forms0, St0),
+ Forms2 = update_exps(Forms1),
+ Forms3 = update_forms(Forms2),
+ NewFs0 = add_instance(Mod, Ps, []),
+ NewFs = ensure_new(Base, Ps0, NewFs0),
+ Forms = add_new_funcs(Forms3, NewFs),
+ NewExps = collect_defined(NewFs),
+ add_attributes(Forms, [{attribute,0,export,NewExps}]).
+
+%% Insert Attrs immediately after the module attribute.
+add_attributes([{attribute,_,module,_}=F|Fs], Attrs) ->
+ [F|Attrs++Fs];
+add_attributes([F|Fs], Attrs) ->
+ [F|add_attributes(Fs, Attrs)].
+
+%% Insert NewFs just before the {eof,_} marker.
+add_new_funcs([{eof,_}|_]=Fs, NewFs) ->
+ NewFs ++ Fs;
+add_new_funcs([F|Fs], Es) ->
+ [F|add_new_funcs(Fs, Es)].
+
+%% Build the '$handle_undefined_function'/2 delegate for 'extends'.
+maybe_extend([], _, _) ->
+ %% No 'extends' attribute.
+ [];
+maybe_extend(Base, _Mod, undefined) ->
+ %% There is an 'extends' attribute; the module is not parameterized:
+ %% simply forward unknown calls to the base module.
+ Name = '$handle_undefined_function',
+ Args = [{var,0,'Func'},{var,0,'Args'}],
+ Body = [make_apply({atom,0,Base}, {var,0,'Func'}, {var,0,'Args'})],
+ F = {function,0,Name,2,[{clause,0,Args,[],Body}]},
+ [F];
+maybe_extend(Base, Mod, Ps) ->
+ %% There is an 'extends' attribute; the module is parameterized.
+ %% If the last argument is this module's instance tuple, replace it
+ %% with the embedded 'BaseVars' instance before delegating;
+ %% otherwise forward the arguments untouched.
+ Name = '$handle_undefined_function',
+ Args = [{var,0,'Func'},{var,0,'Args'}],
+ DontCares = [{var,0,'_'} || _ <- Ps],
+ TuplePs = {tuple,0,[{atom,0,Mod},{var,0,'BaseVars'}|DontCares]},
+ G = [{call,0,{atom,0,is_atom},
+ [{call,0,{atom,0,element},
+ [{integer,0,1},{var,0,'BaseVars'}]}]}],
+ FixedArgs = make_lists_rev([{var,0,'Rs'},
+ {cons,0,{var,0,'BaseVars'},{nil,0}}]),
+ Body = [{'case',0,make_lists_rev([{var,0,'Args'}]),
+ [{clause,0,[{cons,0,TuplePs,{var,0,'Rs'}}],[G],
+ [make_apply({atom,0,Base}, {var,0,'Func'}, FixedArgs)]},
+ {clause,0,[{var,0,'_'}],[],
+ [make_apply({atom,0,Base}, {var,0,'Func'}, {var,0,'Args'})]}
+ ]}],
+ F = {function,0,Name,2,[{clause,0,Args,[],Body}]},
+ [F].
+
+%% Abstract code for erlang:apply(M, F, A).
+make_apply(M, F, A) ->
+ {call,0,{remote,0,{atom,0,erlang},{atom,0,apply}},[M,F,A]}.
+
+%% Abstract code for lists:reverse(As...).
+make_lists_rev(As) ->
+ {call,0,{remote,0,{atom,0,lists},{atom,0,reverse}},As}.
+
+%% Make sure a new/N constructor exists, generating one if the user
+%% did not define any.
+ensure_new(Base, Ps, Fs) ->
+ case has_new(Fs) of
+ true ->
+ Fs;
+ false ->
+ add_new(Base, Ps, Fs)
+ end.
+
+%% True if any new/N function (any arity) is present.
+has_new([{function,_L,new,_A,_Cs} | _Fs]) ->
+ true;
+has_new([_ | Fs]) ->
+ has_new(Fs);
+has_new([]) ->
+ false.
+
+%% Generate new(Ps...) -> instance(...); an extending module first
+%% constructs the base instance via Base:new(Ps...).
+add_new(Base, Ps, Fs) ->
+ Vs = [{var,0,V} || V <- Ps],
+ As = if is_atom(Base) ->
+ [{call,0,{remote,0,{atom,0,Base},{atom,0,new}},Vs} | Vs];
+ true ->
+ Vs
+ end,
+ Body = [{call,0,{atom,0,instance},As}],
+ add_func(new, Vs, Body, Fs).
+
+%% Generate instance(Ps...) -> {Mod, Ps...}.
+add_instance(Mod, Ps, Fs) ->
+ Vs = [{var,0,V} || V <- Ps],
+ AbsMod = [{tuple,0,[{atom,0,Mod}|Vs]}],
+ add_func(instance, Vs, AbsMod, Fs).
+
+%% Build a single-clause function form and prepend it to Fs.
+add_func(Name, Args, Body, Fs) ->
+ A = length(Args),
+ F = {function,0,Name,A,[{clause,0,Args,[],Body}]},
+ [F|Fs].
+
+%% {Name, Arity} pairs for every function definition in Fs.
+collect_defined(Fs) ->
+ [{N,A} || {function,_,N,A,_} <- Fs].
+
+%% Walk the attributes: remember the module name (possibly with
+%% parameters), strip the parameters from the module attribute itself,
+%% consume any 'extends' attribute (flagging extends_self), and return
+%% {Base, ModuleOrModWithParams, RemainingForms}.
+attribs([{attribute,Line,module,{Mod,_}=ModAs}|T], Base, _, Acc) ->
+ attribs(T, Base, ModAs, [{attribute,Line,module,Mod}|Acc]);
+attribs([{attribute,_,module,Mod}=H|T], Base, _, Acc) ->
+ attribs(T, Base, Mod, [H|Acc]);
+attribs([{attribute,Line,extends,Base}|T], Base0, Ps, Acc) when is_atom(Base) ->
+ Mod = case Ps of
+ {Mod0,_} -> Mod0;
+ Mod0 -> Mod0
+ end,
+ case Mod of
+ Base ->
+ add_error(Line, extends_self),
+ attribs(T, Base0, Ps, Acc);
+ _ ->
+ attribs(T, Base, Ps, Acc)
+ end;
+attribs([H|T], Base, Ps, Acc) ->
+ attribs(T, Base, Ps, [H|Acc]);
+attribs([], Base, Ps, Acc) ->
+ {Base,Ps,lists:reverse(Acc)}.
+
+%% This is extremely simplistic for now; all functions get an extra
+%% parameter, whether they need it or not, except for static functions.
+
+%% Bump the arity in an export entry (new/N keeps its arity).
+update_function_name({F,A}) when F =/= new ->
+ {F,A+1};
+update_function_name(E) ->
+ E.
+
+%% Bump the declared arity of every function form except new/N.
+update_forms([{function,L,N,A,Cs}|Fs]) when N =/= new ->
+ [{function,L,N,A+1,Cs}|update_forms(Fs)];
+update_forms([F|Fs]) ->
+ [F|update_forms(Fs)];
+update_forms([]) ->
+ [].
+
+%% Rewrite every export attribute with the bumped arities.
+update_exps([{attribute,Line,export,Es0}|T]) ->
+ Es = [update_function_name(E) || E <- Es0],
+ [{attribute,Line,export,Es}|update_exps(T)];
+update_exps([H|T]) ->
+ [H|update_exps(T)];
+update_exps([]) ->
+ [].
+
+%% Process the program forms.
+
+forms([F0|Fs0],St0) ->
+ {F1,St1} = form(F0,St0),
+ {Fs1,St2} = forms(Fs0,St1),
+ {[F1|Fs1],St2};
+forms([], St0) ->
+ {[], St0}.
+
+%% Only function definitions are of interest here. State is not updated.
+form({function,Line,instance,_Arity,_Clauses}=F,St) ->
+ add_error(Line, define_instance),
+ {F,St};
+form({function,Line,Name0,Arity0,Clauses0},St) when Name0 =/= new ->
+ {Name,Arity,Clauses} = function(Name0, Arity0, Clauses0, St),
+ {{function,Line,Name,Arity,Clauses},St};
+%% Pass anything else through
+form(F,St) -> {F,St}.
+
+function(Name, Arity, Clauses0, St) ->
+ Clauses1 = clauses(Clauses0,St),
+ {Name,Arity,Clauses1}.
+
+clauses([C|Cs],#pmod{parameters=Ps}=St) ->
+ {clause,L,H,G,B0} = clause(C,St),
+ T = {tuple,L,[{var,L,V} || V <- ['_'|Ps]]},
+ B = [{match,L,{var,L,'_'},{var,L,V}} || V <- ['THIS'|Ps]] ++ B0,
+ [{clause,L,H++[{match,L,T,{var,L,'THIS'}}],G,B}|clauses(Cs,St)];
+clauses([],_St) -> [].
+
+clause({clause,Line,H,G,B0},St) ->
+ %% We never update H and G, so we will just copy them.
+ B1 = exprs(B0,St),
+ {clause,Line,H,G,B1}.
+
+%% Rewrite the segments of a binary construction/pattern: the value and
+%% size expressions are transformed; 'default' markers are preserved.
+pattern_grp([{bin_element,L1,E1,S1,T1} | Fs],St) ->
+ S2 = case S1 of
+ default ->
+ default;
+ _ ->
+ expr(S1,St)
+ end,
+ T2 = case T1 of
+ default ->
+ default;
+ _ ->
+ bit_types(T1)
+ end,
+ [{bin_element,L1,expr(E1,St),S2,T2} | pattern_grp(Fs,St)];
+pattern_grp([],_St) ->
+ [].
+
+%% Copy a bit-syntax type-specifier list (atoms like 'integer'/'little',
+%% or {unit, N} pairs) unchanged, crashing on anything malformed.
+bit_types([]) ->
+ [];
+bit_types([Atom | Rest]) when is_atom(Atom) ->
+ [Atom | bit_types(Rest)];
+bit_types([{Atom, Integer} | Rest]) when is_atom(Atom), is_integer(Integer) ->
+ [{Atom, Integer} | bit_types(Rest)].
+
+%% Rewrite an expression sequence (a clause body), preserving order.
+exprs(Exprs, St) ->
+ [expr(Expr, St) || Expr <- Exprs].
+
+%% Recursively rewrite one expression of the abstract format. The only
+%% substantive change is to calls: calls to locally defined functions
+%% (tracked in the #pmod.defined set) get an extra trailing 'THIS'
+%% argument; literals and all other node kinds are rebuilt unchanged
+%% with their sub-expressions transformed.
+expr({var,_L,_V}=Var,_St) ->
+ Var;
+expr({integer,_Line,_I}=Integer,_St) -> Integer;
+expr({float,_Line,_F}=Float,_St) -> Float;
+expr({atom,_Line,_A}=Atom,_St) -> Atom;
+expr({string,_Line,_S}=String,_St) -> String;
+expr({char,_Line,_C}=Char,_St) -> Char;
+expr({nil,_Line}=Nil,_St) -> Nil;
+expr({cons,Line,H0,T0},St) ->
+ H1 = expr(H0,St),
+ T1 = expr(T0,St),
+ {cons,Line,H1,T1};
+expr({lc,Line,E0,Qs0},St) ->
+ Qs1 = lc_bc_quals(Qs0,St),
+ E1 = expr(E0,St),
+ {lc,Line,E1,Qs1};
+expr({bc,Line,E0,Qs0},St) ->
+ Qs1 = lc_bc_quals(Qs0,St),
+ E1 = expr(E0,St),
+ {bc,Line,E1,Qs1};
+expr({tuple,Line,Es0},St) ->
+ Es1 = expr_list(Es0,St),
+ {tuple,Line,Es1};
+expr({record,Line,Name,Is0},St) ->
+ Is = record_fields(Is0,St),
+ {record,Line,Name,Is};
+expr({record,Line,E0,Name,Is0},St) ->
+ E = expr(E0,St),
+ Is = record_fields(Is0,St),
+ {record,Line,E,Name,Is};
+expr({record_field,Line,E0,Name,Key},St) ->
+ E = expr(E0,St),
+ {record_field,Line,E,Name,Key};
+expr({block,Line,Es0},St) ->
+ Es1 = exprs(Es0,St),
+ {block,Line,Es1};
+expr({'if',Line,Cs0},St) ->
+ Cs1 = icr_clauses(Cs0,St),
+ {'if',Line,Cs1};
+expr({'case',Line,E0,Cs0},St) ->
+ E1 = expr(E0,St),
+ Cs1 = icr_clauses(Cs0,St),
+ {'case',Line,E1,Cs1};
+expr({'receive',Line,Cs0},St) ->
+ Cs1 = icr_clauses(Cs0,St),
+ {'receive',Line,Cs1};
+expr({'receive',Line,Cs0,To0,ToEs0},St) ->
+ To1 = expr(To0,St),
+ ToEs1 = exprs(ToEs0,St),
+ Cs1 = icr_clauses(Cs0,St),
+ {'receive',Line,Cs1,To1,ToEs1};
+expr({'try',Line,Es0,Scs0,Ccs0,As0},St) ->
+ Es1 = exprs(Es0,St),
+ Scs1 = icr_clauses(Scs0,St),
+ Ccs1 = icr_clauses(Ccs0,St),
+ As1 = exprs(As0,St),
+ {'try',Line,Es1,Scs1,Ccs1,As1};
+%% fun M:F/A references an external function and needs no rewriting.
+expr({'fun',_,{function,_,_,_}}=ExtFun,_St) ->
+ ExtFun;
+expr({'fun',Line,Body},St) ->
+ case Body of
+ {clauses,Cs0} ->
+ Cs1 = fun_clauses(Cs0,St),
+ {'fun',Line,{clauses,Cs1}};
+ {function,F,A} = Function ->
+ %% fun F/A: if the referenced local function's arity was
+ %% bumped, the bare reference would point at the wrong
+ %% arity, so wrap it in an explicit fun.
+ {F1,A1} = update_function_name({F,A}),
+ if A1 =:= A ->
+ {'fun',Line,Function};
+ true ->
+ %% Must rewrite local fun-name to a fun that does a
+ %% call with the extra THIS parameter.
+ As = make_vars(A, Line),
+ As1 = As ++ [{var,Line,'THIS'}],
+ Call = {call,Line,{atom,Line,F1},As1},
+ Cs = [{clause,Line,As,[],[Call]}],
+ {'fun',Line,{clauses,Cs}}
+ end;
+ {function,_M,_F,_A} = Fun4 -> %This is an error in lint!
+ {'fun',Line,Fun4}
+ end;
+expr({call,Lc,{atom,_,instance}=Name,As0},St) ->
+ %% All local functions 'instance(...)' are static by definition,
+ %% so they do not take a 'THIS' argument when called
+ As1 = expr_list(As0,St),
+ {call,Lc,Name,As1};
+expr({call,Lc,{atom,_,new}=Name,As0},St) ->
+ %% All local functions 'new(...)' are static by definition,
+ %% so they do not take a 'THIS' argument when called
+ As1 = expr_list(As0,St),
+ {call,Lc,Name,As1};
+expr({call,Lc,{atom,_Lf,F}=Atom,As0}, #pmod{defined=Def}=St) ->
+ As1 = expr_list(As0,St),
+ case gb_sets:is_member({F,length(As0)}, Def) of
+ false ->
+ %% BIF or imported function.
+ {call,Lc,Atom,As1};
+ true ->
+ %% Local function call - needs THIS parameter.
+ {call,Lc,Atom,As1 ++ [{var,0,'THIS'}]}
+ end;
+expr({call,Line,F0,As0},St) ->
+ %% Other function call
+ F1 = expr(F0,St),
+ As1 = expr_list(As0,St),
+ {call,Line,F1,As1};
+expr({'catch',Line,E0},St) ->
+ E1 = expr(E0,St),
+ {'catch',Line,E1};
+expr({match,Line,P,E0},St) ->
+ E1 = expr(E0,St),
+ {match,Line,P,E1};
+expr({bin,Line,Fs},St) ->
+ Fs2 = pattern_grp(Fs,St),
+ {bin,Line,Fs2};
+expr({op,Line,Op,A0},St) ->
+ A1 = expr(A0,St),
+ {op,Line,Op,A1};
+expr({op,Line,Op,L0,R0},St) ->
+ L1 = expr(L0,St),
+ R1 = expr(R0,St),
+ {op,Line,Op,L1,R1};
+%% The following are not allowed to occur anywhere!
+expr({remote,Line,M0,F0},St) ->
+ M1 = expr(M0,St),
+ F1 = expr(F0,St),
+ {remote,Line,M1,F1}.
+
+%% Rewrite each element of an expression list (tuple/call arguments).
+expr_list([E0|Es],St) ->
+ E1 = expr(E0,St),
+ [E1|expr_list(Es,St)];
+expr_list([],_St) -> [].
+
+%% Rewrite the value expression of each record field; keys are untouched.
+record_fields([{record_field,L,K,E0}|T],St) ->
+ E = expr(E0,St),
+ [{record_field,L,K,E}|record_fields(T,St)];
+record_fields([],_) -> [].
+
+%% Rewrite if/case/receive clauses.
+icr_clauses([C0|Cs],St) ->
+ C1 = clause(C0,St),
+ [C1|icr_clauses(Cs,St)];
+icr_clauses([],_St) -> [].
+
+%% Rewrite list/binary comprehension qualifiers: the source expression of
+%% generators and each filter expression; generator patterns are kept.
+lc_bc_quals([{generate,Line,P,E0}|Qs],St) ->
+ E1 = expr(E0,St),
+ [{generate,Line,P,E1}|lc_bc_quals(Qs,St)];
+lc_bc_quals([{b_generate,Line,P,E0}|Qs],St) ->
+ E1 = expr(E0,St),
+ [{b_generate,Line,P,E1}|lc_bc_quals(Qs,St)];
+lc_bc_quals([E0|Qs],St) ->
+ E1 = expr(E0,St),
+ [E1|lc_bc_quals(Qs,St)];
+lc_bc_quals([],_St) -> [].
+
+%% Rewrite the clauses of a fun expression.
+fun_clauses([C0|Cs],St) ->
+ C1 = clause(C0,St),
+ [C1|fun_clauses(Cs,St)];
+fun_clauses([],_St) -> [].
+
+%% Build the argument list [X1, X2, ..., XN] as abstract-format variable
+%% nodes, all annotated with line L. N =< 0 yields [].
+make_vars(Count, Line) ->
+ make_vars(1, Count, Line).
+
+%% Emit {var,Line,'Xk'} for k = From..To, counting upwards.
+make_vars(From, To, _Line) when From > To ->
+ [];
+make_vars(From, To, Line) ->
+ VarName = list_to_atom("X" ++ integer_to_list(From)),
+ [{var,Line,VarName} | make_vars(From + 1, To, Line)].
--- /dev/null
+%% Application resource file for the SockJS Erlang server library.
+%% 'mod' points at sockjs_app, so starting the application starts the
+%% session supervision tree.
+{application, sockjs,
+ [
+ {description, "SockJS"},
+ {vsn, "0.3.4"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, { sockjs_app, []}}
+ ]}.
--- /dev/null
+%% Public facade of the SockJS library: thin wrappers around
+%% sockjs_session operations on a connection handle.
+-module(sockjs).
+
+-export([send/2, close/1, close/3, info/1]).
+
+%% -type(conn() :: {sockjs_session, any()}).
+
+%% Send data over a connection.
+%% -spec send(iodata(), conn()) -> ok.
+send(Data, Conn = {sockjs_session, _}) ->
+ sockjs_session:send(Data, Conn).
+
+%% Initiate a close of a connection.
+%% Uses close code 1000 ("Normal closure").
+%% -spec close(conn()) -> ok.
+close(Conn) ->
+ close(1000, "Normal closure", Conn).
+
+%% Initiate a close with an explicit code and reason string.
+%% -spec close(non_neg_integer(), string(), conn()) -> ok.
+close(Code, Reason, Conn = {sockjs_session, _}) ->
+ sockjs_session:close(Code, Reason, Conn).
+
+%% Fetch metadata about the connection (delegated to the session).
+%% -spec info(conn()) -> [{atom(), any()}].
+info(Conn = {sockjs_session, _}) ->
+ sockjs_session:info(Conn).
+
--- /dev/null
+-module(sockjs_action).
+
+% none
+-export([welcome_screen/3, options/3, iframe/3, info_test/3]).
+% send
+-export([xhr_polling/4, xhr_streaming/4, eventsource/4, htmlfile/4, jsonp/4]).
+% recv
+-export([xhr_send/4, jsonp_send/4]).
+% misc
+-export([websocket/3, rawwebsocket/3]).
+
+-include("sockjs_internal.hrl").
+
+%% --------------------------------------------------------------------------
+
+-define(IFRAME, "<!DOCTYPE html>
+<html>
+<head>
+ <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />
+ <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />
+ <script>
+ document.domain = document.domain;
+ _sockjs_onload = function(){SockJS.bootstrap_iframe();};
+ </script>
+ <script src=\"~s\"></script>
+</head>
+<body>
+ <h2>Don't panic!</h2>
+ <p>This is a SockJS hidden iframe. It's used for cross domain magic.</p>
+</body>
+</html>").
+
+-define(IFRAME_HTMLFILE, "<!doctype html>
+<html><head>
+ <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />
+ <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />
+</head><body><h2>Don't panic!</h2>
+ <script>
+ document.domain = document.domain;
+ var c = parent.~s;
+ c.start();
+ function p(d) {c.message(d);};
+ window.onload = function() {c.stop();};
+ </script>").
+
+%% --------------------------------------------------------------------------
+
+%% -spec welcome_screen(req(), headers(), service()) -> req().
+%% Greeting page served at the service's bare prefix URL.
+welcome_screen(Req, Headers, _Service) ->
+ H = [{"Content-Type", "text/plain; charset=UTF-8"}],
+ sockjs_http:reply(200, H ++ Headers,
+ "Welcome to SockJS!\n", Req).
+
+%% -spec options(req(), headers(), service()) -> req().
+%% CORS preflight response; the interesting headers come from filters.
+options(Req, Headers, _Service) ->
+ sockjs_http:reply(204, Headers, "", Req).
+
+%% -spec iframe(req(), headers(), service()) -> req().
+%% Serve the bootstrap iframe page. The page content only depends on the
+%% configured sockjs_url, so its MD5 is used as an ETag and a matching
+%% If-None-Match request is answered with 304 Not Modified.
+iframe(Req, Headers, #service{sockjs_url = SockjsUrl}) ->
+ IFrame = io_lib:format(?IFRAME, [SockjsUrl]),
+ MD5 = "\"" ++ binary_to_list(base64:encode(erlang:md5(IFrame))) ++ "\"",
+ {H, Req2} = sockjs_http:header('If-None-Match', Req),
+ case H of
+ MD5 -> sockjs_http:reply(304, Headers, "", Req2);
+ _ -> sockjs_http:reply(
+ 200, [{"Content-Type", "text/html; charset=UTF-8"},
+ {"ETag", MD5}] ++ Headers, IFrame, Req2)
+ end.
+
+
+%% -spec info_test(req(), headers(), service()) -> req().
+%% The /info endpoint: tells the client which transports are available
+%% and supplies entropy for its random session-id generation.
+info_test(Req, Headers, #service{websocket = Websocket,
+ cookie_needed = CookieNeeded}) ->
+ I = [{websocket, Websocket},
+ {cookie_needed, CookieNeeded},
+ {origins, [<<"*:*">>]},
+ {entropy, sockjs_util:rand32()}],
+ D = sockjs_json:encode({I}),
+ H = [{"Content-Type", "application/json; charset=UTF-8"}],
+ sockjs_http:reply(200, H ++ Headers, D, Req).
+
+%% --------------------------------------------------------------------------
+
+%% -spec xhr_polling(req(), headers(), service(), session()) -> req().
+%% Long-polling transport: a response limit of 1 closes the response
+%% after the first frame is delivered.
+xhr_polling(Req, Headers, Service, Session) ->
+ Req1 = chunk_start(Req, Headers),
+ reply_loop(Req1, Session, 1, fun fmt_xhr/1, Service).
+
+%% -spec xhr_streaming(req(), headers(), service(), session()) -> req().
+%% Streaming transport: keeps the chunked response open until
+%% response_limit bytes have been sent.
+xhr_streaming(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ Session) ->
+ Req1 = chunk_start(Req, Headers),
+ %% IE requires 2KB prefix:
+ %% http://blogs.msdn.com/b/ieinternals/archive/2010/04/06/comet-streaming-in-internet-explorer-with-xmlhttprequest-and-xdomainrequest.aspx
+ Req2 = chunk(Req1, list_to_binary(string:copies("h", 2048)),
+ fun fmt_xhr/1),
+ reply_loop(Req2, Session, ResponseLimit, fun fmt_xhr/1, Service).
+
+%% -spec eventsource(req(), headers(), service(), session()) -> req().
+%% EventSource (Server-Sent Events) transport; frames are escaped and
+%% wrapped as "data: ..." events by fmt_eventsource/1.
+eventsource(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ SessionId) ->
+ Req1 = chunk_start(Req, Headers, "text/event-stream; charset=UTF-8"),
+ Req2 = chunk(Req1, <<$\r, $\n>>),
+ reply_loop(Req2, SessionId, ResponseLimit, fun fmt_eventsource/1, Service).
+
+
+%% -spec htmlfile(req(), headers(), service(), session()) -> req().
+%% "htmlfile" streaming transport: frames are delivered as <script>
+%% chunks inside a streamed HTML document whose parent-frame callback
+%% name (CB) comes from the query string, checked by verify_callback/2.
+htmlfile(Req, Headers, Service = #service{response_limit = ResponseLimit},
+ SessionId) ->
+ S = fun (Req1, CB) ->
+ Req2 = chunk_start(Req1, Headers, "text/html; charset=UTF-8"),
+ IFrame = iolist_to_binary(io_lib:format(?IFRAME_HTMLFILE, [CB])),
+ %% Safari needs at least 1024 bytes to parse the
+ %% website. Relevant:
+ %% http://code.google.com/p/browsersec/wiki/Part2#Survey_of_content_sniffing_behaviors
+ %% Use byte_size/1 explicitly and clamp at zero: a very long
+ %% callback name could push the document past 1024 bytes, and
+ %% string:copies/2 crashes with badarg on a negative count.
+ Padding = string:copies(" ", max(0, 1024 - byte_size(IFrame))),
+ Req3 = chunk(Req2, [IFrame, Padding, <<"\r\n\r\n">>]),
+ reply_loop(Req3, SessionId, ResponseLimit, fun fmt_htmlfile/1, Service)
+ end,
+ verify_callback(Req, S).
+
+%% -spec jsonp(req(), headers(), service(), session()) -> req().
+%% JSONP polling transport: one frame per response, wrapped in the
+%% caller-supplied callback (validated by verify_callback/2).
+jsonp(Req, Headers, Service, SessionId) ->
+ S = fun (Req1, CB) ->
+ Req2 = chunk_start(Req1, Headers),
+ reply_loop(Req2, SessionId, 1,
+ fun (Body) -> fmt_jsonp(Body, CB) end, Service)
+ end,
+ verify_callback(Req, S).
+
+%% Require a "c" (callback) query parameter; run Success with it when
+%% present, otherwise answer 500 immediately.
+verify_callback(Req, Success) ->
+ {CB, Req1} = sockjs_http:callback(Req),
+ case CB of
+ undefined ->
+ sockjs_http:reply(500, [], "\"callback\" parameter required", Req1);
+ _ ->
+ Success(Req1, CB)
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% -spec xhr_send(req(), headers(), service(), session()) -> req().
+%% Receive-side of the XHR transports: decode the raw request body and
+%% push the messages into the session. Success is a bodyless 204.
+xhr_send(Req, Headers, _Service, Session) ->
+ {Body, Req1} = sockjs_http:body(Req),
+ case handle_recv(Req1, Body, Session) of
+ {error, Req2} ->
+ Req2;
+ ok ->
+ H = [{"content-type", "text/plain; charset=UTF-8"}],
+ sockjs_http:reply(204, H ++ Headers, "", Req1)
+ end.
+
+%% -spec jsonp_send(req(), headers(), service(), session()) -> req().
+%% Receive-side of the JSONP transport: the payload arrives url-encoded
+%% in the "d" form field. Success is a 200 with body "ok".
+jsonp_send(Req, Headers, _Service, Session) ->
+ {Body, Req1} = sockjs_http:body_qs(Req),
+ case handle_recv(Req1, Body, Session) of
+ {error, Req2} ->
+ Req2;
+ ok ->
+ H = [{"content-type", "text/plain; charset=UTF-8"}],
+ sockjs_http:reply(200, H ++ Headers, "ok", Req1)
+ end.
+
+%% Decode a request body and forward the contained messages to the
+%% session. An empty body or malformed JSON yields a 500 reply, returned
+%% as {error, Req}; on success the messages are delivered and ok is
+%% returned.
+handle_recv(Req, <<>>, _Session) ->
+ {error, sockjs_http:reply(500, [], "Payload expected.", Req)};
+handle_recv(Req, Body, Session) ->
+ case sockjs_json:decode(Body) of
+ {ok, Decoded} when is_list(Decoded)->
+ sockjs_session:received(Decoded, Session),
+ ok;
+ {error, _} ->
+ {error, sockjs_http:reply(500, [],
+ "Broken JSON encoding.", Req)}
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% Close frame sent when a second receiver attaches to a session that
+%% already has one.
+-define(STILL_OPEN, {2010, "Another connection still open"}).
+
+%% Begin a chunked 200 response; defaults to the JavaScript content type
+%% used by the XHR/JSONP transports.
+chunk_start(Req, Headers) ->
+ chunk_start(Req, Headers, "application/javascript; charset=UTF-8").
+chunk_start(Req, Headers, ContentType) ->
+ sockjs_http:chunk_start(200, [{"Content-Type", ContentType}] ++ Headers,
+ Req).
+
+%% Pump frames from the session into the (chunked) HTTP response until
+%% the session closes, another receiver takes over, or ResponseLimit
+%% bytes have been written. While waiting for the session ('wait'), the
+%% raw socket is put in {active, once} so a client disconnect is noticed
+%% and the loop can stop instead of leaking.
+reply_loop(Req, SessionId, ResponseLimit, Fmt, Service) ->
+ Req0 = sockjs_http:hook_tcp_close(Req),
+ case sockjs_session:reply(SessionId) of
+ wait -> receive
+ %% In Cowboy we need to capture async
+ %% messages from the tcp connection -
+ %% ie: {active, once}.
+ {tcp_closed, _} ->
+ Req0;
+ %% In Cowboy we may in theory get real
+ %% http requests, this is bad.
+ {tcp, _S, Data} ->
+ error_logger:error_msg(
+ "Received unexpected data on a "
+ "long-polling http connection: ~p. "
+ "Connection aborted.~n",
+ [Data]),
+ Req1 = sockjs_http:abruptly_kill(Req),
+ Req1;
+ go ->
+ Req1 = sockjs_http:unhook_tcp_close(Req0),
+ reply_loop(Req1, SessionId, ResponseLimit,
+ Fmt, Service)
+ end;
+ session_in_use -> Frame = sockjs_util:encode_frame({close, ?STILL_OPEN}),
+ chunk_end(Req0, Frame, Fmt);
+ {close, Frame} -> Frame1 = sockjs_util:encode_frame(Frame),
+ chunk_end(Req0, Frame1, Fmt);
+ {ok, Frame} -> Frame1 = sockjs_util:encode_frame(Frame),
+ Frame2 = iolist_to_binary(Frame1),
+ Req2 = chunk(Req0, Frame2, Fmt),
+ reply_loop0(Req2, SessionId,
+ ResponseLimit - size(Frame2),
+ Fmt, Service)
+ end.
+
+%% Continue streaming until the byte budget is exhausted, then end the
+%% chunked response so the client reconnects.
+reply_loop0(Req, _SessionId, ResponseLimit, _Fmt, _Service) when ResponseLimit =< 0 ->
+ chunk_end(Req);
+reply_loop0(Req, SessionId, ResponseLimit, Fmt, Service) ->
+ reply_loop(Req, SessionId, ResponseLimit, Fmt, Service).
+
+%% Write one chunk; a chunk-write failure is deliberately ignored here
+%% (the tcp-close hook in reply_loop detects dead connections).
+chunk(Req, Body) ->
+ {_, Req1} = sockjs_http:chunk(Body, Req),
+ Req1.
+chunk(Req, Body, Fmt) -> chunk(Req, Fmt(Body)).
+
+%% Terminate a chunked response, optionally after a final framed chunk.
+chunk_end(Req) -> sockjs_http:chunk_end(Req).
+chunk_end(Req, Body, Fmt) -> Req1 = chunk(Req, Body, Fmt),
+ chunk_end(Req1).
+
+%% Per-transport frame encodings:
+
+%% -spec fmt_xhr(iodata()) -> iodata().
+fmt_xhr(Body) -> [Body, "\n"].
+
+%% -spec fmt_eventsource(iodata()) -> iodata().
+fmt_eventsource(Body) ->
+ Escaped = sockjs_util:url_escape(binary_to_list(iolist_to_binary(Body)),
+ "%\r\n\0"), %% $% must be first!
+ [<<"data: ">>, Escaped, <<"\r\n\r\n">>].
+
+%% -spec fmt_htmlfile(iodata()) -> iodata().
+fmt_htmlfile(Body) ->
+ Double = sockjs_json:encode(iolist_to_binary(Body)),
+ [<<"<script>\np(">>, Double, <<");\n</script>\r\n">>].
+
+%% -spec fmt_jsonp(iodata(), iodata()) -> iodata().
+fmt_jsonp(Body, Callback) ->
+ %% Yes, JSONed twice, there isn't a better way, we must pass
+ %% a string back, and the script will be eval()ed by the
+ %% browser.
+ [Callback, "(", sockjs_json:encode(iolist_to_binary(Body)), ");\r\n"].
+
+%% --------------------------------------------------------------------------
+
+%% -spec websocket(req(), headers(), service()) -> req().
+%% Reached only when the websocket upgrade did NOT happen in the web
+%% server; every branch is therefore a 400 explaining what was wrong
+%% with the handshake (including the "valid but unhandled" case).
+websocket(Req, Headers, Service) ->
+ {_Any, Req1, {R1, R2}} = sockjs_handler:is_valid_ws(Service, Req),
+ case {R1, R2} of
+ {false, _} ->
+ sockjs_http:reply(400, Headers,
+ "Can \"Upgrade\" only to \"WebSocket\".", Req1);
+ {_, false} ->
+ sockjs_http:reply(400, Headers,
+ "\"Connection\" must be \"Upgrade\"", Req1);
+ {true, true} ->
+ sockjs_http:reply(400, Headers,
+ "This WebSocket request can't be handled.", Req1)
+ end.
+
+%% -spec rawwebsocket(req(), headers(), service()) -> req().
+%% Same failure reporting as websocket/3.
+rawwebsocket(Req, Headers, Service) ->
+ websocket(Req, Headers, Service).
--- /dev/null
+%% OTP application callback module: sets up session storage and starts
+%% the session supervisor.
+-module(sockjs_app).
+
+-behaviour(application).
+
+-export([start/2, stop/1]).
+
+%% -spec start(_, _) -> {ok, pid()}.
+start(_StartType, _StartArgs) ->
+ sockjs_session:init(),
+ sockjs_session_sup:start_link().
+
+%% -spec stop(_) -> ok.
+stop(_State) ->
+ ok.
--- /dev/null
+-module(sockjs_cowboy_handler).
+-behaviour(cowboy_http_handler).
+-behaviour(cowboy_http_websocket_handler).
+
+%% Cowboy http callbacks
+-export([init/3, handle/2, terminate/2]).
+
+%% Cowboy ws callbacks
+-export([websocket_init/3, websocket_handle/3,
+ websocket_info/3, websocket_terminate/3]).
+
+-include("sockjs_internal.hrl").
+
+%% --------------------------------------------------------------------------
+
+%% Decide per request whether to stay on plain HTTP or upgrade to the
+%% websocket protocol, based on the handshake headers.
+init({_Any, http}, Req, Service) ->
+ case sockjs_handler:is_valid_ws(Service, {cowboy, Req}) of
+ {true, {cowboy, _Req1}, _Reason} ->
+ {upgrade, protocol, cowboy_http_websocket};
+ {false, {cowboy, Req1}, _Reason} ->
+ {ok, Req1, Service}
+ end.
+
+%% Plain HTTP requests are handed to the generic SockJS dispatcher.
+handle(Req, Service) ->
+ {cowboy, Req3} = sockjs_handler:handle_req(Service, {cowboy, Req}),
+ {ok, Req3, Service}.
+
+terminate(_Req, _Service) ->
+ ok.
+
+%% --------------------------------------------------------------------------
+
+%% Websocket handshake: echo back a selected subprotocol if the client
+%% offered any, log the request, create the session and kick off the
+%% reply pump by sending ourselves 'go'.
+websocket_init(_TransportName, Req,
+ Service = #service{logger = Logger,
+ subproto_pref = SubProtocolPref}) ->
+ Req3 = case cowboy_http_req:header(<<"Sec-Websocket-Protocol">>, Req) of
+ {undefined, Req1} ->
+ Req1;
+ {SubProtocols, Req1} ->
+ SelectedSubProtocol =
+ choose_subprotocol_bin(SubProtocols, SubProtocolPref),
+ {ok, Req2} = cowboy_http_req:set_resp_header(
+ <<"Sec-Websocket-Protocol">>,
+ SelectedSubProtocol, Req1),
+ Req2
+ end,
+
+ Req4 = Logger(Service, {cowboy, Req3}, websocket),
+
+ %% Websockets heartbeat on their own; use a long disconnect delay.
+ Service1 = Service#service{disconnect_delay = 5*60*1000},
+
+ {Info, Req5} = sockjs_handler:extract_info(Req4),
+ SessionPid = sockjs_session:maybe_create(undefined, Service1, Info),
+ %% Only websocket/rawwebsocket actions can reach this handler; any
+ %% other dispatch result is a bug, hence the deliberate single-clause
+ %% case (crashes on mismatch).
+ {RawWebsocket, {cowboy, Req7}} =
+ case sockjs_handler:get_action(Service, Req5) of
+ {{match, WS}, Req6} when WS =:= websocket orelse
+ WS =:= rawwebsocket ->
+ {WS, Req6}
+ end,
+ self() ! go,
+ {ok, Req7, {RawWebsocket, SessionPid}}.
+
+%% Incoming text frames are fed to the session; anything else (binary,
+%% ping payloads we do not expect) shuts the connection down.
+websocket_handle({text, Data}, Req, {RawWebsocket, SessionPid} = S) ->
+ case sockjs_ws_handler:received(RawWebsocket, SessionPid, Data) of
+ ok -> {ok, Req, S};
+ shutdown -> {shutdown, Req, S}
+ end;
+websocket_handle(_Unknown, Req, S) ->
+ {shutdown, Req, S}.
+
+%% 'go' drives the outgoing pump: emit one frame, then re-queue 'go'.
+%% A close frame with a payload is delivered first; the follow-up
+%% 'shutdown' message then terminates the connection.
+websocket_info(go, Req, {RawWebsocket, SessionPid} = S) ->
+ case sockjs_ws_handler:reply(RawWebsocket, SessionPid) of
+ wait -> {ok, Req, S};
+ {ok, Data} -> self() ! go,
+ {reply, {text, Data}, Req, S};
+ {close, <<>>} -> {shutdown, Req, S};
+ {close, Data} -> self() ! shutdown,
+ {reply, {text, Data}, Req, S}
+ end;
+websocket_info(shutdown, Req, S) ->
+ {shutdown, Req, S}.
+
+websocket_terminate(_Reason, _Req, {RawWebsocket, SessionPid}) ->
+ sockjs_ws_handler:close(RawWebsocket, SessionPid),
+ ok.
+
+%% --------------------------------------------------------------------------
+
+%% Split a raw Sec-WebSocket-Protocol header value on commas and pick
+%% one offered subprotocol according to the server's preference list.
+choose_subprotocol_bin(Header, Pref) ->
+ Offered = re:split(Header, ", *"),
+ choose_subprotocol(Offered, Pref).
+
+%% With no preference configured (or none of the preferred protocols
+%% offered), fall back to the lexicographically greatest offered one —
+%% an arbitrary but deterministic choice.
+choose_subprotocol(Offered, undefined) ->
+ lists:last(lists:sort(Offered));
+choose_subprotocol(Offered, Pref) ->
+ case [P || P <- Pref, lists:member(P, Offered)] of
+ [Best | _] -> Best;
+ [] -> choose_subprotocol(Offered, undefined)
+ end.
--- /dev/null
+-module(sockjs_filters).
+
+-include("sockjs_internal.hrl").
+
+-export([cache_for/2, h_sid/2, h_no_cache/2, xhr_cors/2,
+ xhr_options_post/2, xhr_options_get/2]).
+
+-define(YEAR, 365 * 24 * 60 * 60).
+
+%% --------------------------------------------------------------------------
+
+%% -spec cache_for(req(), headers()) -> {headers(), req()}.
+%% Mark the response as cacheable for one year (used for static assets
+%% such as the iframe page).
+cache_for(Req, Headers) ->
+ %% erlang:now/0 is deprecated; calendar:universal_time/0 yields the
+ %% current UTC datetime directly, which is all that is needed here.
+ Expires = calendar:gregorian_seconds_to_datetime(
+ calendar:datetime_to_gregorian_seconds(
+ calendar:universal_time()) + ?YEAR),
+ H = [{"Cache-Control", "public, max-age=" ++ integer_to_list(?YEAR)},
+ {"Expires", httpd_util:rfc1123_date(Expires)}],
+ {H ++ Headers, Req}.
+
+%% -spec h_sid(req(), headers()) -> {headers(), req()}.
+h_sid(Req, Headers) ->
+ %% Some load balancers do sticky sessions, but only if there is
+ %% a JSESSIONID cookie. If this cookie isn't yet set, we shall
+ %% set it to a dumb value. It doesn't really matter what, as
+ %% session information is usually added by the load balancer.
+ {C, Req2} = sockjs_http:jsessionid(Req),
+ H = case C of
+ undefined -> [{"Set-Cookie", "JSESSIONID=dummy; path=/"}];
+ Jsid -> [{"Set-Cookie", "JSESSIONID=" ++ Jsid ++ "; path=/"}]
+ end,
+ {H ++ Headers, Req2}.
+
+%% -spec h_no_cache(req(), headers()) -> {headers(), req()}.
+%% Forbid caching of dynamic (polling/streaming) responses.
+h_no_cache(Req, Headers) ->
+ H = [{"Cache-Control", "no-store, no-cache, must-revalidate, max-age=0"}],
+ {H ++ Headers, Req}.
+
+%% -spec xhr_cors(req(), headers()) -> {headers(), req()}.
+%% Add CORS headers: echo the request Origin (falling back to "*" for
+%% missing or "null" origins) and mirror any requested headers back in
+%% Access-Control-Allow-Headers.
+xhr_cors(Req, Headers) ->
+ {OriginH, Req1} = sockjs_http:header('Origin', Req),
+ Origin = case OriginH of
+ "null" -> "*";
+ undefined -> "*";
+ O -> O
+ end,
+ {HeadersH, Req2} = sockjs_http:header(
+ 'Access-Control-Request-Headers', Req1),
+ AllowHeaders = case HeadersH of
+ undefined -> [];
+ V -> [{"Access-Control-Allow-Headers", V}]
+ end,
+ H = [{"Access-Control-Allow-Origin", Origin},
+ {"Access-Control-Allow-Credentials", "true"}],
+ {H ++ AllowHeaders ++ Headers, Req2}.
+
+%% -spec xhr_options_post(req(), headers()) -> {headers(), req()}.
+xhr_options_post(Req, Headers) ->
+ xhr_options(Req, Headers, ["OPTIONS", "POST"]).
+
+%% -spec xhr_options_get(req(), headers()) -> {headers(), req()}.
+xhr_options_get(Req, Headers) ->
+ xhr_options(Req, Headers, ["OPTIONS", "GET"]).
+
+%% -spec xhr_options(req(), headers(), list(string())) -> {headers(), req()}.
+%% CORS preflight: advertise the allowed methods and let clients cache
+%% the preflight result for a year.
+xhr_options(Req, Headers, Methods) ->
+ H = [{"Access-Control-Allow-Methods", string:join(Methods, ", ")},
+ {"Access-Control-Max-Age", integer_to_list(?YEAR)}],
+ {H ++ Headers, Req}.
--- /dev/null
+-module(sockjs_handler).
+
+-export([init_state/4]).
+-export([is_valid_ws/2, get_action/2]).
+-export([dispatch_req/2, handle_req/2]).
+-export([extract_info/1]).
+
+-include("sockjs_internal.hrl").
+
+-define(SOCKJS_URL, "http://cdn.sockjs.org/sockjs-0.2.js").
+
+%% --------------------------------------------------------------------------
+
+%% -spec init_state(binary(), callback(), any(), list(tuple())) -> service().
+%% Build a #service{} record from the URL prefix, the user callback and
+%% an options proplist, filling in documented defaults for every option
+%% that is not supplied.
+init_state(Prefix, Callback, State, Options) ->
+ #service{prefix = binary_to_list(Prefix),
+ callback = Callback,
+ state = State,
+ sockjs_url =
+ proplists:get_value(sockjs_url, Options, ?SOCKJS_URL),
+ websocket =
+ proplists:get_value(websocket, Options, true),
+ cookie_needed =
+ proplists:get_value(cookie_needed, Options, false),
+ disconnect_delay =
+ proplists:get_value(disconnect_delay, Options, 5000),
+ heartbeat_delay =
+ proplists:get_value(heartbeat_delay, Options, 25000),
+ response_limit =
+ proplists:get_value(response_limit, Options, 128*1024),
+ logger =
+ proplists:get_value(logger, Options, fun default_logger/3),
+ subproto_pref =
+ proplists:get_value(subproto_pref, Options)
+ }.
+
+%% --------------------------------------------------------------------------
+
+%% -spec is_valid_ws(service(), req()) -> {boolean(), req(), tuple()}.
+%% A request is a valid websocket handshake when it routes to a
+%% websocket action AND carries proper Upgrade/Connection headers.
+%% The third element reports the two header checks individually.
+is_valid_ws(Service, Req) ->
+ case get_action(Service, Req) of
+ {{match, WS}, Req1} when WS =:= websocket orelse
+ WS =:= rawwebsocket ->
+ valid_ws_request(Service, Req1);
+ {_Else, Req1} ->
+ {false, Req1, {}}
+ end.
+
+%% -spec valid_ws_request(service(), req()) -> {boolean(), req(), tuple()}.
+valid_ws_request(_Service, Req) ->
+ {R1, Req1} = valid_ws_upgrade(Req),
+ {R2, Req2} = valid_ws_connection(Req1),
+ {R1 and R2, Req2, {R1, R2}}.
+
+%% True when the Upgrade header is (case-insensitively) "websocket".
+valid_ws_upgrade(Req) ->
+ case sockjs_http:header('Upgrade', Req) of
+ {undefined, Req2} ->
+ {false, Req2};
+ {V, Req2} ->
+ case string:to_lower(V) of
+ "websocket" ->
+ {true, Req2};
+ _Else ->
+ {false, Req2}
+ end
+ end.
+
+%% True when "upgrade" appears among the comma-separated tokens of the
+%% Connection header (case-insensitive, surrounding spaces stripped).
+valid_ws_connection(Req) ->
+ case sockjs_http:header('Connection', Req) of
+ {undefined, Req2} ->
+ {false, Req2};
+ {V, Req2} ->
+ Vs = [string:strip(T) ||
+ T <- string:tokens(string:to_lower(V), ",")],
+ {lists:member("upgrade", Vs), Req2}
+ end.
+
+%% -spec get_action(service(), req()) -> {nomatch | {match, atom()}, req()}.
+%% Route the request and return just the matched action name.
+get_action(Service, Req) ->
+ {Dispatch, Req1} = dispatch_req(Service, Req),
+ case Dispatch of
+ {match, {_, Action, _, _, _}} ->
+ {{match, Action}, Req1};
+ _Else ->
+ {nomatch, Req1}
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% Split Prefix off the front of LongPath: {ok, Rest} when it matches,
+%% {error, Explanation} when the leading characters differ. As before,
+%% a LongPath shorter than Prefix crashes (badarg from lists:split/2).
+strip_prefix(LongPath, Prefix) ->
+ {Head, Rest} = lists:split(length(Prefix), LongPath),
+ if
+ Head =:= Prefix -> {ok, Rest};
+ true -> {error, io_lib:format("Wrong prefix: ~p is not ~p",
+ [Head, Prefix])}
+ end.
+
+
+%% -type(dispatch_result() ::
+%% nomatch |
+%% {match, {send | recv | none , atom(),
+%% server(), session(), list(atom())}} |
+%% {bad_method, list(atom())}).
+
+%% -spec dispatch_req(service(), req()) -> {dispatch_result(), req()}.
+%% -spec dispatch_req(service(), req()) -> {dispatch_result(), req()}.
+%% Strip the configured prefix off the request path and route what is
+%% left. A path outside the prefix crashes (badmatch on strip_prefix).
+dispatch_req(#service{prefix = Prefix}, Req) ->
+ {Method, Req1} = sockjs_http:method(Req),
+ {LongPath, Req2} = sockjs_http:path(Req1),
+ {ok, PathRemainder} = strip_prefix(LongPath, Prefix),
+ {dispatch(Method, PathRemainder), Req2}.
+
+%% -spec dispatch(atom(), nonempty_string()) -> dispatch_result().
+%% Try each route in filters() in order; the first path match wins.
+%% A path match with the wrong HTTP method yields {bad_method, Allowed}.
+dispatch(Method, Path) ->
+ lists:foldl(
+ fun ({Match, MethodFilters}, nomatch) ->
+ case Match(Path) of
+ nomatch ->
+ nomatch;
+ [Server, Session] ->
+ case lists:keyfind(Method, 1, MethodFilters) of
+ false ->
+ Methods = [ K ||
+ {K, _, _, _} <- MethodFilters],
+ {bad_method, Methods};
+ {_Method, Type, A, Filters} ->
+ {match, {Type, A, Server, Session, Filters}}
+ end
+ end;
+ (_, Result) ->
+ Result
+ end, nomatch, filters()).
+
+%% --------------------------------------------------------------------------
+
+%% Routing table: each entry pairs a path-matching fun with, per HTTP
+%% method, the action type (send/recv/none), the sockjs_action function
+%% and the sockjs_filters to apply. t/1 routes session URLs of the form
+%% /<server>/<session>/<transport>; p/1 routes plain service-level URLs.
+filters() ->
+ OptsFilters = [h_sid, xhr_cors, cache_for, xhr_options_post],
+ %% websocket does not actually go via handle_req/3 but we need
+ %% something in dispatch/2
+ [{t("/websocket"), [{'GET', none, websocket, []}]},
+ {t("/xhr_send"), [{'POST', recv, xhr_send, [h_sid, h_no_cache, xhr_cors]},
+ {'OPTIONS', none, options, OptsFilters}]},
+ {t("/xhr"), [{'POST', send, xhr_polling, [h_sid, h_no_cache, xhr_cors]},
+ {'OPTIONS', none, options, OptsFilters}]},
+ {t("/xhr_streaming"), [{'POST', send, xhr_streaming, [h_sid, h_no_cache, xhr_cors]},
+ {'OPTIONS', none, options, OptsFilters}]},
+ {t("/jsonp_send"), [{'POST', recv, jsonp_send, [h_sid, h_no_cache]}]},
+ {t("/jsonp"), [{'GET', send, jsonp, [h_sid, h_no_cache]}]},
+ {t("/eventsource"), [{'GET', send, eventsource, [h_sid, h_no_cache]}]},
+ {t("/htmlfile"), [{'GET', send, htmlfile, [h_sid, h_no_cache]}]},
+ {p("/websocket"), [{'GET', none, rawwebsocket, []}]},
+ {p(""), [{'GET', none, welcome_screen, []}]},
+ {p("/iframe[0-9-.a-z_]*.html"), [{'GET', none, iframe, [cache_for]}]},
+ {p("/info"), [{'GET', none, info_test, [h_no_cache, xhr_cors]},
+ {'OPTIONS', none, options, [h_sid, xhr_cors, cache_for, xhr_options_get]}]}
+ ].
+
+%% p/1: anchored plain path; t/1: the same preceded by the
+%% /<server>/<session> segments, which are captured.
+p(S) -> fun (Path) -> re(Path, "^" ++ S ++ "[/]?\$") end.
+t(S) -> fun (Path) -> re(Path, "^/([^/.]+)/([^/.]+)" ++ S ++ "[/]?\$") end.
+
+%% Run the anchored regex; normalise "matched without captures" to the
+%% same two-element shape that t/1 routes produce.
+re(Path, S) ->
+ case re:run(Path, S, [{capture, all_but_first, list}]) of
+ nomatch -> nomatch;
+ {match, []} -> [dummy, dummy];
+ {match, [Server, Session]} -> [Server, Session]
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% -spec handle_req(service(), req()) -> req().
+%% Entry point for plain-HTTP requests: log, route, then execute.
+handle_req(Service = #service{logger = Logger}, Req) ->
+ Req0 = Logger(Service, Req, http),
+
+ {Dispatch, Req1} = dispatch_req(Service, Req0),
+ handle(Dispatch, Service, Req1).
+
+handle(nomatch, _Service, Req) ->
+ sockjs_http:reply(404, [], "", Req);
+
+handle({bad_method, Methods}, _Service, Req) ->
+ MethodsStr = string:join([atom_to_list(M) || M <- Methods],
+ ", "),
+ H = [{"Allow", MethodsStr}],
+ sockjs_http:reply(405, H, "", Req);
+
+%% Run the route's filter chain (accumulating response headers), then
+%% invoke the action. 'send' actions may need to create the session
+%% first; 'recv' actions translate a missing session into a 404.
+handle({match, {Type, Action, _Server, Session, Filters}}, Service, Req) ->
+ {Headers, Req2} = lists:foldl(
+ fun (Filter, {Headers0, Req1}) ->
+ sockjs_filters:Filter(Req1, Headers0)
+ end, {[], Req}, Filters),
+ case Type of
+ send ->
+ {Info, Req3} = extract_info(Req2),
+ _SPid = sockjs_session:maybe_create(Session, Service, Info),
+ sockjs_action:Action(Req3, Headers, Service, Session);
+ recv ->
+ try
+ sockjs_action:Action(Req2, Headers, Service, Session)
+ catch throw:no_session ->
+ {H, Req3} = sockjs_filters:h_sid(Req2, []),
+ sockjs_http:reply(404, H, "", Req3)
+ end;
+ none ->
+ sockjs_action:Action(Req2, Headers, Service)
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% -spec default_logger(service(), req(), websocket | http) -> req().
+%% Minimal access log: "METHOD PATH" on standard output.
+default_logger(_Service, Req, _Type) ->
+ {LongPath, Req1} = sockjs_http:path(Req),
+ {Method, Req2} = sockjs_http:method(Req1),
+ io:format("~s ~s~n", [Method, LongPath]),
+ Req2.
+
+%% -spec extract_info(req()) -> {info(), req()}.
+%% Collect peer/socket addresses, the path, and any proxy-related
+%% headers that are present, for storage alongside the session.
+extract_info(Req) ->
+ {Peer, Req0} = sockjs_http:peername(Req),
+ {Sock, Req1} = sockjs_http:sockname(Req0),
+ {Path, Req2} = sockjs_http:path(Req1),
+ {Headers, Req3} = lists:foldl(fun (H, {Acc, R0}) ->
+ case sockjs_http:header(H, R0) of
+ {undefined, R1} -> {Acc, R1};
+ {V, R1} -> {[{H, V} | Acc], R1}
+ end
+ end, {[], Req2},
+ ['Referer', 'X-Client-Ip', 'X-Forwarded-For',
+ 'X-Cluster-Client-Ip', 'Via', 'X-Real-Ip']),
+ {[{peername, Peer},
+ {sockname, Sock},
+ {path, Path},
+ {headers, Headers}], Req3}.
--- /dev/null
+-module(sockjs_http).
+
+-export([path/1, method/1, body/1, body_qs/1, header/2, jsessionid/1,
+ callback/1, peername/1, sockname/1]).
+-export([reply/4, chunk_start/3, chunk/2, chunk_end/1]).
+-export([hook_tcp_close/1, unhook_tcp_close/1, abruptly_kill/1]).
+-include("sockjs_internal.hrl").
+
+%% --------------------------------------------------------------------------
+
+%% -spec path(req()) -> {string(), req()}.
+path({cowboy, Req}) -> {Path, Req1} = cowboy_http_req:raw_path(Req),
+ {binary_to_list(Path), {cowboy, Req1}}.
+
+%% -spec method(req()) -> {atom(), req()}.
+%% NOTE(review): list_to_atom/1 on a client-supplied method binary can
+%% grow the atom table; presumably cowboy restricts methods to a known
+%% set before we get here — verify against the cowboy version in use.
+method({cowboy, Req}) -> {Method, Req1} = cowboy_http_req:method(Req),
+ case is_binary(Method) of
+ true -> {list_to_atom(binary_to_list(Method)), {cowboy, Req1}};
+ false -> {Method, {cowboy, Req1}}
+ end.
+
+%% -spec body(req()) -> {binary(), req()}.
+body({cowboy, Req}) -> {ok, Body, Req1} = cowboy_http_req:body(Req),
+ {Body, {cowboy, Req1}}.
+
+%% -spec body_qs(req()) -> {binary(), req()}.
+%% JSONP payloads arrive either as a raw text/plain body or as the "d"
+%% field of a form-encoded body; pick the decoding by Content-Type.
+body_qs(Req) ->
+ {H, Req1} = header('Content-Type', Req),
+ case H of
+ H when H =:= "text/plain" orelse H =:= "" ->
+ body(Req1);
+ _ ->
+ %% By default assume application/x-www-form-urlencoded
+ body_qs2(Req1)
+ end.
+%% Extract the "d" field; a missing field is treated as an empty body.
+body_qs2({cowboy, Req}) ->
+ {BodyQS, Req1} = cowboy_http_req:body_qs(Req),
+ case proplists:get_value(<<"d">>, BodyQS) of
+ undefined ->
+ {<<>>, {cowboy, Req1}};
+ V ->
+ {V, {cowboy, Req1}}
+ end.
+
+%% -spec header(atom(), req()) -> {nonempty_string() | undefined, req()}.
+%% Look the header up first by atom, then by its binary spelling (the
+%% two forms cowboy uses), returning the value as a list or undefined.
+header(K, {cowboy, Req})->
+ {H, Req2} = cowboy_http_req:header(K, Req),
+ {V, Req3} = case H of
+ undefined ->
+ cowboy_http_req:header(list_to_binary(atom_to_list(K)), Req2);
+ _ -> {H, Req2}
+ end,
+ case V of
+ undefined -> {undefined, {cowboy, Req3}};
+ _ -> {binary_to_list(V), {cowboy, Req3}}
+ end.
+
+%% -spec jsessionid(req()) -> {nonempty_string() | undefined, req()}.
+%% Value of the JSESSIONID cookie, if the client sent one.
+jsessionid({cowboy, Req}) ->
+ {C, Req2} = cowboy_http_req:cookie(<<"JSESSIONID">>, Req),
+ case C of
+ _ when is_binary(C) ->
+ {binary_to_list(C), {cowboy, Req2}};
+ undefined ->
+ {undefined, {cowboy, Req2}}
+ end.
+
+%% -spec callback(req()) -> {nonempty_string() | undefined, req()}.
+%% The JSONP callback name, taken from the "c" query parameter.
+callback({cowboy, Req}) ->
+ {CB, Req1} = cowboy_http_req:qs_val(<<"c">>, Req),
+ case CB of
+ undefined -> {undefined, {cowboy, Req1}};
+ _ -> {binary_to_list(CB), {cowboy, Req1}}
+ end.
+
+%% -spec peername(req()) -> {{inet:ip_address(), non_neg_integer()}, req()}.
+peername({cowboy, Req}) ->
+ {P, Req1} = cowboy_http_req:peer(Req),
+ {P, {cowboy, Req1}}.
+
+%% -spec sockname(req()) -> {{inet:ip_address(), non_neg_integer()}, req()}.
+%% Local address of the connection; non-port transports (e.g. SSL
+%% sockets) fall back to a zero address.
+sockname({cowboy, Req} = R) ->
+ {ok, _T, S} = cowboy_http_req:transport(Req),
+ %% Cowboy has peername(), but doesn't have sockname() equivalent.
+ {ok, Addr} = case S of
+ _ when is_port(S) ->
+ inet:sockname(S);
+ _ ->
+ {ok, {{0,0,0,0}, 0}}
+ end,
+ {Addr, R}.
+
+%% --------------------------------------------------------------------------
+
+%% -spec reply(non_neg_integer(), headers(), iodata(), req()) -> req().
+%% Send a complete (non-chunked) HTTP response. The string header
+%% pairs are converted to the binaries cowboy expects.
+reply(Code, Headers, Body, {cowboy, Req}) ->
+ Body1 = iolist_to_binary(Body),
+ {ok, Req1} = cowboy_http_req:reply(Code, enbinary(Headers), Body1, Req),
+ {cowboy, Req1}.
+
+%% -spec chunk_start(non_neg_integer(), headers(), req()) -> req().
+%% Begin a chunked (streaming) HTTP response with the given status
+%% code and headers; chunks are then sent with chunk/2.
+chunk_start(Code, Headers, {cowboy, Req}) ->
+ {ok, Req1} = cowboy_http_req:chunked_reply(Code, enbinary(Headers), Req),
+ {cowboy, Req1}.
+
+%% -spec chunk(iodata(), req()) -> {ok | error, req()}.
+%% Send one chunk of a chunked response; returns error when the write
+%% fails (e.g. the peer has already disconnected).
+chunk(Chunk, {cowboy, Req} = R) ->
+ case cowboy_http_req:chunk(Chunk, Req) of
+ ok -> {ok, R};
+ {error, _E} -> {error, R}
+ %% This shouldn't happen too often, usually we
+ %% should catch the socket closure before.
+ end.
+
+%% -spec chunk_end(req()) -> req().
+%% Finish a chunked response. Cowboy terminates the chunked body when
+%% the request process finishes, so nothing needs to be done here.
+chunk_end({cowboy, _Req} = Handle) -> Handle.
+
+%% Convert a proplist of string headers into binary pairs, since
+%% cowboy expects binary header names and values.
+enbinary(Headers) ->
+    lists:map(fun({Key, Value}) ->
+                      {list_to_binary(Key), list_to_binary(Value)}
+              end, Headers).
+
+
+%% -spec hook_tcp_close(req()) -> req().
+%% Put the underlying socket in {active, once} so the handling process
+%% receives a message when the peer closes the connection.
+hook_tcp_close(R = {cowboy, Req}) ->
+ {ok, T, S} = cowboy_http_req:transport(Req),
+ T:setopts(S,[{active,once}]),
+ R.
+
+%% -spec unhook_tcp_close(req()) -> req().
+%% Undo hook_tcp_close/1: switch the socket back to passive mode so no
+%% further close/data messages are delivered to the process.
+unhook_tcp_close(R = {cowboy, Req}) ->
+ {ok, T, S} = cowboy_http_req:transport(Req),
+ T:setopts(S,[{active,false}]),
+ R.
+
+%% -spec abruptly_kill(req()) -> req().
+%% Close the underlying socket immediately, without any HTTP-level
+%% goodbye. Used by tests/edge cases that require abrupt termination.
+abruptly_kill(R = {cowboy, Req}) ->
+ {ok, T, S} = cowboy_http_req:transport(Req),
+ T:close(S),
+ R.
--- /dev/null
+
+%% -type(req() :: {cowboy, any()}).
+
+%% -type(user_session() :: nonempty_string()).
+%% -type(emittable() :: init|closed|{recv, binary()}).
+%% -type(callback() :: fun((user_session(), emittable(), any()) -> ok)).
+%% -type(logger() :: fun((any(), req(), websocket|http) -> req())).
+
+%% Static per-endpoint configuration for a SockJS service, built once
+%% at registration time and threaded through request handling.
+-record(service, {prefix , %% nonempty_string(),
+ callback , %% callback()
+ state , %% any()
+ sockjs_url , %% nonempty_string()
+ cookie_needed , %% boolean()
+ websocket , %% boolean()
+ disconnect_delay , %% non_neg_integer()
+ heartbeat_delay , %% non_neg_integer()
+ response_limit , %% non_neg_integer()
+ logger , %% logger()
+ subproto_pref %% [binary()]
+ }).
+
+%% -type(service() :: #service{}).
+
+%% -type(headers() :: list({nonempty_string(), nonempty_string()})).
+%% -type(server() :: nonempty_string()).
+%% -type(session() :: nonempty_string()).
+
+%% -type(frame() :: {open, nil} |
+%% {close, {non_neg_integer(), string()}} |
+%% {data, list(iodata())} |
+%% {heartbeat, nil} ).
+
+%% -type(info() :: [{atom(), any()}]).
--- /dev/null
+-module(sockjs_json).
+
+-export([encode/1, decode/1]).
+
+%% --------------------------------------------------------------------------
+
+%% -spec encode(any()) -> iodata().
+%% Encode an Erlang term as JSON via the bundled mochijson2 fork.
+encode(Term) ->
+    mochijson2_fork:encode(Term).
+
+%% -spec decode(iodata()) -> {ok, any()} | {error, any()}.
+%% Decode JSON, wrapping the parsed term in {ok, _} and converting any
+%% exception the parser raises into {error, Reason}.
+decode(Encoded) ->
+    try mochijson2_fork:decode(Encoded) of
+        Decoded -> {ok, Decoded}
+    catch
+        _Class:Reason -> {error, Reason}
+    end.
--- /dev/null
+-module(sockjs_multiplex).
+
+-behaviour(sockjs_service).
+
+-export([init_state/1]).
+-export([sockjs_init/2, sockjs_handle/3, sockjs_terminate/2]).
+
+-record(service, {callback, state, vconn}).
+
+%% --------------------------------------------------------------------------
+
+%% Build the initial multiplexer state from {Topic, Callback, State}
+%% service specs: an orddict of services keyed by topic plus an
+%% (initially empty) orddict of open channels.
+init_state(Services) ->
+ L = [{Topic, #service{callback = Callback, state = State}} ||
+ {Topic, Callback, State} <- Services],
+ {orddict:from_list(L), orddict:new()}.
+
+
+
+%% sockjs_service callback: nothing to set up, keep the state as-is.
+sockjs_init(_Conn, {_Services, _Channels} = State) ->
+    {ok, State}.
+
+%% sockjs_service callback for inbound frames. The wire format is
+%% "type,topic,payload" where type is "sub", "uns" or "msg"; frames
+%% addressed to unknown topics are silently dropped.
+sockjs_handle(Conn, Data, {Services, Channels}) ->
+ [Type, Topic, Payload] = split($,, binary_to_list(Data), 3),
+ case orddict:find(Topic, Services) of
+ {ok, Service} ->
+ Channels1 = action(Conn, {Type, Topic, Payload}, Service, Channels),
+ {ok, {Services, Channels1}};
+ _Else ->
+ {ok, {Services, Channels}}
+ end.
+
+%% sockjs_service callback: the physical connection went away, so tell
+%% every open channel it is closed and forget all channels.
+sockjs_terminate(_Conn, {Services, Channels}) ->
+    %% The emit result is discarded, so don't wrap each call in a
+    %% throwaway 1-tuple as the original comprehension did.
+    _ = [emit(closed, Channel) || {_Topic, Channel} <- orddict:to_list(Channels)],
+    {ok, {Services, orddict:new()}}.
+
+
+%% Apply one parsed frame {Type, Topic, Payload} to the channel table
+%% and return the updated table: "sub" opens a channel, "uns" closes
+%% one, "msg" delivers a payload. Combinations that don't match the
+%% required channel presence/absence are ignored.
+action(Conn, {Type, Topic, Payload}, Service, Channels) ->
+ case {Type, orddict:is_key(Topic, Channels)} of
+ {"sub", false} ->
+ Channel = Service#service{
+ vconn = sockjs_multiplex_channel:new(
+ Conn, Topic)
+ },
+ orddict:store(Topic, emit(init, Channel), Channels);
+ {"uns", true} ->
+ Channel = orddict:fetch(Topic, Channels),
+ emit(closed, Channel),
+ orddict:erase(Topic, Channels);
+ {"msg", true} ->
+ Channel = orddict:fetch(Topic, Channels),
+ orddict:store(Topic, emit({recv, Payload}, Channel), Channels);
+ _Else ->
+ %% Ignore
+ Channels
+ end.
+
+
+%% Run the service callback for one event on a channel. The callback
+%% may return {ok, NewState} to update its private state, or plain ok
+%% to keep the current one.
+emit(What, Channel = #service{callback = Callback,
+ state = State,
+ vconn = VConn}) ->
+ case Callback(VConn, What, State) of
+ {ok, State1} -> Channel#service{state = State1};
+ ok -> Channel
+ end.
+
+
+%% --------------------------------------------------------------------------
+
+%% Split Str on Char into exactly Limit parts; missing parts are
+%% padded with "" so callers can pattern-match a fixed shape. The
+%% final part keeps the remainder verbatim — previously the last slot
+%% was split once more, so a payload containing the separator was
+%% silently truncated at its first separator.
+split(Char, Str, Limit) ->
+    Acc = split(Char, Str, Limit, []),
+    lists:reverse(Acc).
+
+split(_Char, _Str, 0, Acc) -> Acc;
+%% Last slot: take everything that is left, separators included.
+split(_Char, Str, 1, Acc) -> [Str | Acc];
+split(Char, Str, Limit, Acc) ->
+    {Part, Rest} = case string:chr(Str, Char) of
+                       0 -> {Str, ""};
+                       I -> {string:substr(Str, 1, I-1), string:substr(Str, I+1)}
+                   end,
+    split(Char, Rest, Limit-1, [Part | Acc]).
--- /dev/null
+%% A lightweight virtual connection: a parameterized module (compiled
+%% via the pmod_pt parse transform) pairing a real SockJS connection
+%% with a multiplexer topic, so each event is framed with its topic.
+-compile({parse_transform,pmod_pt}).
+
+-module(sockjs_multiplex_channel, [Conn, Topic]).
+
+-export([send/1, close/0, close/2, info/0]).
+
+%% Send Data to the peer, framed as "msg,Topic,Data".
+send(Data) ->
+ Conn:send(iolist_to_binary(["msg", ",", Topic, ",", Data])).
+
+%% Close with the default "normal closure" code.
+close() ->
+ close(1000, "Normal closure").
+
+%% Close the channel by emitting an unsubscribe frame "uns,Topic". The
+%% code and reason are accepted for interface compatibility but are
+%% not transmitted.
+close(_Code, _Reason) ->
+ Conn:send(iolist_to_binary(["uns", ",", Topic])).
+
+%% The underlying connection's info plus this channel's topic.
+info() ->
+ Conn:info() ++ [{topic, Topic}].
+
--- /dev/null
+-module(sockjs_service).
+
+-export([behaviour_info/1]).
+
+%% Callbacks every sockjs service implementation must export:
+%% sockjs_init/2, sockjs_handle/3 and sockjs_terminate/2.
+behaviour_info(callbacks) ->
+    [{sockjs_init, 2},
+     {sockjs_handle, 3},
+     {sockjs_terminate, 2}];
+behaviour_info(_Other) ->
+    undefined.
--- /dev/null
+-module(sockjs_session).
+
+-behaviour(gen_server).
+
+-export([init/0, start_link/3]).
+-export([maybe_create/3, reply/1, reply/2, received/2]).
+-export([send/2, close/3, info/1]).
+
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-include("sockjs_internal.hrl").
+%% -type(handle() :: {?MODULE, {pid(), info()}}).
+
+-record(session, {id , %% session(),
+ outbound_queue = queue:new() , %% queue()
+ response_pid , %% pid()
+ disconnect_tref , %% reference()
+ disconnect_delay = 5000 , %% non_neg_integer()
+ heartbeat_tref , %% reference() | triggered
+ heartbeat_delay = 25000 , %% non_neg_integer()
+ ready_state = connecting , %% connecting | open | closed
+ close_msg , %% {non_neg_integer(), string()}
+ callback,
+ state,
+ handle %% handle()
+ }).
+-define(ETS, sockjs_table).
+
+
+%% -type(session_or_undefined() :: session() | undefined).
+%% -type(session_or_pid() :: session() | pid()).
+
+%% --------------------------------------------------------------------------
+
+%% -spec init() -> ok.
+%% Create the public named ETS table mapping session ids to session
+%% process pids. Must be called once before sessions are created.
+init() ->
+ _ = ets:new(?ETS, [public, named_table]),
+ ok.
+
+%% -spec start_link(session_or_undefined(), service(), info()) -> {ok, pid()}.
+%% Start one session gen_server (normally via sockjs_session_sup).
+start_link(SessionId, Service, Info) ->
+ gen_server:start_link(?MODULE, {SessionId, Service, Info}, []).
+
+%% -spec maybe_create(session_or_undefined(), service(), info()) -> pid().
+%% Return the pid registered for SessionId, starting a new session
+%% process under the supervisor if none exists yet.
+%% NOTE(review): the lookup and start are not atomic — presumably
+%% callers serialise per session id; confirm before relying on this
+%% from concurrent transports.
+maybe_create(SessionId, Service, Info) ->
+ case ets:lookup(?ETS, SessionId) of
+ [] -> {ok, SPid} = sockjs_session_sup:start_child(
+ SessionId, Service, Info),
+ SPid;
+ [{_, SPid}] -> SPid
+ end.
+
+
+%% -spec received(list(iodata()), session_or_pid()) -> ok.
+%% Deliver inbound client messages to the session process. Throws
+%% no_session when the session rejects them (it is not open) or when
+%% the session id is unknown.
+received(Messages, SessionPid) when is_pid(SessionPid) ->
+ case gen_server:call(SessionPid, {received, Messages}, infinity) of
+ ok -> ok;
+ error -> throw(no_session)
+ %% TODO: should we respond 404 when session is closed?
+ end;
+received(Messages, SessionId) ->
+ received(Messages, spid(SessionId)).
+
+%% -spec send(iodata(), handle()) -> ok.
+%% Queue Data for delivery to the client (asynchronous cast).
+send(Data, {?MODULE, {SPid, _}}) ->
+ gen_server:cast(SPid, {send, Data}),
+ ok.
+
+%% -spec close(non_neg_integer(), string(), handle()) -> ok.
+%% Ask the session to close with the given status code and reason
+%% (asynchronous cast).
+close(Code, Reason, {?MODULE, {SPid, _}}) ->
+ gen_server:cast(SPid, {close, Code, Reason}),
+ ok.
+
+%% -spec info(handle()) -> info().
+%% Connection metadata captured when the session handle was created.
+info({?MODULE, {_SPid, Info}}) ->
+ Info.
+
+%% -spec reply(session_or_pid()) ->
+%% wait | session_in_use | {ok | close, frame()}.
+%% As reply/2 with Multiple = true: drain the whole outbound queue.
+reply(Session) ->
+ reply(Session, true).
+
+%% -spec reply(session_or_pid(), boolean()) ->
+%% wait | session_in_use | {ok | close, frame()}.
+%% Ask the session for the next frame to send to the client. Multiple
+%% chooses between draining the whole queue and taking one message;
+%% wait means the transport should poll/block until data appears.
+reply(SessionPid, Multiple) when is_pid(SessionPid) ->
+ gen_server:call(SessionPid, {reply, self(), Multiple}, infinity);
+reply(SessionId, Multiple) ->
+ reply(spid(SessionId), Multiple).
+
+%% --------------------------------------------------------------------------
+
+%% Cancel a timer created with erlang:send_after/3. If the timer had
+%% already fired, drain the pending Atom message from the mailbox so
+%% it cannot be misinterpreted as a fresh timeout later on.
+cancel_timer_safe(TRef, Atom) ->
+    case erlang:cancel_timer(TRef) of
+        false ->
+            receive
+                Atom -> ok
+            after 0 ->
+                ok
+            end;
+        _TimeLeft ->
+            ok
+    end.
+
+%% Resolve a session id to its process pid via the ETS table; throws
+%% no_session for unknown ids.
+spid(SessionId) ->
+ case ets:lookup(?ETS, SessionId) of
+ [] -> throw(no_session);
+ [{_, SPid}] -> SPid
+ end.
+
+%% Register Pid as the transport process waiting for outbound data,
+%% switching the session from disconnect-timeout mode to
+%% heartbeat-timeout mode.
+%% Mark a process as waiting for data.
+%% 1) The same process may ask for messages multiple times.
+mark_waiting(Pid, State = #session{response_pid = Pid,
+ disconnect_tref = undefined}) ->
+ State;
+%% 2) Noone else waiting - link and start heartbeat timeout.
+mark_waiting(Pid, State = #session{response_pid = undefined,
+ disconnect_tref = DisconnectTRef,
+ heartbeat_delay = HeartbeatDelay})
+ when DisconnectTRef =/= undefined ->
+ %% Link so the session observes the transport's death ('EXIT' is
+ %% handled in handle_info/2).
+ link(Pid),
+ cancel_timer_safe(DisconnectTRef, session_timeout),
+ TRef = erlang:send_after(HeartbeatDelay, self(), heartbeat_triggered),
+ State#session{response_pid = Pid,
+ disconnect_tref = undefined,
+ heartbeat_tref = TRef}.
+
+%% Detach the waiting transport (data was handed over, or the event
+%% came from elsewhere) and re-arm the disconnect timer that
+%% eventually reaps an unattended session.
+%% Prolong session lifetime.
+%% 1) Maybe clear up response_pid if already awaiting.
+unmark_waiting(RPid, State = #session{response_pid = RPid,
+ heartbeat_tref = HeartbeatTRef,
+ disconnect_tref = undefined,
+ disconnect_delay = DisconnectDelay}) ->
+ unlink(RPid),
+ _ = case HeartbeatTRef of
+ undefined -> ok;
+ triggered -> ok;
+ _Else -> cancel_timer_safe(HeartbeatTRef, heartbeat_triggered)
+ end,
+ TRef = erlang:send_after(DisconnectDelay, self(), session_timeout),
+ State#session{response_pid = undefined,
+ heartbeat_tref = undefined,
+ disconnect_tref = TRef};
+
+%% 2) prolong disconnect timer if no connection is waiting
+unmark_waiting(_Pid, State = #session{response_pid = undefined,
+ disconnect_tref = DisconnectTRef,
+ disconnect_delay = DisconnectDelay})
+ when DisconnectTRef =/= undefined ->
+ cancel_timer_safe(DisconnectTRef, session_timeout),
+ TRef = erlang:send_after(DisconnectDelay, self(), session_timeout),
+ State#session{disconnect_tref = TRef};
+
+%% 3) Event from someone else? Ignore.
+unmark_waiting(RPid, State = #session{response_pid = Pid,
+ disconnect_tref = undefined})
+ when Pid =/= undefined andalso Pid =/= RPid ->
+ State.
+
+%% -spec emit(emittable(), #session{}) -> #session{}.
+%% Run the user callback for an event. Callback is either a fun/3 or
+%% a module implementing the sockjs_service behaviour. A return of
+%% {ok, NewUserState} updates the stored user state; ok keeps it.
+emit(What, State = #session{callback = Callback,
+ state = UserState,
+ handle = Handle}) ->
+ R = case Callback of
+ _ when is_function(Callback) ->
+ Callback(Handle, What, UserState);
+ _ when is_atom(Callback) ->
+ case What of
+ init -> Callback:sockjs_init(Handle, UserState);
+ {recv, Data} -> Callback:sockjs_handle(Handle, Data, UserState);
+ closed -> Callback:sockjs_terminate(Handle, UserState)
+ end
+ end,
+ case R of
+ {ok, UserState1} -> State#session{state = UserState1};
+ ok -> State
+ end.
+
+%% --------------------------------------------------------------------------
+
+%% -spec init({session_or_undefined(), service(), info()}) -> {ok, #session{}}.
+%% gen_server callback: register the session in ETS (unless it is
+%% anonymous), trap exits so a dying transport is observed, and arm
+%% the initial disconnect timer.
+init({SessionId, #service{callback = Callback,
+ state = UserState,
+ disconnect_delay = DisconnectDelay,
+ heartbeat_delay = HeartbeatDelay}, Info}) ->
+ case SessionId of
+ undefined -> ok;
+ _Else -> ets:insert(?ETS, {SessionId, self()})
+ end,
+ process_flag(trap_exit, true),
+ TRef = erlang:send_after(DisconnectDelay, self(), session_timeout),
+ {ok, #session{id = SessionId,
+ callback = Callback,
+ state = UserState,
+ response_pid = undefined,
+ disconnect_tref = TRef,
+ disconnect_delay = DisconnectDelay,
+ heartbeat_tref = undefined,
+ heartbeat_delay = HeartbeatDelay,
+ handle = {?MODULE, {self(), Info}}}}.
+
+
+%% gen_server callback. {reply, Pid, Multiple} is a transport polling
+%% for outbound frames; {received, Messages} is inbound client data.
+%% First poll of a connecting session: run the user init callback and
+%% answer with the open frame.
+handle_call({reply, Pid, _Multiple}, _From, State = #session{
+ response_pid = undefined,
+ ready_state = connecting}) ->
+ State0 = emit(init, State),
+ State1 = unmark_waiting(Pid, State0),
+ {reply, {ok, {open, nil}},
+ State1#session{ready_state = open}};
+
+%% Session already closed: keep answering with the close frame.
+handle_call({reply, Pid, _Multiple}, _From, State = #session{
+ ready_state = closed,
+ close_msg = CloseMsg}) ->
+ State1 = unmark_waiting(Pid, State),
+ {reply, {close, {close, CloseMsg}}, State1};
+
+
+%% A different transport is already attached to this session.
+handle_call({reply, Pid, _Multiple}, _From, State = #session{
+ response_pid = RPid})
+ when RPid =/= Pid andalso RPid =/= undefined ->
+ %% don't use unmark_waiting(), this shouldn't touch the session lifetime
+ {reply, session_in_use, State};
+
+%% Open session: hand over queued messages (all of them when Multiple,
+%% otherwise one), a heartbeat if the timer fired while idle, or tell
+%% the transport to wait for data.
+handle_call({reply, Pid, Multiple}, _From, State = #session{
+ ready_state = open,
+ response_pid = RPid,
+ heartbeat_tref = HeartbeatTRef,
+ outbound_queue = Q})
+ when RPid == undefined orelse RPid == Pid ->
+ {Messages, Q1} = case Multiple of
+ true -> {queue:to_list(Q), queue:new()};
+ false -> case queue:out(Q) of
+ {{value, Msg}, Q2} -> {[Msg], Q2};
+ {empty, Q2} -> {[], Q2}
+ end
+ end,
+ case {Messages, HeartbeatTRef} of
+ {[], triggered} -> State1 = unmark_waiting(Pid, State),
+ {reply, {ok, {heartbeat, nil}}, State1};
+ {[], _TRef} -> State1 = mark_waiting(Pid, State),
+ {reply, wait, State1};
+ _More -> State1 = unmark_waiting(Pid, State),
+ {reply, {ok, {data, Messages}},
+ State1#session{outbound_queue = Q1}}
+ end;
+
+%% Inbound data on an open session: feed each message to the callback.
+handle_call({received, Messages}, _From, State = #session{ready_state = open}) ->
+ State2 = lists:foldl(fun(Msg, State1) ->
+ emit({recv, iolist_to_binary(Msg)}, State1)
+ end, State, Messages),
+ {reply, ok, State2};
+
+%% Inbound data on a session that is not open is rejected.
+handle_call({received, _Data}, _From, State = #session{ready_state = _Any}) ->
+ {reply, error, State};
+
+handle_call(Request, _From, State) ->
+ {stop, {odd_request, Request}, State}.
+
+
+%% gen_server callback. {send, Data} enqueues an outbound message;
+%% {close, Status, Reason} marks the session closed. Both wake any
+%% waiting transport process with a go message.
+handle_cast({send, Data}, State = #session{outbound_queue = Q,
+ response_pid = RPid}) ->
+ case RPid of
+ undefined -> ok;
+ _Else -> RPid ! go
+ end,
+ {noreply, State#session{outbound_queue = queue:in(Data, Q)}};
+
+handle_cast({close, Status, Reason}, State = #session{response_pid = RPid}) ->
+ case RPid of
+ undefined -> ok;
+ _Else -> RPid ! go
+ end,
+ {noreply, State#session{ready_state = closed,
+ close_msg = {Status, Reason}}};
+
+handle_cast(Cast, State) ->
+ {stop, {odd_cast, Cast}, State}.
+
+
+%% gen_server callback for raw messages: transport death ('EXIT'),
+%% forced shutdown from websockets, the disconnect timer, and
+%% heartbeat triggers.
+handle_info({'EXIT', Pid, _Reason},
+ State = #session{response_pid = Pid}) ->
+ %% It is illegal for a connection to go away when receiving, we
+ %% may lose some messages that are in transit. Kill current
+ %% session.
+ {stop, normal, State#session{response_pid = undefined}};
+
+handle_info(force_shutdown, State) ->
+ %% Websockets may want to force closure sometimes
+ {stop, normal, State};
+
+%% No transport reattached within disconnect_delay: reap the session.
+handle_info(session_timeout, State = #session{response_pid = undefined}) ->
+ {stop, normal, State};
+
+%% Heartbeat due while a transport is attached: wake it so it polls
+%% and receives the heartbeat frame.
+handle_info(heartbeat_triggered, State = #session{response_pid = RPid}) when RPid =/= undefined ->
+ RPid ! go,
+ {noreply, State#session{heartbeat_tref = triggered}};
+
+handle_info(Info, State) ->
+ {stop, {odd_info, Info}, State}.
+
+
+%% gen_server callback: deregister the session from the ETS table and
+%% notify the user callback that the session closed.
+terminate(_, State = #session{id = SessionId}) ->
+ ets:delete(?ETS, SessionId),
+ _ = emit(closed, State),
+ ok.
+
+%% gen_server callback: no state migration needed between versions.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
--- /dev/null
+-module(sockjs_session_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_child/3]).
+-export([init/1]).
+
+%% --------------------------------------------------------------------------
+
+%% -spec start_link() -> ignore | {'ok', pid()} | {'error', any()}.
+%% Start the (locally registered) session supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% supervisor callback: simple_one_for_one with transient children, so
+%% a session that stops normally is not restarted but a crash is.
+init([]) ->
+    SessionSpec = {undefined,
+                   {sockjs_session, start_link, []},
+                   transient, 5000, worker, [sockjs_session]},
+    {ok, {{simple_one_for_one, 10, 10}, [SessionSpec]}}.
+
+%% Spawn one sockjs_session child under this supervisor.
+start_child(SessionId, Service, Info) ->
+ supervisor:start_child(?MODULE, [SessionId, Service, Info]).
--- /dev/null
+-module(sockjs_util).
+
+-export([rand32/0]).
+-export([encode_frame/1]).
+-export([url_escape/2]).
+
+-include("sockjs_internal.hrl").
+
+%% --------------------------------------------------------------------------
+
+%% -spec rand32() -> non_neg_integer().
+%% Uniform random integer in [0, 2^32 - 1], seeding this process's
+%% random state on first use (the seeded flag lives in the process
+%% dictionary, so seeding happens once per process).
+%% NOTE(review): now/0 and the random module are deprecated on modern
+%% OTP; presumably kept for the old releases this code targets —
+%% confirm before migrating to rand.
+rand32() ->
+ case get(random_seeded) of
+ undefined ->
+ {MegaSecs, Secs, MicroSecs} = now(),
+ _ = random:seed(MegaSecs, Secs, MicroSecs),
+ put(random_seeded, true);
+ _Else ->
+ ok
+ end,
+ random:uniform(erlang:trunc(math:pow(2,32)))-1.
+
+
+%% -spec encode_frame(frame()) -> iodata().
+%% Serialise a protocol frame for the wire: open -> "o",
+%% heartbeat -> "h", close -> "c" ++ JSON [Code, Reason],
+%% data -> "a" ++ JSON array of message strings.
+encode_frame({open, nil}) ->
+    <<"o">>;
+encode_frame({heartbeat, nil}) ->
+    <<"h">>;
+encode_frame({close, {Code, Reason}}) ->
+    Payload = sockjs_json:encode([Code, list_to_binary(Reason)]),
+    [<<"c">>, Payload];
+encode_frame({data, Messages}) ->
+    Binaries = [iolist_to_binary(Msg) || Msg <- Messages],
+    [<<"a">>, sockjs_json:encode(Binaries)].
+
+
+%% -spec url_escape(string(), string()) -> iolist().
+%% Percent-encode every character of Str that appears in Chars; all
+%% other characters pass through untouched.
+url_escape(Str, Chars) ->
+    [case lists:member(Char, Chars) of
+         true -> hex(Char);
+         false -> Char
+     end || Char <- Str].
+
+%% "%XY" escape for one byte, using uppercase hex digits.
+%% (Previously each nibble was rendered with integer_to_list/1, so
+%% nibbles >= 10 produced "10".."15" instead of "A".."F" — e.g. $/
+%% escaped to "%215" rather than "%2F".)
+hex(C) ->
+    <<Hi:4, Lo:4>> = <<C>>,
+    [$%, hex_digit(Hi), hex_digit(Lo)].
+
+%% Single hex digit character for a nibble value 0..15.
+hex_digit(D) when D < 10 -> $0 + D;
+hex_digit(D)             -> $A + D - 10.
--- /dev/null
+-module(sockjs_ws_handler).
+
+-export([received/3, reply/2, close/2]).
+
+-include("sockjs_internal.hrl").
+
+%% --------------------------------------------------------------------------
+
+%% -spec received(websocket|rawwebsocket, pid(), binary()) -> ok | shutdown.
+%% Feed one websocket frame into the session. Framed (sockjs) mode
+%% expects a JSON string or array of strings and shuts the transport
+%% down on malformed input; raw mode passes the bytes through as-is.
+%% Ignore empty
+received(_RawWebsocket, _SessionPid, <<>>) ->
+ ok;
+received(websocket, SessionPid, Data) ->
+ case sockjs_json:decode(Data) of
+ {ok, Msg} when is_binary(Msg) ->
+ session_received([Msg], SessionPid);
+ {ok, Messages} when is_list(Messages) ->
+ session_received(Messages, SessionPid);
+ _Else ->
+ shutdown
+ end;
+
+received(rawwebsocket, SessionPid, Data) ->
+ session_received([Data], SessionPid).
+
+%% Forward decoded messages to the session process, translating the
+%% no_session throw into a shutdown instruction for the transport.
+session_received(Messages, SessionPid) ->
+    try sockjs_session:received(Messages, SessionPid) of
+        ok -> ok
+    catch
+        throw:no_session -> shutdown
+    end.
+
+%% -spec reply(websocket|rawwebsocket, pid()) -> {close|open, binary()} | wait.
+%% Fetch the next outbound frame for a websocket transport.
+%% websocket: return the SockJS-framed encoding verbatim.
+%% rawwebsocket: unwrap single data messages, translate close frames
+%% to an empty payload, and transparently skip open/heartbeat frames
+%% by polling the session again.
+reply(websocket, SessionPid) ->
+ case sockjs_session:reply(SessionPid) of
+ {W, Frame} when W =:= ok orelse W =:= close->
+ Frame1 = sockjs_util:encode_frame(Frame),
+ {W, iolist_to_binary(Frame1)};
+ wait ->
+ wait
+ end;
+reply(rawwebsocket, SessionPid) ->
+ case sockjs_session:reply(SessionPid, false) of
+ {W, Frame} when W =:= ok orelse W =:= close->
+ case Frame of
+ {open, nil} -> reply(rawwebsocket, SessionPid);
+ {close, {_Code, _Reason}} -> {close, <<>>};
+ {data, [Msg]} -> {ok, iolist_to_binary(Msg)};
+ {heartbeat, nil} -> reply(rawwebsocket, SessionPid)
+ end;
+ wait ->
+ wait
+ end.
+
+%% -spec close(websocket|rawwebsocket, pid()) -> ok.
+%% Ask the session process to shut down; the websocket kind does not
+%% matter for closing.
+close(_RawWebsocket, SessionPid) ->
+    _ = erlang:send(SessionPid, force_shutdown),
+    ok.
--- /dev/null
+# The default goal
+dist:
+
+UMBRELLA_BASE_DIR:=..
+
+include $(UMBRELLA_BASE_DIR)/common.mk
+
+# We start at the initial package (i.e. the one in the current directory)
+PACKAGE_DIR:=$(call canonical_path,.)
+
+# Produce all of the releasable artifacts of this package
+.PHONY: dist
+dist: $(PACKAGE_DIR)+dist
+
+# Produce a source tarball for this package
+.PHONY: srcdist
+srcdist: $(PACKAGE_DIR)+srcdist
+
+# Clean the package and all its dependencies
+.PHONY: clean
+clean: $(PACKAGE_DIR)+clean-with-deps
+
+# Clean just the initial package
+.PHONY: clean-local
+clean-local: $(PACKAGE_DIR)+clean
+
+# Run erlang with the package, its tests, and all its dependencies
+# available.
+.PHONY: run
+run: $(PACKAGE_DIR)+run
+
+# Run the broker with the package, its tests, and all its dependencies
+# available.
+.PHONY: run-in-broker
+run-in-broker: $(PACKAGE_DIR)+run-in-broker
+
+# Runs the package's tests
+.PHONY: test
+test: $(PACKAGE_DIR)+test
+
+# Test the package with code coverage recording on. Note that
+# coverage only covers the in-broker tests.
+.PHONY: coverage
+coverage: $(PACKAGE_DIR)+coverage
+
+# Runs xref checks on the package
+.PHONY: check-xref
+check-xref: $(PACKAGE_DIR)+check-xref
+
+# Do the initial package
+include $(UMBRELLA_BASE_DIR)/do-package.mk
+
+# We always need the coverage package to support the coverage goal
+PACKAGE_DIR:=$(COVERAGE_PATH)
+$(eval $(call do_package,$(COVERAGE_PATH)))
--- /dev/null
+diff --git a/src/webmachine.app.src b/src/webmachine.app.src
+index eb949a2..2c46c3f 100644
+--- a/src/webmachine.app.src
++++ b/src/webmachine.app.src
+@@ -7,7 +7,6 @@
+ {registered, []},
+ {applications, [kernel,
+ stdlib,
+- crypto,
+ mochiweb]},
+ {mod, {webmachine_app, []}},
+ {env, []}
+diff --git a/src/webmachine.erl b/src/webmachine.erl
+index 47f1ce2..2e5be1b 100644
+--- a/src/webmachine.erl
++++ b/src/webmachine.erl
+@@ -28,7 +28,6 @@
+ %% @doc Start the webmachine server.
+ start() ->
+ webmachine_deps:ensure(),
+- application:start(crypto),
+ application:start(webmachine).
+
+ %% @spec stop() -> ok
+diff --git a/src/webmachine_decision_core.erl b/src/webmachine_decision_core.erl
+index 194c48d..3379388 100644
+--- a/src/webmachine_decision_core.erl
++++ b/src/webmachine_decision_core.erl
+@@ -722,32 +722,17 @@ variances() ->
+ end,
+ Accept ++ AcceptEncoding ++ AcceptCharset ++ resource_call(variances).
+
+--ifndef(old_hash).
+ md5(Bin) ->
+- crypto:hash(md5, Bin).
++ erlang:md5(Bin).
+
+ md5_init() ->
+- crypto:hash_init(md5).
++ erlang:md5_init().
+
+ md5_update(Ctx, Bin) ->
+- crypto:hash_update(Ctx, Bin).
++ erlang:md5_update(Ctx, Bin).
+
+ md5_final(Ctx) ->
+- crypto:hash_final(Ctx).
+--else.
+-md5(Bin) ->
+- crypto:md5(Bin).
+-
+-md5_init() ->
+- crypto:md5_init().
+-
+-md5_update(Ctx, Bin) ->
+- crypto:md5_update(Ctx, Bin).
+-
+-md5_final(Ctx) ->
+- crypto:md5_final(Ctx).
+--endif.
+-
++ erlang:md5_final(Ctx).
+
+ compute_body_md5() ->
+ case wrcall({req_body, 52428800}) of
+diff --git a/src/webmachine_request.erl b/src/webmachine_request.erl
+index 2a5ff7a..ee459a3 100644
+--- a/src/webmachine_request.erl
++++ b/src/webmachine_request.erl
+@@ -624,7 +624,7 @@ parts_to_body(BodyList, Size, Req) when is_list(BodyList) ->
+ {CT, _} ->
+ CT
+ end,
+- Boundary = mochihex:to_hex(crypto:rand_bytes(8)),
++ Boundary = mochihex:to_hex(mochiweb_util:rand_bytes(8)),
+ HeaderList = [{"Content-Type",
+ ["multipart/byteranges; ",
+ "boundary=", Boundary]}],
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
--- /dev/null
+include ../umbrella.mk
--- /dev/null
+UPSTREAM_SHORT_HASH:=e9359c7
--- /dev/null
+Webmachine is Copyright (c) Basho Technologies and is covered by the
+Apache License 2.0. It was downloaded from http://webmachine.basho.com/
+
--- /dev/null
+# Umbrella wrapper metadata for packaging Basho's webmachine.
+APP_NAME:=webmachine
+# Sibling umbrella package this wrapper depends on.
+DEPS:=mochiweb-wrapper
+
+# Upstream source location, pinned to an exact commit.
+UPSTREAM_GIT:=https://github.com/rabbitmq/webmachine.git
+UPSTREAM_REVISION:=e9359c7092b228f671417abe68319913f1aebe46
+RETAIN_ORIGINAL_VERSION:=true
+
+# Local patches applied on top of the upstream clone.
+WRAPPER_PATCHES:=10-remove-crypto-dependency.patch
+
+# Ship upstream's own .app.src instead of generating an app file.
+ORIGINAL_APP_FILE=$(CLONE_DIR)/src/$(APP_NAME).app.src
+DO_NOT_GENERATE_APP_FILE=true
+
+define package_rules
+
+# This rule is run *before* the one in do_package.mk
+# Include upstream's Apache license in the source distribution.
+$(PLUGINS_SRC_DIST_DIR)/$(PACKAGE_DIR)/.srcdist_done::
+ cp $(CLONE_DIR)/LICENSE $(PACKAGE_DIR)/LICENSE-Apache-Basho
+
+endef
--- /dev/null
+language: erlang
+notifications:
+ webhooks: http://basho-engbot.herokuapp.com/travis?key=66724b424957d598311ba00bb2d137fcae4eae21
+ email: eng@basho.com
+otp_release:
+ - R15B01
+ - R15B
+ - R14B04
+ - R14B03
--- /dev/null
+% -*- mode: erlang -*-
+% Erlang make (erl -make) configuration: compile every module under
+% src/, resolving headers from include/, writing beams with
+% debug_info into ebin/.
+{["src/*"],
+ [{i, "include"},
+ {outdir, "ebin"},
+ debug_info]
+}.
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
--- /dev/null
+ERL ?= erl
+APP := webmachine
+
+# 'deps' is not a file target; always run it.
+.PHONY: deps
+
+# Default: fetch dependencies, then compile via rebar.
+all: deps
+ @(./rebar compile)
+
+deps:
+ @(./rebar get-deps)
+
+clean:
+ @(./rebar clean)
+
+distclean: clean
+ @(./rebar delete-deps)
+
+# Generate EDoc for the application into the current directory.
+edoc:
+ @$(ERL) -noshell -run edoc_run application '$(APP)' '"."' '[{preprocess, true},{includes, ["."]}]'
+
+# Build everything, then run the EUnit suite (deps excluded).
+test: all
+ @(./rebar skip_deps=true eunit)
+
+
--- /dev/null
+* webmachine
+** Overview
+
+[[http://travis-ci.org/basho/webmachine][Travis-CI]] :: [[https://secure.travis-ci.org/basho/webmachine.png]]
+
+Webmachine is an application layer that adds HTTP semantic awareness
+on top of the excellent bit-pushing and HTTP syntax-management
+provided by mochiweb, and provides a simple and clean way to connect
+that to your application's behavior.
+
+More information is available [[http://webmachine.basho.com/][here]].
+
+** Quick Start
+A shell script is provided in the =webmachine= repository to help
+users quickly and easily create a new =webmachine= application.
+
+#+BEGIN_SRC shell
+git clone git://github.com/basho/webmachine.git
+cd webmachine
+./scripts/new_webmachine.sh mydemo
+#+END_SRC
+
+A destination path can also be passed to the =new_webmachine.sh=
+script.
+
+#+BEGIN_SRC shell
+./scripts/new_webmachine.sh mydemo ~/webmachine_applications
+#+END_SRC
+
+Once a new application has been created it can be built and started.
+
+#+BEGIN_SRC shell
+cd mydemo
+make
+./start.sh
+#+END_SRC
+
+The application will be available at [[http://localhost:8000]].
+
+To learn more continue reading [[http://webmachine.basho.com/][here]].
+
+** Contributing
+ We encourage contributions to =webmachine= from the community.
+
+ 1) Fork the =webmachine= repository on [[https://github.com/basho/webmachine][Github]].
+ 2) Clone your fork or add the remote if you already have a clone of
+ the repository.
+#+BEGIN_SRC shell
+git clone git@github.com:yourusername/webmachine.git
+# or
+git remote add mine git@github.com:yourusername/webmachine.git
+#+END_SRC
+ 3) Create a topic branch for your change.
+#+BEGIN_SRC shell
+git checkout -b some-topic-branch
+#+END_SRC
+ 4) Make your change and commit. Use a clear and descriptive commit
+ message, spanning multiple lines if detailed explanation is
+ needed.
+ 5) Push to your fork of the repository and then send a pull-request
+ through Github.
+#+BEGIN_SRC shell
+git push mine some-topic-branch
+#+END_SRC
+ 6) A Basho engineer or community maintainer will review your patch
+ and merge it into the main repository or send you feedback.
--- /dev/null
+The following people have contributed to Webmachine:
+
+Andy Gross
+Justin Sheehy
+John Muellerleile
+Robert Ahrens
+Jeremy Latt
+Bryan Fink
+Ryan Tilder
+Taavi Talvik
+Marc Worrell
+Seth Falcon
+Tuncer Ayaz
+Martin Scholl
+Paul Mineiro
+Dave Smith
+Arjan Scherpenisse
+Benjamin Black
+Anthony Molinaro
+Phil Pirozhkov
+Rusty Klophaus
\ No newline at end of file
--- /dev/null
+ERL ?= erl
+APP := webmachine_demo
+
+# 'deps' is not a file target; always run it.
+.PHONY: deps
+
+# Default: fetch dependencies, then compile via the parent's rebar.
+all: deps
+ @../rebar compile
+
+deps:
+ @../rebar get-deps
+
+clean:
+ @../rebar clean
+
+distclean: clean
+ @../rebar delete-deps
+
+# Generate EDoc for the application into the current directory.
+docs:
+ @erl -noshell -run edoc_run application '$(APP)' '"."' '[]'
--- /dev/null
+Project Skeleton for the webmachine_demo app.
+
+You should find in this directory:
+
+README : this file
+Makefile : simple make commands
+rebar : the Rebar build tool for Erlang applications
+rebar.config : configuration for Rebar
+start.sh : simple startup script for running webmachine_demo
+/ebin
+ /webmachine_demo.app : the Erlang app specification
+/src
+ /webmachine_demo_app.erl : base module for the Erlang application
+ /webmachine_demo_sup.erl : OTP supervisor for the application
+ /webmachine_demo_resource.erl : a simple example Webmachine resource
+/priv
+ /dispatch.conf : the Webmachine URL-dispatching table
+ /www : a convenient place to put your static web content
+
+You probably want to do one of the following things at this point:
+
+0. Build the skeleton application:
+ $ make
+ - or -
+ $ ./rebar compile
+
+1. Start up the skeleton application:
+ $ ./start.sh
+
+2. Test the basic application:
+ Visit http://localhost:8000/demo
+
+3. Change the basic application:
+ edit src/webmachine_demo_resource.erl
+
+4. Test the filesystem resource:
+ $ mkdir /tmp/fs
+ $ echo "Hello World." > /tmp/fs/demo.txt
+ Visit http://localhost:8000/fs/demo.txt
+
+5. Add some new resources:
+ edit src/YOUR_NEW_RESOURCE.erl
+ edit priv/dispatch.conf
--- /dev/null
+%%-*- mode: erlang -*-
+{["demo", '*'], webmachine_demo_resource, []}.
+{["fs", '*'], webmachine_demo_fs_resource, [{root, "/tmp/fs"}]}.
--- /dev/null
+%%-*- mode: erlang -*-
+
+%% NOTE(review): GitHub no longer serves the git:// protocol; this
+%% dependency URL likely needs to be https:// to fetch today.
+{deps, [{webmachine, "1.10.*", {git, "git://github.com/basho/webmachine", "HEAD"}}]}.
--- /dev/null
+%%-*- mode: erlang -*-
+{application, webmachine_demo,
+ [
+ {description, "demo"},
+ {vsn, "1"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib,
+ crypto,
+ mochiweb,
+ webmachine
+ ]},
+ {mod, { webmachine_demo_app, []}},
+ {env, []}
+ ]}.
--- /dev/null
+%% @doc Start/stop helpers for the webmachine_demo application and
+%% the OTP applications it depends on.
+-module(webmachine_demo).
+-author('Andy Gross <andy@basho.com>').
+-author('Justin Sheehy <justin@basho.com>').
+-export([start/0, start_link/0, stop/0]).
+
+%% Start App unless it is already running; crash on any other error.
+ensure_started(App) ->
+ case application:start(App) of
+ ok ->
+ ok;
+ {error, {already_started, App}} ->
+ ok
+ end.
+
+%% @spec start_link() -> {ok,Pid::pid()}
+%% @doc Starts the app for inclusion in a supervisor tree
+start_link() ->
+ ensure_started(inets),
+ ensure_started(crypto),
+ ensure_started(mochiweb),
+ application:set_env(webmachine, webmachine_logger_module,
+ webmachine_logger),
+ ensure_started(webmachine),
+ webmachine_demo_sup:start_link().
+
+%% @spec start() -> ok
+%% @doc Start the webmachine_demo server.
+start() ->
+ ensure_started(inets),
+ ensure_started(crypto),
+ ensure_started(mochiweb),
+ application:set_env(webmachine, webmachine_logger_module,
+ webmachine_logger),
+ ensure_started(webmachine),
+ application:start(webmachine_demo).
+
+%% @spec stop() -> ok
+%% @doc Stop the webmachine_demo server.
+%% Dependencies are stopped in reverse start order; inets was started
+%% by start/0 but was previously never stopped here.
+stop() ->
+ Res = application:stop(webmachine_demo),
+ application:stop(webmachine),
+ application:stop(mochiweb),
+ application:stop(crypto),
+ application:stop(inets),
+ Res.
--- /dev/null
+%% @author Andy Gross <andy@basho.com>
+%% @author Justin Sheehy <justin@basho.com>
+
+%% @doc Callbacks for the webmachine_demo application.
+
+-module(webmachine_demo_app).
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for webmachine_demo.
+%% Delegates entirely to the root supervisor.
+start(_Type, _StartArgs) ->
+ webmachine_demo_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for webmachine_demo.
+stop(_State) ->
+ ok.
--- /dev/null
+%% @author Bryan Fink <bryan@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @author Justin Sheehy <justin@basho.com>
+%% @copyright 2008-2009 Basho Technologies, Inc.
+%% @doc Webmachine resource serving files beneath a configured root
+%% directory: GET/HEAD, PUT/POST (create or update) and DELETE.
+
+-module(webmachine_demo_fs_resource).
+-export([init/1]).
+-export([allowed_methods/2,
+ resource_exists/2,
+ last_modified/2,
+ content_types_provided/2,
+ content_types_accepted/2,
+ delete_resource/2,
+ post_is_create/2,
+ create_path/2,
+ provide_content/2,
+ accept_content/2,
+ generate_etag/2]).
+
+%% root: served directory; response_body: cached file contents;
+%% metadata: derived response metadata (content-type, etag, ...).
+-record(context, {root,response_body=undefined,metadata=[]}).
+
+-include_lib("kernel/include/file.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%% Dispatch config must supply {root, Dir}; crash early otherwise.
+init(ConfigProps) ->
+ {root, Root} = proplists:lookup(root, ConfigProps),
+ {ok, #context{root=Root}}.
+
+allowed_methods(ReqData, Context) ->
+ {['HEAD', 'GET', 'PUT', 'DELETE', 'POST'], ReqData, Context}.
+
+%% Resolve Name under the configured root; false for the empty path.
+file_path(_Context, []) ->
+ false;
+file_path(Context, Name) ->
+ RelName = case hd(Name) of
+ %% was the string "/", which can never match the character
+ %% hd/1 returns, leaving this strip branch dead
+ $/ -> tl(Name);
+ _ -> Name
+ end,
+ filename:join([Context#context.root, RelName]).
+
+%% {true, AbsPath} when Name names a regular file, else false.
+file_exists(Context, Name) ->
+ NamePath = file_path(Context, Name),
+ case filelib:is_regular(NamePath) of
+ true ->
+ {true, NamePath};
+ false ->
+ false
+ end.
+
+resource_exists(ReqData, Context) ->
+ Path = wrq:disp_path(ReqData),
+ case file_exists(Context, Path) of
+ {true, _} ->
+ {true, ReqData, Context};
+ _ ->
+ %% the special path "p" is treated as always existing —
+ %% presumably a POST endpoint; confirm against the demo docs
+ case Path of
+ "p" -> {true, ReqData, Context};
+ _ -> {false, ReqData, Context}
+ end
+ end.
+
+%% Ensure the context carries the file's contents; {true, Ctx} on
+%% success, {false, Ctx} when the file does not exist.
+maybe_fetch_object(Context, Path) ->
+ % if returns {true, NewContext} then NewContext has response_body
+ case Context#context.response_body of
+ undefined ->
+ case file_exists(Context, Path) of
+ {true, FullPath} ->
+ {ok, Value} = file:read_file(FullPath),
+ {true, Context#context{response_body=Value}};
+ false ->
+ {false, Context}
+ end;
+ _Body ->
+ {true, Context}
+ end.
+
+%% Content type is guessed from the requested path's extension.
+content_types_provided(ReqData, Context) ->
+ CT = webmachine_util:guess_mime(wrq:disp_path(ReqData)),
+ {[{CT, provide_content}], ReqData,
+ Context#context{metadata=[{'content-type', CT}|Context#context.metadata]}}.
+
+%% Accept whatever media type the client sent (sans parameters),
+%% defaulting to application/octet-stream.
+content_types_accepted(ReqData, Context) ->
+ CT = case wrq:get_req_header("content-type", ReqData) of
+ undefined -> "application/octet-stream";
+ X -> X
+ end,
+ {MT, _Params} = webmachine_util:media_type_to_detail(CT),
+ {[{MT, accept_content}], ReqData,
+ Context#context{metadata=[{'content-type', MT}|Context#context.metadata]}}.
+
+%% Write the request body to the target file; on create, also set a
+%% Location header pointing at the new /fs/ URL.
+accept_content(ReqData, Context) ->
+ Path = wrq:disp_path(ReqData),
+ FP = file_path(Context, Path),
+ ok = filelib:ensure_dir(FP),
+ ReqData1 = case file_exists(Context, Path) of
+ {true, _} ->
+ ReqData;
+ _ ->
+ LOC = "http://" ++
+ wrq:get_req_header("host", ReqData) ++
+ "/fs/" ++ Path,
+ wrq:set_resp_header("Location", LOC, ReqData)
+ end,
+ Value = wrq:req_body(ReqData1),
+ case file:write_file(FP, Value) of
+ ok ->
+ {true, wrq:set_resp_body(Value, ReqData1), Context};
+ Err ->
+ {{error, Err}, ReqData1, Context}
+ end.
+
+post_is_create(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+%% POST creates at the client-suggested "slug" header path, unless a
+%% file already exists there.
+create_path(ReqData, Context) ->
+ case wrq:get_req_header("slug", ReqData) of
+ undefined -> {undefined, ReqData, Context};
+ Slug ->
+ case file_exists(Context, Slug) of
+ {true, _} -> {undefined, ReqData, Context};
+ _ -> {Slug, ReqData, Context}
+ end
+ end.
+
+delete_resource(ReqData, Context) ->
+ case file:delete(file_path(
+ Context, wrq:disp_path(ReqData))) of
+ ok -> {true, ReqData, Context};
+ _ -> {false, ReqData, Context}
+ end.
+
+provide_content(ReqData, Context) ->
+ case maybe_fetch_object(Context, wrq:disp_path(ReqData)) of
+ {true, NewContext} ->
+ Body = NewContext#context.response_body,
+ %% was {Body, ReqData, Context}, discarding the context
+ %% that caches the file contents
+ {Body, ReqData, NewContext};
+ {false, NewContext} ->
+ {error, ReqData, NewContext}
+ end.
+
+%% Crashes (server error) if the file vanished between callbacks;
+%% acceptable for demo code.
+last_modified(ReqData, Context) ->
+ {true, FullPath} = file_exists(Context,
+ wrq:disp_path(ReqData)),
+ LMod = filelib:last_modified(FullPath),
+ {LMod, ReqData, Context#context{metadata=[{'last-modified',
+ httpd_util:rfc1123_date(LMod)}|Context#context.metadata]}}.
+
+%% Hex SHA-1 of the body. NOTE(review): crypto:sha/1 was removed in
+%% modern OTP (crypto:hash(sha, Body) is the replacement); kept as-is
+%% for the old OTP releases this tree targets.
+hash_body(Body) -> mochihex:to_hex(binary_to_list(crypto:sha(Body))).
+
+generate_etag(ReqData, Context) ->
+ case maybe_fetch_object(Context, wrq:disp_path(ReqData)) of
+ {true, BodyContext} ->
+ ETag = hash_body(BodyContext#context.response_body),
+ {ETag, ReqData,
+ BodyContext#context{metadata=[{etag,ETag}|
+ BodyContext#context.metadata]}};
+ _ ->
+ {undefined, ReqData, Context}
+ end.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @copyright 2007-2009 Basho Technologies, Inc. All Rights Reserved.
+%% @doc Example webmachine_resource.
+
+-module(webmachine_demo_resource).
+-author('Justin Sheehy <justin@basho.com>').
+-export([init/1, to_html/2, to_text/2, content_types_provided/2,
+ is_authorized/2, generate_etag/2, expires/2, last_modified/2]).
+
+-include_lib("webmachine/include/webmachine.hrl").
+
+%% No per-request state is needed for this demo.
+init([]) -> {ok, undefined}.
+
+%% Offer HTML and plain-text renderings of the same greeting.
+content_types_provided(ReqData, Context) ->
+ {[{"text/html", to_html},{"text/plain",to_text}], ReqData, Context}.
+
+%% Greet whatever trailing path was dispatched to this resource.
+to_text(ReqData, Context) ->
+ Path = wrq:disp_path(ReqData),
+ Body = io_lib:format("Hello ~s from webmachine.~n", [Path]),
+ {Body, ReqData, Context}.
+
+%% HTML view wraps the text view's body.
+to_html(ReqData, Context) ->
+ {Body, _RD, Ctx2} = to_text(ReqData, Context),
+ HBody = io_lib:format("<html><body>~s</body></html>~n",
+ [erlang:iolist_to_binary(Body)]),
+ {HBody, ReqData, Ctx2}.
+
+%% HTTP Basic auth demo: only the /authdemo path is protected, with
+%% the fixed credentials authdemo:demo1; all other paths are open.
+%% Returning the realm string triggers a 401 challenge.
+is_authorized(ReqData, Context) ->
+ case wrq:disp_path(ReqData) of
+ "authdemo" ->
+ case wrq:get_req_header("authorization", ReqData) of
+ "Basic "++Base64 ->
+ Str = base64:mime_decode_to_string(Base64),
+ case string:tokens(Str, ":") of
+ ["authdemo", "demo1"] ->
+ {true, ReqData, Context};
+ _ ->
+ {"Basic realm=webmachine", ReqData, Context}
+ end;
+ _ ->
+ {"Basic realm=webmachine", ReqData, Context}
+ end;
+ _ -> {true, ReqData, Context}
+ end.
+
+%% NOTE(review): this fixed expiry date was far-future when written
+%% but is now in the past, so responses are served already expired.
+expires(ReqData, Context) -> {{{2021,1,1},{0,0,0}}, ReqData, Context}.
+
+last_modified(ReqData, Context) ->
+ {calendar:now_to_universal_time(os:timestamp()), ReqData, Context}.
+
+%% Demo ETag: just the raw request path (not content-derived).
+generate_etag(ReqData, Context) -> {wrq:raw_path(ReqData), ReqData, Context}.
--- /dev/null
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+%% NOTE(review): skeleton placeholder author/copyright left in place.
+
+%% @doc Supervisor for the webmachine_demo application.
+
+-module(webmachine_demo_sup).
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Add processes if necessary.
+%% Recomputes the child specs via init/1, terminates and deletes
+%% children no longer specified, then starts any newly-listed ones.
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+
+ Old = sets:from_list(
+ [Name || {Name, _, _, _} <- supervisor:which_children(?MODULE)]),
+ New = sets:from_list([Name || {Name, _, _, _, _, _} <- Specs]),
+ Kill = sets:subtract(Old, New),
+
+ sets:fold(fun (Id, ok) ->
+ supervisor:terminate_child(?MODULE, Id),
+ supervisor:delete_child(?MODULE, Id),
+ ok
+ end, ok, Kill),
+
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback.
+%% Listens on WEBMACHINE_IP (default 0.0.0.0), fixed port 8000; the
+%% dispatch table is consulted from ../priv/dispatch.conf relative to
+%% this module's beam file.
+init([]) ->
+ Ip = case os:getenv("WEBMACHINE_IP") of false -> "0.0.0.0"; Any -> Any end,
+ {ok, Dispatch} = file:consult(filename:join(
+ [filename:dirname(code:which(?MODULE)),
+ "..", "priv", "dispatch.conf"])),
+ WebConfig = [
+ {ip, Ip},
+ {port, 8000},
+ {log_dir, "priv/log"},
+ {dispatch, Dispatch}],
+ Web = {webmachine_mochiweb,
+ {webmachine_mochiweb, start, [WebConfig]},
+ permanent, 5000, worker, dynamic},
+ Processes = [Web],
+ {ok, { {one_for_one, 10, 10}, Processes} }.
--- /dev/null
+#!/bin/sh
+# Start the demo node with the app and its deps on the code path.
+# Quote dirname/$PWD so paths containing spaces work; the deps glob
+# must stay outside the quotes to expand.
+cd "$(dirname "$0")"
+exec erl -pa "$PWD"/ebin "$PWD"/deps/*/ebin -boot start_sasl -s reloader -s webmachine_demo
--- /dev/null
+%% Fragment defining the default `ping' resource callback; there is
+%% no -module attribute, so this file is presumably spliced into
+%% resource modules by the build — confirm against the wrapper
+%% patches. ping/2 unconditionally reports the resource as reachable.
+-export([ping/2]).
+
+-include_lib("webmachine/include/wm_reqdata.hrl").
+
+ping(ReqData, State) ->
+ {pong, ReqData, State}.
+
+
--- /dev/null
+%% Per-request access-log record handed to the configured logger
+%% module; time fields are timestamp tuples.
+-record(wm_log_data,
+ {resource_module :: atom(),
+ start_time :: tuple(),
+ method :: atom(),
+ headers,
+ peer,
+ path :: string(),
+ version,
+ response_code,
+ response_length,
+ end_time :: tuple(),
+ finish_time :: tuple(),
+ notes}).
+-type wm_log_data() :: #wm_log_data{}.
+
+%% Registered name of the log event manager process.
+-define(EVENT_LOGGER, webmachine_log_event).
--- /dev/null
+%% Request data record threaded through every webmachine resource
+%% callback (accessed via the wrq module rather than directly).
+-record(wm_reqdata, {method, scheme, version, peer, wm_state,
+ disp_path, path, raw_path, path_info, path_tokens,
+ app_root,response_code,max_recv_body, max_recv_hunk,
+ req_cookie, req_qs, req_headers, req_body,
+ resp_redirect, resp_headers, resp_body, resp_range,
+ host_tokens, port, notes
+ }).
+
--- /dev/null
+%% Internal per-connection state: the socket plus lazily-populated
+%% request body, metadata, and logging fields.
+-record(wm_reqstate, {socket=undefined,
+ metadata=orddict:new(),
+ range=undefined,
+ peer=undefined,
+ reqdata=undefined,
+ bodyfetch=undefined,
+ reqbody=undefined,
+ log_data=undefined
+ }).
+
--- /dev/null
+%% A resource module with its state, cached export list, and trace
+%% setting — presumably consumed by webmachine's dispatch/decision
+%% code; confirm against the webmachine_resource module.
+-record(wm_resource, {module, modstate, modexports, trace}).
--- /dev/null
+ERL ?= erl
+# rebar template: {{appid}} is substituted at generation time.
+APP := {{appid}}
+
+# 'deps' is not a file target; always run it.
+.PHONY: deps
+
+# Default: fetch dependencies, then compile via rebar.
+all: deps
+ @./rebar compile
+
+deps:
+ @./rebar get-deps
+
+clean:
+ @./rebar clean
+
+distclean: clean
+ @./rebar delete-deps
+
+# Generate EDoc for the application into the current directory.
+docs:
+ @erl -noshell -run edoc_run application '$(APP)' '"."' '[]'
--- /dev/null
+Project Skeleton for the {{appid}} app.
+
+You should find in this directory:
+
+README : this file
+Makefile : simple make commands
+rebar : the Rebar build tool for Erlang applications
+rebar.config : configuration for Rebar
+start.sh : simple startup script for running {{appid}}
+/ebin
+ /{{appid}}.app : the Erlang app specification
+/src
+ /{{appid}}_app.erl : base module for the Erlang application
+ /{{appid}}_sup.erl : OTP supervisor for the application
+ /{{appid}}_resource.erl : a simple example Webmachine resource
+/priv
+ /dispatch.conf : the Webmachine URL-dispatching table
+ /www : a convenient place to put your static web content
+
+You probably want to do one of the following things at this point:
+
+0. Build the skeleton application:
+ $ make
+ - or -
+ $ ./rebar compile
+
+1. Start up the skeleton application:
+ $ ./start.sh
+
+2. Change the basic application:
+ edit src/{{appid}}_resource.erl
+
+3. Add some new resources:
+ edit src/YOUR_NEW_RESOURCE.erl
+ edit priv/dispatch.conf
--- /dev/null
+%%-*- mode: erlang -*-
+{[], {{appid}}_resource, []}.
--- /dev/null
+%%-*- mode: erlang -*-
+
+%% NOTE(review): GitHub no longer serves the git:// protocol; this
+%% dependency URL likely needs to be https:// to fetch today.
+{deps, [{webmachine, "1.10.*", {git, "git://github.com/basho/webmachine", "HEAD"}}]}.
--- /dev/null
+%%-*- mode: erlang -*-
+{application, {{appid}},
+ [
+ {description, "{{appid}}"},
+ {vsn, "1"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib,
+ inets,
+ crypto,
+ mochiweb,
+ webmachine
+ ]},
+ {mod, { {{appid}}_app, []}},
+ {env, []}
+ ]}.
--- /dev/null
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc {{appid}} startup code
+%% (rebar template: {{appid}} is substituted at generation time)
+
+-module({{appid}}).
+-author('author <author@example.com>').
+-export([start/0, start_link/0, stop/0]).
+
+%% Start App unless it is already running; crash on any other error.
+ensure_started(App) ->
+ case application:start(App) of
+ ok ->
+ ok;
+ {error, {already_started, App}} ->
+ ok
+ end.
+
+%% @spec start_link() -> {ok,Pid::pid()}
+%% @doc Starts the app for inclusion in a supervisor tree
+start_link() ->
+ ensure_started(inets),
+ ensure_started(crypto),
+ ensure_started(mochiweb),
+ application:set_env(webmachine, webmachine_logger_module,
+ webmachine_logger),
+ ensure_started(webmachine),
+ {{appid}}_sup:start_link().
+
+%% @spec start() -> ok
+%% @doc Start the {{appid}} server.
+start() ->
+ ensure_started(inets),
+ ensure_started(crypto),
+ ensure_started(mochiweb),
+ application:set_env(webmachine, webmachine_logger_module,
+ webmachine_logger),
+ ensure_started(webmachine),
+ application:start({{appid}}).
+
+%% @spec stop() -> ok
+%% @doc Stop the {{appid}} server.
+%% Dependencies are stopped in reverse start order.
+stop() ->
+ Res = application:stop({{appid}}),
+ application:stop(webmachine),
+ application:stop(mochiweb),
+ application:stop(crypto),
+ application:stop(inets),
+ Res.
--- /dev/null
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc Callbacks for the {{appid}} application.
+%% (rebar template: {{appid}} is substituted at generation time)
+
+-module({{appid}}_app).
+-author('author <author@example.com>').
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for {{appid}}.
+%% Delegates entirely to the generated root supervisor.
+start(_Type, _StartArgs) ->
+ {{appid}}_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for {{appid}}.
+stop(_State) ->
+ ok.
--- /dev/null
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+%% @doc Example webmachine_resource.
+%% (rebar template: {{appid}} is substituted at generation time)
+
+-module({{appid}}_resource).
+-export([init/1, to_html/2]).
+
+-include_lib("webmachine/include/webmachine.hrl").
+
+%% No per-request state is needed.
+init([]) -> {ok, undefined}.
+
+%% Minimal GET handler: a constant HTML greeting.
+to_html(ReqData, State) ->
+ {"<html><body>Hello, new world</body></html>", ReqData, State}.
--- /dev/null
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc Supervisor for the {{appid}} application.
+%% (rebar template: {{appid}} is substituted at generation time)
+
+-module({{appid}}_sup).
+-author('author <author@example.com>').
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Add processes if necessary.
+%% Recomputes the child specs via init/1, terminates and deletes
+%% children no longer specified, then starts any newly-listed ones.
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+
+ Old = sets:from_list(
+ [Name || {Name, _, _, _} <- supervisor:which_children(?MODULE)]),
+ New = sets:from_list([Name || {Name, _, _, _, _, _} <- Specs]),
+ Kill = sets:subtract(Old, New),
+
+ sets:fold(fun (Id, ok) ->
+ supervisor:terminate_child(?MODULE, Id),
+ supervisor:delete_child(?MODULE, Id),
+ ok
+ end, ok, Kill),
+
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback.
+%% Listens on WEBMACHINE_IP/WEBMACHINE_PORT (default 0.0.0.0:8000);
+%% the dispatch table is consulted from priv/dispatch.conf.
+init([]) ->
+ Ip = case os:getenv("WEBMACHINE_IP") of false -> "0.0.0.0"; Any -> Any end,
+ {ok, App} = application:get_application(?MODULE),
+ {ok, Dispatch} = file:consult(filename:join([priv_dir(App),
+ "dispatch.conf"])),
+ Port = case os:getenv("WEBMACHINE_PORT") of
+ false -> 8000;
+ %% os:getenv/1 yields a string; the listener needs an
+ %% integer port (was previously passed through unconverted)
+ AnyPort -> list_to_integer(AnyPort)
+ end,
+ WebConfig = [
+ {ip, Ip},
+ {port, Port},
+ {log_dir, "priv/log"},
+ {dispatch, Dispatch}],
+ Web = {webmachine_mochiweb,
+ {webmachine_mochiweb, start, [WebConfig]},
+ permanent, 5000, worker, [mochiweb_socket_server]},
+ Processes = [Web],
+ {ok, { {one_for_one, 10, 10}, Processes} }.
+
+%%
+%% @doc return the priv dir
+%% Falls back to ../priv relative to the ebin directory when the app
+%% is not on a conventional code path (e.g. a source checkout).
+priv_dir(Mod) ->
+ case code:priv_dir(Mod) of
+ {error, bad_name} ->
+ Ebin = filename:dirname(code:which(Mod)),
+ filename:join(filename:dirname(Ebin), "priv");
+ PrivDir ->
+ PrivDir
+ end.
--- /dev/null
+#!/bin/sh
+# Start the generated node with the app and its deps on the code path.
+# Quote dirname/$PWD so paths containing spaces work; the deps glob
+# must stay outside the quotes to expand.
+cd "$(dirname "$0")"
+exec erl -pa "$PWD"/ebin "$PWD"/deps/*/ebin -boot start_sasl -s reloader -s {{appid}}
--- /dev/null
+%%-*- mode: erlang -*-
+%% Basic Webmachine application skeleton
+
+%% Variables:
+%% appid: name of the application to build
+%% default = "wmskel"
+%% webmachine: path to webmachine from this template
+%% default = "../.."
+%% prefix: path where the application should be created
+%% default = "."
+{variables, [{appid, "wmskel"},
+ {webmachine, "../.."},
+ {prefix, "."}]}.
+
+%% main project files
+{template, "README", "{{prefix}}/README"}.
+{template, "Makefile", "{{prefix}}/Makefile"}.
+{template, "rebar.config", "{{prefix}}/rebar.config"}.
+{file, "{{webmachine}}/rebar", "{{prefix}}/rebar"}.
+{chmod, 8#744, "{{prefix}}/rebar"}.
+{template, "start.sh", "{{prefix}}/start.sh"}.
+{chmod, 8#744, "{{prefix}}/start.sh"}.
+
+%% application sources: the wmskel_* templates are written out under
+%% the chosen appid name
+{template, "src/wmskel.app.src", "{{prefix}}/src/{{appid}}.app.src"}.
+
+{template, "src/wmskel.erl", "{{prefix}}/src/{{appid}}.erl"}.
+{template, "src/wmskel_app.erl", "{{prefix}}/src/{{appid}}_app.erl"}.
+{template, "src/wmskel_sup.erl", "{{prefix}}/src/{{appid}}_sup.erl"}.
+{template, "src/wmskel_resource.erl", "{{prefix}}/src/{{appid}}_resource.erl"}.
+
+%% dispatch table and static document root
+{template, "priv/dispatch.conf", "{{prefix}}/priv/dispatch.conf"}.
+{dir, "{{prefix}}/priv/www"}.
+
+%% dependencies
+{dir, "{{prefix}}/deps"}.
--- /dev/null
+/* wmtrace UI styles: a full-page decision-map canvas, fixed zoom
+   controls, a hover preview popup, and a resizable right-hand info
+   panel with request/response/decision tabs. */
+body {
+ margin:0px;
+ padding:0px;
+}
+
+/* the decision-map canvas; overlays stack above it via z-index */
+canvas#v3map {
+ margin-top:2em;
+ z-index: 1;
+}
+
+/* invisible full-width probe the JS uses to measure window width */
+div#sizetest {
+ width:100%;
+}
+
+div#zoompanel {
+ height:2em;
+ position:fixed;
+ z-index:10;
+}
+
+/* hover popup listing a decision's resource calls; JS toggles display */
+div#preview {
+ position:absolute;
+ display:none;
+ background:#dddddd;
+ border:1px solid #999999;
+}
+
+div#preview ul {
+ padding: 0px 0px 0px 0.5em;
+ margin: 0px;
+ list-style: none;
+}
+
+/* right-docked detail panel; JS moves its left edge to resize it */
+div#infopanel {
+ z-index:20;
+ background:#dddddd;
+ position:fixed;
+ top:0px;
+ right:0px;
+ bottom:0px;
+ left:75%;
+ min-width:30em;
+ padding:5px;
+}
+
+/* drag handle strip hanging off the panel's left edge */
+div#infocontrols {
+ position:absolute;
+ top:0px;
+ bottom:0px;
+ left:-5px;
+ width:5px;
+ background:#999999;
+ cursor:ew-resize;
+}
+
+/* the three tab nubs inside the drag strip */
+div#infocontrols div {
+ position:absolute;
+ left:-15px;
+ width:20px;
+ height:49px;
+ background:#999999;
+ cursor:pointer;
+}
+
+div#infocontrols div.selectedtab {
+ background:#dddddd;
+ border-top: 1px solid #999999;
+ border-left: 1px solid #999999;
+ border-bottom: 1px solid #999999;
+}
+
+div#requesttab {
+ top:2px;
+}
+
+div#responsetab {
+ top:54px;
+}
+
+div#decisiontab {
+ top:106px;
+}
+
+/* tab pages; only one is shown at a time (JS flips display) */
+div#requestdetail, div#responsedetail, div#decisiondetail {
+ height:100%;
+}
+
+div#responsedetail, div#decisiondetail {
+ display:none;
+}
+
+div#infopanel ul {
+ list-style:none;
+ padding-left:0px;
+ height:5em;
+ overflow-y:scroll;
+}
+
+pre {
+ height:40%;
+ overflow:scroll;
+}
+
+div#responsebody, div#requestbody {
+ height:70%;
+ overflow-y:scroll;
+}
--- /dev/null
+/* Stroke colors: decisions whose resource implemented a callback are
+   highlighted; everything else uses the regular gray. */
+var HIGHLIGHT = '#cc00cc';
+var REGULAR = '#666666';
+
+/* Unscaled canvas x-coordinate of each decision-map column. Keys are
+   the column letters of decision ids ('b13' -> column 'b'); doubled
+   letters ('gg', 'll', ...) are bend-point columns between lanes. */
+var cols = {
+ 'a':173,
+ 'b':325,
+ 'c':589,
+ 'd':797,
+ 'e':1005,
+ 'f':1195,
+ 'g':1402,
+ 'gg':1515,
+ 'h':1572,
+ 'i':1799,
+ 'j':1893,
+ 'k':1988,
+ 'l':2157,
+ 'll':2346,
+ 'm':2403,
+ 'mm':2535,
+ 'n':2554,
+ 'o':2649,
+ 'oo':2781,
+ 'ooo':2801,
+ 'p':2894,
+ 'q':3007
+};
+
+/* Unscaled canvas y-coordinate of each decision-map row, keyed by the
+   numeric part of a decision id ('b13' -> row '13'). */
+var rows = {
+ '1':221,
+ '2':298,
+ '3':373,
+ '4':448,
+ '5':524,
+ '6':599,
+ '7':675,
+ '8':751,
+ '9':826,
+ '10':902,
+ '11':977,
+ '12':1053,
+ '13':1129,
+ '14':1204,
+ '15':1280,
+ '16':1355,
+ '17':1431,
+ '18':1506,
+ '19':1583,
+ '20':1658,
+ '21':1734,
+ '22':1809,
+ '23':1885,
+ '24':1961,
+ '25':2036,
+ '26':2112
+};
+
+/* Decision-graph edges. Key: source decision id concatenated with the
+   target decision id or HTTP status code. Value: the waypoint list
+   walked by getSeg(); interior entries are bend points, the last entry
+   is the destination (a decision id or a key into ends). */
+var edges = {
+ 'b14b13':['b14','b13'],
+
+ 'b13b12':['b13','b12'],
+ 'b13503':['b13','503'],
+
+ 'b12b11':['b12','b11'],
+ 'b12501':['b12','501'],
+
+ 'b11b10':['b11','b10'],
+ 'b11414':['b11','414'],
+
+ 'b10b9':['b10','b9'],
+ 'b10405':['b10','405'],
+
+ 'b9b8':['b9','b8'],
+ 'b9400':['b9','400'],
+
+ 'b8b7':['b8','b7'],
+ 'b8401':['b8','401'],
+
+ 'b7b6':['b7','b6'],
+ 'b7403':['b7','403'],
+
+ 'b6b5':['b6','b5'],
+ 'b6501':['b6','501a'],
+
+ 'b5b4':['b5','b4'],
+ 'b5415':['b5','415'],
+
+ 'b4b3':['b4','b3'],
+ 'b4413':['b4','b4'],
+
+ 'b3c3':['b3','c3'],
+ 'b3200':['b3','200'],
+
+ 'c3c4':['c3','c4'],
+ 'c3d4':['c3','d3','d4'],
+
+ 'c4d4':['c4','d4'],
+ 'c4406':['c4','406'],
+
+ 'd4d5':['d4','d5'],
+ 'd4e5':['d4','e4','e5'],
+
+ 'd5e5':['d5','e5'],
+ 'd5406':['d5','d7','406'],
+
+ 'e5e6':['e5','e6'],
+ 'e5f6':['e5','f5','f6'],
+
+ 'e6f6':['e6','f6'],
+ 'e6406':['e6','e7','406'],
+
+ 'f6f7':['f6','f7'],
+ 'f6g7':['f6','g6','g7'],
+
+ 'f7g7':['f7','g7'],
+ 'f7406':['f7','406'],
+
+ 'g7g8':['g7','g8'],
+ 'g7h7':['g7','h7'],
+
+ 'g8g9':['g8','g9'],
+ 'g8h10':['g8','h8','h10'],
+
+ 'g9g11':['g9','g11'],
+ 'g9h10':['g9','gg9','gg10','h10'],
+
+ 'g11h10':['g11','gg11','gg10','h10'],
+ 'g11412':['g11','g18','412a'],
+
+ 'h7i7':['h7','i7'],
+ 'h7412':['h7','412'],
+
+ 'h10h11':['h10','h11'],
+ 'h10i12':['h10','i10','i12'],
+
+ 'h11h12':['h11','h12'],
+ 'h11i12':['h11','i11','i12'],
+
+ 'h12i12':['h12','i12'],
+ 'h12412':['h12','412a'],
+
+ 'i4p3':['i4','i3','p3'],
+ 'i4301':['i4','301'],
+
+ 'i7i4':['i7','i4'],
+ 'i7k7':['i7','k7'],
+
+ 'i12l13':['i12','l12','l13'],
+ 'i12i13':['i12','i13'],
+
+ 'i13k13':['i13','k13'],
+ 'i13j18':['i13','i17','j17','j18'],
+
+ 'j18412':['j18','412a'],
+ 'j18304':['j18','304'],
+
+ 'k5l5':['k5','l5'],
+ 'k5301':['k5','301'],
+
+ 'k7k5':['k7','k5'],
+ 'k7l7':['k7','l7'],
+
+ 'k13j18':['k13','k17','j17','j18'],
+ 'k13l13':['k13','l13'],
+
+ 'l5m5':['l5','m5'],
+ 'l5307':['l5','307'],
+
+ 'l7m7':['l7','m7'],
+ 'l7404':['l7','l8','404'],
+
+ 'l13l14':['l13','l14'],
+ 'l13m16':['l13','m13','m16'],
+
+ 'l14l15':['l14','l15'],
+ 'l14m16':['l14','m14','m16'],
+
+ 'l15l17':['l15','l17'],
+ 'l15m16':['l15','ll15','ll16','m16'],
+
+ 'l17m16':['l17','ll17','ll16','m16'],
+ 'l17304':['l17','304'],
+
+ 'm5n5':['m5','n5'],
+ 'm5410':['m5','m4','410'],
+
+ 'm7n11':['m7','n7','n11'],
+ 'm7404':['m7','404'],
+
+ 'm16m20':['m16','m20'],
+ 'm16n16':['m16','n16'],
+
+ 'm20o20':['m20','o20'],
+ 'm20202':['m20','202'],
+
+ 'n5n11':['n5','n11'],
+ 'n5410':['n5','410'],
+
+ 'n11p11':['n11','p11'],
+ 'n11303':['n11','303'],
+
+ 'n16n11':['n16','n11'],
+ 'n16o16':['n16','o16'],
+
+ 'o14p11':['o14','o11','p11'],
+ 'o14409':['o14','409a'],
+
+ 'o16o14':['o16','o14'],
+ 'o16o18':['o16','o18'],
+
+ 'o18200':['o18','200a'],
+ 'o18300':['o18','oo18','300'],
+
+ 'o20o18':['o20','o18'],
+ 'o20204':['o20','204'],
+
+ 'p3p11':['p3','p11'],
+ 'p3409':['p3','409'],
+
+ 'p11o20':['p11','p20','o20'],
+ 'p11201':['p11','q11','201']
+};
+
+/* Terminal (HTTP status) nodes: grid position plus the status box's
+   pixel width. Keys with a letter suffix ('200a', '409a', ...)
+   distinguish the same status drawn at different spots on the map. */
+var ends = {
+ '200': {col:'a', row:'3', width:190},
+ '200a': {col:'mm', row:'18', width:116},
+ '201': {col:'q', row:'12', width:154},
+ '202': {col:'m', row:'21', width:116},
+ '204': {col:'o', row:'21', width:152},
+
+ '300': {col:'oo', row:'19', width:152},
+ '301': {col:'k', row:'4', width:154},
+ '303': {col:'m', row:'11', width:116},
+ '304': {col:'l', row:'18', width:116},
+ '307': {col:'l', row:'4', width:154},
+
+ '400': {col:'a', row:'9', width:190},
+ '401': {col:'a', row:'8', width:190},
+ '403': {col:'a', row:'7', width:190},
+ '404': {col:'m', row:'8', width:116},
+ '405': {col:'a', row:'10', width:190},
+ '406': {col:'c', row:'7', width:152},
+ '409': {col:'p', row:'2', width:116},
+ '409a': {col:'oo', row:'14', width:116},
+ '410': {col:'n', row:'4', width:116},
+ '412': {col:'h', row:'6', width:152},
+ '412a': {col:'h', row:'18', width:152},
+ '413': {col:'a', row:'4', width:190},
+ '414': {col:'a', row:'11', width:190},
+ '415': {col:'a', row:'5', width:190},
+
+ '501a': {col:'a', row:'6', width:190},
+ '501': {col:'a', row:'12', width:190},
+ '503': {col:'a', row:'13', width:190}
+};
+
+/* the decision-map <canvas>; assigned in window.onload */
+var canvas;
+
+function decorateTrace() {
+ trace[0].x = cols[trace[0].d[0]];
+ trace[0].y = rows[trace[0].d.slice(1)];
+ trace[0].previewCalls = previewCalls(trace[0]);
+
+ for (var i = 1; i < trace.length; i++) {
+ trace[i].x = cols[trace[i].d[0]];
+ trace[i].y = rows[trace[i].d.slice(1)];
+ trace[i].previewCalls = previewCalls(trace[i]);
+
+ var path = edges[trace[i-1].d+trace[i].d];
+ if (path) {
+ trace[i].path = [path.length-1];
+ for (var p = 1; p < path.length; p++) {
+ trace[i].path[p-1] = getSeg(path[p-1], path[p], p == path.length-1);
+ }
+ } else {
+ trace[i].path = [];
+ }
+ }
+
+ var path = edges[trace[i-1].d+response.code];
+ if (path) {
+ var end = ends[path[path.length-1]];
+ response.x = cols[end.col];
+ response.y = rows[end.row];
+ response.width = end.width;
+ response.type = 'normal';
+
+ response.path = [path.length-1];
+ for (var p = 1; p < path.length; p++) {
+ response.path[p-1] = getSeg(path[p-1], path[p], p == path.length-1);
+ }
+ } else {
+ var ld = trace[trace.length-1];
+ response.x = ld.x+50;
+ response.y = ld.y-50;
+ response.width = 38;
+ response.type = 'other';
+
+ response.path = [
+ {x1: ld.x+10, y1: ld.y-10,
+ x2: ld.x+36, y2: ld.y-36}
+ ];
+ }
+};
+
+function previewCalls(dec) {
+ var prev = '';
+ for (var i = 0; i < dec.calls.length; i++) {
+ if (dec.calls[i].output != "wmtrace_not_exported")
+ prev += '<li>'+dec.calls[i].module+':'+dec.calls[i]['function']+'</li>';
+ }
+ return prev;
+};
+
+/* Render the whole trace: the first decision, then each edge+decision
+ * pair in order, then the path to the final response and the response
+ * marker itself. Assumes decorateTrace() has already run. */
+function drawTrace() {
+ drawDecision(trace[0]);
+ for (var i = 1; i < trace.length; i++) {
+ drawPath(trace[i].path);
+ drawDecision(trace[i]);
+ }
+
+ drawPath(response.path);
+ drawResponse();
+};
+
+function drawResponse() {
+ if (response.type == 'normal') {
+ var context = canvas.getContext('2d');
+ context.strokeStyle=HIGHLIGHT;
+ context.lineWidth=4;
+
+ context.beginPath();
+ context.rect(response.x-(response.width/2),
+ response.y-19,
+ response.width,
+ 38);
+ context.stroke();
+ } else {
+ var context = canvas.getContext('2d');
+ context.strokeStyle='#ff0000';
+ context.lineWidth=4;
+
+ context.beginPath();
+ context.arc(response.x, response.y, 19,
+ 0, 2*3.14159, false);
+ context.stroke();
+
+ }
+};
+
+function drawDecision(dec) {
+ var context = canvas.getContext('2d');
+
+ if (dec.previewCalls == '')
+ context.strokeStyle=REGULAR;
+ else
+ context.strokeStyle=HIGHLIGHT;
+ context.lineWidth=4;
+
+ context.beginPath();
+ context.moveTo(dec.x, dec.y-19);
+ context.lineTo(dec.x+19, dec.y);
+ context.lineTo(dec.x, dec.y+19);
+ context.lineTo(dec.x-19, dec.y);
+ context.closePath();
+ context.stroke();
+};
+
+function drawPath(path) {
+ var context = canvas.getContext('2d');
+ context.strokeStyle=REGULAR;
+ context.lineWidth=4;
+
+ context.beginPath();
+ context.moveTo(path[0].x1, path[0].y1);
+ for (var p = 0; p < path.length; p++) {
+ context.lineTo(path[p].x2, path[p].y2);
+ }
+ context.stroke();
+};
+
+function getSeg(p1, p2, last) {
+ var seg = {
+ x1:cols[p1[0]],
+ y1:rows[p1.slice(1)]
+ };
+ if (ends[p2]) {
+ seg.x2 = cols[ends[p2].col];
+ seg.y2 = rows[ends[p2].row];
+ } else {
+ seg.x2 = cols[p2[0]];
+ seg.y2 = rows[p2.slice(1)];
+ }
+
+ if (seg.x1 == seg.x2) {
+ if (seg.y1 < seg.y2) {
+ seg.y1 = seg.y1+19;
+ if (last) seg.y2 = seg.y2-19;
+ } else {
+ seg.y1 = seg.y1-19;
+ if (last) seg.y2 = seg.y2+19;
+ }
+ } else {
+ //assume seg.y1 == seg.y2
+ if (seg.x1 < seg.x2) {
+ seg.x1 = seg.x1+19;
+ if (last) seg.x2 = seg.x2-(ends[p2] ? (ends[p2].width/2) : 19);
+ } else {
+ seg.x1 = seg.x1-19;
+ if (last) seg.x2 = seg.x2+(ends[p2] ? (ends[p2].width/2) : 19);
+ }
+ }
+ return seg;
+};
+
+function traceDecision(name) {
+ for (var i = trace.length-1; i >= 0; i--)
+ if (trace[i].d == name) return trace[i];
+};
+
+/* Shared handle to the detail-panel API (setDecision/show/hide),
+ * populated by initDetailPanels(). */
+var detailPanels = {};
+/* Wire up the right-hand info panel: tab switching, the decision
+ * drop-downs, request/response dumps, and drag-to-resize behavior.
+ * Reads the globals trace, request, response and canvas. */
+function initDetailPanels() {
+ var windowWidth = document.getElementById('sizetest').clientWidth;
+ var infoPanel = document.getElementById('infopanel');
+ var panelWidth = windowWidth-infoPanel.offsetLeft;
+
+ var panels = {
+ 'request': document.getElementById('requestdetail'),
+ 'response': document.getElementById('responsedetail'),
+ 'decision': document.getElementById('decisiondetail')
+ };
+
+ var tabs = {
+ 'request': document.getElementById('requesttab'),
+ 'response': document.getElementById('responsetab'),
+ 'decision': document.getElementById('decisiontab')
+ };
+
+ var decisionId = document.getElementById('decisionid');
+ var decisionCalls = document.getElementById('decisioncalls');
+ var callInput = document.getElementById('callinput');
+ var callOutput = document.getElementById('calloutput');
+
+ // remembered width so a hidden panel can be restored
+ var lastUsedPanelWidth = windowWidth-infoPanel.offsetLeft;
+
+ // resize the panel by moving its left edge; keep the canvas margin
+ // in sync so the map is not hidden underneath
+ var setPanelWidth = function(width) {
+ infoPanel.style.left = (windowWidth-width)+'px';
+ canvas.style.marginRight = (width+20)+'px';
+ panelWidth = width;
+ };
+ setPanelWidth(panelWidth);
+
+ // if the panel has been dragged (almost) shut, reopen it
+ var ensureVisible = function() {
+ if (windowWidth-infoPanel.offsetLeft < 10)
+ setPanelWidth(lastUsedPanelWidth);
+ };
+
+ // populate the decision <select> from the trace
+ var decChoices = '';
+ for (var i = 0; i < trace.length; i++) {
+ decChoices += '<option value="'+trace[i].d+'">'+trace[i].d+'</option>';
+ }
+ decisionId.innerHTML = decChoices;
+ decisionId.selectedIndex = -1;
+
+ decisionId.onchange = function() {
+ detailPanels.setDecision(traceDecision(decisionId.value));
+ }
+
+ // select a decision: fill the calls list and show the first call
+ detailPanels.setDecision = function(dec) {
+ decisionId.value = dec.d;
+
+ var calls = [];
+ for (var i = 0; i < dec.calls.length; i++) {
+ calls.push('<option value="'+dec.d+'-'+i+'">');
+ calls.push(dec.calls[i].module+':'+dec.calls[i]['function']);
+ calls.push('</option>');
+ }
+ decisionCalls.innerHTML = calls.join('');
+ decisionCalls.selectedIndex = 0;
+
+ decisionCalls.onchange();
+ };
+
+ // show one tab page, hide the others, and make sure the panel is open
+ detailPanels.show = function(name) {
+ for (p in panels) {
+ if (p == name) {
+ panels[p].style.display = 'block';
+ tabs[p].className = 'selectedtab';
+ }
+ else {
+ panels[p].style.display = 'none';
+ tabs[p].className = '';
+ }
+ }
+ ensureVisible();
+ };
+
+ detailPanels.hide = function() {
+ setPanelWidth(0);
+ }
+
+ // render the selected call's input/output; red text flags a call
+ // that never returned, gray flags an unexported callback
+ decisionCalls.onchange = function() {
+ var val = decisionCalls.value;
+ if (val) {
+ var dec = traceDecision(val.substring(0, val.indexOf('-')));
+ var call = dec.calls[parseInt(val.substring(val.indexOf('-')+1, val.length))];
+
+ if (call.output != "wmtrace_not_exported") {
+ callInput.style.color='#000000';
+ callInput.innerHTML = call.input;
+ if (call.output != null) {
+ callOutput.style.color = '#000000';
+ callOutput.innerHTML = call.output;
+ } else {
+ callOutput.style.color = '#ff0000';
+ callOutput.textContent = 'Error: '+call.module+':'+call['function']+' never returned';
+ }
+ } else {
+ callInput.style.color='#999999';
+ callInput.textContent = call.module+':'+call['function']+' was not exported';
+ callOutput.textContent = '';
+ }
+ } else {
+ callInput.textContent = '';
+ callOutput.textContent = '';
+ }
+ };
+
+ // render a header map as <li> items
+ var headersList = function(headers) {
+ var h = '';
+ for (n in headers) h += '<li>'+n+': '+headers[n];
+ return h;
+ };
+
+ document.getElementById('requestmethod').innerHTML = request.method;
+ document.getElementById('requestpath').innerHTML = request.path;
+ document.getElementById('requestheaders').innerHTML = headersList(request.headers);
+ document.getElementById('requestbody').innerHTML = request.body;
+
+ document.getElementById('responsecode').innerHTML = response.code;
+ document.getElementById('responseheaders').innerHTML = headersList(response.headers);
+ document.getElementById('responsebody').innerHTML = response.body;
+
+
+ // drag-to-resize state: md = mouse down on the handle, dragged =
+ // moved since mousedown, msoff = pointer offset into the handle
+ var infoControls = document.getElementById('infocontrols');
+ var md = false;
+ var dragged = false;
+ var msoff = 0;
+ infoControls.onmousedown = function(ev) {
+ md = true;
+ dragged = false;
+ msoff = ev.clientX-infoPanel.offsetLeft;
+ };
+
+ // click on the handle: a click after a drag just records the new
+ // width; a plain click toggles or switches tab pages
+ infoControls.onclick = function(ev) {
+ if (dragged) {
+ lastUsedPanelWidth = panelWidth;
+ }
+ else if (panelWidth < 10) {
+ switch(ev.target.id) {
+ case 'requesttab': detailPanels.show('request'); break;
+ case 'responsetab': detailPanels.show('response'); break;
+ case 'decisiontab': detailPanels.show('decision'); break;
+ default: ensureVisible();
+ }
+ } else {
+ var name = 'none';
+ switch(ev.target.id) {
+ case 'requesttab': name = 'request'; break;
+ case 'responsetab': name = 'response'; break;
+ case 'decisiontab': name = 'decision'; break;
+ }
+
+ if (panels[name] && panels[name].style.display != 'block')
+ detailPanels.show(name);
+ else
+ detailPanels.hide();
+ }
+
+ return false;
+ };
+
+ // track the drag, clamping the panel between 0 and full width
+ document.onmousemove = function(ev) {
+ if (md) {
+ dragged = true;
+ panelWidth = windowWidth-(ev.clientX-msoff);
+ if (panelWidth < 0) {
+ panelWidth = 0;
+ infoPanel.style.left = windowWidth+"px";
+ }
+ else if (panelWidth > windowWidth-21) {
+ panelWidth = windowWidth-21;
+ infoPanel.style.left = '21px';
+ }
+ else
+ infoPanel.style.left = (ev.clientX-msoff)+"px";
+
+ canvas.style.marginRight = panelWidth+20+"px";
+ return false;
+ }
+ };
+
+ document.onmouseup = function() { md = false; };
+
+ // keep the panel glued to the right edge on window resize
+ window.onresize = function() {
+ windowWidth = document.getElementById('sizetest').clientWidth;
+ infoPanel.style.left = windowWidth-panelWidth+'px';
+ };
+};
+
+/* Page entry point: set up the panels, hit-testing helpers and the
+ * background map image, then draw the trace once the image loads. */
+window.onload = function() {
+ canvas = document.getElementById('v3map');
+
+ initDetailPanels();
+
+ // current zoom factor; the map image is authored at 3138x2184
+ var scale = 0.25;
+ var coy = canvas.offsetTop;
+ // hit-test: map a mouse event to the decision diamond under it
+ function findDecision(ev) {
+ var x = (ev.clientX+window.pageXOffset)/scale;
+ var y = (ev.clientY+window.pageYOffset-coy)/scale;
+
+ for (var i = trace.length-1; i >= 0; i--) {
+ if (x >= trace[i].x-19 && x <= trace[i].x+19 &&
+ y >= trace[i].y-19 && y <= trace[i].y+19)
+ return trace[i];
+ }
+ };
+
+ var preview = document.getElementById('preview');
+ var previewId = document.getElementById('previewid');
+ var previewCalls = document.getElementById('previewcalls');
+ // pop the hover preview next to a decision
+ function previewDecision(dec) {
+ preview.style.left = (dec.x*scale)+'px';
+ preview.style.top = (dec.y*scale+coy+15)+'px';
+ preview.style.display = 'block';
+ previewId.textContent = dec.d;
+
+ previewCalls.innerHTML = dec.previewCalls;
+ };
+
+ // hit-test: is the mouse event over the final response marker?
+ function overResponse(ev) {
+ var x = (ev.clientX+window.pageXOffset)/scale;
+ var y = (ev.clientY+window.pageYOffset-coy)/scale;
+
+ return (x >= response.x-(response.width/2)
+ && x <= response.x+(response.width/2)
+ && y >= response.y-19 && y <= response.y+19);
+ };
+
+ decorateTrace();
+
+ var bg = new Image(3138, 2184);
+
+ // clear to white, blit the map image, overlay the trace
+ function drawMap() {
+ var ctx = canvas.getContext("2d");
+
+ ctx.save();
+ ctx.scale(1/scale, 1/scale);
+ ctx.fillStyle = '#ffffff';
+ ctx.fillRect(0, 0, 3138, 2184);
+ ctx.restore();
+
+ ctx.drawImage(bg, 0, 0);
+ drawTrace();
+ };
+
+ bg.onload = function() {
+ canvas.getContext("2d").scale(scale, scale);
+ // drawMap takes no parameters; the argument is ignored
+ drawMap(scale);
+
+ // hover: preview decisions, use a pointer cursor over hot spots
+ canvas.onmousemove = function(ev) {
+ if (findDecision(ev)) {
+ canvas.style.cursor = 'pointer';
+ previewDecision(findDecision(ev));
+ }
+ else {
+ preview.style.display = 'none';
+ if (overResponse(ev))
+ canvas.style.cursor = 'pointer';
+ else
+ canvas.style.cursor = 'default';
+ }
+ };
+
+ // click: open the matching detail panel
+ canvas.onclick = function(ev) {
+ var dec = findDecision(ev);
+ if (dec) {
+ detailPanels.setDecision(dec);
+ detailPanels.show('decision');
+ } else if (overResponse(ev)) {
+ detailPanels.show('response');
+ }
+ };
+
+ // zoom controls double/halve the canvas transform and redraw
+ document.getElementById('zoomin').onclick = function() {
+ scale = scale*2;
+ canvas.getContext("2d").scale(2, 2);
+ drawMap();
+ };
+
+ document.getElementById('zoomout').onclick = function() {
+ scale = scale/2;
+ canvas.getContext("2d").scale(0.5, 0.5);
+ drawMap();
+ };
+ };
+
+ bg.onerror = function() {
+ alert('Failed to load background image.');
+ };
+
+ bg.src = 'static/map.png';
+};
--- /dev/null
+<html>
+<head>
+<title>It Worked</title>
+</head>
+<body>
+Running.
+</body>
+</html>
--- /dev/null
+%%-*- mode: erlang -*-
+%% rebar build configuration: fail the build on compiler warnings,
+%% enable coverage analysis, and run edoc with the preprocessor on.
+{erl_opts, [warnings_as_errors]}.
+{cover_enabled, true}.
+{edoc_opts, [{preprocess, true}]}.
+
+%% NOTE(review): GitHub no longer serves the unauthenticated git://
+%% protocol; this dep URL likely needs to become https:// -- confirm.
+{deps, [
+ {mochiweb, "1.5.1*", {git, "git://github.com/basho/mochiweb",
+ {tag, "1.5.1p6"}}}
+ ]}.
--- /dev/null
+%% rebar config script: on OTP releases up to R15B01 the hashing API
+%% differs, so define the old_hash macro in erl_opts (appending to an
+%% existing erl_opts entry, or creating one when the config has none);
+%% newer releases get CONFIG passed through unchanged.
+case erlang:system_info(otp_release) =< "R15B01" of
+ true ->
+ HashDefine = [{d,old_hash}],
+ case lists:keysearch(erl_opts, 1, CONFIG) of
+ {value, {erl_opts, Opts}} ->
+ lists:keyreplace(erl_opts,1,CONFIG,{erl_opts,Opts++HashDefine});
+ false ->
+ CONFIG ++ [{erl_opts, HashDefine}]
+ end;
+ false -> CONFIG
+end.
--- /dev/null
+#!/usr/bin/env bash
+
+SCRIPT=${0##*/}
+NAME=$1
+DESTDIR=$2
+
+usage() {
+ echo "usage: new_webmachine.sh name [destdir]"
+}
+
+if [ -z $NAME ] || [[ $NAME =~ ^[\.\~\/] ]]; then
+ usage
+ exit 1
+fi
+
+erl -noshell -eval 'halt(if is_atom('"$NAME"') -> 0; true -> 1 end).'
+if [[ $? -ne 0 ]]; then
+ echo $SCRIPT: \""$NAME"\" is not allowed as a project name
+ echo ' The project name must begin with a lowercase letter and'
+ echo ' contain only alphanumeric characters and underscores.'
+ usage
+ exit 1
+fi
+
+if [ -z $DESTDIR ]; then
+ DESTDIR="."
+elif [[ $DESTDIR =~ /${NAME}$ ]]; then
+ DESTDIR=${DESTDIR%/*}
+fi
+
+if [ ! -e $DESTDIR ]; then
+ $(mkdir -p $DESTDIR)
+fi
+
+ABSDEST=$(cd $DESTDIR && pwd)
+
+cd ${0%/*}/../priv
+
+../rebar create template=wmskel appid=$NAME prefix=$ABSDEST/$NAME
--- /dev/null
+%%-*- mode: erlang -*-
+%% OTP application resource file for webmachine. The modules list is
+%% left empty here; the build tool fills it in at compile time.
+{application, webmachine,
+ [
+ {description, "webmachine"},
+ {vsn, "1.10.3"},
+ {modules, []},
+ {registered, []},
+ {applications, [kernel,
+ stdlib,
+ mochiweb]},
+ {mod, {webmachine_app, []}},
+ {env, []}
+ ]}.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2009 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+-module(webmachine).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+-export([start/0, stop/0]).
+-export([new_request/2]).
+
+-include("webmachine_logger.hrl").
+-include("wm_reqstate.hrl").
+-include("wm_reqdata.hrl").
+
+%% @spec start() -> ok
+%% @doc Start the webmachine server.
+%% Makes sure dependency code paths are set up before starting the
+%% webmachine OTP application.
+start() ->
+ webmachine_deps:ensure(),
+ application:start(webmachine).
+
+%% @spec stop() -> ok
+%% @doc Stop the webmachine server.
+stop() ->
+ application:stop(webmachine).
+
+%% @doc Build a webmachine request term from a raw mochiweb request.
+%% If a rewrite_module is configured in the webmachine app env, it may
+%% rewrite the headers and/or raw path first. The peer address is
+%% resolved and an initial #wm_log_data{} is attached (response code
+%% 404 / length 0 are placeholders until the real response is known).
+%% Note: Request:get(...) etc. are legacy parameterized-module calls.
+new_request(mochiweb, Request) ->
+ Method = Request:get(method),
+ Scheme = Request:get(scheme),
+ Version = Request:get(version),
+ {Headers, RawPath} = case application:get_env(webmachine, rewrite_module) of
+ {ok, RewriteMod} ->
+ do_rewrite(RewriteMod,
+ Method,
+ Scheme,
+ Version,
+ Request:get(headers),
+ Request:get(raw_path));
+ undefined ->
+ {Request:get(headers), Request:get(raw_path)}
+ end,
+ Socket = Request:get(socket),
+ InitState = #wm_reqstate{socket=Socket,
+ reqdata=wrq:create(Method,Scheme,Version,RawPath,Headers)},
+
+ InitReq = {webmachine_request,InitState},
+ %% get_peer returns the peer and an updated reqstate in one go
+ {Peer, ReqState} = InitReq:get_peer(),
+ PeerState = ReqState#wm_reqstate{reqdata=wrq:set_peer(Peer,
+ ReqState#wm_reqstate.reqdata)},
+ %% now/0 is deprecated on modern OTP; retained here for the older
+ %% releases this code targets -- TODO confirm before upgrading
+ LogData = #wm_log_data{start_time=now(),
+ method=Method,
+ headers=Headers,
+ peer=PeerState#wm_reqstate.peer,
+ path=RawPath,
+ version=Version,
+ response_code=404,
+ response_length=0},
+ webmachine_request:new(PeerState#wm_reqstate{log_data=LogData}).
+
+%% Apply RewriteMod:rewrite/5 and normalize its result to
+%% {Headers, RawPath}. A bare list is the older "path only" rewrite
+%% contract (original headers are kept); a 2-tuple is the newer
+%% headers-and-path contract.
+do_rewrite(RewriteMod, Method, Scheme, Version, Headers, RawPath) ->
+ Rewritten = RewriteMod:rewrite(Method, Scheme, Version, Headers, RawPath),
+ case Rewritten of
+ NewPath when is_list(NewPath) ->
+ {Headers, NewPath};
+ {NewHeaders, NewPath} ->
+ {NewHeaders, NewPath}
+ end.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2008 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Callbacks for the webmachine application.
+
+-module(webmachine_app).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+
+-behaviour(application).
+
+-export([start/2,
+ stop/1]).
+
+-include("webmachine_logger.hrl").
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for webmachine.
+%% Starts the root supervisor, then registers every {Module, Config}
+%% pair from the log_handlers app env under the logger watcher
+%% supervisor.
+start(_Type, _StartArgs) ->
+ webmachine_deps:ensure(),
+ %% assert a clean start; keep the full {ok, Pid} to return below
+ {ok, _Pid} = SupLinkRes = webmachine_sup:start_link(),
+ Handlers = case application:get_env(webmachine, log_handlers) of
+ undefined ->
+ [];
+ {ok, Val} ->
+ Val
+ end,
+ %% handlers failing to start are handled in the handler_watcher
+ _ = [supervisor:start_child(webmachine_logger_watcher_sup,
+ [?EVENT_LOGGER, Module, Config]) ||
+ {Module, Config} <- Handlers],
+ SupLinkRes.
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for webmachine.
+stop(_State) ->
+ ok.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @author Bryan Fink <bryan@basho.com>
+%% @copyright 2007-2009 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Decision core for webmachine
+
+-module(webmachine_decision_core).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+-author('Bryan Fink <bryan@basho.com>').
+-export([handle_request/2]).
+-export([do_log/1]).
+-include("webmachine_logger.hrl").
+
+%% @doc Entry point: run ReqState against Resource through the v3
+%% decision graph, starting at decision v3b13. Per-request working
+%% state (resource, reqstate, decision, code, ...) lives in the
+%% process dictionary and is cleared here first.
+handle_request(Resource, ReqState) ->
+ [erase(X) || X <- [decision, code, req_body, bytes_written, tmp_reqstate]],
+ put(resource, Resource),
+ put(reqstate, ReqState),
+ try
+ d(v3b13)
+ catch
+ error:_ ->
+ %% any crash in the graph becomes a 500 with the stacktrace as
+ %% the reason. NOTE(review): erlang:get_stacktrace/0 was removed
+ %% in OTP 24; this targets older releases -- confirm on upgrade.
+ error_response(erlang:get_stacktrace())
+ end.
+
+%% Perform a webmachine_request call X against the reqstate held in
+%% the process dictionary, storing the updated reqstate back.
+wrcall(X) ->
+ RS0 = get(reqstate),
+ Req = webmachine_request:new(RS0),
+ {Response, RS1} = Req:call(X),
+ put(reqstate, RS1),
+ Response.
+
+%% Invoke resource callback Fun; the resource returns a reply plus an
+%% updated resource term and reqstate, both of which are stored back.
+resource_call(Fun) ->
+ Resource = get(resource),
+ {Reply, NewResource, NewRS} = Resource:do(Fun,get()),
+ put(resource, NewResource),
+ put(reqstate, NewRS),
+ Reply.
+
+%% Fetch a request header value (undefined if absent).
+get_header_val(H) -> wrcall({get_req_header, H}).
+
+%% The request's HTTP method.
+method() -> wrcall(method).
+
+%% Step to decision DecisionID: record it, log it, then evaluate it.
+d(DecisionID) ->
+ put(decision, DecisionID),
+ log_decision(DecisionID),
+ decision(DecisionID).
+
+%% Finish the request with Code, or {Code, ReasonPhrase}.
+respond(Code) when is_integer(Code) ->
+ respond({Code, undefined});
+respond({_, _}=CodeAndPhrase) ->
+ Resource = get(resource),
+ EndTime = now(),
+ respond(CodeAndPhrase, Resource, EndTime).
+
+%% 4xx/5xx codes route through the error handler; a 304 strips
+%% Content-Type and adds ETag/Expires from the resource before
+%% finishing; every other code finishes directly.
+respond({Code, _ReasonPhrase}=CodeAndPhrase, Resource, EndTime)
+ when Code >= 400, Code < 600 ->
+ error_response(CodeAndPhrase, Resource, EndTime);
+respond({304, _ReasonPhrase}=CodeAndPhrase, Resource, EndTime) ->
+ wrcall({remove_resp_header, "Content-Type"}),
+ case resource_call(generate_etag) of
+ undefined -> nop;
+ ETag -> wrcall({set_resp_header, "ETag", webmachine_util:quoted_string(ETag)})
+ end,
+ case resource_call(expires) of
+ undefined -> nop;
+ Exp ->
+ wrcall({set_resp_header, "Expires",
+ webmachine_util:rfc1123_date(Exp)})
+ end,
+ finish_response(CodeAndPhrase, Resource, EndTime);
+respond(CodeAndPhrase, Resource, EndTime) ->
+ finish_response(CodeAndPhrase, Resource, EndTime).
+
+%% Set the final response code, give the resource its finish_request
+%% callback, send the response, log the request asynchronously, and
+%% stop the resource.
+finish_response({Code, _}=CodeAndPhrase, Resource, EndTime) ->
+ put(code, Code),
+ wrcall({set_response_code, CodeAndPhrase}),
+ resource_call(finish_request),
+ wrcall({send_response, CodeAndPhrase}),
+ RMod = wrcall({get_metadata, 'resource_module'}),
+ Notes = wrcall(notes),
+ LogData0 = wrcall(log_data),
+ LogData = LogData0#wm_log_data{resource_module=RMod,
+ end_time=EndTime,
+ notes=Notes},
+ %% log from a separate process so a slow log handler cannot delay
+ %% the response path
+ spawn(fun() -> do_log(LogData) end),
+ Resource:stop().
+
+%% Respond with a 500 carrying Reason.
+error_response(Reason) ->
+ error_response(500, Reason).
+
+%% Respond with Code (no custom reason phrase) carrying Reason.
+error_response(Code, Reason) ->
+ Resource = get(resource),
+ EndTime = now(),
+ error_response({Code, undefined}, Reason, Resource, EndTime).
+
+%% Respond with CodeAndPhrase using the default reason for Code.
+%% (The original re-matched `{Code, _}=CodeAndPhrase` inside the call
+%% argument -- a pointless expression-position pattern match; passing
+%% the already-bound variable is equivalent.)
+error_response({Code, _}=CodeAndPhrase, Resource, EndTime) ->
+ error_response(CodeAndPhrase,
+ webmachine_error:reason(Code),
+ Resource,
+ EndTime).
+
+%% Render Reason via the configured error_handler, install the HTML as
+%% the response body, and finish the response.
+error_response({Code, _}=CodeAndPhrase, Reason, Resource, EndTime) ->
+ {ok, ErrorHandler} = application:get_env(webmachine, error_handler),
+ {ErrorHTML, ReqState} = ErrorHandler:render_error(
+ Code, {webmachine_request,get(reqstate)}, Reason),
+ put(reqstate, ReqState),
+ wrcall({set_resp_body, ErrorHTML}),
+ finish_response(CodeAndPhrase, Resource, EndTime).
+
+%% Common branch helper: errors and halts short-circuit to the
+%% standard responses; otherwise follow TrueFlow when Test equals
+%% TestVal and FalseFlow when it does not. (Clause-per-shape style,
+%% mirroring decision_test_fn/4 below; the repeated-variable head in
+%% the TrueFlow clause performs the same =:= comparison the original
+%% bound-variable case clause did.)
+decision_test({error, Reason}, _TestVal, _TrueFlow, _FalseFlow) ->
+ error_response(Reason);
+decision_test({error, Reason0, Reason1}, _TestVal, _TrueFlow, _FalseFlow) ->
+ error_response({Reason0, Reason1});
+decision_test({halt, Code}, _TestVal, _TrueFlow, _FalseFlow) ->
+ respond(Code);
+decision_test(TestVal, TestVal, TrueFlow, _FalseFlow) ->
+ decision_flow(TrueFlow, TestVal);
+decision_test(Test, _TestVal, _TrueFlow, FalseFlow) ->
+ decision_flow(FalseFlow, Test).
+
+%% Like decision_test/4, but the branch is decided by TestFn(Test)
+%% rather than direct comparison; errors and halts still short-circuit
+%% to the standard responses first.
+decision_test_fn({error, Reason}, _TestFn, _TrueFlow, _FalseFlow) ->
+ error_response(Reason);
+decision_test_fn({error, R0, R1}, _TestFn, _TrueFlow, _FalseFlow) ->
+ error_response({R0, R1});
+decision_test_fn({halt, Code}, _TestFn, _TrueFlow, _FalseFlow) ->
+ respond(Code);
+decision_test_fn(Test,TestFn,TrueFlow,FalseFlow) ->
+ case TestFn(Test) of
+ true -> decision_flow(TrueFlow, Test);
+ false -> decision_flow(FalseFlow, Test)
+ end.
+
+%% Follow a flow target: an integer is a response code (>= 500 is
+%% treated as an error response carrying the test result); an atom
+%% names the next decision to evaluate.
+decision_flow(X, TestResult) when is_integer(X), X >= 500 ->
+ error_response(X, TestResult);
+decision_flow(X, _TestResult) when is_integer(X) ->
+ respond(X);
+decision_flow(X, _TestResult) when is_atom(X) ->
+ d(X).
+
+%% Hand a completed request's log data to the access-log handlers.
+do_log(LogData) ->
+ webmachine_log:log_access(LogData).
+
+%% Let the resource record the visited decision (used for tracing).
+log_decision(DecisionID) ->
+ Resource = get(resource),
+ Resource:log_d(DecisionID).
+
+%% "Service Available"
+decision(v3b13) ->
+ decision_test(resource_call(ping), pong, v3b13b, 503);
+decision(v3b13b) ->
+ decision_test(resource_call(service_available), true, v3b12, 503);
+%% "Known method?"
+decision(v3b12) ->
+ decision_test(lists:member(method(), resource_call(known_methods)),
+ true, v3b11, 501);
+%% "URI too long?"
+decision(v3b11) ->
+ decision_test(resource_call(uri_too_long), true, 414, v3b10);
+%% "Method allowed?"
+decision(v3b10) ->
+ Methods = resource_call(allowed_methods),
+ case lists:member(method(), Methods) of
+ true ->
+ d(v3b9);
+ false ->
+ wrcall({set_resp_headers, [{"Allow",
+ string:join([atom_to_list(M) || M <- Methods], ", ")}]}),
+ respond(405)
+ end;
+
+%% "Content-MD5 present?"
+decision(v3b9) ->
+ decision_test(get_header_val("content-md5"), undefined, v3b9b, v3b9a);
+%% "Content-MD5 valid?"
+decision(v3b9a) ->
+ case resource_call(validate_content_checksum) of
+ {error, Reason} ->
+ error_response(Reason);
+ {halt, Code} ->
+ respond(Code);
+ not_validated ->
+ Checksum = base64:decode(get_header_val("content-md5")),
+ BodyHash = compute_body_md5(),
+ case BodyHash =:= Checksum of
+ true -> d(v3b9b);
+ _ ->
+ respond(400)
+ end;
+ false ->
+ respond(400);
+ _ -> d(v3b9b)
+ end;
+%% "Malformed?"
+decision(v3b9b) ->
+ decision_test(resource_call(malformed_request), true, 400, v3b8);
+%% "Authorized?"
+decision(v3b8) ->
+ case resource_call(is_authorized) of
+ true -> d(v3b7);
+ {error, Reason} ->
+ error_response(Reason);
+ {halt, Code} ->
+ respond(Code);
+ AuthHead ->
+ wrcall({set_resp_header, "WWW-Authenticate", AuthHead}),
+ respond(401)
+ end;
+%% "Forbidden?"
+decision(v3b7) ->
+ decision_test(resource_call(forbidden), true, 403, v3b6);
+%% "Okay Content-* Headers?"
+decision(v3b6) ->
+ decision_test(resource_call(valid_content_headers), true, v3b5, 501);
+%% "Known Content-Type?"
+decision(v3b5) ->
+ decision_test(resource_call(known_content_type), true, v3b4, 415);
+%% "Req Entity Too Large?"
+decision(v3b4) ->
+ decision_test(resource_call(valid_entity_length), true, v3b3, 413);
+%% "OPTIONS?"
+decision(v3b3) ->
+ case method() of
+ 'OPTIONS' ->
+ Hdrs = resource_call(options),
+ wrcall({set_resp_headers, Hdrs}),
+ respond(200);
+ _ ->
+ d(v3c3)
+ end;
+%% Accept exists?
+decision(v3c3) ->
+ PTypes = [Type || {Type,_Fun} <- resource_call(content_types_provided)],
+ case get_header_val("accept") of
+ undefined ->
+ wrcall({set_metadata, 'content-type', hd(PTypes)}),
+ d(v3d4);
+ _ ->
+ d(v3c4)
+ end;
+%% Acceptable media type available?
+decision(v3c4) ->
+ PTypes = [Type || {Type,_Fun} <- resource_call(content_types_provided)],
+ AcceptHdr = get_header_val("accept"),
+ case webmachine_util:choose_media_type(PTypes, AcceptHdr) of
+ none ->
+ respond(406);
+ MType ->
+ wrcall({set_metadata, 'content-type', MType}),
+ d(v3d4)
+ end;
+%% Accept-Language exists?
+decision(v3d4) ->
+ decision_test(get_header_val("accept-language"),
+ undefined, v3e5, v3d5);
+%% Acceptable Language available? %% WMACH-46 (do this as proper conneg)
+decision(v3d5) ->
+ decision_test(resource_call(language_available), true, v3e5, 406);
+%% Accept-Charset exists?
+decision(v3e5) ->
+ case get_header_val("accept-charset") of
+ undefined -> decision_test(choose_charset("*"),
+ none, 406, v3f6);
+ _ -> d(v3e6)
+ end;
+%% Acceptable Charset available?
+decision(v3e6) ->
+ decision_test(choose_charset(get_header_val("accept-charset")),
+ none, 406, v3f6);
+%% Accept-Encoding exists?
+% (also, set content-type header here, now that charset is chosen)
+decision(v3f6) ->
+ CType = wrcall({get_metadata, 'content-type'}),
+ CSet = case wrcall({get_metadata, 'chosen-charset'}) of
+ undefined -> "";
+ CS -> "; charset=" ++ CS
+ end,
+ wrcall({set_resp_header, "Content-Type", CType ++ CSet}),
+ case get_header_val("accept-encoding") of
+ undefined ->
+ decision_test(choose_encoding("identity;q=1.0,*;q=0.5"),
+ none, 406, v3g7);
+ _ -> d(v3f7)
+ end;
+%% Acceptable encoding available?
+decision(v3f7) ->
+ decision_test(choose_encoding(get_header_val("accept-encoding")),
+ none, 406, v3g7);
+%% "Resource exists?"
+decision(v3g7) ->
+ % this is the first place after all conneg, so set Vary here
+ case variances() of
+ [] -> nop;
+ Variances ->
+ wrcall({set_resp_header, "Vary", string:join(Variances, ", ")})
+ end,
+ decision_test(resource_call(resource_exists), true, v3g8, v3h7);
+%% "If-Match exists?"
+decision(v3g8) ->
+ decision_test(get_header_val("if-match"), undefined, v3h10, v3g9);
+%% "If-Match: * exists"
+decision(v3g9) ->
+ decision_test(get_header_val("if-match"), "*", v3h10, v3g11);
+%% "ETag in If-Match"
+decision(v3g11) ->
+ ETags = webmachine_util:split_quoted_strings(get_header_val("if-match")),
+ decision_test_fn(resource_call(generate_etag),
+ fun(ETag) -> lists:member(ETag, ETags) end,
+ v3h10, 412);
+%% "If-Match exists"
+%% (note: need to reflect this change at in next version of diagram)
+decision(v3h7) ->
+ decision_test(get_header_val("if-match"), undefined, v3i7, 412);
+%% "If-unmodified-since exists?"
+decision(v3h10) ->
+ decision_test(get_header_val("if-unmodified-since"),undefined,v3i12,v3h11);
+%% "I-UM-S is valid date?"
+decision(v3h11) ->
+ IUMSDate = get_header_val("if-unmodified-since"),
+ decision_test(webmachine_util:convert_request_date(IUMSDate),
+ bad_date, v3i12, v3h12);
+%% "Last-Modified > I-UM-S?"
+decision(v3h12) ->
+ ReqDate = get_header_val("if-unmodified-since"),
+ ReqErlDate = webmachine_util:convert_request_date(ReqDate),
+ ResErlDate = resource_call(last_modified),
+ decision_test(ResErlDate > ReqErlDate,
+ true, 412, v3i12);
+%% "Moved permanently? (apply PUT to different URI)"
+decision(v3i4) ->
+ case resource_call(moved_permanently) of
+ {true, MovedURI} ->
+ wrcall({set_resp_header, "Location", MovedURI}),
+ respond(301);
+ false ->
+ d(v3p3);
+ {error, Reason} ->
+ error_response(Reason);
+ {halt, Code} ->
+ respond(Code)
+ end;
+%% PUT?
+decision(v3i7) ->
+ decision_test(method(), 'PUT', v3i4, v3k7);
+%% "If-none-match exists?"
+decision(v3i12) ->
+ decision_test(get_header_val("if-none-match"), undefined, v3l13, v3i13);
+%% "If-None-Match: * exists?"
+decision(v3i13) ->
+ decision_test(get_header_val("if-none-match"), "*", v3j18, v3k13);
+%% GET or HEAD?
+decision(v3j18) ->
+ decision_test(lists:member(method(),['GET','HEAD']),
+ true, 304, 412);
+%% "Moved permanently?"
+decision(v3k5) ->
+ case resource_call(moved_permanently) of
+ {true, MovedURI} ->
+ wrcall({set_resp_header, "Location", MovedURI}),
+ respond(301);
+ false ->
+ d(v3l5);
+ {error, Reason} ->
+ error_response(Reason);
+ {halt, Code} ->
+ respond(Code)
+ end;
+%% "Previously existed?"
+decision(v3k7) ->
+ decision_test(resource_call(previously_existed), true, v3k5, v3l7);
+%% "Etag in if-none-match?"
+decision(v3k13) ->
+ ETags = webmachine_util:split_quoted_strings(get_header_val("if-none-match")),
+ decision_test_fn(resource_call(generate_etag),
+ %% Membership test is a little counter-intuitive here; if the
+ %% provided ETag is a member, we follow the error case out
+ %% via v3j18.
+ fun(ETag) -> lists:member(ETag, ETags) end,
+ v3j18, v3l13);
+%% "Moved temporarily?"
+decision(v3l5) ->
+ case resource_call(moved_temporarily) of
+ {true, MovedURI} ->
+ wrcall({set_resp_header, "Location", MovedURI}),
+ respond(307);
+ false ->
+ d(v3m5);
+ {error, Reason} ->
+ error_response(Reason);
+ {halt, Code} ->
+ respond(Code)
+ end;
+%% "POST?"
+decision(v3l7) ->
+ decision_test(method(), 'POST', v3m7, 404);
+%% "IMS exists?"
+decision(v3l13) ->
+ decision_test(get_header_val("if-modified-since"), undefined, v3m16, v3l14);
+%% "IMS is valid date?"
+decision(v3l14) ->
+ IMSDate = get_header_val("if-modified-since"),
+ decision_test(webmachine_util:convert_request_date(IMSDate),
+ bad_date, v3m16, v3l15);
+%% "IMS > Now?"
+decision(v3l15) ->
+ NowDateTime = calendar:universal_time(),
+ ReqDate = get_header_val("if-modified-since"),
+ ReqErlDate = webmachine_util:convert_request_date(ReqDate),
+ decision_test(ReqErlDate > NowDateTime,
+ true, v3m16, v3l17);
+%% "Last-Modified > IMS?"
+decision(v3l17) ->
+ ReqDate = get_header_val("if-modified-since"),
+ ReqErlDate = webmachine_util:convert_request_date(ReqDate),
+ ResErlDate = resource_call(last_modified),
+ decision_test(ResErlDate =:= undefined orelse ResErlDate > ReqErlDate,
+ true, v3m16, 304);
+%% "POST?"
+decision(v3m5) ->
+ decision_test(method(), 'POST', v3n5, 410);
+%% "Server allows POST to missing resource?"
+decision(v3m7) ->
+ decision_test(resource_call(allow_missing_post), true, v3n11, 404);
+%% "DELETE?"
+decision(v3m16) ->
+ decision_test(method(), 'DELETE', v3m20, v3n16);
+%% DELETE enacted immediately?
+%% Also where DELETE is forced.
+decision(v3m20) ->
+ Result = resource_call(delete_resource),
+ %% DELETE may have body and TCP connection will be closed unless body is read.
+ %% See mochiweb_request:should_close.
+ maybe_flush_body_stream(),
+ decision_test(Result, true, v3m20b, 500);
+decision(v3m20b) ->
+ decision_test(resource_call(delete_completed), true, v3o20, 202);
+%% "Server allows POST to missing resource?"
+decision(v3n5) ->
+ decision_test(resource_call(allow_missing_post), true, v3n11, 410);
+%% "Redirect?"
+decision(v3n11) ->
+ Stage1 = case resource_call(post_is_create) of
+ true ->
+ case resource_call(create_path) of
+ undefined -> error_response("post_is_create w/o create_path");
+ NewPath ->
+ case is_list(NewPath) of
+ false -> error_response({"create_path not a string", NewPath});
+ true ->
+ BaseUri = case resource_call(base_uri) of
+ undefined -> wrcall(base_uri);
+ Any ->
+ case [lists:last(Any)] of
+ "/" -> lists:sublist(Any, erlang:length(Any) - 1);
+ _ -> Any
+ end
+ end,
+ FullPath = filename:join(["/", wrcall(path), NewPath]),
+ wrcall({set_disp_path, NewPath}),
+ case wrcall({get_resp_header, "Location"}) of
+ undefined -> wrcall({set_resp_header, "Location", BaseUri ++ FullPath});
+ _ -> ok
+ end,
+
+ Res = accept_helper(),
+ case Res of
+ {respond, Code} -> respond(Code);
+ {halt, Code} -> respond(Code);
+ {error, _,_} -> error_response(Res);
+ {error, _} -> error_response(Res);
+ _ -> stage1_ok
+ end
+ end
+ end;
+ _ ->
+ case resource_call(process_post) of
+ true ->
+ encode_body_if_set(),
+ stage1_ok;
+ {halt, Code} -> respond(Code);
+ Err -> error_response(Err)
+ end
+ end,
+ case Stage1 of
+ stage1_ok ->
+ case wrcall(resp_redirect) of
+ true ->
+ case wrcall({get_resp_header, "Location"}) of
+ undefined ->
+ Reason = "Response had do_redirect but no Location",
+ error_response(500, Reason);
+ _ ->
+ respond(303)
+ end;
+ _ ->
+ d(v3p11)
+ end;
+ _ -> nop
+ end;
+%% "POST?"
+decision(v3n16) ->
+ decision_test(method(), 'POST', v3n11, v3o16);
+%% Conflict?
+decision(v3o14) ->
+ case resource_call(is_conflict) of
+ true -> respond(409);
+ _ -> Res = accept_helper(),
+ case Res of
+ {respond, Code} -> respond(Code);
+ {halt, Code} -> respond(Code);
+ {error, _,_} -> error_response(Res);
+ {error, _} -> error_response(Res);
+ _ -> d(v3p11)
+ end
+ end;
+%% "PUT?"
+decision(v3o16) ->
+ decision_test(method(), 'PUT', v3o14, v3o18);
+%% Multiple representations?
+% (also where body generation for GET and HEAD is done)
+decision(v3o18) ->
+ BuildBody = case method() of
+ 'GET' -> true;
+ 'HEAD' -> true;
+ _ -> false
+ end,
+ FinalBody = case BuildBody of
+ true ->
+ case resource_call(generate_etag) of
+ undefined -> nop;
+ ETag -> wrcall({set_resp_header, "ETag", webmachine_util:quoted_string(ETag)})
+ end,
+ CT = wrcall({get_metadata, 'content-type'}),
+ case resource_call(last_modified) of
+ undefined -> nop;
+ LM ->
+ wrcall({set_resp_header, "Last-Modified",
+ webmachine_util:rfc1123_date(LM)})
+ end,
+ case resource_call(expires) of
+ undefined -> nop;
+ Exp ->
+ wrcall({set_resp_header, "Expires",
+ webmachine_util:rfc1123_date(Exp)})
+ end,
+ F = hd([Fun || {Type,Fun} <- resource_call(content_types_provided),
+ CT =:= webmachine_util:format_content_type(Type)]),
+ resource_call(F);
+ false -> nop
+ end,
+ case FinalBody of
+ {error, _} -> error_response(FinalBody);
+ {error, _,_} -> error_response(FinalBody);
+ {halt, Code} -> respond(Code);
+ nop -> d(v3o18b);
+ _ -> wrcall({set_resp_body,
+ encode_body(FinalBody)}),
+ d(v3o18b)
+ end;
+
+decision(v3o18b) ->
+ decision_test(resource_call(multiple_choices), true, 300, 200);
+%% Response includes an entity?
+decision(v3o20) ->
+ decision_test(wrcall(has_resp_body), true, v3o18, 204);
+%% Conflict?
+decision(v3p3) ->
+ case resource_call(is_conflict) of
+ true -> respond(409);
+ _ -> Res = accept_helper(),
+ case Res of
+ {respond, Code} -> respond(Code);
+ {halt, Code} -> respond(Code);
+ {error, _,_} -> error_response(Res);
+ {error, _} -> error_response(Res);
+ _ -> d(v3p11)
+ end
+ end;
+
+%% New resource? (at this point boils down to "has location header")
+decision(v3p11) ->
+ case wrcall({get_resp_header, "Location"}) of
+ undefined -> d(v3o20);
+ _ -> respond(201)
+ end.
+
%% @doc Run the resource's content acceptor for the request's Content-Type.
%% A missing or empty Content-Type header defaults to
%% "application/octet-stream". Returns true on success, {respond, Code},
%% {halt, Code} or {error, _} for the caller to translate into a response,
%% or whatever else the acceptor callback returned.
accept_helper() ->
    accept_helper(get_header_val("Content-Type")).

accept_helper(undefined) ->
    accept_helper("application/octet-stream");
accept_helper([]) ->
    accept_helper("application/octet-stream");
accept_helper(CT) ->
    %% Split "type/subtype; k=v" into media type and parameters, and expose
    %% the parameters to the resource via request metadata.
    {MT, MParams} = webmachine_util:media_type_to_detail(CT),
    wrcall({set_metadata, 'mediaparams', MParams}),
    %% Look up the acceptor registered for this media type; 415 if none.
    case [Fun || {Type,Fun} <-
                     resource_call(content_types_accepted), MT =:= Type] of
        [] -> {respond,415};
        AcceptedContentList ->
            %% Only the first matching acceptor is invoked.
            F = hd(AcceptedContentList),
            case resource_call(F) of
                true ->
                    %% Acceptor succeeded: re-encode any response body it set
                    %% with the negotiated charset/encoding.
                    encode_body_if_set(),
                    true;
                Result -> Result
            end
    end.
+
%% @doc If a response body has already been set, replace it with a version
%% transformed for the negotiated charset and content-encoding.
%% Returns true when a body was re-encoded, false when there was none.
encode_body_if_set() ->
    case wrcall(has_resp_body) of
        true ->
            Body = wrcall(resp_body),
            wrcall({set_resp_body, encode_body(Body)}),
            true;
        _ -> false
    end.
+
%% @doc Apply the negotiated charset and content-encoding transforms to a
%% response body. The charsetter/encoder funs are looked up from the
%% resource's charsets_provided/encodings_provided callbacks using the
%% choices recorded in request metadata during content negotiation.
%% Handles every body shape webmachine supports: plain iodata, {stream, _},
%% {known_length_stream, _, _}, {stream, Size, RangeFun} and {writer, Fun}.
encode_body(Body) ->
    ChosenCSet = wrcall({get_metadata, 'chosen-charset'}),
    Charsetter =
        case resource_call(charsets_provided) of
            no_charset -> fun(X) -> X end;  % resource opted out of charsets
            CP -> hd([Fun || {CSet,Fun} <- CP, ChosenCSet =:= CSet])
        end,
    ChosenEnc = wrcall({get_metadata, 'content-encoding'}),
    Encoder = hd([Fun || {Enc,Fun} <- resource_call(encodings_provided),
                         ChosenEnc =:= Enc]),
    case Body of
        {stream, StreamBody} ->
            {stream, make_encoder_stream(Encoder, Charsetter, StreamBody)};
        {known_length_stream, 0, _StreamBody} ->
            %% Zero-length body: never pull from the producer at all.
            {known_length_stream, 0, empty_stream()};
        {known_length_stream, Size, StreamBody} ->
            case method() of
                'HEAD' ->
                    %% HEAD keeps the advertised length but sends no bytes.
                    {known_length_stream, Size, empty_stream()};
                _ ->
                    {known_length_stream, Size, make_encoder_stream(Encoder, Charsetter, StreamBody)}
            end;
        {stream, Size, Fun} ->
            %% Range-capable stream: wrap the Start/End producer fun.
            {stream, Size, make_size_encoder_stream(Encoder, Charsetter, Fun)};
        {writer, BodyFun} ->
            %% Writer bodies are encoded lazily when the writer runs.
            {writer, {Encoder, Charsetter, BodyFun}};
        _ ->
            Encoder(Charsetter(iolist_to_binary(Body)))
    end.
+
%% @private
%% @doc A streamed-body producer that yields no data: an empty first hunk
%% paired with a continuation that immediately reports completion.
empty_stream() ->
    Final = fun() -> {<<>>, done} end,
    {<<>>, Final}.
+
%% @doc Wrap a streamed body so that every hunk passes through the
%% negotiated charset and encoding transforms. The stream protocol is
%% {Hunk, done} for the final hunk, or {Hunk, NextFun} to continue.
make_encoder_stream(Encoder, Charsetter, {Hunk, Rest}) ->
    Encoded = Encoder(Charsetter(Hunk)),
    case Rest of
        done ->
            {Encoded, done};
        Next ->
            {Encoded,
             fun() -> make_encoder_stream(Encoder, Charsetter, Next()) end}
    end.

%% @doc Wrap a range-capable stream producer (a fun taking byte Start/End
%% positions) so the hunks it yields are charset/encoding transformed.
make_size_encoder_stream(Encoder, Charsetter, Producer) ->
    fun(RangeStart, RangeEnd) ->
            Stream = Producer(RangeStart, RangeEnd),
            make_encoder_stream(Encoder, Charsetter, Stream)
    end.
+
%% @doc Negotiate a content encoding against the Accept-Encoding header.
%% On success the choice is recorded in request metadata, and a
%% Content-Encoding response header is set unless the choice is the no-op
%% "identity" coding. Returns the chosen encoding string, or none.
choose_encoding(AccEncHdr) ->
    Offered = [Enc || {Enc, _Fun} <- resource_call(encodings_provided)],
    case webmachine_util:choose_encoding(Offered, AccEncHdr) of
        none ->
            none;
        "identity" = Identity ->
            %% identity needs no Content-Encoding header
            wrcall({set_metadata, 'content-encoding', Identity}),
            Identity;
        ChosenEnc ->
            wrcall({set_resp_header, "Content-Encoding", ChosenEnc}),
            wrcall({set_metadata, 'content-encoding', ChosenEnc}),
            ChosenEnc
    end.
+
%% @doc Negotiate a character set against the Accept-Charset header.
%% Returns no_charset when the resource opted out of charset handling,
%% none when nothing it offers is acceptable, or the chosen charset
%% (which is also recorded in request metadata).
choose_charset(AccCharHdr) ->
    case resource_call(charsets_provided) of
        no_charset ->
            no_charset;
        Provided ->
            Offered = [CSet || {CSet, _Fun} <- Provided],
            case webmachine_util:choose_charset(Offered, AccCharHdr) of
                none ->
                    none;
                Chosen ->
                    wrcall({set_metadata, 'chosen-charset', Chosen}),
                    Chosen
            end
    end.
+
%% @doc Compute the value of the Vary response header: one entry for each
%% Accept* axis on which the resource offers more than one alternative,
%% plus whatever the resource itself reports via its variances callback.
%% Uses pattern matching ([_, _ | _] = "two or more") instead of the
%% previous O(n) length/1 calls to test list size.
variances() ->
    Accept = case resource_call(content_types_provided) of
                 [_, _ | _] -> ["Accept"];  % >1 media type offered
                 [_] -> [];
                 [] -> []
             end,
    AcceptEncoding = case resource_call(encodings_provided) of
                         [_, _ | _] -> ["Accept-Encoding"];
                         [_] -> [];
                         [] -> []
                     end,
    AcceptCharset = case resource_call(charsets_provided) of
                        no_charset -> [];
                        [_, _ | _] -> ["Accept-Charset"];
                        [_] -> [];
                        [] -> []
                    end,
    Accept ++ AcceptEncoding ++ AcceptCharset ++ resource_call(variances).
+
%% MD5 helpers used for Content-MD5 request-header validation. Thin
%% wrappers around the erlang BIFs so the hash implementation can be
%% swapped in one place without touching callers.

%% @doc One-shot MD5 digest of an iodata term.
md5(Bin) ->
    erlang:md5(Bin).

%% @doc Start an incremental MD5 computation; returns an opaque context.
md5_init() ->
    erlang:md5_init().

%% @doc Fold another chunk into an incremental MD5 context.
md5_update(Ctx, Bin) ->
    erlang:md5_update(Ctx, Bin).

%% @doc Finish an incremental MD5 computation; returns the 16-byte digest.
md5_final(Ctx) ->
    erlang:md5_final(Ctx).
+
%% @doc MD5-digest the request body, for Content-MD5 validation.
%% Bodies up to 50MB (52428800 bytes) are read in one piece; if the body
%% is already being consumed as a stream (req_body returns
%% stream_conflict), fall back to hashing it hunk by hunk.
compute_body_md5() ->
    case wrcall({req_body, 52428800}) of
        stream_conflict ->
            compute_body_md5_stream();
        Body ->
            md5(Body)
    end.

compute_body_md5_stream() ->
    MD5Ctx = md5_init(),
    %% pull the streamed request body in 8KB hunks
    compute_body_md5_stream(MD5Ctx, wrcall({stream_req_body, 8192}), <<>>).

%% Digest each hunk while also accumulating the whole body, so that the
%% body is still retrievable after the stream has been consumed.
compute_body_md5_stream(MD5, {Hunk, done}, Body) ->
    %% Save the body so it can be retrieved later.
    %% Bug fix: the final Hunk must be part of the saved body too --
    %% previously only the digest saw it, leaving the stored body
    %% truncated (empty, in the common single-hunk case).
    put(reqstate, wrq:set_resp_body(<<Body/binary, Hunk/binary>>, get(reqstate))),
    md5_final(md5_update(MD5, Hunk));
compute_body_md5_stream(MD5, {Hunk, Next}, Body) ->
    compute_body_md5_stream(md5_update(MD5, Hunk), Next(), <<Body/binary, Hunk/binary>>).
+
%% @doc Drain any unread request body off the socket so that the TCP
%% connection is not closed with bytes still pending (see
%% mochiweb_request:should_close, referenced by the DELETE path above).
%% A stream_conflict result means other code is already consuming the
%% stream, so there is nothing for us to do.
maybe_flush_body_stream() ->
    maybe_flush_body_stream(wrcall({stream_req_body, 8192})).

maybe_flush_body_stream(stream_conflict) ->
    ok;
maybe_flush_body_stream({_Hunk, done}) ->
    ok;
maybe_flush_body_stream({_Hunk, Next}) ->
    maybe_flush_body_stream(Next()).
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2008 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Ensure that the relatively-installed dependencies are on the code
+%% loading path, and locate resources relative
+%% to this application's path.
+
+-module(webmachine_deps).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+
+-export([ensure/0, ensure/1]).
+-export([get_base_dir/0, get_base_dir/1]).
+-export([local_path/1, local_path/2]).
+-export([deps_on_path/0, new_siblings/1]).
+
%% @spec deps_on_path() -> [ProjNameAndVers]
%% @doc List of project dependencies on the path. Each code-path entry is
%% assumed to look like .../&lt;name-vsn&gt;/ebin, so the parent directory's
%% basename names the dependency. Result is an ordset (sorted, unique).
deps_on_path() ->
    Names = [filename:basename(filename:dirname(Dir)) || Dir <- code:get_path()],
    ordsets:from_list(Names).
+
%% @spec new_siblings(Module) -> [Dir]
%% @doc Find new siblings paths relative to Module that aren't already on the
%% code path. Scans deps/*/ebin below Module's application directory and
%% returns the ebin and include subdirectories (when they exist on disk)
%% of every dependency not yet represented on the code path.
new_siblings(Module) ->
    Existing = deps_on_path(),
    %% The nested dirname calls compare a dep's name against the name of
    %% the enclosing application (three levels up from its ebin dir).
    SiblingEbin = [ X || X <- filelib:wildcard(local_path(["deps", "*", "ebin"], Module)),
                         filename:basename(filename:dirname(X)) /= %% don't include self
                             filename:basename(filename:dirname(
                                                 filename:dirname(
                                                   filename:dirname(X)))) ],
    %% Keep only deps whose name is not already on the code path.
    Siblings = [filename:dirname(X) || X <- SiblingEbin,
                                       ordsets:is_element(
                                         filename:basename(filename:dirname(X)),
                                         Existing) =:= false],
    %% Emit ebin and include dirs for each new dep, dropping non-existent ones.
    lists:filter(fun filelib:is_dir/1,
                 lists:append([[filename:join([X, "ebin"]),
                                filename:join([X, "include"])] ||
                                  X <- Siblings])).
+
+
%% @spec ensure(Module) -> ok
%% @doc Ensure that all ebin and include paths for dependencies
%% of the application for Module are on the code path.
ensure(Module) ->
    code:add_paths(new_siblings(Module)),
    ok.

%% @spec ensure() -> ok
%% @doc Ensure that the ebin and include paths for dependencies of
%% this application are on the code path. Equivalent to
%% ensure(?MODULE).
ensure() ->
    ensure(?MODULE).

%% @spec get_base_dir(Module) -> string()
%% @doc Return the application directory for Module. It assumes Module is in
%% a standard OTP layout application in the ebin or src directory.
%% Note: relies on code:is_loaded/1 returning {file, Path}; it will crash
%% with a badmatch if Module is not currently loaded.
get_base_dir(Module) ->
    {file, Here} = code:is_loaded(Module),
    filename:dirname(filename:dirname(Here)).

%% @spec get_base_dir() -> string()
%% @doc Return the application directory for this application. Equivalent to
%% get_base_dir(?MODULE).
get_base_dir() ->
    get_base_dir(?MODULE).

%% @spec local_path([string()], Module) -> string()
%% @doc Return an application-relative directory from Module's application.
local_path(Components, Module) ->
    filename:join([get_base_dir(Module) | Components]).

%% @spec local_path(Components) -> string()
%% @doc Return an application-relative directory for this application.
%% Equivalent to local_path(Components, ?MODULE).
local_path(Components) ->
    local_path(Components, ?MODULE).
--- /dev/null
+%% @author Robert Ahrens <rahrens@basho.com>
+%% @author Justin Sheehy <justin@basho.com>
+%% @copyright 2007-2009 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Module for URL-dispatch by pattern matching.
+
+-module(webmachine_dispatcher).
+-author('Robert Ahrens <rahrens@basho.com>').
+-author('Justin Sheehy <justin@basho.com>').
+-author('Bryan Fink <bryan@basho.com>').
+
+-export([dispatch/3, dispatch/4]).
+
+-define(SEPARATOR, $\/).
+-define(MATCH_ALL, '*').
+
%% @spec dispatch(Path::string(), DispatchList::[matchterm()],
%%                wrq:reqdata()) ->
%%                                            dispterm() | dispfail()
%% @doc Interface for URL dispatching without host matching; equivalent
%% to dispatch("", Path, DispatchList, RD).
%% See also http://bitbucket.org/justin/webmachine/wiki/DispatchConfiguration
dispatch(PathAsString, DispatchList, RD) ->
    dispatch([], PathAsString, DispatchList, RD).
+
%% @spec dispatch(Host::string(), Path::string(),
%%                DispatchList::[matchterm()], wrq:reqdata()) ->
%%                                            dispterm() | dispfail()
%% @doc Interface for URL dispatching.
%% See also http://bitbucket.org/justin/webmachine/wiki/DispatchConfiguration
dispatch(HostAsString, PathAsString, DispatchList, RD) ->
    Path = string:tokens(PathAsString, [?SEPARATOR]),
    % URIs that end with a trailing slash are implicitly one token
    % "deeper" than we otherwise might think as we are "inside"
    % a directory named by the last token.
    % NOTE(review): lists:last/1 errors on an empty string; callers appear
    % to always pass a path beginning with "/" -- confirm before relying
    % on "" being accepted here.
    ExtraDepth = case lists:last(PathAsString) == ?SEPARATOR of
                     true -> 1;
                     _ -> 0
                 end,
    {Host, Port} = split_host_port(HostAsString, wrq:scheme(RD)),
    try_host_binding(DispatchList, Host, Port, Path, ExtraDepth, RD).
+
%% @doc Split a Host header value into {HostTokens, Port}. A missing port
%% falls back to the scheme's default; more than one ":" marks the header
%% as invalid_host.
split_host_port(HostAsString, Scheme) ->
    case string:tokens(HostAsString, ":") of
        [Host, Port] ->
            {split_host(Host), list_to_integer(Port)};
        [Host] ->
            {split_host(Host), default_port(Scheme)};
        [] ->
            %% no host header
            {[], default_port(Scheme)};
        _Malformed ->
            %% Invalid host header
            {invalid_host, default_port(Scheme)}
    end.

%% @doc Split a hostname into its "."-separated labels.
split_host(HostAsString) ->
    string:tokens(HostAsString, ".").

%% @doc Default TCP port for each supported scheme.
default_port(http) -> 80;
default_port(https) -> 443.
+
+%% @type matchterm() = hostmatchterm() | pathmatchterm().
+% The dispatch configuration is a list of these terms, and the
+% first one whose host and path terms match the input is used.
+% Using a pathmatchterm() here is equivalent to using a hostmatchterm()
+% of the form {{['*'],'*'}, [pathmatchterm()]}.
+
+%% @type hostmatchterm() = {hostmatch(), [pathmatchterm()]}.
+% The dispatch configuration contains a list of these terms, and the
+% first one whose host and one pathmatchterm match is used.
+
+%% @type hostmatch() = [hostterm()] | {[hostterm()], portterm()}.
+% A host header (Host, X-Forwarded-For, etc.) will be matched against
% this term. Using a raw [hostterm()] list is equivalent to using
+% {[hostterm()], '*'}.
+
+%% @type hostterm() = '*' | string() | atom().
+% A list of hostterms is matched against a '.'-separated hostname.
+% The '*' hosterm matches all remaining tokens, and is only allowed at
+% the head of the list.
+% A string hostterm will match a token of exactly the same string.
+% Any atom hostterm other than '*' will match any token and will
+% create a binding in the result if a complete match occurs.
+
+%% @type portterm() = '*' | integer() | atom().
+% A portterm is matched against the integer port after any ':' in
+% the hostname, or 80 if no port is found.
% The '*' portterm matches any port.
+% An integer portterm will match a port of exactly the same integer.
+% Any atom portterm other than '*' will match any port and will
+% create a binding in the result if a complete match occurs.
+
+%% @type pathmatchterm() = {[pathterm()], matchmod(), matchopts()} |
+%% {[pathterm()], guardfun(), matchmod(), matchopts()}.
+% The dispatch configuration contains a list of these terms, and the
+% first one whose list of pathterms matches the input path is used.
+
+%% @type pathterm() = '*' | string() | atom().
+% A list of pathterms is matched against a '/'-separated input path.
+% The '*' pathterm matches all remaining tokens.
+% A string pathterm will match a token of exactly the same string.
+% Any atom pathterm other than '*' will match any token and will
+% create a binding in the result if a complete match occurs.
+
+%% @type guardfun() = (wrq:reqdata()) -> boolean()
+%% | {Mod::atom(), Fun::atom()}.
+% This function or tuple representing a function, if present, is
+% called after a successful match of the host, port, and path for a
+% dispatch entry. The function should take a single argument, the
+% request data object, and return a boolean. If the return value is
+% 'true', then this dispatch entry is used to service the
+% request. Otherwise, webmachine will continue with the next dispatch
+% entry.
+
+%% @type matchmod() = atom().
+% This atom, if present in a successful matchterm, will appear in
+% the resulting dispterm. In Webmachine this is used to name the
+% resource module that will handle the matching request.
+
+%% @type matchopts() = [term()].
+% This term, if present in a successful matchterm, will appear in
+% the resulting dispterm. In Webmachine this is used to provide
+% arguments to the resource module handling the matching request.
+
+%% @type dispterm() = {matchmod(), matchopts(), pathtokens(),
+%% bindings(), approot(), stringpath()}.
+
+%% @type pathtokens() = [pathtoken()].
+% This is the list of tokens matched by a trailing '*' pathterm.
+
+%% @type pathtoken() = string().
+
+%% @type bindings() = [{bindingterm(),pathtoken()}].
+% This is a proplist of bindings indicated by atom terms in the
+% matching spec, bound to the matching tokens in the request path.
+
+%% @type approot() = string().
+
+%% @type stringpath() = string().
+% This is the path portion matched by a trailing '*' pathterm.
+
+%% @type dispfail() = {no_dispatch_match, pathtokens()}.
+
%% @doc Entry point for matching a request against the dispatch list.
%% An unparseable Host header short-circuits to {error, invalid_host}.
try_host_binding(_Dispatch, invalid_host, _Port, _Path, _Depth, _RD) ->
    {error, invalid_host};
try_host_binding(Dispatch, Host, Port, Path, Depth, RD) ->
    %% save work during each dispatch attempt by reversing Host up front
    try_host_binding1(Dispatch, lists:reverse(Host), Port, Path, Depth, RD).
+
%% @doc Walk the dispatch list, trying each entry's port spec, host spec
%% and path specs in turn until one matches fully. Host arrives reversed
%% (see try_host_binding/6). Returns the dispatch result 8-tuple or
%% {no_dispatch_match, {Host, Port}, Path}.
try_host_binding1([], Host, Port, Path, _Depth, _RD) ->
    %% Host was reversed inbound, correct it for result
    {no_dispatch_match, {lists:reverse(Host), Port}, Path};
try_host_binding1([Dispatch|Rest], Host, Port, Path, Depth, RD) ->
    %% Normalize the three accepted dispatch-entry shapes to
    %% {{HostSpec, PortSpec}, PathSpecs}; a bare pathmatchterm() implies
    %% "any host, any port".
    {{HostSpec,PortSpec},PathSpec} =
        case Dispatch of
            {{H,P},S} -> {{H,P},S};
            {H,S} -> {{H,?MATCH_ALL},S};
            S -> {{[?MATCH_ALL],?MATCH_ALL},[S]}
        end,
    case bind_port(PortSpec, Port, []) of
        {ok, PortBindings} ->
            %% Host specs match back-to-front so a leading '*' can swallow
            %% any number of leading subdomains.
            case bind(lists:reverse(HostSpec), Host, PortBindings, 0) of
                {ok, RevHostRemainder, HostBindings, _} ->
                    %% Host was reversed inbound, correct it for remainder
                    HostRemainder = lists:reverse(RevHostRemainder),
                    case try_path_binding(PathSpec, Path, HostRemainder, Port, HostBindings, Depth, RD) of
                        {Mod, Props, PathRemainder, PathBindings,
                         AppRoot, StringPath} ->
                            {Mod, Props, HostRemainder, Port, PathRemainder,
                             PathBindings, AppRoot, StringPath};
                        {no_dispatch_match, _} ->
                            try_host_binding1(Rest, Host, Port, Path, Depth, RD)
                    end;
                fail ->
                    try_host_binding1(Rest, Host, Port, Path, Depth, RD)
            end;
        fail ->
            try_host_binding1(Rest, Host, Port, Path, Depth, RD)
    end.
+
%% @doc Match a port spec against the actual request port.
%% An exact value must be equal; '*' matches anything; any other atom
%% matches anything and records an {Atom, Port} binding. Clause order is
%% significant: the equality test runs before the binding-atom test.
bind_port(Port, Port, Bindings) -> {ok, Bindings};
bind_port(?MATCH_ALL, _Port, Bindings) -> {ok, Bindings};
bind_port(PortAtom, Port, Bindings) when is_atom(PortAtom) ->
    {ok, [{PortAtom, Port}|Bindings]};
bind_port(_, _, _) -> fail.
+
%% @doc Try each path spec in turn against the tokenized request path.
%% After a token match, dispatch data is loaded into the request and the
%% entry's guard (if any) gets the final say; a guard rejection moves on
%% to the next spec. Returns {Mod, Props, Remainder, Bindings, AppRoot,
%% StringPath} or {no_dispatch_match, PathTokens}.
try_path_binding([], PathTokens, _, _, _, _, _) ->
    {no_dispatch_match, PathTokens};
try_path_binding([PathSpec|Rest], PathTokens, HostRemainder, Port, HostBindings, ExtraDepth, RD) ->
    %% A spec is {Tokens, Mod, Props} or {Tokens, Guard, Mod, Props}.
    {PathSchema, Guard, Mod, Props} =
        case PathSpec of
            {P, M, Pr} -> {P, undefined, M, Pr};
            {P, G, M, Pr} -> {P, G, M, Pr}
        end,

    case bind(PathSchema, PathTokens, HostBindings, 0) of
        {ok, Remainder, NewBindings, Depth} ->
            AppRoot = calculate_app_root(Depth + ExtraDepth),
            StringPath = reconstitute(Remainder),
            PathInfo = orddict:from_list(NewBindings),
            RD1 =
                case RD of
                    testing_ignore_dialyzer_warning_here ->
                        %% sentinel used by the eunit tests below so they
                        %% can exercise matching without a request record
                        testing_ignore_dialyzer_warning_here;
                    _ ->
                        wrq:load_dispatch_data(PathInfo, HostRemainder, Port, Remainder,
                                               AppRoot, StringPath, RD)
                end,
            case run_guard(Guard, RD1) of
                true ->
                    {Mod, Props, Remainder, NewBindings, AppRoot, StringPath};
                false ->
                    try_path_binding(Rest, PathTokens, HostRemainder, Port, HostBindings, ExtraDepth, RD)
            end;
        fail ->
            try_path_binding(Rest, PathTokens, HostRemainder, Port, HostBindings, ExtraDepth, RD)
    end.
+
%% @doc Evaluate a dispatch guard against the request data.
%% undefined means "no guard" and always passes. A fun or {Mod, Fun}
%% guard passes only when it returns exactly 'true'. A crashing guard is
%% logged and surfaced as throw({error_running_guard, Guard, Reason});
%% any other guard shape throws {unknown_guard_type, Other}.
run_guard(undefined, _RD) ->
    true;
run_guard(Fun, RD) when is_function(Fun) ->
    try Fun(RD) == true
    catch
        _Class:Reason ->
            error_logger:error_msg("Error running guard ~p: ~p~n", [Fun, Reason]),
            throw({error_running_guard, Fun, Reason})
    end;
run_guard({Mod, Fun}, RD) ->
    try Mod:Fun(RD) == true
    catch
        _Class:Reason ->
            error_logger:error_msg("Error running guard ~p:~p/1: ~p~n", [Mod, Fun, Reason]),
            throw({error_running_guard, {Mod, Fun}, Reason})
    end;
run_guard(Other, _) ->
    error_logger:error_msg("Unknown guard type in webmachine_dispatcher: ~p~n", [Other]),
    throw({unknown_guard_type, Other}).
+
%% @doc Match a spec token list against the request's path (or host)
%% tokens. Returns {ok, Remainder, Bindings, Depth} where Remainder holds
%% tokens consumed by a trailing '*', Bindings the {atom, token} pairs
%% bound along the way, and Depth the number of tokens matched; fail
%% otherwise.
bind([], [], Bindings, Depth) ->
    {ok, [], Bindings, Depth};
bind([?MATCH_ALL], Rest, Bindings, Depth) when is_list(Rest) ->
    %% trailing '*': swallow everything that remains
    {ok, Rest, Bindings, Depth + length(Rest)};
bind(_, [], _, _) ->
    fail;
bind([Token|RestToken],[Match|RestMatch],Bindings,Depth) when is_atom(Token) ->
    %% atom spec token: bind it to whatever the request supplied
    bind(RestToken, RestMatch, [{Token, Match}|Bindings], Depth + 1);
bind([Token|RestToken], [Token|RestMatch], Bindings, Depth) ->
    %% literal string token must match exactly
    bind(RestToken, RestMatch, Bindings, Depth + 1);
bind(_, _, _, _) ->
    fail.

%% @doc Rebuild the '/'-joined string form of the unmatched path tokens.
reconstitute([]) -> "";
reconstitute(UnmatchedTokens) -> string:join(UnmatchedTokens, [?SEPARATOR]).

%% @doc Relative path ("../..") leading from the dispatch depth back up
%% to the application root; depth 1 is the root itself.
calculate_app_root(1) -> ".";
calculate_app_root(N) when N > 1 ->
    string:join(lists:duplicate(N, ".."), [?SEPARATOR]).
+
+%%
+%% TEST
+%%
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+-include("wm_reqstate.hrl").
+-include("wm_reqdata.hrl").
+
%% calculate_app_root/1 builds the right number of ".." segments.
app_root_test() ->
    ?assertEqual(".", calculate_app_root(1)),
    ?assertEqual("../..", calculate_app_root(2)),
    ?assertEqual("../../..", calculate_app_root(3)),
    ?assertEqual("../../../..", calculate_app_root(4)).

%% reconstitute/1 rejoins path tokens with '/'.
reconstitute_test() ->
    ?assertEqual("", reconstitute([])),
    ?assertEqual("foo", reconstitute(["foo"])),
    ?assertEqual("foo/bar", reconstitute(["foo","bar"])),
    ?assertEqual("foo/bar/baz", reconstitute(["foo","bar","baz"])).

%% split_host/1 splits a hostname on dots.
split_host_test() ->
    ?assertEqual(["foo","bar","baz"], split_host("foo.bar.baz")).

%% split_host_port/2 extracts an explicit port or falls back to the
%% scheme's default (80 for http, 443 for https).
split_host_port_test() ->
    ?assertEqual({[], 80}, split_host_port("", http)),
    ?assertEqual({["foo","bar","baz"], 80},
                 split_host_port("foo.bar.baz:80", http)),
    ?assertEqual({["foo","bar","baz"], 1234},
                 split_host_port("foo.bar.baz:1234", http)),

    ?assertEqual({[], 443}, split_host_port("", https)),
    ?assertEqual({["foo","bar","baz"], 443},
                 split_host_port("foo.bar.baz", https)),
    ?assertEqual({["foo","bar","baz"], 1234},
                 split_host_port("foo.bar.baz:1234", https)).
+
%% port binding

%% exact integer port specs match only the same port
bind_port_simple_match_test() ->
    ?assertEqual({ok, []}, bind_port(80, 80, [])),
    ?assertEqual({ok, [{foo, bar}]},
                 bind_port(1234, 1234, [{foo, bar}])).

%% '*' matches any port without adding a binding
bind_port_matchall_test() ->
    ?assertEqual({ok, []}, bind_port('*', 80, [])),
    ?assertEqual({ok, [{foo, bar}]},
                 bind_port('*', 1234, [{foo, bar}])).

%% an atom spec matches any port and records the binding
bind_port_match_test() ->
    ?assertEqual({ok, [{foo, 80}]}, bind_port(foo, 80, [])),
    {ok, WholeBinding} = bind_port(foo, 1234, [{bar, baz}]),
    ?assertEqual(2, length(WholeBinding)),
    ?assertEqual(1234, proplists:get_value(foo, WholeBinding)),
    ?assertEqual(baz, proplists:get_value(bar, WholeBinding)).

%% mismatched integer specs fail
%% (renamed from ind_port_fail_test: the leading 'b' had been dropped,
%% breaking the bind_port_* naming convention; the `_test` suffix keeps
%% it discoverable by eunit either way)
bind_port_fail_test() ->
    ?assertEqual(fail, bind_port(80, 1234, [])).
+
%% path binding

%% empty spec matches only an empty path, preserving bindings and depth
bind_path_empty_test() ->
    ?assertEqual({ok, [], [], 0}, bind([], [], [], 0)),
    ?assertEqual({ok, [], [{x,"a"}], 1},
                 bind([], [], [{x,"a"}], 1)).

%% a trailing '*' consumes (and returns) all remaining tokens
bind_path_matchall_test() ->
    ?assertEqual({ok, [], [], 1},
                 bind(['*'], [], [], 1)),
    ?assertEqual({ok, ["a","b"], [], 2},
                 bind(['*'], ["a","b"], [], 0)).

%% a spec longer than the path can never match
bind_path_fail_longer_match_test() ->
    ?assertEqual(fail, bind(["x"], [], [], 0)),
    ?assertEqual(fail, bind([foo], [], [], 0)).

%% atom tokens bind the corresponding path token
bind_path_with_binding_test() ->
    ?assertEqual({ok, [], [{foo, "a"}], 1},
                 bind([foo], ["a"], [], 0)),
    {ok, Rest, Bind, Depth} = bind([foo,'*'], ["a","b"], [{bar, baz}], 1),
    ?assertEqual(["b"], Rest),
    ?assertEqual(3, Depth),
    ?assertEqual(2, length(Bind)),
    ?assertEqual("a", proplists:get_value(foo, Bind)),
    ?assertEqual(baz, proplists:get_value(bar, Bind)).

%% literal string tokens must match exactly
bind_path_string_match_test() ->
    ?assertEqual({ok, [], [], 1},
                 bind(["a"], ["a"], [], 0)),
    ?assertEqual({ok, [], [{foo, bar}], 4},
                 bind(["a","b","c"], ["a","b","c"], [{foo, bar}], 1)).

bind_path_string_fail_test() ->
    ?assertEqual(fail, bind(["a"], ["b"], [], 0)),
    ?assertEqual(fail, bind(["a","b"], ["a","c"], [], 0)).
+
+%% try_path_binding walks a dispatch list and returns
+%% {Mod, Props, PathRemainder, Bindings, AppRoot, StringPath}
+%% for the first matching path spec.
+try_path_matching_test() ->
+ %% The RD argument is unused by these rules; a bare atom suffices.
+ RD = testing_ignore_dialyzer_warning_here,
+ ?assertEqual({bar, baz, [], [], ".", ""},
+ try_path_binding([{["foo"], bar, baz}], ["foo"], [], 80, [], 0, RD)),
+ Dispatch = [{["a", x], foo, bar},
+ {["b", y], baz, quux},
+ {["b", y, '*'], baz2, quux2}],
+ %% AppRoot ("../..", etc.) climbs one level per matched path token.
+ ?assertEqual({foo, bar, [], [{x, "c"}], "../..", []},
+ try_path_binding(Dispatch, ["a","c"], [], 80, [], 0, RD)),
+ ?assertEqual({baz, quux, [], [{y, "c"}], "../..", []},
+ try_path_binding(Dispatch, ["b","c"], [], 80, [], 0, RD)),
+ %% Tokens consumed by '*' come back both as a token list and as the
+ %% joined StringPath.
+ ?assertEqual({baz2, quux2, ["z"], [{y, "c"}], "../../..", "z"},
+ try_path_binding(Dispatch, ["b","c","z"], [], 80, [], 0, RD)),
+ ?assertEqual({baz2, quux2, ["z","v"], [{y, "c"}], "../../../..", "z/v"},
+ try_path_binding(Dispatch, ["b","c","z","v"], [], 80, [], 0, RD)).
+
+%% No rule matches: the unmatched path tokens are reported.
+try_path_failing_test() ->
+ RD = testing_ignore_dialyzer_warning_here,
+ ?assertEqual({no_dispatch_match, ["a"]},
+ try_path_binding([{["b"], x, y}], ["a"], [], 80, [], 0, RD)).
+
+%% host binding
+
+%% A dispatch list of bare {PathSpec, Mod, Props} rules (no host part)
+%% must behave exactly like the same rules wrapped in a wildcard
+%% {{['*'],'*'}, Rules} host/port entry.
+try_host_binding_nohosts_test() ->
+ RD = testing_ignore_dialyzer_warning_here,
+ PathDispatches = [{["a"], foo, bar},
+ {["b"], baz, quux}],
+ ?assertEqual(try_host_binding([{{['*'],'*'},PathDispatches}],
+ ["quux","baz"], 80, ["a"], 0, RD),
+ try_host_binding(PathDispatches,
+ ["quux","baz"], 80, ["a"], 0, RD)),
+ ?assertEqual(try_host_binding([{{['*'],'*'},PathDispatches}],
+ ["quux","baz"], 80, ["b"], 0, RD),
+ try_host_binding(PathDispatches,
+ ["quux","baz"], 80, ["b"], 0, RD)),
+ ?assertEqual(try_host_binding([ {{['*'],'*'},[D]} || D <- PathDispatches],
+ ["quux","baz"], 1234, ["a"], 0, RD),
+ try_host_binding(PathDispatches,
+ ["quux","baz"], 1234, ["a"], 0, RD)),
+ ?assertEqual(try_host_binding([ {{['*'],'*'},[D]} || D <- PathDispatches],
+ ["quux","baz"], 1234, ["b"], 0, RD),
+ try_host_binding(PathDispatches,
+ ["quux","baz"], 1234, ["b"], 0, RD)).
+
+%% Host-only entries {HostSpec, Rules} (no port) must behave like the
+%% same entries with an explicit '*' port.
+try_host_binding_noport_test() ->
+ RD = testing_ignore_dialyzer_warning_here,
+ Dispatch = [{["foo","bar"], [{["a"],x,y}]},
+ {["baz","quux"],[{["b"],z,q}]},
+ {[m,"quux"], [{["c"],r,s}]},
+ {['*',"quux"], [{["d"],t,u}]}],
+ ExplicitWildPort = [ {{H, '*'},P} || {H, P} <- Dispatch ],
+ %% NOTE: host token lists here are already reversed (least
+ %% significant label first), matching the dispatcher's convention.
+ ?assertEqual(try_host_binding(ExplicitWildPort,
+ ["bar","foo"], 80, ["a"], 0, RD),
+ try_host_binding(Dispatch,
+ ["bar","foo"], 80, ["a"], 0, RD)),
+ ?assertEqual(try_host_binding(ExplicitWildPort,
+ ["quux","baz"], 1234, ["b"], 0, RD),
+ try_host_binding(Dispatch,
+ ["quux","baz"], 1234, ["b"], 0, RD)),
+ ?assertEqual(try_host_binding(ExplicitWildPort,
+ ["quux","yes"], 81, ["c"], 0, RD),
+ try_host_binding(Dispatch,
+ ["quux","yes"], 81, ["c"], 0, RD)),
+ ?assertEqual(try_host_binding(ExplicitWildPort,
+ ["quux","no"], 82, ["d"], 0, RD),
+ try_host_binding(Dispatch,
+ ["quux","no"], 82, ["d"], 0, RD)).
+
+%% Full {{HostSpec, PortSpec}, Rules} entries: literal, atom-bound and
+%% wildcard host labels and ports, checked against the 8-tuple result
+%% {Mod, Props, HostRemainder, Port, PathRemainder, Bindings, AppRoot,
+%% StringPath}.
+try_host_binding_fullmatch_test() ->
+ RD = testing_ignore_dialyzer_warning_here,
+ Dispatch = [{{["foo","bar"],80},[{["a"],x,y}]},
+ {{[foo,"bar"],80}, [{["b"],z,q}]},
+ {{[foo,"bar"],baz}, [{["c"],r,s}]},
+ {{['*',"bar"],'*'}, [{["d"],t,u}]}],
+ ?assertEqual({x, y, [], 80, [], [], ".", ""},
+ try_host_binding(Dispatch,
+ ["foo","bar"], 80, ["a"], 0, RD)),
+ %% Atom host label binds the actual label value.
+ ?assertEqual({z, q, [], 80, [], [{foo,"baz"}], ".", ""},
+ try_host_binding(Dispatch,
+ ["baz","bar"], 80, ["b"], 0, RD)),
+ %% Atom port spec binds the actual port alongside the host binding.
+ {Mod, Props, HostRemainder, Port, PathRemainder,
+ PathBindings, AppRoot, StringPath}=
+ try_host_binding(Dispatch, ["quux","bar"], 1234, ["c"], 0, RD),
+ ?assertEqual(r, Mod),
+ ?assertEqual(s, Props),
+ ?assertEqual("", HostRemainder),
+ ?assertEqual(1234, Port),
+ ?assertEqual([], PathRemainder),
+ ?assertEqual(2, length(PathBindings)),
+ ?assertEqual("quux", proplists:get_value(foo, PathBindings)),
+ ?assertEqual(1234, proplists:get_value(baz, PathBindings)),
+ ?assertEqual(".", AppRoot),
+ ?assertEqual("", StringPath),
+ %% Host '*' wildcard: unmatched labels appear as HostRemainder.
+ ?assertEqual({t, u, ["foo","quux"], 80, [], [], ".", ""},
+ try_host_binding(Dispatch, ["foo","quux","bar"],80,["d"],0, RD)).
+
+%% End-to-end via dispatch/4: the leading '*' host wildcard captures
+%% the subdomain labels in original (most-significant-first) order.
+try_host_binding_wildcard_token_order_test() ->
+ RD = wrq:create('GET', http, {1,1}, "testing", mochiweb_headers:from_list([])),
+ Dispatch = [{{['*',"quux","com"],80},[{['*'],x,y}]}],
+ ?assertEqual({x,y,["foo","bar","baz"],80,[],[],".",""},
+ dispatch("foo.bar.baz.quux.com","/",Dispatch,RD)).
+
+%% An empty dispatch list reports the unmatched host/port and path.
+try_host_binding_fail_test() ->
+ RD = testing_ignore_dialyzer_warning_here,
+ ?assertEqual({no_dispatch_match, {["bar","foo"], 1234}, ["x","y","z"]},
+ try_host_binding([], ["bar","foo"], 1234, ["x","y","z"], 0, RD)).
+
+%% Top-level dispatch/3 (path only) and dispatch/4 (host + path),
+%% including 4-tuple rules whose guard fun can veto a match.
+dispatch_test() ->
+ RD = wrq:create('GET', http, {1,1}, "testing", mochiweb_headers:from_list([])),
+ TrueFun = fun(_) -> true end,
+ FalseFun = fun(_) -> false end,
+
+ ?assertEqual({x, y, [], 80, [], [], "../../..", ""},
+ dispatch("a/b/c",[{["a","b","c"],x,y}], RD)),
+ %% A guard returning true lets the match through...
+ ?assertEqual({x, y, [], 80, [], [], "../../..", ""},
+ dispatch("a/b/c",[{["a","b","c"],TrueFun,x,y}], RD)),
+ %% ...a guard returning false rejects it.
+ ?assertEqual({no_dispatch_match, {[],80},["a","b","c"]},
+ dispatch("a/b/c",[{["a","b","c"],FalseFun,x,y}], RD)),
+ ?assertEqual({x, y, [], 80, [], [], "../../..", ""},
+ dispatch("foo.bar", "a/b/c",
+ [{{["foo","bar"],80},[{["a","b","c"],x,y}]}], RD)),
+ %% An explicit port in the host string must match the port spec.
+ ?assertEqual({x, y, [], 1234, [], [], "../../..", ""},
+ dispatch("foo.bar:1234", "a/b/",
+ [{{["foo","bar"],1234},[{["a","b"],x,y}]}], RD)),
+ ?assertEqual({no_dispatch_match, {["baz","bar"],8000}, ["q","r"]},
+ dispatch("baz.bar:8000", "q/r",
+ [{{["foo","bar"],80},[{["a","b","c"],x,y}]}], RD)).
+
+guard1_test() ->
+ %% Basic guard test. Match everything.
+ Guard = fun(_) -> true end,
+ DispatchList = [{['*'], Guard, foo, bar}],
+ ?assertEqual(
+ {foo, bar, [], 80, ["test"], [], ".", "test"},
+ dispatch("test", DispatchList, make_reqdata("/test"))),
+ ok.
+
+guard2_test() ->
+ %% Basic guard test. Use guard to prevent all matches.
+ Guard = fun(_) -> false end,
+ DispatchList = [{['*'], Guard, foo, bar}],
+ ?assertEqual(
+ {no_dispatch_match, {[], 80}, ["test"]},
+ dispatch("test", DispatchList, make_reqdata("/test"))),
+ ok.
+
+guard3_test() ->
+ %% Check that path_info and path_tokens are passed to the guard...
+ %% (the asserts inside the guard fun run during dispatch itself).
+ Guard =
+ fun(RD) ->
+ ?assertEqual("a", wrq:path_info(a, RD)),
+ ?assertEqual("b", wrq:path_info(b, RD)),
+ ?assertEqual("c", wrq:path_info(c, RD)),
+ ?assertEqual(["d", "e"], wrq:path_tokens(RD)),
+ true
+ end,
+ DispatchList = [{[a,b,c,'*'], Guard, foo, bar}],
+ ?assertEqual(
+ {foo,bar,[],80, ["d","e"],
+ [{c,"c"},{b,"b"},{a,"a"}],
+ "../../../../..","d/e"},
+ dispatch("a/b/c/d/e", DispatchList, make_reqdata("/a/b/c/d/e"))),
+ ok.
+
+guard4_test() ->
+ %% Check that host and port are passed to the guard...
+ Guard =
+ fun(RD) ->
+ ?assertEqual("0", wrq:path_info(x, RD)),
+ ?assertEqual("0", wrq:path_info(y, RD)),
+ ?assertEqual("1", wrq:path_info(z, RD)),
+ ?assertEqual(80, wrq:port(RD)),
+ true
+ end,
+ %% Host labels "0","0","1" of 127.0.0.1 bind to x, y, z.
+ DispatchList=
+ [{
+ {["127",x,y,z], 80},
+ [
+ {['*'], Guard, foo, bar}
+ ]
+ }],
+ ?assertEqual(
+ {foo,bar,[],80,
+ ["a","b","c","d","e"],
+ [{x,"0"},{y,"0"},{z,"1"}],
+ "../../../../..","a/b/c/d/e"},
+ dispatch("127.0.0.1", "a/b/c/d/e", DispatchList, make_reqdata("http://127.0.0.1:80/a/b/c/d/e"))),
+ ok.
+
+make_reqdata(Path) ->
+ %% Helper function to construct a request and return the ReqData
+ %% object.
+ %% The socket argument is the dummy atom 'testing'; nothing in these
+ %% tests performs real I/O on it.
+ MochiReq = mochiweb_request:new(testing, 'GET', Path, {1, 1},
+ mochiweb_headers:make([])),
+ Req = webmachine:new_request(mochiweb, MochiReq),
+ {RD, _} = Req:get_reqdata(),
+ RD.
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011-2013 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc Default HTTP error reasons for webmachine error responses
+
+-module(webmachine_error).
+
+-export([reason/1]).
+
+%% @doc Map an HTTP status code to its standard reason phrase
+%% (RFC 2616 section 6.1.1). Codes without a dedicated clause fall
+%% back to the generic "Client Error" (4xx) / "Server Error" (>= 500)
+%% phrases.
+-spec reason(pos_integer()) -> string().
+reason(400) ->
+ "Bad Request";
+reason(401) ->
+ "Unauthorized";
+reason(402) ->
+ %% Fixed: was "Payment Requested"; RFC 2616 section 10.4.3 defines
+ %% the 402 reason phrase as "Payment Required".
+ "Payment Required";
+reason(403) ->
+ "Forbidden";
+reason(404) ->
+ "Not Found";
+reason(405) ->
+ "Method Not Allowed";
+reason(406) ->
+ "Not Acceptable";
+reason(407) ->
+ "Proxy Authentication Required";
+reason(408) ->
+ "Request Timeout";
+reason(409) ->
+ "Conflict";
+reason(410) ->
+ "Gone";
+reason(411) ->
+ "Length Required";
+reason(412) ->
+ "Precondition Failed";
+reason(413) ->
+ "Request Entity Too Large";
+reason(414) ->
+ "Request-URI Too Long";
+reason(415) ->
+ "Unsupported Media Type";
+reason(416) ->
+ %% Fixed: was "Request Range Not Satisfiable"; RFC 2616 section
+ %% 10.4.17 spells it "Requested Range Not Satisfiable".
+ "Requested Range Not Satisfiable";
+reason(417) ->
+ "Expectation Failed";
+reason(500) ->
+ "Internal Server Error";
+reason(501) ->
+ "Not Implemented";
+reason(502) ->
+ "Bad Gateway";
+reason(503) ->
+ "Service Unavailable";
+reason(504) ->
+ "Gateway Timeout";
+reason(505) ->
+ "HTTP Version Not Supported";
+reason(Code) when Code >= 400, Code < 500 ->
+ "Client Error";
+reason(Code) when Code >= 500 ->
+ "Server Error".
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @author Jeremy Latt <jeremy@basho.com>
+%% @copyright 2007-2008 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+
+%% @doc Some fairly minimal error message formatters.
+
+-module(webmachine_error_handler).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+-author('Jeremy Latt <jeremy@basho.com>').
+
+-export([render_error/3]).
+
+%% @doc Produce the response body for an error. If the resource has
+%% already set a response body, reuse it (logging Reason); otherwise
+%% build a default HTML body for the status code.
+render_error(Code, Req, Reason) ->
+ case Req:has_response_body() of
+ {true,_} ->
+ maybe_log(Req, Reason),
+ Req:response_body();
+ {false,_} -> render_error_body(Code, Req:trim_state(), Reason)
+ end.
+
+%% Build an HTML error page for the given status code; returns
+%% {BodyBinary, ReqState} with a text/html Content-Type header set.
+%% 404: static page, nothing worth logging.
+render_error_body(404, Req, _Reason) ->
+ {ok, ReqState} = Req:add_response_header("Content-Type", "text/html"),
+ {<<"<HTML><HEAD><TITLE>404 Not Found</TITLE></HEAD><BODY><H1>Not Found</H1>The requested document was not found on this server.<P><HR><ADDRESS>mochiweb+webmachine web server</ADDRESS></BODY></HTML>">>, ReqState};
+
+%% 500: log the failure and embed the formatted Reason term in the page.
+render_error_body(500, Req, Reason) ->
+ {ok, ReqState} = Req:add_response_header("Content-Type", "text/html"),
+ maybe_log(Req, Reason),
+ STString = io_lib:format("~p", [Reason]),
+ ErrorStart = "<html><head><title>500 Internal Server Error</title></head><body><h1>Internal Server Error</h1>The server encountered an error while processing this request:<br><pre>",
+ ErrorEnd = "</pre><P><HR><ADDRESS>mochiweb+webmachine web server</ADDRESS></body></html>",
+ ErrorIOList = [ErrorStart,STString,ErrorEnd],
+ {erlang:iolist_to_binary(ErrorIOList), ReqState};
+
+%% 501: report the unsupported method both to the error logger and in
+%% the page body.
+render_error_body(501, Req, _Reason) ->
+ {ok, ReqState} = Req:add_response_header("Content-Type", "text/html"),
+ {Method,_} = Req:method(),
+ error_logger:error_msg("Webmachine does not support method ~p~n",
+ [Method]),
+ ErrorStr = io_lib:format("<html><head><title>501 Not Implemented</title>"
+ "</head><body><h1>Not Implemented</h1>"
+ "The server does not support the ~p method.<br>"
+ "<P><HR><ADDRESS>mochiweb+webmachine web server"
+ "</ADDRESS></body></html>",
+ [Method]),
+ {erlang:iolist_to_binary(ErrorStr), ReqState};
+
+%% 503: static overload/maintenance page, also logged.
+render_error_body(503, Req, _Reason) ->
+ {ok, ReqState} = Req:add_response_header("Content-Type", "text/html"),
+ error_logger:error_msg("Webmachine cannot fulfill"
+ " the request at this time"),
+ ErrorStr = "<html><head><title>503 Service Unavailable</title>"
+ "</head><body><h1>Service Unavailable</h1>"
+ "The server is currently unable to handle "
+ "the request due to a temporary overloading "
+ "or maintenance of the server.<br>"
+ "<P><HR><ADDRESS>mochiweb+webmachine web server"
+ "</ADDRESS></body></html>",
+ {list_to_binary(ErrorStr), ReqState};
+
+%% Any other code: generic page built from httpd_util's reason phrase;
+%% Reason is interpolated into the body, so callers pass iodata here.
+render_error_body(Code, Req, Reason) ->
+ {ok, ReqState} = Req:add_response_header("Content-Type", "text/html"),
+ ReasonPhrase = httpd_util:reason_phrase(Code),
+ Body = ["<html><head><title>",
+ integer_to_list(Code),
+ " ",
+ ReasonPhrase,
+ "</title></head><body><h1>",
+ ReasonPhrase,
+ "</h1>",
+ Reason,
+ "<p><hr><address>mochiweb+webmachine web server</address></body></html>"],
+ {iolist_to_binary(Body), ReqState}.
+
+%% Log the error with the request path, except for expected normal
+%% exits which would only add noise.
+maybe_log(_Req, {error, {exit, normal, _Stack}}) ->
+ %% webmachine_request did an exit(normal), so suppress this
+ %% message. This usually happens when a chunked upload is
+ %% interrupted by network failure.
+ ok;
+maybe_log(Req, Reason) ->
+ {Path,_} = Req:path(),
+ error_logger:error_msg("webmachine error: path=~p~n~p~n", [Path, Reason]).
--- /dev/null
+%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc Helper functions for webmachine's default log handlers
+
+-module(webmachine_log).
+
+-include("webmachine_logger.hrl").
+
+-export([add_handler/2,
+ call/2,
+ call/3,
+ datehour/0,
+ datehour/1,
+ defer_refresh/1,
+ delete_handler/1,
+ fix_log/2,
+ fmt_ip/1,
+ fmtnow/0,
+ log_access/1,
+ log_close/3,
+ log_open/1,
+ log_open/2,
+ log_write/2,
+ maybe_rotate/3,
+ month/1,
+ refresh/2,
+ suffix/1,
+ zeropad/2,
+ zone/0]).
+
+-record(state, {hourstamp :: non_neg_integer(),
+ filename :: string(),
+ handle :: file:io_device()}).
+
+%% @doc Add a handler to receive log events
+%% ?EVENT_LOGGER is the shared gen_event manager (presumably defined
+%% in webmachine_logger.hrl, included above).
+-type add_handler_result() :: ok | {'EXIT', term()} | term().
+-spec add_handler(atom() | {atom(), term()}, term()) -> add_handler_result().
+add_handler(Mod, Args) ->
+ gen_event:add_handler(?EVENT_LOGGER, Mod, Args).
+
+%% @doc Make a synchronous call directly to a specific event handler
+%% module
+%% Uses gen_event's default call timeout; see call/3 to override it.
+-type error() :: {error, bad_module} | {'EXIT', term()} | term().
+-spec call(atom(), term()) -> term() | error().
+call(Mod, Msg) ->
+ gen_event:call(?EVENT_LOGGER, Mod, Msg).
+
+%% @doc Make a synchronous call directly to a specific event handler
+%% module
+%% Timeout is milliseconds or the atom 'infinity'.
+-spec call(atom(), term(), timeout()) -> term() | error().
+call(Mod, Msg, Timeout) ->
+ gen_event:call(?EVENT_LOGGER, Mod, Msg, Timeout).
+
+%% @doc Return a four-tuple containing year, month, day, and hour
+%% of the current time.
+%% Based on os:timestamp/0 and computed in UTC (see datehour/1).
+-type datehour() :: {calendar:year(), calendar:month(), calendar:day(), calendar:hour()}.
+-spec datehour() -> datehour().
+datehour() ->
+ datehour(os:timestamp()).
+
+%% @doc Return the {Year, Month, Day, Hour} of the given erlang
+%% timestamp, computed in universal (UTC) time.
+-spec datehour(erlang:timestamp()) -> datehour().
+datehour(Timestamp) ->
+ {{Year, Month, Day}, {Hour, _Min, _Sec}} =
+ calendar:now_to_universal_time(Timestamp),
+ {Year, Month, Day, Hour}.
+
+%% @doc Defer the refresh of a log file.
+%% Schedules refresh/2 to run at the top of the next UTC hour so the
+%% handler rotates its file on the hour boundary.
+-spec defer_refresh(atom()) -> {ok, timer:tref()} | {error, term()}.
+defer_refresh(Mod) ->
+ {_, {_, M, S}} = calendar:universal_time(),
+ %% Seconds remaining until the top of the next hour.
+ Seconds = 3600 - ((M * 60) + S),
+ %% timer:apply_after/4 evaluates its argument list immediately, so
+ %% passing os:timestamp() here handed refresh/2 a timestamp from the
+ %% *current* hour: maybe_rotate/3 then saw an unchanged hour and the
+ %% deferred rotation was a no-op. Pass a timestamp advanced to the
+ %% scheduled firing time instead.
+ {MegaSecs, Secs, MicroSecs} = os:timestamp(),
+ Target = {MegaSecs + (Secs + Seconds) div 1000000,
+ (Secs + Seconds) rem 1000000,
+ MicroSecs},
+ timer:apply_after(1000 * Seconds, ?MODULE, refresh, [Mod, Target]).
+
+%% @doc Remove a log handler
+%% The empty list is passed to the handler's terminate/2 as Args.
+-type delete_handler_result() :: term() | {error, module_not_found} | {'EXIT', term()}.
+-spec delete_handler(atom() | {atom(), term()}) -> delete_handler_result().
+delete_handler(Mod) ->
+ gen_event:delete_handler(?EVENT_LOGGER, Mod, []).
+
+%% Seek backwards to the last valid log entry
+%% Scans back one byte at a time from Location until a newline is
+%% found. file:pread/3 also repositions the device, so the caller
+%% (log_open/2) can simply file:truncate/1 afterwards to drop any
+%% partial trailing line left by an unclean shutdown.
+-spec fix_log(file:io_device(), non_neg_integer()) -> ok.
+fix_log(_FD, 0) ->
+ ok;
+fix_log(FD, 1) ->
+ %% Reached the first byte without finding a newline via pread:
+ %% rewind so the whole file is truncated away.
+ {ok, 0} = file:position(FD, 0),
+ ok;
+fix_log(FD, Location) ->
+ case file:pread(FD, Location - 1, 1) of
+ {ok, [$\n | _]} ->
+ ok;
+ {ok, _} ->
+ fix_log(FD, Location - 1)
+ end.
+
+%% @doc Format a peer address for logging: 'undefined' becomes the
+%% placeholder "0.0.0.0", address tuples are rendered in dotted/colon
+%% notation, and host name strings pass through unchanged.
+-spec fmt_ip(undefined | string() | inet:ip4_address() | inet:ip6_address()) -> string().
+fmt_ip(undefined) ->
+ "0.0.0.0";
+fmt_ip(Addr) when is_tuple(Addr) ->
+ inet_parse:ntoa(Addr);
+fmt_ip(HostName) ->
+ HostName.
+
+%% @doc Format the current time into a string
+%% Produces the Common Log Format timestamp, e.g.
+%% "[07/Apr/2013:13:01:02 +0000]", using local time plus zone/0.
+-spec fmtnow() -> string().
+fmtnow() ->
+ {{Year, Month, Date}, {Hour, Min, Sec}} = calendar:local_time(),
+ io_lib:format("[~2..0w/~s/~4..0w:~2..0w:~2..0w:~2..0w ~s]",
+ [Date,month(Month),Year, Hour, Min, Sec, zone()]).
+
+%% @doc Notify registered log event handler of an access event.
+%% sync_notify blocks until every handler has processed the event,
+%% providing natural back-pressure on request logging.
+-spec log_access(wm_log_data()) -> ok.
+log_access(#wm_log_data{}=LogData) ->
+ gen_event:sync_notify(?EVENT_LOGGER, {log_access, LogData}).
+
+%% @doc Close a log file.
+%% Mod and Name are only used for the informational log line.
+-spec log_close(atom(), string(), file:io_device()) -> ok | {error, term()}.
+log_close(Mod, Name, FD) ->
+ error_logger:info_msg("~p: closing log file: ~p~n", [Mod, Name]),
+ file:close(FD).
+
+%% @doc Open a new log file for writing
+%% Returns the device together with the datehour() stamp used for the
+%% file suffix, so the caller can keep it for rotation checks.
+-spec log_open(string()) -> {file:io_device(), non_neg_integer()}.
+log_open(FileName) ->
+ DateHour = datehour(),
+ {log_open(FileName, DateHour), DateHour}.
+
+%% @doc Open a new log file for writing
+%% The file name gets an hourly suffix (see suffix/1); any partial
+%% trailing line from an unclean shutdown is located by fix_log/2 and
+%% cut off by the truncate.
+%% Spec fixed: callers (log_open/1, maybe_rotate/3) pass a datehour()
+%% tuple here, not a non_neg_integer().
+-spec log_open(string(), datehour()) -> file:io_device().
+log_open(FileName, DateHour) ->
+ LogName = FileName ++ suffix(DateHour),
+ error_logger:info_msg("opening log file: ~p~n", [LogName]),
+ %% NOTE(review): ensure_dir's result is ignored; a failure surfaces
+ %% as a badmatch on file:open/2 below.
+ filelib:ensure_dir(LogName),
+ {ok, FD} = file:open(LogName, [read, write, raw]),
+ {ok, Location} = file:position(FD, eof),
+ fix_log(FD, Location),
+ file:truncate(FD),
+ FD.
+
+%% @doc Append iodata to an open log device.
+%% file:write/2 accepts arbitrarily nested iodata directly, so the
+%% previous lists:flatten/1 pass was pure overhead and is dropped.
+-spec log_write(file:io_device(), iolist()) -> ok | {error, term()}.
+log_write(FD, IoData) ->
+ file:write(FD, IoData).
+
+%% @doc Rotate a log file if the hour it represents
+%% has passed.
+-spec maybe_rotate(atom(), erlang:timestamp(), #state{}) -> #state{}.
+maybe_rotate(Mod, Time, State) ->
+ ThisHour = datehour(Time),
+ if ThisHour == State#state.hourstamp ->
+ State;
+ true ->
+ %% Hour changed: schedule the next timed refresh, close the
+ %% old file, and open a new one suffixed with the new hour.
+ defer_refresh(Mod),
+ log_close(Mod, State#state.filename, State#state.handle),
+ Handle = log_open(State#state.filename, ThisHour),
+ State#state{hourstamp=ThisHour, handle=Handle}
+ end.
+
+%% @doc Convert numeric month value to the abbreviation
+%% English three-letter names as used in the Common Log Format
+%% timestamp built by fmtnow/0. Out-of-range input crashes with
+%% function_clause, by design.
+-spec month(1..12) -> string().
+month(1) ->
+ "Jan";
+month(2) ->
+ "Feb";
+month(3) ->
+ "Mar";
+month(4) ->
+ "Apr";
+month(5) ->
+ "May";
+month(6) ->
+ "Jun";
+month(7) ->
+ "Jul";
+month(8) ->
+ "Aug";
+month(9) ->
+ "Sep";
+month(10) ->
+ "Oct";
+month(11) ->
+ "Nov";
+month(12) ->
+ "Dec".
+
+%% @doc Make a synchronous call to instruct a log handler to refresh
+%% itself.
+%% Invoked by the timer set up in defer_refresh/1; uses an infinite
+%% timeout since rotation may involve slow file operations.
+-spec refresh(atom(), erlang:timestamp()) -> ok | {error, term()}.
+refresh(Mod, Time) ->
+ call(Mod, {refresh, Time}, infinity).
+
+%% Build the hourly log-file suffix from a datehour() stamp,
+%% e.g. {2013,4,1,9} -> ".2013_04_01_09".
+-spec suffix(datehour()) -> string().
+suffix({Y, M, D, H}) ->
+ YS = zeropad(Y, 4),
+ MS = zeropad(M, 2),
+ DS = zeropad(D, 2),
+ HS = zeropad(H, 2),
+ lists:flatten([$., YS, $_, MS, $_, DS, $_, HS]).
+
+%% Render Num as a decimal string left-padded with zeros to at least
+%% MinLength characters (longer numbers are returned unpadded).
+-spec zeropad(integer(), integer()) -> string().
+zeropad(Num, MinLength) ->
+ NumStr = integer_to_list(Num),
+ zeropad_str(NumStr, MinLength - length(NumStr)).
+
+%% Prepend Pad zero characters to Digits; a non-positive Pad leaves
+%% the string unchanged.
+-spec zeropad_str(string(), integer()) -> string().
+zeropad_str(Digits, Pad) when Pad > 0 ->
+ zeropad_str([$0 | Digits], Pad - 1);
+zeropad_str(Digits, _Pad) ->
+ Digits.
+
+%% @doc Current local UTC offset formatted as (sign)HHMM, e.g. "+0000".
+-spec zone() -> string().
+zone() ->
+ Time = erlang:universaltime(),
+ LocalTime = calendar:universal_time_to_local_time(Time),
+ DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) -
+ calendar:datetime_to_gregorian_seconds(Time),
+ %% Encode hours and minutes separately. The old expression
+ %% (DiffSecs/3600)*100 mis-rendered fractional-hour zones (e.g.
+ %% +05:30 came out as "+0550" instead of "+0530") and passed a
+ %% float to zone/1 despite its integer() spec.
+ Hours = abs(DiffSecs) div 3600,
+ Mins = (abs(DiffSecs) rem 3600) div 60,
+ Sign = if DiffSecs < 0 -> -1; true -> 1 end,
+ zone(Sign * (Hours * 100 + Mins)).
+
+%% Ugly reformatting code to get times like +0000 and -1300
+
+%% Formats the absolute value as four zero-padded digits with an
+%% explicit leading sign; trunc/1 tolerates float input.
+-spec zone(integer()) -> string().
+zone(Val) when Val < 0 ->
+ io_lib:format("-~4..0w", [trunc(abs(Val))]);
+zone(Val) when Val >= 0 ->
+ io_lib:format("+~4..0w", [trunc(abs(Val))]).
--- /dev/null
+%% Copyright (c) 2011-2013 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc Default log handler for webmachine
+
+-module(webmachine_log_handler).
+
+-behaviour(gen_event).
+
+%% gen_event callbacks
+-export([init/1,
+ handle_call/2,
+ handle_event/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-include("webmachine_logger.hrl").
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+-record(state, {hourstamp, filename, handle}).
+
+-define(FILENAME, "access.log").
+
+%% ===================================================================
+%% gen_event callbacks
+%% ===================================================================
+
+%% @private
+%% Expects [BaseDir]; opens BaseDir/access.log with the current hour
+%% suffix and schedules the first timed rotation check.
+init([BaseDir]) ->
+ webmachine_log:defer_refresh(?MODULE),
+ FileName = filename:join(BaseDir, ?FILENAME),
+ {Handle, DateHour} = webmachine_log:log_open(FileName),
+ {ok, #state{filename=FileName, handle=Handle, hourstamp=DateHour}}.
+
+%% @private
+%% The {_Label, MRef, get_modules} clause appears to answer an OTP
+%% internal which-modules query — TODO confirm against the gen_event
+%% protocol. {refresh, Time} triggers a rotation check; everything
+%% else is acknowledged with ok.
+handle_call({_Label, MRef, get_modules}, State) ->
+ {ok, {MRef, [?MODULE]}, State};
+handle_call({refresh, Time}, State) ->
+ {ok, ok, webmachine_log:maybe_rotate(?MODULE, Time, State)};
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+%% @private
+%% On each access event: rotate the file first if the hour rolled
+%% over, then append the formatted log line; other events are ignored.
+handle_event({log_access, LogData}, State) ->
+ NewState = webmachine_log:maybe_rotate(?MODULE, os:timestamp(), State),
+ Msg = format_req(LogData),
+ webmachine_log:log_write(NewState#state.handle, Msg),
+ {ok, NewState};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+%% @private
+%% Unexpected messages are dropped.
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%% @private
+%% NOTE(review): the open log handle is not closed here — TODO confirm
+%% whether that is intentional (the OS reclaims it on process exit).
+terminate(_Reason, _State) ->
+ ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% ===================================================================
+%% Internal functions
+%% ===================================================================
+
+%% Turn a #wm_log_data{} record into a Common Log Format line (iodata).
+format_req(#wm_log_data{method=Method,
+ headers=Headers,
+ peer=Peer,
+ path=Path,
+ version=Version,
+ response_code=ResponseCode,
+ response_length=ResponseLength}) ->
+ %% CLF has no authenticated-user tracking here; always "-".
+ User = "-",
+ Time = webmachine_log:fmtnow(),
+ %% response_code may be a bare integer, a {Code, ReasonPhrase}
+ %% tuple, or something else (passed through as-is).
+ Status = case ResponseCode of
+ {Code, _ReasonPhrase} when is_integer(Code) ->
+ integer_to_list(Code);
+ _ when is_integer(ResponseCode) ->
+ integer_to_list(ResponseCode);
+ _ ->
+ ResponseCode
+ end,
+ Length = integer_to_list(ResponseLength),
+ %% Missing Referer/User-Agent headers are logged as empty strings.
+ Referer =
+ case mochiweb_headers:get_value("Referer", Headers) of
+ undefined -> "";
+ R -> R
+ end,
+ UserAgent =
+ case mochiweb_headers:get_value("User-Agent", Headers) of
+ undefined -> "";
+ U -> U
+ end,
+ fmt_alog(Time, Peer, User, atom_to_list(Method), Path, Version,
+ Status, Length, Referer, UserAgent).
+
+%% Assemble the Apache combined-log-format line as iodata:
+%% IP - User [Time] "Method Path HTTP/VM.Vm" Status Length "Referrer" "UserAgent"
+fmt_alog(Time, Ip, User, Method, Path, {VM,Vm},
+ Status, Length, Referrer, UserAgent) ->
+ [webmachine_log:fmt_ip(Ip), " - ", User, [$\s], Time, [$\s, $"], Method, " ", Path,
+ " HTTP/", integer_to_list(VM), ".", integer_to_list(Vm), [$",$\s],
+ Status, [$\s], Length, [$\s,$"], Referrer,
+ [$",$\s,$"], UserAgent, [$",$\n]].
--- /dev/null
+%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc A process that does a gen_event:add_sup_handler and attempts to re-add
+%% event handlers when they exit.
+
+%% @private
+
+-module(webmachine_logger_watcher).
+
+-behaviour(gen_server).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+%% callbacks
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-export([start_link/3,
+ start/3]).
+
+-record(state, {module, config, event}).
+
+%% @doc Start a linked watcher that installs handler Module with
+%% Config on gen_event manager Event and re-installs it on crash.
+start_link(Event, Module, Config) ->
+ gen_server:start_link(?MODULE, [Event, Module, Config], []).
+
+%% @doc Same as start_link/3 but without linking to the caller.
+start(Event, Module, Config) ->
+ gen_server:start(?MODULE, [Event, Module, Config], []).
+
+%% Install the handler immediately (with retry scheduling on failure)
+%% and remember what to re-install if it ever exits.
+init([Event, Module, Config]) ->
+ install_handler(Event, Module, Config),
+ {ok, #state{event=Event, module=Module, config=Config}}.
+
+%% No synchronous API; acknowledge any call.
+handle_call(_Call, _From, State) ->
+ {reply, ok, State}.
+
+%% No asynchronous API; ignore any cast.
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+%% A supervised handler leaving with normal/shutdown means an orderly
+%% stop — stop the watcher too. Any other exit reason is a crash, so
+%% re-install the handler. reinstall_handler is the retry message
+%% scheduled by install_handler/3 after a failed add.
+handle_info({gen_event_EXIT, Module, normal}, #state{module=Module} = State) ->
+ {stop, normal, State};
+handle_info({gen_event_EXIT, Module, shutdown}, #state{module=Module} = State) ->
+ {stop, normal, State};
+handle_info({gen_event_EXIT, Module, _Reason}, #state{module=Module,
+ config=Config, event=Event} = State) ->
+ install_handler(Event, Module, Config),
+ {noreply, State};
+handle_info(reinstall_handler, #state{module=Module, config=Config, event=Event} = State) ->
+ install_handler(Event, Module, Config),
+ {noreply, State};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% Nothing to clean up; the event manager owns the handler state.
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% internal
+
+%% Add the handler under supervision so this process receives
+%% gen_event_EXIT when it dies. On failure, retry in five seconds via
+%% a reinstall_handler message to ourselves.
+install_handler(Event, Module, Config) ->
+ case gen_event:add_sup_handler(Event, Module, Config) of
+ ok ->
+ ok;
+ _Error ->
+ erlang:send_after(5000, self(), reinstall_handler),
+ ok
+ end.
--- /dev/null
+%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc A supervisor for monitoring webmachine_logger_handler_watcher processes.
+
+%% @private
+
+-module(webmachine_logger_watcher_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Callbacks
+-export([init/1]).
+
+%% @doc Start the supervisor, registered locally under the module name.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% simple_one_for_one: webmachine_logger_watcher children are started
+%% on demand via supervisor:start_child/2. 'transient' restarts them
+%% only on abnormal exit; at most 10 restarts per 60 seconds.
+init([]) ->
+ {ok, {{simple_one_for_one, 10, 60},
+ [
+ {webmachine_logger_watcher, {webmachine_logger_watcher, start_link, []},
+ transient, 5000, worker, [webmachine_logger_watcher]}
+ ]}}.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2008 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Mochiweb interface for webmachine.
+-module(webmachine_mochiweb).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+-export([start/1, stop/0, loop/2]).
+
+%% The `log_dir' option is deprecated; it is still accepted here so
+%% that it can be stripped from the options list when present
+-define(WM_OPTIONS, [error_handler,
+ log_dir,
+ rewrite_module,
+ resource_module_option]).
+
+-define (WM_OPTION_DEFAULTS, [{error_handler, webmachine_error_handler}]).
+
+%% @doc Start a mochiweb listener for webmachine. The webmachine
+%% options (?WM_OPTIONS plus dispatch/name/dispatch_group) are split
+%% out: routes are registered with the router, remaining WM options
+%% become application env defaults, and only mochiweb's own options
+%% are passed to mochiweb_http:start/1.
+start(Options) ->
+ {DispatchList, PName, DGroup, WMOptions, OtherOptions} = get_wm_options(Options),
+ webmachine_router:init_routes(DGroup, DispatchList),
+ [application_set_unless_env_or_undef(K, V) || {K, V} <- WMOptions],
+ MochiName = list_to_atom(to_list(PName) ++ "_mochiweb"),
+ LoopFun = fun(X) -> loop(DGroup, X) end,
+ mochiweb_http:start([{name, MochiName}, {loop, LoopFun} | OtherOptions]).
+
+%% @doc Stop the mochiweb listener belonging to the calling process.
+%% Only works when called from the registered webmachine process whose
+%% name the "_mochiweb" listener was derived from; otherwise the match
+%% on registered_name fails with a badmatch.
+stop() ->
+ {registered_name, PName} = process_info(self(), registered_name),
+ MochiName = list_to_atom(atom_to_list(PName) ++ "_mochiweb"),
+ mochiweb_http:stop(MochiName).
+
+%% @doc Mochiweb request callback: dispatch the request against the
+%% route set registered under Name and run webmachine's decision core
+%% on the matched resource. Dispatch failures map to 400/404 and any
+%% exception to 500, all rendered through handle_error/3.
+loop(Name, MochiReq) ->
+ Req = webmachine:new_request(mochiweb, MochiReq),
+ DispatchList = webmachine_router:get_routes(Name),
+ %% Prefer proxy-supplied host headers over the plain Host header.
+ Host = case host_headers(Req) of
+ [H|_] -> H;
+ [] -> []
+ end,
+ {Path, _} = Req:path(),
+ {RD, _} = Req:get_reqdata(),
+
+ %% Run the dispatch code, catch any errors...
+ try webmachine_dispatcher:dispatch(Host, Path, DispatchList, RD) of
+ {error, invalid_host} ->
+ handle_error(400, "Invalid Host", Req);
+ {no_dispatch_match, _UnmatchedHost, _UnmatchedPathTokens} ->
+ handle_error(404, {none, none, []}, Req);
+ {Mod, ModOpts, HostTokens, Port, PathTokens, Bindings,
+ AppRoot, StringPath} ->
+ %% Placeholder resource used only to reach wrap/2 below.
+ BootstrapResource = webmachine_resource:new(x,x,x,x),
+ {ok,RS1} = Req:load_dispatch_data(Bindings,HostTokens,Port,
+ PathTokens,AppRoot,StringPath),
+ XReq1 = {webmachine_request,RS1},
+ try
+ {ok, Resource} = BootstrapResource:wrap(Mod, ModOpts),
+ {ok,RS2} = XReq1:set_metadata('resource_module',
+ resource_module(Mod, ModOpts)),
+ webmachine_decision_core:handle_request(Resource, RS2)
+ catch
+ error:Error ->
+ handle_error(500, {error, Error}, Req)
+ end
+ catch
+ Type : Error ->
+ handle_error(500, {Type, Error}, Req)
+ end.
+
+%% Render Error through the configured error_handler (application env,
+%% defaulted at start time), send the response, and hand the access
+%% record to the logger in a throwaway process so logging cannot block
+%% or crash the request path.
+handle_error(Code, Error, Req) ->
+ {ok, ErrorHandler} = application:get_env(webmachine, error_handler),
+ {ErrorHTML,ReqState1} =
+ ErrorHandler:render_error(Code, Req, Error),
+ Req1 = {webmachine_request,ReqState1},
+ {ok,ReqState2} = Req1:append_to_response_body(ErrorHTML),
+ Req2 = {webmachine_request,ReqState2},
+ {ok,ReqState3} = Req2:send_response(Code),
+ Req3 = {webmachine_request,ReqState3},
+ {LogData,_ReqState4} = Req3:log_data(),
+ spawn(webmachine_log, log_access, [LogData]).
+
+%% Fold helper: move OptName from the remaining options into the
+%% accumulated webmachine options, falling back to ?WM_OPTION_DEFAULTS
+%% (or undefined) when absent.
+get_wm_option(OptName, {WMOptions, OtherOptions}) ->
+ {Value, UpdOtherOptions} =
+ handle_get_option_result(get_option(OptName, OtherOptions), OptName),
+ {[{OptName, Value} | WMOptions], UpdOtherOptions}.
+
+%% Substitute the default from ?WM_OPTION_DEFAULTS when the option was
+%% not supplied; otherwise pass the {Value, RemainingOptions} through.
+handle_get_option_result({undefined, Options}, Name) ->
+ {proplists:get_value(Name, ?WM_OPTION_DEFAULTS), Options},
+handle_get_option_result(GetOptRes, _) ->
+ GetOptRes.
+
+%% Split the caller's option list into webmachine-specific parts
+%% (dispatch list, server name, dispatch group, ?WM_OPTIONS values)
+%% and the remainder destined for mochiweb. Name defaults to
+%% 'webmachine' and the dispatch group to 'default'.
+get_wm_options(Options) ->
+ {DispatchList, Options1} = get_option(dispatch, Options),
+ {Name, Options2} =
+ case get_option(name, Options1) of
+ {undefined, Opts2} ->
+ {webmachine, Opts2};
+ NRes -> NRes
+ end,
+ {DGroup, Options3} =
+ case get_option(dispatch_group, Options2) of
+ {undefined, Opts3} ->
+ {default, Opts3};
+ RRes -> RRes
+ end,
+ {WMOptions, RestOptions} = lists:foldl(fun get_wm_option/2, {[], Options3}, ?WM_OPTIONS),
+ {DispatchList, Name, DGroup, WMOptions, RestOptions}.
+
%% Remove Option from the proplist Options. Returns {Value, Rest} when
%% present, {undefined, Options} (list untouched) when absent.
get_option(Option, Options) ->
    case lists:keytake(Option, 1, Options) of
        {value, {Option, Value}, Remaining} ->
            {Value, Remaining};
        false ->
            {undefined, Options}
    end.
+
%% @doc Set a webmachine application env var unless the supplied value
%% is 'undefined' (nothing to set) or the var is already configured.
application_set_unless_env_or_undef(_Var, undefined) ->
    ok;
application_set_unless_env_or_undef(Var, Value) ->
    application_set_unless_env(webmachine, Var, Value).
+
%% Set App's env var Var to Value only when Var is not already defined,
%% so explicit configuration always wins over programmatic defaults.
application_set_unless_env(App, Var, Value) ->
    case lists:keymember(Var, 1, application:get_all_env(App)) of
        true ->
            ok;
        false ->
            application:set_env(App, Var, Value)
    end.
+
%% Collect the values of the host-identifying request headers, in
%% proxy-first precedence order, dropping any that are absent.
host_headers(Req) ->
    Fetched = [Req:get_header_value(HdrName)
               || HdrName <- ["x-forwarded-host",
                              "x-forwarded-server",
                              "host"]],
    [Value || {Value, _ReqState} <- Fetched, Value /= undefined].
+
%% Fetch a key from the webmachine application environment.
get_app_env(Key) ->
    application:get_env(webmachine, Key).

%% @private
%% @doc Determine the module recorded in the request metadata under the
%% `resource_module' key. Normally this is simply `Mod', but when the
%% `resource_module_option' application env var is set, its value names
%% a key in `ModOpts' whose value (when present) overrides `Mod'. This
%% supports dispatch schemes where many rules share a single resource
%% module containing common functionality and use `ModOpts' to select a
%% smaller module of specialized callbacks.
resource_module(Mod, ModOpts) ->
    resource_module(Mod, ModOpts, get_app_env(resource_module_option)).

resource_module(Mod, _ModOpts, undefined) ->
    Mod;
resource_module(Mod, ModOpts, {ok, OptName}) ->
    proplists:get_value(OptName, ModOpts, Mod).
+
%% Coerce an atom to its string form; lists pass through unchanged.
to_list(Atom) when is_atom(Atom) ->
    atom_to_list(Atom);
to_list(List) when is_list(List) ->
    List.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2009 Basho Technologies
+
+%% @doc Utility for parsing multipart form bodies.
+
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+
+%% http://www.apache.org/licenses/LICENSE-2.0
+
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+-module(webmachine_multipart).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+-export([get_all_parts/2,stream_parts/2, find_boundary/1]).
+
% @type incoming_req_body() = binary().
% The request body, in "multipart/form-data" (RFC 2388) form.
+
+% @type boundary() = string().
+% The multipart boundary, as taken from the containing message's content-type.
+
+% @type fpart() = {fpartname(), {[fparam()],[fheader()]}, fcontent()}.
+% A single part of a multipart form.
+
+% @type fpartname() = string().
+% The name from the form field of a form part.
+
+% @type fparam() = {binary(), binary()}.
+% A key-value parameter from the content-disposition header in a form part.
+
+% @type fheader() = {binary(), binary()}.
+% A header name and value supplied within a form part.
+
+% @type fcontent() = binary().
+% The body content within a form part.
+
+% @doc Find the multipart boundary for a request.
+% @spec find_boundary(wrq:wm_reqdata()) -> boundary()
find_boundary(ReqData) ->
    %% The boundary is everything after "boundary=" in the content-type
    %% request header.
    ContentType = wrq:get_req_header("content-type", ReqData),
    Marker = "boundary=",
    Start = string:str(ContentType, Marker) + length(Marker),
    string:substr(ContentType, Start).
+
+% @doc Turn a multipart form into component parts.
+% @spec get_all_parts(incoming_req_body(), boundary()) -> [fpart()]
get_all_parts(Body, Boundary) when is_binary(Body), is_list(Boundary) ->
    %% Re-chunk the complete body into a 1024-byte stream so the
    %% streaming parser below is the single parsing code path.
    StreamStruct = send_streamed_body(Body,1024),
    getparts1(stream_parts(StreamStruct, Boundary), []).
+
+% @doc Similar to get_all_parts/2, but for streamed/chunked bodies.
+% Takes as input the result of wrq:stream_req_body/2, and provides
+% either the atom 'done_parts' when no more parts are available, or
+% a tuple with the next part and a function. That function will
+% have 0-arity and the same return type as stream_parts/2 itself.
+% @spec stream_parts(wm_stream(), boundary()) ->
+% 'done_parts' | {fpart(), function()}
stream_parts(StreamStruct, Boundary) ->
    %% Parts are delimited by "--" ++ Boundary (RFC 2046 framing).
    stream_form(StreamStruct, "--" ++ Boundary, []).
+
%% stream_form(StreamStruct, Boundary, Fragments) walks the chunk
%% stream, splitting each accumulated hunk on the boundary string.
%% Fragments holds boundary-split pieces not yet emitted as parts.
stream_form(_, _, [<<"----\n">>|_]) -> done_parts;
stream_form(_, _, [<<"--\n">>|_]) -> done_parts;
stream_form({Hunk, Next}, Boundary, []) ->
    stream_form(get_more_data(Next), Boundary, re:split(Hunk, Boundary,[]));
stream_form({Hunk, Next}, Boundary, [<<>>|DQ]) ->
    %% Empty leading fragment: the hunk started with a boundary.
    stream_form({Hunk, Next}, Boundary, DQ);
stream_form({Hunk, Next}, Boundary, [H|[T1|T2]]) ->
    %% Two or more fragments queued: the first one is a complete part.
    {make_part(H), fun() ->
                           stream_form({Hunk, Next}, Boundary, [T1|T2]) end};
stream_form({Hunk, really_done}, Boundary, DQ) ->
    %% Input exhausted: join the leftovers with the final hunk and emit
    %% the remaining parts. NOTE: clause order matters — this must come
    %% before the [Single] clause so the really_done marker wins.
    DQBin = iolist_to_binary(DQ),
    FullHunk = <<DQBin/binary, Hunk/binary>>,
    stream_parts(re:split(FullHunk, Boundary,[]));
stream_form({Hunk, Next}, Boundary, [Single]) ->
    %% One fragment left — it may continue in the next hunk, so append
    %% and re-split.
    FullHunk = <<Single/binary, Hunk/binary>>,
    stream_form(get_more_data(Next), Boundary, re:split(FullHunk, Boundary,[])).
+
%% Emit the final queue of boundary-split fragments as parts once the
%% stream has terminated, recognizing the various trailing-boundary
%% forms that different clients produce.
stream_parts([]) -> done_parts;
% browsers are fun, and terminate posts slightly differently from each other:
stream_parts([<<"----\n">>]) -> done_parts;
stream_parts([<<"--\n">>]) -> done_parts;
stream_parts([<<"----\r\n">>]) -> done_parts;
stream_parts([<<"--\r\n">>]) -> done_parts;
stream_parts([<<"--\r\n--\n">>]) -> done_parts;
stream_parts([<<"--\r\n--\r\n">>]) -> done_parts;
stream_parts([H|T]) -> {make_part(H), fun() -> stream_parts(T) end}.
+
%% Pull the next hunk from the body stream; when the stream is done,
%% synthesize a terminating boundary tail and flag really_done.
get_more_data(done) ->
    {<<"--\n">>, really_done};
get_more_data(NextFun) ->
    NextFun().
+
%% Turn one raw part blob into {Name, {Params, Headers}, Body}.
make_part(PartData) ->
    %% Head and body are separated by a blank line (CRLF CRLF).
    [HeadData, BodyWithCRLF] = re:split(PartData, "\\r\\n\\r\\n", [{parts,2}]),
    %% Remove the trailing \r\n that precedes the next boundary.
    BodyLen = size(BodyWithCRLF) - 2,
    <<Body:BodyLen/binary, _/binary>> = BodyWithCRLF,

    HeadList = [list_to_binary(X) ||
                   X <- string:tokens(binary_to_list(HeadData), "\r\n")],
    {Name, Params, Headers} = make_headers(HeadList),
    {Name, {Params,Headers}, Body}.
+
%% Fold a part's header lines into {Name, DispositionParams, Headers}.
%% Name/Params stay as the 'name_undefined'/'params_undefined' markers
%% unless a Content-Disposition header is found.
make_headers(X) ->
    make_headers(X, name_undefined, params_undefined, []).
make_headers([], Name, Params, Headers) -> {Name, Params, Headers};
make_headers([<<>>|HL], Name, Params, Headers) ->
    %% Skip blank lines.
    make_headers(HL, Name, Params, Headers);
make_headers(
  [<<"Content-Disposition: form-data; ", Names/binary>>|HL],
  _, _, Headers) ->
    %% The content-disposition header carries the field name and params.
    {Name, Params} = extract_names(Names),
    make_headers(HL, Name, Params, Headers);
make_headers([H|HL], Name, Params, Headers) ->
    make_headers(HL, Name, Params, [cheap_parse_header(H)|Headers]).
+
%% Parse the parameter list of a content-disposition header, e.g.
%% <<"name=\"f\"; filename=\"x.txt\"">>, into {"f", Params}. The
%% comprehension splits on "; ", then on the first "=", then strips the
%% quotes; params whose value is not fully quoted are filtered out by
%% the {K, [<<>>, V, <<>>]} match.
extract_names(NamesString) ->
    Params = [{K, V} ||
        {K, [<<>>, V, <<>>]} <- [{K0, re:split(V0,"\"",[])} ||
            [K0, V0] <- [re:split(N, "=", [{parts, 2}]) ||
                N <- re:split(NamesString, "; ", [])]]],
    %% A "name" param is mandatory; hd/1 crashes (intentionally) if the
    %% header did not carry one.
    Name = hd([binary_to_list(V) || {<<"name">>,V} <- Params]),
    {Name, Params}.
+
%% Split a raw header line on the first ": " into a {Name, Value} pair
%% of binaries; a line with no ": " crashes with badmatch (on purpose).
cheap_parse_header(HeaderLine) ->
    [HeaderName | [HeaderValue]] = re:split(HeaderLine, ": ", [{parts,2}]),
    {HeaderName, HeaderValue}.
+
%% Drain a part stream ({Part, NextFun} chain ending in done_parts)
%% into a list, preserving arrival order.
getparts1(done_parts, Acc) ->
    lists:reverse(Acc);
getparts1({Part, Streamer}, Acc) ->
    getparts1(Streamer(), [Part|Acc]).
+
%% Re-chunk a binary into a lazy {Hunk, NextFun | done} stream of at
%% most Max bytes per hunk, mimicking wrq:stream_req_body/2 output.
send_streamed_body(Body, Max) ->
    BitLen = 8 * Max,
    case Body of
        <<Hunk:BitLen, Remainder/binary>> ->
            {<<Hunk:BitLen>>, fun() -> send_streamed_body(Remainder, Max) end};
        _ShortTail ->
            {Body, done}
    end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
%% Flash-style upload form with three fields (Filename, Filedata with a
%% nested content-type header, Upload) parsed in one pass.
body_test() ->
    Body = <<"------------ae0gL6gL6Ij5KM7Ef1KM7ei4ae0cH2\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\ntestfile.txt\r\n------------ae0gL6gL6Ij5KM7Ef1KM7ei4ae0cH2\r\nContent-Disposition: form-data; name=\"Filedata\"; filename=\"testfile.txt\"\r\nContent-Type: application/octet-stream\r\n\r\n%%% The contents of this file are a test,\n%%% do not be alarmed.\n\r\n------------ae0gL6gL6Ij5KM7Ef1KM7ei4ae0cH2\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ae0gL6gL6Ij5KM7Ef1KM7ei4ae0cH2--">>,
    Boundary = "----------ae0gL6gL6Ij5KM7Ef1KM7ei4ae0cH2",
    ?assertEqual(
       [{"Filename",
         {[{<<"name">>,<<"Filename">>}],[]},
         <<"testfile.txt">>},
        {"Filedata",
         {[{<<"name">>,<<"Filedata">>},
           {<<"filename">>,<<"testfile.txt">>}],
          [{<<"Content-Type">>,<<"application/octet-stream">>}]},
         <<"%%% The contents of this file are a test,\n%%% do not be alarmed.\n">>},
        {"Upload",{[{<<"name">>,<<"Upload">>}],[]},
         <<"Submit Query">>}],
       get_all_parts(Body, Boundary)).
+
+body2_test() ->
+ Body = <<"-----------------------------89205314411538515011004844897\r\nContent-Disposition: form-data; name=\"Filedata\"; filename=\"akamai.txt\"\r\nContent-Type: text/plain\r\n\r\nCAMBRIDGE, MA - February 18, 2009 - Akamai Technologies, Inc. (NASDAQ: AKAM), the leader in powering rich media, dynamic transactions and enterprise applications online, today announced that its Service & Support organization was awarded top honors for Innovation in Customer Service at the 3rd Annual Stevie Awards for Sales & Customer Service, an international competition recognizing excellence in disciplines that are crucial to business success.\n\n\"We have always set incredibly high standards with respect to the service and support we provide our customers,\" said Sanjay Singh, vice president of Global Service & Support at Akamai. \"Our support team provides highly responsive service around the clock to our global customer base and, as a result, has become an extension of our customers' online businesses. This prestigious award is validation of Akamai's commitment to customer service and technical support.\"\n\nAkamai Service & Support professionals are dedicated to working with customers on a daily basis to fine tune, optimize, and support their Internet initiatives. Akamai's winning submission highlighted the key pillars of its service and support offering, as well as the initiatives established to meet customer requirements for proactive communication, simplification, and faster response times.\n\n\"This year's honorees demonstrate that even in challenging economic times, it's possible for organizations to continue to shine in sales and customer service, the two most important functions in business: acquiring and keeping customers,\" said Michael Gallagher, president of the Stevie Awards.\n\nThe awards are presented by the Stevie Awards, which organizes several of the world's leading business awards shows, including the prestigious American Business Awards. 
Nicknamed the Stevies for the Greek word \"crowned,\" winners were announced during a gala banquet on Monday, February 9 at Caesars Palace in Las Vegas. Nominated customer service and sales executives from the U.S.A. and several other countries attended. More than 500 entries from companies of all sizes and in virtually every industry were submitted to this year's competition. There are 27 categories for customer service professionals, as well as 41 categories for sales professionals.\n\nDetails about the Stevie Awards for Sales & Customer Service and the list of honorees in all categories are available at www.stevieawards.com/sales. \n\r\n-----------------------------89205314411538515011004844897--\r\n">>,
+ Boundary = "---------------------------89205314411538515011004844897",
+ ?assertEqual(
+ [{"Filedata",
+ {[{<<"name">>,<<"Filedata">>},
+ {<<"filename">>,<<"akamai.txt">>}],
+ [{<<"Content-Type">>,<<"text/plain">>}]},
+ <<"CAMBRIDGE, MA - February 18, 2009 - Akamai Technologies, Inc. (NASDAQ: AKAM), the leader in powering rich media, dynamic transactions and enterprise applications online, today announced that its Service & Support organization was awarded top honors for Innovation in Customer Service at the 3rd Annual Stevie Awards for Sales & Customer Service, an international competition recognizing excellence in disciplines that are crucial to business success.\n\n\"We have always set incredibly high standards with respect to the service and support we provide our customers,\" said Sanjay Singh, vice president of Global Service & Support at Akamai. \"Our support team provides highly responsive service around the clock to our global customer base and, as a result, has become an extension of our customers' online businesses. This prestigious award is validation of Akamai's commitment to customer service and technical support.\"\n\nAkamai Service & Support professionals are dedicated to working with customers on a daily basis to fine tune, optimize, and support their Internet initiatives. Akamai's winning submission highlighted the key pillars of its service and support offering, as well as the initiatives established to meet customer requirements for proactive communication, simplification, and faster response times.\n\n\"This year's honorees demonstrate that even in challenging economic times, it's possible for organizations to continue to shine in sales and customer service, the two most important functions in business: acquiring and keeping customers,\" said Michael Gallagher, president of the Stevie Awards.\n\nThe awards are presented by the Stevie Awards, which organizes several of the world's leading business awards shows, including the prestigious American Business Awards. Nicknamed the Stevies for the Greek word \"crowned,\" winners were announced during a gala banquet on Monday, February 9 at Caesars Palace in Las Vegas. 
Nominated customer service and sales executives from the U.S.A. and several other countries attended. More than 500 entries from companies of all sizes and in virtually every industry were submitted to this year's competition. There are 27 categories for customer service professionals, as well as 41 categories for sales professionals.\n\nDetails about the Stevie Awards for Sales & Customer Service and the list of honorees in all categories are available at www.stevieawards.com/sales. \n">>
+ }],
+ get_all_parts(Body,Boundary)).
+
%% Firefox-style termination of a single-file upload post.
firefox_test() ->
    Body = <<"-----------------------------823378840143542612896544303\r\nContent-Disposition: form-data; name=\"upload-test\"; filename=\"abcdef.txt\"\r\nContent-Type: text/plain\r\n\r\n01234567890123456789012345678901234567890123456789\r\n-----------------------------823378840143542612896544303--\r\n">>,
    Boundary = "---------------------------823378840143542612896544303",
    ?assertEqual(
       [{"upload-test",
         {[{<<"name">>,<<"upload-test">>},
           {<<"filename">>,<<"abcdef.txt">>}],
          [{<<"Content-Type">>,<<"text/plain">>}]},
         <<"01234567890123456789012345678901234567890123456789">>}],
       get_all_parts(Body,Boundary)).
+
%% Chrome/WebKit-style boundary and termination of the same upload.
chrome_test() ->
    Body = <<"------WebKitFormBoundaryIHB9Xyi7ZCNKJusP\r\nContent-Disposition: form-data; name=\"upload-test\"; filename=\"abcdef.txt\"\r\nContent-Type: text/plain\r\n\r\n01234567890123456789012345678901234567890123456789\r\n------WebKitFormBoundaryIHB9Xyi7ZCNKJusP--\r\n">>,
    Boundary = "----WebKitFormBoundaryIHB9Xyi7ZCNKJusP",
    ?assertEqual(
       [{"upload-test",
         {[{<<"name">>,<<"upload-test">>},
           {<<"filename">>,<<"abcdef.txt">>}],
          [{<<"Content-Type">>,<<"text/plain">>}]},
         <<"01234567890123456789012345678901234567890123456789">>}],
       get_all_parts(Body,Boundary)).
+
+-endif.
--- /dev/null
+%% Copyright (c) 2011-2013 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc Default performance log handler for webmachine
+
+-module(webmachine_perf_log_handler).
+
+-behaviour(gen_event).
+
+%% gen_event callbacks
+-export([init/1,
+ handle_call/2,
+ handle_event/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-include("webmachine_logger.hrl").
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+-record(state, {hourstamp, filename, handle}).
+
+-define(FILENAME, "perf.log").
+
+%% ===================================================================
+%% gen_event callbacks
+%% ===================================================================
+
+%% @private
%% @private
%% @doc Open (or create) perf.log under BaseDir and remember the hour
%% stamp so writes can trigger hourly rotation.
init([BaseDir]) ->
    %% Register with webmachine_log so it sends periodic refresh calls
    %% that drive log rotation.
    webmachine_log:defer_refresh(?MODULE),
    FileName = filename:join(BaseDir, ?FILENAME),
    {Handle, DateHour} = webmachine_log:log_open(FileName),
    {ok, #state{filename=FileName, handle=Handle, hourstamp=DateHour}}.
+
+%% @private
%% @private
%% @doc get_modules supports gen_event handler introspection; refresh
%% triggers a rotation check; any other request is acknowledged with ok.
handle_call({_Label, MRef, get_modules}, State) ->
    {ok, {MRef, [?MODULE]}, State};
handle_call({refresh, Time}, State) ->
    {ok, ok, webmachine_log:maybe_rotate(?MODULE, Time, State)};
handle_call(_Request, State) ->
    {ok, ok, State}.
+
+%% @private
%% @private
%% @doc Write one formatted performance line per log_access event,
%% rotating the log file first if the hour has rolled over. All other
%% events are ignored.
handle_event({log_access, LogData}, State) ->
    NewState = webmachine_log:maybe_rotate(?MODULE, os:timestamp(), State),
    Msg = format_req(LogData),
    webmachine_log:log_write(NewState#state.handle, Msg),
    {ok, NewState};
handle_event(_Event, State) ->
    {ok, State}.
+
+%% @private
%% @private
%% @doc No out-of-band messages are expected; drain and continue.
handle_info(_Info, State) ->
    {ok, State}.
+
+%% @private
%% @private
%% @doc No cleanup is performed on handler removal.
terminate(_Reason, _State) ->
    ok.
+
+%% @private
%% @private
%% @doc State record is carried forward unchanged across upgrades.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
+%% ===================================================================
+%% Internal functions
+%% ===================================================================
+
%% Format one request's timing data as a performance log line (iolist).
format_req(#wm_log_data{resource_module=Mod,
                        start_time=StartTime,
                        method=Method,
                        peer=Peer,
                        path=Path,
                        version=Version,
                        response_code=ResponseCode,
                        response_length=ResponseLength,
                        end_time=EndTime,
                        finish_time=FinishTime}) ->
    Time = webmachine_log:fmtnow(),
    %% Normalize the status: it may be a bare integer, a
    %% {Code, ReasonPhrase} tuple, or an already-formatted value.
    Status = case ResponseCode of
                 {Code, _ReasonPhrase} when is_integer(Code) ->
                     integer_to_list(Code);
                 _ when is_integer(ResponseCode) ->
                     integer_to_list(ResponseCode);
                 _ ->
                     ResponseCode
             end,
    Length = integer_to_list(ResponseLength),
    %% TTPD: ms spent processing the request (start -> decision end);
    %% TTPS: ms spent sending the response (decision end -> finish).
    TTPD = webmachine_util:now_diff_milliseconds(EndTime, StartTime),
    TTPS = webmachine_util:now_diff_milliseconds(FinishTime, EndTime),
    fmt_plog(Time, Peer, atom_to_list(Method), Path, Version,
             Status, Length, atom_to_list(Mod), integer_to_list(TTPD),
             integer_to_list(TTPS)).
+
%% Render the line as an iolist in the shape:
%%   Ip - Time "Method Path HTTP/Maj.Min" Status Length Mod TTPD TTPS
fmt_plog(Time, Ip, Method, Path, {VM,Vm}, Status, Length, Mod, TTPD, TTPS) ->
    [webmachine_log:fmt_ip(Ip), " - ", [$\s], Time, [$\s, $"], Method, " ", Path,
     " HTTP/", integer_to_list(VM), ".", integer_to_list(Vm), [$",$\s],
     Status, [$\s], Length, " " , Mod, " ", TTPD, " ", TTPS, $\n].
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2012 Basho Technologies
+%% Based on mochiweb_request.erl, which is Copyright 2007 Mochi Media, Inc.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Webmachine HTTP Request Abstraction. The functions in this module
+%% can be invoked using either parameterized module syntax or regular
+%% invocation syntax. Since the Ericsson OTP team is removing the
+%% parameterized module syntax in version R16, we encourage you to write
+%% your applications using regular function syntax.
+%%
+%% To use parameterized module syntax, you create an instance and then
+%% invoke functions on that instance, like this:
+%%
+%% <pre><code>
+%% Req = webmachine_request:new(ReqState),
+%% Result = Req:some_fun(Args),
+%% </code></pre>
+%%
+%% where `ReqState' is an instance of a `#wm_reqstate' record. The runtime
+%% then ensures the `ReqState' variable is implicitly passed to each
+%% function invoked through the `Req' instance.
+%%
+%% To call functions using regular syntax, simply explicitly pass the
+%% `ReqState' variable yourself; note there's no need to call
+%% `webmachine_request:new/1' to perform regular invocations.
+
+-module(webmachine_request).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+
+-export([get_peer/1]). % used in initialization
+-export([call/2]). % internal switching interface, used by wrcall
+
+% actual interface for resource functions
+-export([
+ new/1,
+ trim_state/1,
+ get_reqdata/1,
+ set_reqdata/2,
+ socket/1,
+ method/1,
+ version/1,
+ disp_path/1,
+ path/1,
+ raw_path/1,
+ get_req_header/2,
+ req_headers/1,
+ req_body/2,
+ stream_req_body/2,
+ headers/1,
+ resp_headers/1,
+ out_headers/1,
+ get_out_header/2,
+ has_out_header/2,
+ peer/1,
+ get_header_value/2,
+ add_response_header/3,
+ add_response_headers/2,
+ remove_response_header/2,
+ merge_response_headers/2,
+ append_to_response_body/2,
+ send_response/2,
+ response_code/1,
+ set_response_code/2,
+ set_resp_body/2,
+ response_body/1,
+ has_response_body/1,
+ do_redirect/1,
+ resp_redirect/1,
+ set_metadata/3,
+ get_metadata/2,
+ get_path_info/1,
+ get_path_info/2,
+ load_dispatch_data/7,
+ get_path_tokens/1,
+ get_app_root/1,
+ parse_cookie/1,
+ get_cookie_value/2,
+ parse_qs/1,
+ get_qs_value/2,
+ get_qs_value/3,
+ range/1,
+ log_data/1
+ ]).
+
+-include("webmachine_logger.hrl").
+-include("wm_reqstate.hrl").
+-include("wm_reqdata.hrl").
+
+-define(WMVSN, "1.10.0").
+-define(QUIP, "never breaks eye contact").
+-define(IDLE_TIMEOUT, infinity).
+
%% @doc Wrap a #wm_reqstate{} in the parameterized-module tuple form.
new(#wm_reqstate{}=ReqState) ->
    {?MODULE, ReqState}.
+
%% @doc Return a request whose reqdata carries the placeholder atom
%% 'WMSTATE' in place of the decision-core state, producing a slimmer
%% term. Accepts either the module tuple or a bare #wm_reqstate{}.
trim_state({?MODULE, ReqState}) ->
    TrimData = (ReqState#wm_reqstate.reqdata)#wm_reqdata{wm_state='WMSTATE'},
    webmachine_request:new(ReqState#wm_reqstate{reqdata=TrimData});
trim_state(ReqState) ->
    trim_state({?MODULE, ReqState}).
+
%% @doc Determine the client peer address as a string, caching it in
%% the reqstate so the socket is only consulted once. The socket field
%% may be the atom 'testing' (unit tests), an {ssl, Sock} tuple, or a
%% plain inet socket.
get_peer({?MODULE, ReqState}=Req) ->
    case ReqState#wm_reqstate.peer of
        undefined ->
            PeerName = case ReqState#wm_reqstate.socket of
                testing -> {ok, {{127,0,0,1}, 80}};
                {ssl,SslSocket} -> ssl:peername(SslSocket);
                _ -> inet:peername(ReqState#wm_reqstate.socket)
            end,
            Peer = peer_from_peername(PeerName, Req),
            NewReqState = ReqState#wm_reqstate{peer=Peer},
            {Peer, NewReqState};
        _ ->
            %% Already resolved: reuse the cached value.
            {ReqState#wm_reqstate.peer, ReqState}
    end;
get_peer(ReqState) ->
    get_peer({?MODULE, ReqState}).
+
%% When the TCP peer is a private (RFC 1918: 10/8, 172.16/12,
%% 192.168/16) or loopback address, the request likely arrived through
%% a local proxy, so prefer the x-forwarded-for header; otherwise trust
%% the socket peername. NOTE(review): only IPv4 private ranges are
%% recognized here.
peer_from_peername({ok, {Addr={10, _, _, _}, _Port}}, Req) ->
    x_peername(inet_parse:ntoa(Addr), Req);
peer_from_peername({ok, {Addr={172, Second, _, _}, _Port}}, Req)
  when (Second > 15) andalso (Second < 32) ->
    x_peername(inet_parse:ntoa(Addr), Req);
peer_from_peername({ok, {Addr={192, 168, _, _}, _Port}}, Req) ->
    x_peername(inet_parse:ntoa(Addr), Req);
peer_from_peername({ok, {{127, 0, 0, 1}, _Port}}, Req) ->
    x_peername("127.0.0.1", Req);
peer_from_peername({ok, {Addr, _Port}}, _Req) ->
    inet_parse:ntoa(Addr).
+
%% Pick the last (nearest) hop from the x-forwarded-for header,
%% falling back to Default when the header is absent.
x_peername(Default, Req) ->
    {HeaderVal, _ReqState} = get_header_value("x-forwarded-for", Req),
    case HeaderVal of
        undefined ->
            Default;
        Hosts ->
            string:strip(lists:last(string:tokens(Hosts, ",")))
    end.
+
%% @doc Internal switching interface: dispatch one request-API
%% operation against the request state. Accepts either the
%% parameterized-module tuple {?MODULE, ReqState} or a bare
%% #wm_reqstate{} (handled by the final clause). Read operations
%% return {Value, ReqState}; mutating operations return
%% {ok, NewReqState}.
call(base_uri, {?MODULE, ReqState}) ->
    {wrq:base_uri(ReqState#wm_reqstate.reqdata), ReqState};
call(socket, {?MODULE, ReqState}) -> {ReqState#wm_reqstate.socket,ReqState};
call(get_reqdata, {?MODULE, ReqState}) -> {ReqState#wm_reqstate.reqdata, ReqState};
call({set_reqdata, RD}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=RD}};
call(method, {?MODULE, ReqState}) ->
    {wrq:method(ReqState#wm_reqstate.reqdata), ReqState};
call(version, {?MODULE, ReqState}) ->
    {wrq:version(ReqState#wm_reqstate.reqdata), ReqState};
call(raw_path, {?MODULE, ReqState}) ->
    {wrq:raw_path(ReqState#wm_reqstate.reqdata), ReqState};
call(req_headers, {?MODULE, ReqState}) ->
    {wrq:req_headers(ReqState#wm_reqstate.reqdata), ReqState};
call({req_body, MaxRecvBody}, {?MODULE, ReqState}) ->
    %% Whole-body and streamed access are mutually exclusive; the
    %% bodyfetch field records which mode this request committed to.
    case ReqState#wm_reqstate.bodyfetch of
        stream ->
            {stream_conflict, ReqState};
        standard ->
            {ReqState#wm_reqstate.reqbody, ReqState};
        undefined ->
            RD=(ReqState#wm_reqstate.reqdata)#wm_reqdata{
                 max_recv_body=MaxRecvBody},
            NewReqState=ReqState#wm_reqstate{reqdata=RD},
            %% The received body is cached in the process dictionary so
            %% it is read off the socket at most once.
            NewBody = case get(req_body) of
                          undefined ->
                              NewB = do_recv_body(NewReqState),
                              put(req_body, NewB),
                              NewB;
                          B -> B
                      end,
            NewRD = RD#wm_reqdata{req_body=NewBody},
            {NewBody, NewReqState#wm_reqstate{
                        bodyfetch=standard,reqdata=NewRD,reqbody=NewBody}}
    end;
call({stream_req_body, MaxHunk}, {?MODULE, ReqState}) ->
    case ReqState#wm_reqstate.bodyfetch of
        standard ->
            {stream_conflict, ReqState};
        _ ->
            {recv_stream_body(ReqState, MaxHunk),
             ReqState#wm_reqstate{bodyfetch=stream}}
    end;
call(resp_headers, {?MODULE, ReqState}) ->
    {wrq:resp_headers(ReqState#wm_reqstate.reqdata), ReqState};
call(resp_redirect, {?MODULE, ReqState}) ->
    {wrq:resp_redirect(ReqState#wm_reqstate.reqdata), ReqState};
call({get_resp_header, HdrName}, {?MODULE, ReqState}) ->
    Reply = mochiweb_headers:get_value(HdrName,
                wrq:resp_headers(ReqState#wm_reqstate.reqdata)),
    {Reply, ReqState};
call(get_path_info, {?MODULE, ReqState}) ->
    PropList = orddict:to_list(wrq:path_info(ReqState#wm_reqstate.reqdata)),
    {PropList, ReqState};
call({get_path_info, Key}, {?MODULE, ReqState}) ->
    {wrq:path_info(Key, ReqState#wm_reqstate.reqdata), ReqState};
call(peer, Req) -> get_peer(Req);
call(range, Req) -> get_range(Req);
call(response_code, {?MODULE, ReqState}) ->
    {wrq:response_code(ReqState#wm_reqstate.reqdata), ReqState};
call(app_root, {?MODULE, ReqState}) ->
    {wrq:app_root(ReqState#wm_reqstate.reqdata), ReqState};
call(disp_path, {?MODULE, ReqState}) ->
    {wrq:disp_path(ReqState#wm_reqstate.reqdata), ReqState};
call(path, {?MODULE, ReqState}) ->
    {wrq:path(ReqState#wm_reqstate.reqdata), ReqState};
call({get_req_header, K}, {?MODULE, ReqState}) ->
    {wrq:get_req_header(K, ReqState#wm_reqstate.reqdata), ReqState};
call({set_response_code, Code}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:set_response_code(
                                Code, ReqState#wm_reqstate.reqdata)}};
call({set_resp_header, K, V}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:set_resp_header(
                                K, V, ReqState#wm_reqstate.reqdata)}};
call({set_resp_headers, Hdrs}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:set_resp_headers(
                                Hdrs, ReqState#wm_reqstate.reqdata)}};
call({remove_resp_header, K}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:remove_resp_header(
                                K, ReqState#wm_reqstate.reqdata)}};
call({merge_resp_headers, Hdrs}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:merge_resp_headers(
                                Hdrs, ReqState#wm_reqstate.reqdata)}};
call({append_to_response_body, Data}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:append_to_response_body(
                                Data, ReqState#wm_reqstate.reqdata)}};
call({set_disp_path, P}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:set_disp_path(
                                P, ReqState#wm_reqstate.reqdata)}};
call(do_redirect, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{
           reqdata=wrq:do_redirect(true, ReqState#wm_reqstate.reqdata)}};
call({send_response, Code}, Req) when is_integer(Code) ->
    call({send_response, {Code, undefined}}, Req);
call({send_response, {Code, ReasonPhrase}=CodeAndReason}, Req) when is_integer(Code) ->
    {Reply, NewState} =
        case Code of
            200 ->
                %% 200 gets special handling for Range requests.
                send_ok_response(ReasonPhrase, Req);
            _ ->
                send_response(CodeAndReason, Req)
        end,
    LogData = NewState#wm_reqstate.log_data,
    %% os:timestamp/0 (matching webmachine_log's usage) rather than the
    %% deprecated erlang:now/0; both return {MegaSecs, Secs, MicroSecs}.
    NewLogData = LogData#wm_log_data{finish_time=os:timestamp()},
    {Reply, NewState#wm_reqstate{log_data=NewLogData}};
call(resp_body, {?MODULE, ReqState}) ->
    {wrq:resp_body(ReqState#wm_reqstate.reqdata), ReqState};
call({set_resp_body, Body}, {?MODULE, ReqState}) ->
    {ok, ReqState#wm_reqstate{reqdata=wrq:set_resp_body(Body,
                                ReqState#wm_reqstate.reqdata)}};
call(has_resp_body, {?MODULE, ReqState}) ->
    Reply = case wrq:resp_body(ReqState#wm_reqstate.reqdata) of
                undefined -> false;
                <<>> -> false;
                _ -> true
            end,
    {Reply, ReqState};
call({get_metadata, Key}, {?MODULE, ReqState}) ->
    Reply = case orddict:find(Key, ReqState#wm_reqstate.metadata) of
                {ok, Value} -> Value;
                error -> undefined
            end,
    {Reply, ReqState};
call({set_metadata, Key, Value}, {?MODULE, ReqState}) ->
    NewDict = orddict:store(Key, Value, ReqState#wm_reqstate.metadata),
    {ok, ReqState#wm_reqstate{metadata=NewDict}};
call(path_tokens, {?MODULE, ReqState}) ->
    {wrq:path_tokens(ReqState#wm_reqstate.reqdata), ReqState};
call(req_cookie, {?MODULE, ReqState}) ->
    {wrq:req_cookie(ReqState#wm_reqstate.reqdata), ReqState};
call(req_qs, {?MODULE, ReqState}) ->
    {wrq:req_qs(ReqState#wm_reqstate.reqdata), ReqState};
call({load_dispatch_data, PathProps, HostTokens, Port,
      PathTokens, AppRoot, DispPath}, {?MODULE, ReqState}) ->
    PathInfo = orddict:from_list(PathProps),
    NewState = ReqState#wm_reqstate{reqdata=wrq:load_dispatch_data(
                 PathInfo,HostTokens,Port,PathTokens,AppRoot,
                 DispPath,ReqState#wm_reqstate.reqdata)},
    {ok, NewState};
call(log_data, {?MODULE, ReqState}) -> {ReqState#wm_reqstate.log_data, ReqState};
call(notes, {?MODULE, ReqState}) -> {wrq:get_notes(ReqState#wm_reqstate.reqdata), ReqState};
%% Bare-reqstate convenience clause: normalize and re-dispatch.
call(Arg, #wm_reqstate{}=ReqState) -> call(Arg, {?MODULE, ReqState}).
+
%% @doc Look up a request header; returns {Value | undefined, ReqState}.
get_header_value(K, {?MODULE, ReqState}) ->
    {wrq:get_req_header(K, ReqState#wm_reqstate.reqdata), ReqState};
get_header_value(K, ReqState) ->
    get_header_value(K, {?MODULE, ReqState}).
+
%% @doc Look up a response (outgoing) header; returns
%% {Value | undefined, ReqState}.
get_outheader_value(K, {?MODULE, ReqState}) ->
    {mochiweb_headers:get_value(K,
        wrq:resp_headers(ReqState#wm_reqstate.reqdata)), ReqState};
get_outheader_value(K, ReqState) ->
    get_outheader_value(K, {?MODULE, ReqState}).
+
%% Write Data to the socket. A peer that closed the connection is
%% tolerated (returns ok); any other send failure terminates the
%% request process with exit(normal).
send(Socket, Data) ->
    case mochiweb_socket:send(Socket, iolist_to_binary(Data)) of
        ok -> ok;
        {error,closed} -> ok;
        _ -> exit(normal)
    end.
+
%% Send a {Data, NextFun | done} body stream using chunked transfer
%% encoding; returns the total number of body bytes sent.
send_stream_body(Socket, X) -> send_stream_body(Socket, X, 0).
send_stream_body(Socket, {<<>>, done}, SoFar) ->
    %% Zero-length chunk terminates the chunked stream.
    send_chunk(Socket, <<>>),
    SoFar;
send_stream_body(Socket, {Data, done}, SoFar) ->
    Size = send_chunk(Socket, Data),
    send_chunk(Socket, <<>>),
    Size + SoFar;
send_stream_body(Socket, {<<>>, Next}, SoFar) ->
    %% Skip empty hunks rather than emitting a premature terminator.
    send_stream_body(Socket, Next(), SoFar);
send_stream_body(Socket, {[], Next}, SoFar) ->
    send_stream_body(Socket, Next(), SoFar);
send_stream_body(Socket, {Data, Next}, SoFar) ->
    Size = send_chunk(Socket, Data),
    send_stream_body(Socket, Next(), Size + SoFar).
+
%% Send a body stream without chunked framing (used when the total
%% length is already known and sent via Content-Length).
send_stream_body_no_chunk(Socket, {Data, done}) ->
    send(Socket, Data);
send_stream_body_no_chunk(Socket, {Data, Next}) ->
    send(Socket, Data),
    send_stream_body_no_chunk(Socket, Next()).
+
%% Drive a writer-style body: hand BodyFun a writer closure that
%% encodes, charsets, and chunks each piece. The byte count is kept in
%% the process dictionary because the closure cannot rebind an outer
%% accumulator. Returns the total bytes written.
send_writer_body(Socket, {Encoder, Charsetter, BodyFun}) ->
    put(bytes_written, 0),
    Writer = fun(Data) ->
        Size = send_chunk(Socket, Encoder(Charsetter(Data))),
        put(bytes_written, get(bytes_written) + Size),
        Size
    end,
    BodyFun(Writer),
    %% Terminating zero-length chunk.
    send_chunk(Socket, <<>>),
    get(bytes_written).
+
%% Send one chunked-transfer-encoding chunk (hex size line, data,
%% trailing CRLF) and return the payload size in bytes.
send_chunk(Socket, Data) ->
    Size = iolist_size(Data),
    send(Socket, [mochihex:to_hex(Size), <<"\r\n">>, Data, <<"\r\n">>]),
    Size.
+
%% Send a 200 response, honoring a Range request header by downgrading
%% to a 206 partial response when the requested ranges are valid.
send_ok_response(ReasonPhrase, {?MODULE, ReqState}=Req) ->
    RD0 = ReqState#wm_reqstate.reqdata,
    {Range, State} = get_range(Req),
    case Range of
        X when X =:= undefined; X =:= fail; X =:= ignore ->
            send_response({200, ReasonPhrase}, Req);
        Ranges ->
            {PartList, Size} = range_parts(RD0, Ranges),
            case PartList of
                [] -> %% no valid ranges
                    %% could be 416, for now we'll just return 200
                    send_response({200, ReasonPhrase}, Req);
                PartList ->
                    %% Valid ranges: build a (possibly multipart) range
                    %% body and reply 206 Partial Content.
                    {RangeHeaders, RangeBody} =
                        parts_to_body(PartList, Size, Req),
                    RespHdrsRD = wrq:set_resp_headers(
                                   [{"Accept-Ranges", "bytes"} | RangeHeaders], RD0),
                    RespBodyRD = wrq:set_resp_body(
                                   RangeBody, RespHdrsRD),
                    NewState = State#wm_reqstate{reqdata=RespBodyRD},
                    send_response({206, ReasonPhrase}, NewState, Req)
            end
    end.
+
+%% @doc Write the complete HTTP response (status line, headers, body) to
+%% the socket. Streamed bodies without a known length use chunked
+%% encoding. Returns {ok, NewReqState} with the response code and final
+%% body length recorded in the request's log data.
+send_response(Code, #wm_reqstate{}=ReqState) -> send_response(Code,ReqState,{?MODULE,ReqState});
+send_response(Code, {?MODULE, ReqState}=Req) -> send_response(Code,ReqState,Req).
+send_response(Code, PassedState=#wm_reqstate{reqdata=RD}, _Req) ->
+ Body0 = wrq:resp_body(RD),
+ %% Normalize the several resp_body forms into {Tag, Body} plus a
+ %% length, where 'chunked' means the length is unknown up front.
+ {Body,Length} = case Body0 of
+ {stream, StreamBody} -> {{stream, StreamBody}, chunked};
+ {known_length_stream, Size, StreamBody} -> {{known_length_stream, StreamBody}, Size};
+ {stream, Size, Fun} -> {{stream, Fun(0, Size-1)}, chunked};
+ {writer, WriteBody} -> {{writer, WriteBody}, chunked};
+ _ -> {Body0, iolist_size([Body0])}
+ end,
+ send(PassedState#wm_reqstate.socket,
+ [make_version(wrq:version(RD)),
+ make_code(Code), <<"\r\n">> |
+ make_headers(Code, Length, RD)]),
+ %% HEAD advertises the length but never sends the body itself.
+ FinalLength = case wrq:method(RD) of
+ 'HEAD' -> Length;
+ _ ->
+ case Body of
+ {stream, Body2} ->
+ send_stream_body(PassedState#wm_reqstate.socket, Body2);
+ {known_length_stream, Body2} ->
+ send_stream_body_no_chunk(PassedState#wm_reqstate.socket, Body2),
+ Length;
+ {writer, Body2} ->
+ send_writer_body(PassedState#wm_reqstate.socket, Body2);
+ _ ->
+ send(PassedState#wm_reqstate.socket, Body),
+ Length
+ end
+ end,
+ InitLogData = PassedState#wm_reqstate.log_data,
+ FinalLogData = InitLogData#wm_log_data{response_code=Code,
+ response_length=FinalLength},
+ {ok, PassedState#wm_reqstate{reqdata=wrq:set_response_code(Code, RD),
+ log_data=FinalLogData}}.
+
+%% @doc Infer the request body length from the Transfer-Encoding and
+%% Content-Length headers. Returns 'chunked', an integer byte count,
+%% 'undefined' when neither header is present, or
+%% {unknown_transfer_encoding, Hdr} for an unrecognized encoding.
+body_length(Req) ->
+    case get_header_value("transfer-encoding", Req) of
+        {"chunked", _} ->
+            chunked;
+        {undefined, _} ->
+            case get_header_value("content-length", Req) of
+                {undefined, _} -> undefined;
+                {Length, _} -> list_to_integer(Length)
+            end;
+        Unknown ->
+            {unknown_transfer_encoding, Unknown}
+    end.
+
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will only receive up to the default max-body length
+do_recv_body(PassedState=#wm_reqstate{reqdata=RD}) ->
+ %% max_recv_hunk caps each socket read; max_recv_body caps the total.
+ MRH = RD#wm_reqdata.max_recv_hunk,
+ MRB = RD#wm_reqdata.max_recv_body,
+ read_whole_stream(recv_stream_body(PassedState, MRH), [], MRB, 0).
+
+%% Accumulate an entire streamed request body ({Hunk, Next} pairs) into
+%% a single binary. Returns {error, req_body_too_large} as soon as the
+%% accumulated size would exceed MaxRecvBody.
+%% (The size check previously appeared twice: once as a clause guard and
+%% again inside the clause body, where it was dead code; the two are
+%% merged into one check here.)
+read_whole_stream({Hunk, Next}, Acc0, MaxRecvBody, SizeAcc0) ->
+    SizeAcc = SizeAcc0 + byte_size(Hunk),
+    if SizeAcc > MaxRecvBody ->
+            {error, req_body_too_large};
+       true ->
+            Acc = [Hunk | Acc0],
+            case Next of
+                done -> iolist_to_binary(lists:reverse(Acc));
+                _ -> read_whole_stream(Next(), Acc, MaxRecvBody, SizeAcc)
+            end
+    end.
+
+%% Begin streaming the request body: acknowledge an "Expect:
+%% 100-continue" header if present, then return the first {Hunk, Next}
+%% pair ({<<>>, done} for an absent or empty body). Marks the process
+%% dictionary so mochiweb knows a body read was started.
+recv_stream_body(PassedState=#wm_reqstate{reqdata=RD}, MaxHunkSize) ->
+ put(mochiweb_request_recv, true),
+ case get_header_value("expect", PassedState) of
+ {"100-continue", _} ->
+ send(PassedState#wm_reqstate.socket,
+ [make_version(wrq:version(RD)),
+ make_code(100), <<"\r\n\r\n">>]);
+ _Else ->
+ ok
+ end,
+ case body_length(PassedState) of
+ {unknown_transfer_encoding, X} -> exit({unknown_transfer_encoding, X});
+ undefined -> {<<>>, done};
+ 0 -> {<<>>, done};
+ chunked -> recv_chunked_body(PassedState#wm_reqstate.socket,
+ MaxHunkSize);
+ Length -> recv_unchunked_body(PassedState#wm_reqstate.socket,
+ MaxHunkSize, Length)
+ end.
+
+%% Read a Content-Length delimited body from the socket in hunks of at
+%% most MaxHunk bytes, yielding {Data, done | NextFun} pairs.
+recv_unchunked_body(Socket, MaxHunk, DataLeft) ->
+ case MaxHunk >= DataLeft of
+ true ->
+ {ok,Data1} = mochiweb_socket:recv(Socket,DataLeft,?IDLE_TIMEOUT),
+ {Data1, done};
+ false ->
+ {ok,Data2} = mochiweb_socket:recv(Socket,MaxHunk,?IDLE_TIMEOUT),
+ {Data2,
+ fun() -> recv_unchunked_body(Socket, MaxHunk, DataLeft-MaxHunk)
+ end}
+ end.
+
+%% Read a chunked-transfer-encoded body, yielding {Data, done | NextFun}
+%% hunks of at most MaxHunk bytes. A chunk length of 0 ends the body.
+recv_chunked_body(Socket, MaxHunk) ->
+ case read_chunk_length(Socket, false) of
+ 0 -> {<<>>, done};
+ ChunkLength -> recv_chunked_body(Socket,MaxHunk,ChunkLength)
+ end.
+%% Drain the remainder of the current chunk, then continue with the next.
+recv_chunked_body(Socket, MaxHunk, LeftInChunk) ->
+ case MaxHunk >= LeftInChunk of
+ true ->
+ {ok,Data1} = mochiweb_socket:recv(Socket,LeftInChunk,?IDLE_TIMEOUT),
+ {Data1,
+ fun() -> recv_chunked_body(Socket, MaxHunk)
+ end};
+ false ->
+ {ok,Data2} = mochiweb_socket:recv(Socket,MaxHunk,?IDLE_TIMEOUT),
+ {Data2,
+ fun() -> recv_chunked_body(Socket, MaxHunk, LeftInChunk-MaxHunk)
+ end}
+ end.
+
+%% Read one chunk-size line ("<hex-size>[;extensions]\r\n") from the
+%% socket. Temporarily switches the socket to line mode, then back to
+%% raw. MaybeLastChunk=true allows a bare blank line to stand in for the
+%% final "0" chunk header of a badly formed body.
+read_chunk_length(Socket, MaybeLastChunk) ->
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, Header} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ %% Keep only the leading hex digits: stop at CR, LF,
+ %% whitespace, or ';' (chunk extensions).
+ %% NOTE(review): the bare '$' ending the next line reads as
+ %% a whitespace character literal - confirm it is '$ '
+ %% (dollar-space) and has not lost a trailing space.
+ Splitter = fun (C) ->
+ C =/= $\r andalso C =/= $\n andalso C =/= $
+ andalso C =/= 59 % semicolon
+ end,
+ {Hex, _Rest} = lists:splitwith(Splitter, binary_to_list(Header)),
+ case Hex of
+ [] ->
+ %% skip the \r\n at the end of a chunk, or
+ %% allow [badly formed] last chunk header to be
+ %% empty instead of '0' explicitly
+ if MaybeLastChunk -> 0;
+ true -> read_chunk_length(Socket, true)
+ end;
+ _ ->
+ erlang:list_to_integer(Hex, 16)
+ end;
+ _ ->
+ exit(normal)
+ end.
+
+%% Determine the effective byte-range request. Returns {RangeSpec,
+%% NewReqState} where RangeSpec is 'ignore' (resource opted out of range
+%% handling), 'undefined' (no Range header), or the parsed range list;
+%% the parsed value is cached in the reqstate's range field.
+get_range({?MODULE, #wm_reqstate{reqdata = RD}=ReqState}=Req) ->
+ case RD#wm_reqdata.resp_range of
+ ignore_request ->
+ {ignore, ReqState#wm_reqstate{range=undefined}};
+ follow_request ->
+ case get_header_value("range", Req) of
+ {undefined, _} ->
+ {undefined, ReqState#wm_reqstate{range=undefined}};
+ {RawRange, _} ->
+ Range = mochiweb_http:parse_range_request(RawRange),
+ {Range, ReqState#wm_reqstate{range=Range}}
+ end
+ end.
+
+%% Slice the response body according to the requested byte ranges.
+%% Returns {[{Start, End, PartBody}], TotalSize}; invalid ranges are
+%% dropped. Each clause handles one resp_body representation.
+%%
+%% File-backed body: read only the requested regions via pread.
+range_parts(_RD=#wm_reqdata{resp_body={file, IoDevice}}, Ranges) ->
+ Size = mochiweb_io:iodevice_size(IoDevice),
+ F = fun (Spec, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, Size) of
+ invalid_range ->
+ Acc;
+ V ->
+ [V | Acc]
+ end
+ end,
+ LocNums = lists:foldr(F, [], Ranges),
+ {ok, Data} = file:pread(IoDevice, LocNums),
+ Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
+ {Skip, Skip + Length - 1, PartialBody}
+ end,
+ LocNums, Data),
+ {Bodies, Size};
+
+range_parts(RD=#wm_reqdata{resp_body={stream, {Hunk,Next}}}, Ranges) ->
+ % for now, streamed bodies are read in full for range requests
+ MRB = RD#wm_reqdata.max_recv_body,
+ range_parts(read_whole_stream({Hunk,Next}, [], MRB, 0), Ranges);
+
+%% Known-length stream: emit one sub-stream descriptor per range.
+range_parts(_RD=#wm_reqdata{resp_body={known_length_stream, Size, StreamBody}},
+ Ranges) ->
+ SkipLengths = [ mochiweb_http:range_skip_length(R, Size) || R <- Ranges],
+ {[ {Skip, Skip+Length-1, {known_length_stream, Length, StreamBody}} ||
+ {Skip, Length} <- SkipLengths ],
+ Size};
+
+%% Seekable stream fun: hand back the fun itself for each range.
+range_parts(_RD=#wm_reqdata{resp_body={stream, Size, StreamFun}}, Ranges) ->
+ SkipLengths = [ mochiweb_http:range_skip_length(R, Size) || R <- Ranges],
+ {[ {Skip, Skip+Length-1, StreamFun} || {Skip, Length} <- SkipLengths ],
+ Size};
+
+range_parts(#wm_reqdata{resp_body=Body}, Ranges) when is_binary(Body); is_list(Body) ->
+ range_parts(Body, Ranges);
+
+%% In-memory body: slice the flattened binary directly.
+range_parts(Body0, Ranges) when is_binary(Body0); is_list(Body0) ->
+ Body = iolist_to_binary(Body0),
+ Size = size(Body),
+ F = fun(Spec, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, Size) of
+ invalid_range ->
+ Acc;
+ {Skip, Length} ->
+ <<_:Skip/binary,
+ PartialBody:Length/binary,
+ _/binary>> = Body,
+ [{Skip, Skip + Length - 1, PartialBody} | Acc]
+ end
+ end,
+ {lists:foldr(F, [], Ranges), Size}.
+
+%% Build the extra headers and body for a 206 Partial Content response
+%% from the pre-sliced parts produced by range_parts/2.
+parts_to_body([{Start, End, Body0}], Size, Req) ->
+ %% return body for a range response with a single body
+ ContentType =
+ case get_outheader_value("content-type", Req) of
+ {undefined, _} ->
+ "text/html";
+ {CT, _} ->
+ CT
+ end,
+ HeaderList = [{"Content-Type", ContentType},
+ {"Content-Range",
+ ["bytes ",
+ mochiweb_util:make_io(Start), "-",
+ mochiweb_util:make_io(End),
+ "/", mochiweb_util:make_io(Size)]}],
+ %% A fun-valued part is turned into a known-length stream on demand.
+ Body = case Body0 of
+ _ when is_function(Body0) ->
+ {known_length_stream, End - Start + 1, Body0(Start, End)};
+ {known_length_stream, ContentSize, StreamBody} ->
+ {known_length_stream, ContentSize, StreamBody};
+ _ ->
+ Body0
+ end,
+ {HeaderList, Body};
+parts_to_body(BodyList, Size, Req) when is_list(BodyList) ->
+ %% return
+ %% header Content-Type: multipart/byteranges; boundary=441934886133bdee4
+ %% and multipart body
+ ContentType =
+ case get_outheader_value("content-type", Req) of
+ {undefined, _} ->
+ "text/html";
+ {CT, _} ->
+ CT
+ end,
+ Boundary = mochihex:to_hex(mochiweb_util:rand_bytes(8)),
+ HeaderList = [{"Content-Type",
+ ["multipart/byteranges; ",
+ "boundary=", Boundary]}],
+ %% Fun-backed parts are streamed lazily; in-memory parts are
+ %% rendered eagerly into an iolist.
+ MultiPartBody = case hd(BodyList) of
+ {_, _, Fun} when is_function(Fun) ->
+ stream_multipart_body(BodyList, ContentType,
+ Boundary, Size);
+ _ ->
+ multipart_body(BodyList, ContentType,
+ Boundary, Size)
+ end,
+ {HeaderList, MultiPartBody}.
+
+%% Render the parts of a multipart/byteranges body as an iolist: each
+%% part gets its boundary plus preamble, and the terminating boundary
+%% closes the whole body.
+multipart_body(Parts, ContentType, Boundary, Size) ->
+    case Parts of
+        [] ->
+            end_boundary(Boundary);
+        [{Start, End, Body} | Rest] ->
+            [part_preamble(Boundary, ContentType, Start, End, Size),
+             Body, <<"\r\n">>
+             | multipart_body(Rest, ContentType, Boundary, Size)]
+    end.
+
+%% Opening boundary line for one part of a multipart body.
+boundary(B) -> [<<"--">>, B, <<"\r\n">>].
+%% Terminating boundary line that closes the whole multipart body.
+end_boundary(B) -> [<<"--">>, B, <<"--\r\n">>].
+
+%% Per-part header block: boundary, Content-Type and Content-Range,
+%% followed by the blank line separating headers from the part body.
+part_preamble(Boundary, CType, Start, End, Size) ->
+ [boundary(Boundary),
+ <<"Content-Type: ">>, CType, <<"\r\n">>,
+ <<"Content-Range: bytes ">>,
+ mochiweb_util:make_io(Start), <<"-">>, mochiweb_util:make_io(End),
+ <<"/">>, mochiweb_util:make_io(Size),
+ <<"\r\n\r\n">>].
+
+%% Stream a multipart/byteranges body: the parts are produced lazily as
+%% a {Data, NextFun} stream suitable for send_stream_body/2.
+stream_multipart_body(BodyList, ContentType, Boundary, Size) ->
+ Helper = stream_multipart_body_helper(
+ BodyList, ContentType, Boundary, Size),
+ %% executing Helper() here is an optimization;
+ %% it's just as valid to say {<<>>, Helper}
+ {stream, Helper()}.
+
+%% Return a fun that yields the next part's preamble and chains into
+%% that part's data stream; an empty part list yields the terminating
+%% boundary and ends the stream.
+stream_multipart_body_helper([], _CType, Boundary, _Size) ->
+ fun() -> {end_boundary(Boundary), done} end;
+stream_multipart_body_helper([{Start, End, Fun}|Rest],
+ CType, Boundary, Size) ->
+ fun() ->
+ {part_preamble(Boundary, CType, Start, End, Size),
+ stream_multipart_part_helper(
+ fun() -> Fun(Start, End) end,
+ Rest, CType, Boundary, Size)}
+ end.
+
+%% Pump one part's {Data, Next} stream; once it finishes, append the
+%% part-terminating CRLF and continue with the remaining parts.
+stream_multipart_part_helper(Fun, Rest, CType, Boundary, Size) ->
+ fun() ->
+ case Fun() of
+ {Data, done} ->
+ %% when this part is done, start the next part
+ {[Data, <<"\r\n">>],
+ stream_multipart_body_helper(
+ Rest, CType, Boundary, Size)};
+ {Data, Next} ->
+ %% this subpart has more data coming
+ {Data, stream_multipart_part_helper(
+ Next, Rest, CType, Boundary, Size)}
+ end
+ end.
+
+%% Build the status portion of the HTTP status line as an iolist.
+%% Accepts a bare integer code, a {Code, ReasonPhrase} pair (where
+%% 'undefined' means "look the standard phrase up"), or ready-made
+%% iodata which is passed through untouched.
+make_code(Code) when is_integer(Code) ->
+    make_code({Code, httpd_util:reason_phrase(Code)});
+make_code({Code, undefined}) when is_integer(Code) ->
+    make_code({Code, httpd_util:reason_phrase(Code)});
+make_code({Code, ReasonPhrase}) when is_integer(Code) ->
+    [integer_to_list(Code), [" ", ReasonPhrase]];
+make_code(IoData) when is_list(IoData); is_binary(IoData) ->
+    IoData.
+
+%% Version prefix for the HTTP status line. Anything other than an
+%% explicit {1, 0} is answered as HTTP/1.1.
+make_version(Version) ->
+    case Version of
+        {1, 0} -> <<"HTTP/1.0 ">>;
+        _ -> <<"HTTP/1.1 ">>
+    end.
+
+%% Render the response headers as an iolist terminated by the blank line
+%% that ends the header block. Adds Transfer-Encoding or Content-Length
+%% (except for 304, which carries no body), plus Server and - when the
+%% resource did not set one - Date.
+make_headers({Code, _ReasonPhrase}, Length, RD) ->
+ make_headers(Code, Length, RD);
+make_headers(Code, Length, RD) when is_integer(Code) ->
+ Hdrs0 = case Code of
+ 304 ->
+ mochiweb_headers:make(wrq:resp_headers(RD));
+ _ ->
+ case Length of
+ chunked ->
+ mochiweb_headers:enter(
+ "Transfer-Encoding","chunked",
+ mochiweb_headers:make(wrq:resp_headers(RD)));
+ _ ->
+ mochiweb_headers:enter(
+ "Content-Length",integer_to_list(Length),
+ mochiweb_headers:make(wrq:resp_headers(RD)))
+ end
+ end,
+ %% ServerHeader is bound inside the case (an exported binding):
+ %% either the configured server_name or the default banner.
+ case application:get_env(webmachine, server_name) of
+ undefined -> ServerHeader = "MochiWeb/1.1 WebMachine/" ++ ?WMVSN ++ " (" ++ ?QUIP ++ ")";
+ {ok, ServerHeader} when is_list(ServerHeader) -> ok
+ end,
+ WithSrv = mochiweb_headers:enter("Server", ServerHeader, Hdrs0),
+ Hdrs = case mochiweb_headers:get_value("date", WithSrv) of
+ undefined ->
+ mochiweb_headers:enter("Date", httpd_util:rfc1123_date(), WithSrv);
+ _ ->
+ WithSrv
+ end,
+ F = fun({K, V}, Acc) ->
+ [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
+ end,
+ lists:foldl(F, [<<"\r\n">>], mochiweb_headers:to_list(Hdrs)).
+
+%% ------------------------------------------------------------------
+%% Accessor/delegation API. Each function accepts either a bare
+%% #wm_reqstate{} record or the {?MODULE, ReqState} wrapped form and
+%% dispatches through call/2. Several carry legacy aliases (headers/1,
+%% out_headers/1, response_body/1, ...) kept for compatibility.
+%% ------------------------------------------------------------------
+get_reqdata(#wm_reqstate{}=ReqState) -> call(get_reqdata, {?MODULE, ReqState});
+get_reqdata(Req) -> call(get_reqdata, Req).
+
+set_reqdata(RD, #wm_reqstate{}=ReqState) -> call({set_reqdata, RD}, {?MODULE, ReqState});
+set_reqdata(RD, Req) -> call({set_reqdata, RD}, Req).
+
+socket(#wm_reqstate{}=ReqState) -> call(socket, {?MODULE, ReqState});
+socket(Req) -> call(socket, Req).
+
+method(#wm_reqstate{}=ReqState) -> call(method, {?MODULE, ReqState});
+method(Req) -> call(method, Req).
+
+version(#wm_reqstate{}=ReqState) -> call(version, {?MODULE, ReqState});
+version(Req) -> call(version, Req).
+
+disp_path(#wm_reqstate{}=ReqState) -> call(disp_path, {?MODULE, ReqState});
+disp_path(Req) -> call(disp_path, Req).
+
+path(#wm_reqstate{}=ReqState) -> call(path, {?MODULE, ReqState});
+path(Req) -> call(path, Req).
+
+raw_path(#wm_reqstate{}=ReqState) -> call(raw_path, {?MODULE, ReqState});
+raw_path(Req) -> call(raw_path, Req).
+
+req_headers(#wm_reqstate{}=ReqState) -> call(req_headers, {?MODULE, ReqState});
+req_headers(Req) -> call(req_headers, Req).
+headers(Req) -> req_headers(Req).
+
+req_body(MaxRevBody, #wm_reqstate{}=ReqState) -> call({req_body,MaxRevBody}, {?MODULE, ReqState});
+req_body(MaxRevBody, Req) -> call({req_body,MaxRevBody}, Req).
+stream_req_body(MaxHunk, #wm_reqstate{}=ReqState) ->
+ call({stream_req_body, MaxHunk}, {?MODULE, ReqState});
+stream_req_body(MaxHunk, Req) -> call({stream_req_body, MaxHunk}, Req).
+
+resp_headers(#wm_reqstate{}=ReqState) -> call(resp_headers, {?MODULE, ReqState});
+resp_headers(Req) -> call(resp_headers, Req).
+out_headers(Req) -> resp_headers(Req).
+
+get_resp_header(HeaderName, Req) ->
+ call({get_resp_header, HeaderName}, Req).
+get_out_header(HeaderName, #wm_reqstate{}=ReqState) ->
+ get_resp_header(HeaderName, {?MODULE, ReqState});
+get_out_header(HeaderName, Req) -> get_resp_header(HeaderName, Req).
+
+%% True iff the named response header has been set.
+has_resp_header(HeaderName, Req) ->
+ case get_out_header(HeaderName, Req) of
+ {undefined, _} -> false;
+ {_, _} -> true
+ end.
+has_out_header(HeaderName, #wm_reqstate{}=ReqState) ->
+ has_resp_header(HeaderName, {?MODULE, ReqState});
+has_out_header(HeaderName, Req) -> has_resp_header(HeaderName, Req).
+
+has_resp_body(#wm_reqstate{}=ReqState) -> call(has_resp_body, {?MODULE, ReqState});
+has_resp_body(Req) -> call(has_resp_body, Req).
+has_response_body(Req) -> has_resp_body(Req).
+
+response_code(#wm_reqstate{}=ReqState) -> call(response_code, {?MODULE, ReqState});
+response_code(Req) -> call(response_code, Req).
+set_response_code(Code, {?MODULE, ReqState}=Req) ->
+ call({ReqState, set_response_code, Code}, Req);
+set_response_code(Code, ReqState) ->
+ set_response_code(Code, {?MODULE, ReqState}).
+
+peer(#wm_reqstate{}=ReqState) -> call(peer, {?MODULE, ReqState});
+peer(Req) -> call(peer, Req).
+
+range(#wm_reqstate{}=ReqState) -> call(range, {?MODULE, ReqState});
+range(Req) -> call(range, Req).
+
+req_cookie(#wm_reqstate{}=ReqState) -> call(req_cookie, {?MODULE, ReqState});
+req_cookie(Req) -> call(req_cookie, Req).
+parse_cookie(Req) -> req_cookie(Req).
+%% Look up one cookie value; returns {Value | undefined, NewReqState}.
+get_cookie_value(Key, #wm_reqstate{}=ReqState) -> get_cookie_value(Key, {?MODULE, ReqState});
+get_cookie_value(Key, Req) ->
+ {ReqCookie, NewReqState} = req_cookie(Req),
+ case lists:keyfind(Key, 1, ReqCookie) of
+ false -> {undefined, NewReqState};
+ {Key, Value} -> {Value, NewReqState}
+ end.
+
+req_qs(#wm_reqstate{}=ReqState) -> call(req_qs, {?MODULE, ReqState});
+req_qs(Req) -> call(req_qs, Req).
+parse_qs(Req) -> req_qs(Req).
+%% Look up one query-string value; returns {Value | undefined, NewReqState}.
+get_qs_value(Key, #wm_reqstate{}=ReqState) -> get_qs_value(Key, {?MODULE, ReqState});
+get_qs_value(Key, Req) ->
+ {ReqQS, NewReqState} = req_qs(Req),
+ case lists:keyfind(Key, 1, ReqQS) of
+ false -> {undefined, NewReqState};
+ {Key, Value} -> {Value, NewReqState}
+ end.
+%% As get_qs_value/2 but with a caller-supplied default.
+get_qs_value(Key, Default, #wm_reqstate{}=ReqState) ->
+ get_qs_value(Key, Default, {?MODULE, ReqState});
+get_qs_value(Key, Default, Req) ->
+ {ReqQS, NewReqState} = req_qs(Req),
+ case lists:keyfind(Key, 1, ReqQS) of
+ false -> {Default, NewReqState};
+ {Key, Value} -> {Value, NewReqState}
+ end.
+set_resp_body(Body, #wm_reqstate{}=ReqState) -> call({set_resp_body, Body}, {?MODULE, ReqState});
+set_resp_body(Body, Req) -> call({set_resp_body, Body}, Req).
+resp_body(#wm_reqstate{}=ReqState) -> call(resp_body, {?MODULE, ReqState});
+resp_body(Req) -> call(resp_body, Req).
+response_body(Req) -> resp_body(Req).
+
+get_req_header(K, #wm_reqstate{}=ReqState) -> call({get_req_header, K}, {?MODULE, ReqState});
+get_req_header(K, Req) -> call({get_req_header, K}, Req).
+
+set_resp_header(K, V, Req) -> call({set_resp_header, K, V}, Req).
+add_response_header(K, V, #wm_reqstate{}=ReqState) -> set_resp_header(K, V, {?MODULE, ReqState});
+add_response_header(K, V, Req) -> set_resp_header(K, V, Req).
+
+set_resp_headers(Hdrs, Req) -> call({set_resp_headers, Hdrs}, Req).
+add_response_headers(Hdrs, #wm_reqstate{}=ReqState) -> set_resp_headers(Hdrs, {?MODULE, ReqState});
+add_response_headers(Hdrs, Req) -> set_resp_headers(Hdrs, Req).
+
+remove_resp_header(K, Req) -> call({remove_resp_header, K}, Req).
+remove_response_header(K, #wm_reqstate{}=ReqState) -> remove_resp_header(K, {?MODULE, ReqState});
+remove_response_header(K, Req) -> remove_resp_header(K, Req).
+
+merge_resp_headers(Hdrs, Req) -> call({merge_resp_headers, Hdrs}, Req).
+merge_response_headers(Hdrs, #wm_reqstate{}=ReqState) ->
+ merge_resp_headers(Hdrs, {?MODULE, ReqState});
+merge_response_headers(Hdrs, Req) -> merge_resp_headers(Hdrs, Req).
+
+append_to_response_body(Data, #wm_reqstate{}=ReqState) ->
+ call({append_to_response_body, Data}, {?MODULE, ReqState});
+append_to_response_body(Data, Req) ->
+ call({append_to_response_body, Data}, Req).
+
+do_redirect(#wm_reqstate{}=ReqState) -> call(do_redirect, {?MODULE, ReqState});
+do_redirect(Req) -> call(do_redirect, Req).
+
+resp_redirect(#wm_reqstate{}=ReqState) -> call(resp_redirect, {?MODULE, ReqState});
+resp_redirect(Req) -> call(resp_redirect, Req).
+
+get_metadata(Key, #wm_reqstate{}=ReqState) -> call({get_metadata, Key}, {?MODULE, ReqState});
+get_metadata(Key, Req) -> call({get_metadata, Key}, Req).
+
+set_metadata(Key, Value, #wm_reqstate{}=ReqState) ->
+ call({set_metadata, Key, Value}, {?MODULE, ReqState});
+set_metadata(Key, Value, Req) -> call({set_metadata, Key, Value}, Req).
+
+get_path_info(#wm_reqstate{}=ReqState) -> call(get_path_info, {?MODULE, ReqState});
+get_path_info(Req) -> call(get_path_info, Req).
+
+get_path_info(Key, #wm_reqstate{}=ReqState) -> call({get_path_info, Key}, {?MODULE, ReqState});
+get_path_info(Key, Req) -> call({get_path_info, Key}, Req).
+
+path_tokens(#wm_reqstate{}=ReqState) -> call(path_tokens, {?MODULE, ReqState});
+path_tokens(Req) -> call(path_tokens, Req).
+get_path_tokens(Req) -> path_tokens(Req).
+
+app_root(#wm_reqstate{}=ReqState) -> call(app_root, {?MODULE, ReqState});
+app_root(Req) -> call(app_root, Req).
+get_app_root(Req) -> app_root(Req).
+
+%% Install the results of dispatch-table matching into the reqstate.
+load_dispatch_data(Bindings, HostTokens, Port, PathTokens,
+ AppRoot, DispPath, #wm_reqstate{}=ReqState) ->
+ call({load_dispatch_data, Bindings, HostTokens, Port,
+ PathTokens, AppRoot, DispPath}, {?MODULE, ReqState});
+load_dispatch_data(Bindings, HostTokens, Port, PathTokens,
+ AppRoot, DispPath, Req) ->
+ call({load_dispatch_data, Bindings, HostTokens, Port,
+ PathTokens, AppRoot, DispPath}, Req).
+
+log_data(#wm_reqstate{}=ReqState) -> call(log_data, {?MODULE, ReqState});
+log_data(Req) -> call(log_data, Req).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Round-trip: reqdata stored via set_reqdata/2 is returned by
+%% get_reqdata/1.
+reqdata_test() ->
+ ReqData = #wm_reqdata{req_headers = mochiweb_headers:make([])},
+ {ok, ReqState} = set_reqdata(ReqData, #wm_reqstate{}),
+ ?assertEqual(ReqData, element(1, get_reqdata(ReqState))).
+
+%% Request-header lookup works through both the get_header_value/2 and
+%% get_req_header/2 entry points.
+header_test() ->
+ HdrName = "Accept",
+ HdrValue = "application/json",
+ ReqData = #wm_reqdata{req_headers = mochiweb_headers:make([{HdrName, HdrValue}])},
+ {ok, ReqState} = set_reqdata(ReqData, #wm_reqstate{}),
+ ?assertEqual({HdrValue, ReqState}, get_header_value(HdrName, ReqState)),
+ ?assertEqual({HdrValue, ReqState}, get_req_header(HdrName, ReqState)).
+
+%% Metadata set via set_metadata/3 is readable via get_metadata/2.
+metadata_test() ->
+ Key = "webmachine",
+ Value = "eunit",
+ {ok, ReqState} = set_metadata(Key, Value, #wm_reqstate{metadata=orddict:new()}),
+ ?assertEqual({Value, ReqState}, get_metadata(Key, ReqState)).
+
+%% Exercises socket/1 and get_peer/1 against a real loopback TCP
+%% connection; the spawned helper process owns the listening socket and
+%% waits for 'stop' (or a 2s timeout) before closing it.
+peer_test() ->
+ Self = self(),
+ Pid = spawn_link(fun() ->
+ {ok, LS} = gen_tcp:listen(0, [binary, {active, false}]),
+ {ok, {_, Port}} = inet:sockname(LS),
+ Self ! {port, Port},
+ {ok, S} = gen_tcp:accept(LS),
+ receive
+ stop ->
+ ok
+ after 2000 ->
+ ok
+ end,
+ gen_tcp:close(S),
+ gen_tcp:close(LS)
+ end),
+ receive
+ {port, Port} ->
+ {ok, S} = gen_tcp:connect({127,0,0,1}, Port, [binary, {active, false}]),
+ ReqData = #wm_reqdata{req_headers = mochiweb_headers:make([])},
+ ReqState = #wm_reqstate{socket=S, reqdata=ReqData},
+ ?assertEqual({S, ReqState}, socket(ReqState)),
+ {"127.0.0.1", NReqState} = get_peer(ReqState),
+ ?assertEqual("127.0.0.1", NReqState#wm_reqstate.peer),
+ Pid ! stop,
+ gen_tcp:close(S)
+ after 2000 ->
+ exit({error, listener_fail})
+ end.
+
+-endif.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2012 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+-module(webmachine_resource).
+-author('Justin Sheehy <justin@basho.com>').
+-author('Andy Gross <andy@basho.com>').
+-export([new/4, wrap/2, wrap/3]).
+-export([do/3,log_d/2,stop/1]).
+
+-include("wm_resource.hrl").
+-include("wm_reqdata.hrl").
+-include("wm_reqstate.hrl").
+
+%% Construct the parameterized-module-style resource tuple:
+%% {?MODULE, CallbackModule, CallbackState, ExportsOrddict, TraceDevOrFalse}.
+new(R_Mod, R_ModState, R_ModExports, R_Trace) ->
+ {?MODULE, R_Mod, R_ModState, R_ModExports, R_Trace}.
+
+%% @doc Built-in default for each webmachine resource callback, used
+%% when the resource module does not export the callback itself.
+%% Returns 'no_default' for callbacks that have no default and must
+%% therefore be exported (e.g. ping).
+%%
+%% Callbacks whose default is simply 'true':
+default(P) when P =:= service_available;
+                P =:= resource_exists;
+                P =:= auth_required;
+                P =:= is_authorized;
+                P =:= known_content_type;
+                P =:= valid_content_headers;
+                P =:= valid_entity_length;
+                P =:= delete_completed;
+                P =:= language_available;
+                P =:= finish_request ->
+    true;
+%% Callbacks whose default is simply 'false':
+default(P) when P =:= forbidden;
+                P =:= allow_missing_post;
+                P =:= malformed_request;
+                P =:= uri_too_long;
+                P =:= delete_resource;
+                P =:= post_is_create;
+                P =:= process_post;
+                P =:= is_conflict;
+                P =:= multiple_choices;
+                P =:= previously_existed;
+                P =:= moved_permanently;
+                P =:= moved_temporarily ->
+    false;
+%% Callbacks whose default is 'undefined':
+default(P) when P =:= create_path;
+                P =:= base_uri;
+                P =:= last_modified;
+                P =:= expires;
+                P =:= generate_etag ->
+    undefined;
+%% Callbacks whose default is the empty list:
+default(P) when P =:= options;
+                P =:= content_types_accepted;
+                P =:= variances ->
+    [];
+default(allowed_methods) ->
+    ['GET', 'HEAD'];
+default(known_methods) ->
+    ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT', 'OPTIONS'];
+default(content_types_provided) ->
+    [{"text/html", to_html}];
+%% the no_charset atom causes charset-negotiation to short-circuit;
+%% this default is needed for non-charset responses such as image/png.
+%% An example of how one might do actual negotiation:
+%% [{"iso-8859-1", fun(X) -> X end}, {"utf-8", make_utf8}]
+default(charsets_provided) ->
+    no_charset;
+%% this is handy for auto-gzip of GET-only resources:
+%% [{"identity", fun(X) -> X end}, {"gzip", fun(X) -> zlib:gzip(X) end}]
+default(encodings_provided) ->
+    [{"identity", fun(X) -> X end}];
+default(validate_content_checksum) ->
+    not_validated;
+%% everything else - including ping - has no built-in default
+default(_) ->
+    no_default.
+
+%% Parameterized-module shim: the resource tuple argument is unused.
+wrap(Mod, Args, {?MODULE, _, _, _, _}) ->
+ wrap(Mod, Args).
+
+%% Initialize resource module Mod with Args. Mod:init/1 may return
+%% {ok, State} for normal operation, or {{trace, Dir}, State} to enable
+%% call tracing, in which case a trace file is opened in Dir and its io
+%% device travels in the resource tuple. Any other return stops with
+%% bad_init_arg.
+wrap(Mod, Args) ->
+ case Mod:init(Args) of
+ {ok, ModState} ->
+ {ok, webmachine_resource:new(Mod, ModState,
+ orddict:from_list(Mod:module_info(exports)), false)};
+ {{trace, Dir}, ModState} ->
+ {ok, File} = open_log_file(Dir, Mod),
+ log_decision(File, v3b14),
+ log_call(File, attempt, Mod, init, Args),
+ log_call(File, result, Mod, init, {{trace, Dir}, ModState}),
+ {ok, webmachine_resource:new(Mod, ModState,
+ orddict:from_list(Mod:module_info(exports)), File)};
+ _ ->
+ {stop, bad_init_arg}
+ end.
+
+%% Dispatch one webmachine callback. The record form re-packs into the
+%% parameterized-module tuple form; the second clause looks up the
+%% reqstate in ReqProps, runs the callback, then picks up any updated
+%% reqstate the callback stashed in the process dictionary.
+do(#wm_resource{}=Res, Fun, ReqProps) ->
+ #wm_resource{module=R_Mod, modstate=R_ModState,
+ modexports=R_ModExports, trace=R_Trace} = Res,
+ do(Fun, ReqProps, {?MODULE, R_Mod, R_ModState, R_ModExports, R_Trace});
+do(Fun, ReqProps, {?MODULE, R_Mod, _, R_ModExports, R_Trace}=Req)
+ when is_atom(Fun) andalso is_list(ReqProps) ->
+ %% NOTE(review): if ReqProps carries no reqstate, RState0 becomes
+ %% 'undefined' and the record access below crashes - confirm all
+ %% callers always supply {reqstate, _}.
+ case lists:keyfind(reqstate, 1, ReqProps) of
+ false -> RState0 = undefined;
+ {reqstate, RState0} -> ok
+ end,
+ put(tmp_reqstate, empty),
+ {Reply, ReqData, NewModState} = handle_wm_call(Fun,
+ (RState0#wm_reqstate.reqdata)#wm_reqdata{wm_state=RState0},
+ Req),
+ ReqState = case get(tmp_reqstate) of
+ empty -> RState0;
+ X -> X
+ end,
+ %% Do not need the embedded state anymore
+ TrimData = ReqData#wm_reqdata{wm_state=undefined},
+ {Reply,
+ webmachine_resource:new(R_Mod, NewModState, R_ModExports, R_Trace),
+ ReqState#wm_reqstate{reqdata=TrimData}}.
+
+%% Run callback Fun in the resource module when it is exported (or when
+%% there is no built-in default); otherwise return the default, logging
+%% the not-exported call when tracing is active.
+handle_wm_call(Fun, ReqData, {?MODULE,R_Mod,R_ModState,R_ModExports,R_Trace}=Req) ->
+ case default(Fun) of
+ no_default ->
+ resource_call(Fun, ReqData, Req);
+ Default ->
+ case orddict:is_key(Fun, R_ModExports) of
+ true ->
+ resource_call(Fun, ReqData, Req);
+ false ->
+ if is_pid(R_Trace) ->
+ log_call(R_Trace,
+ not_exported,
+ R_Mod, Fun, [ReqData, R_ModState]);
+ true -> ok
+ end,
+ {Default, ReqData, R_ModState}
+ end
+ end.
+
+%% Shrink a stacktrace for error reports: the first frame's reqdata
+%% embeds a reqstate that in turn embeds the reqdata, so the nested
+%% reqdata is replaced with the 'REQDATA' placeholder to break the
+%% cycle. Any other shape is passed through untouched.
+trim_trace([{M,F,[RD = #wm_reqdata{},S]}|STRest]) ->
+ TrimState = (RD#wm_reqdata.wm_state)#wm_reqstate{reqdata='REQDATA'},
+ TrimRD = RD#wm_reqdata{wm_state=TrimState},
+ [{M,F,[TrimRD,S]}|STRest];
+trim_trace(X) -> X.
+
+%% Invoke Mod:F(ReqData, ModState), converting any exception into an
+%% {{error, {Class, Reason, Stacktrace}}, ReqData, ModState} result.
+%% Logs the attempt and the result when tracing is enabled.
+resource_call(F, ReqData, {?MODULE, R_Mod, R_ModState, _, R_Trace}) ->
+ case R_Trace of
+ false -> nop;
+ _ -> log_call(R_Trace, attempt, R_Mod, F, [ReqData, R_ModState])
+ end,
+ Result = try
+ apply(R_Mod, F, [ReqData, R_ModState])
+ catch C:R ->
+ %% NOTE(review): erlang:get_stacktrace/0 is deprecated and was
+ %% removed in OTP 24; porting to a newer OTP requires the
+ %% 'catch Class:Reason:Stacktrace' syntax instead.
+ Reason = {C, R, trim_trace(erlang:get_stacktrace())},
+ {{error, Reason}, ReqData, R_ModState}
+ end,
+ case R_Trace of
+ false -> nop;
+ _ -> log_call(R_Trace, result, R_Mod, F, Result)
+ end,
+ Result.
+
+%% Record that decision DecisionID was reached, when tracing is active.
+log_d(#wm_resource{}=Res, DecisionID) ->
+ #wm_resource{module=R_Mod, modstate=R_ModState,
+ modexports=R_ModExports, trace=R_Trace} = Res,
+ log_d(DecisionID, {?MODULE, R_Mod, R_ModState, R_ModExports, R_Trace});
+log_d(DecisionID, {?MODULE, _, _, _, R_Trace}) ->
+ case R_Trace of
+ false -> nop;
+ _ -> log_decision(R_Trace, DecisionID)
+ end.
+
+%% Tear down a resource: only the trace file (if any) needs closing.
+stop(#wm_resource{trace=R_Trace}) -> close_log_file(R_Trace);
+stop({?MODULE, _, _, _, R_Trace}) -> close_log_file(R_Trace).
+
+%% Append one traced call (attempt/result/not_exported) to the trace
+%% file as a readable Erlang term.
+log_call(File, Type, M, F, Data) ->
+ io:format(File,
+ "{~p, ~p, ~p,~n ~p}.~n",
+ [Type, M, F, escape_trace_data(Data)]).
+
+%% Rewrite a term so it can be written to - and later parsed back from -
+%% a trace file: funs, pids, and ports have no readable representation,
+%% so they become tagged descriptive tuples, recursively through lists
+%% and tuples.
+escape_trace_data(Fun) when is_function(Fun) ->
+ {'WMTRACE_ESCAPED_FUN',
+ [erlang:fun_info(Fun, module),
+ erlang:fun_info(Fun, name),
+ erlang:fun_info(Fun, arity),
+ erlang:fun_info(Fun, type)]};
+escape_trace_data(Pid) when is_pid(Pid) ->
+ {'WMTRACE_ESCAPED_PID', pid_to_list(Pid)};
+escape_trace_data(Port) when is_port(Port) ->
+ {'WMTRACE_ESCAPED_PORT', erlang:port_to_list(Port)};
+escape_trace_data(List) when is_list(List) ->
+ escape_trace_list(List, []);
+escape_trace_data(R=#wm_reqstate{}) ->
+ %% Drop the embedded reqdata so the whole request is not dumped.
+ list_to_tuple(
+ escape_trace_data(
+ tuple_to_list(R#wm_reqstate{reqdata='WMTRACE_NESTED_REQDATA'})));
+escape_trace_data(Tuple) when is_tuple(Tuple) ->
+ list_to_tuple(escape_trace_data(tuple_to_list(Tuple)));
+escape_trace_data(Other) ->
+ Other.
+
+%% Escape each list element; improper lists (as produced by e.g. the
+%% dict module) are reassembled with the escaped final tail attached to
+%% the last proper element.
+escape_trace_list([Head|Tail], Acc) ->
+ escape_trace_list(Tail, [escape_trace_data(Head)|Acc]);
+escape_trace_list([], Acc) ->
+ %% proper, nil-terminated list
+ lists:reverse(Acc);
+escape_trace_list(Final, Acc) ->
+ %% non-nil-terminated list, like the dict module uses
+ lists:reverse(tl(Acc))++[hd(Acc)|escape_trace_data(Final)].
+
+%% Append one decision marker to the trace file.
+log_decision(File, DecisionID) ->
+ io:format(File, "{decision, ~p}.~n", [DecisionID]).
+
+%% Open a per-request trace file named
+%% <Dir>/<Mod>-<UTC timestamp>.wmtrace for writing.
+%% Returns the result of file:open/2 ({ok, IoDevice} | {error, Reason}).
+open_log_file(Dir, Mod) ->
+    %% os:timestamp/0 replaces the deprecated erlang:now/0; it yields
+    %% the same {MegaSecs, Secs, MicroSecs} shape without now/0's
+    %% global uniqueness lock.
+    Now = {_,_,US} = os:timestamp(),
+    {{Y,M,D},{H,I,S}} = calendar:now_to_universal_time(Now),
+    Filename = io_lib:format(
+                 "~s/~p-~4..0B-~2..0B-~2..0B"
+                 "-~2..0B-~2..0B-~2..0B.~6..0B.wmtrace",
+                 [Dir, Mod, Y, M, D, H, I, S, US]),
+    file:open(Filename, [write]).
+
+%% Close the trace file if one was opened; a disabled trace (any
+%% non-pid value, e.g. 'false') is a no-op.
+close_log_file(File) ->
+    case is_pid(File) of
+        true -> file:close(File);
+        false -> ok
+    end.
--- /dev/null
+%% @author Kevin A. Smith <ksmith@basho.com>
+%% @copyright 2007-2010 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Module to add and remove dynamic routes to webmachine's routing
+%% table. Dynamic routes are not persistent between executions of
+%% a webmachine application. They will need to be added to the
+%% the table each time webmachine restarts.
+-module(webmachine_router).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ add_route/1,
+ add_route/2,
+ remove_route/1,
+ remove_route/2,
+ remove_resource/1,
+ remove_resource/2,
+ get_routes/0,
+ get_routes/1,
+ init_routes/1,
+ init_routes/2
+ ]).
+
+%% gen_server callbacks
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+%% @type hostmatchterm() = {hostmatch(), [pathmatchterm()]}.
+% The dispatch configuration contains a list of these terms, and the
+% first one whose host and one pathmatchterm match is used.
+
+%% @type pathmatchterm() = {[pathterm()], matchmod(), matchopts()}.
+% The dispatch configuration contains a list of these terms, and the
+% first one whose list of pathterms matches the input path is used.
+
+%% @type pathterm() = '*' | string() | atom().
+% A list of pathterms is matched against a '/'-separated input path.
+% The '*' pathterm matches all remaining tokens.
+% A string pathterm will match a token of exactly the same string.
+% Any atom pathterm other than '*' will match any token and will
+% create a binding in the result if a complete match occurs.
+
+%% @type matchmod() = atom().
+% This atom, if present in a successful matchterm, will appear in
+% the resulting dispterm. In Webmachine this is used to name the
+% resource module that will handle the matching request.
+
+%% @type matchopts() = [term()].
+% This term, if present in a successful matchterm, will appear in
+% the resulting dispterm. In Webmachine this is used to provide
+% arguments to the resource module handling the matching request.
+
+-define(SERVER, ?MODULE).
+
+%% @spec add_route(hostmatchterm() | pathmatchterm()) -> ok
+%% @doc Adds a route to webmachine's route table. The route should
+%% be the format documented here:
+%% http://bitbucket.org/justin/webmachine/wiki/DispatchConfiguration
+add_route(Route) ->
+ add_route(default, Route).
+
+add_route(Name, Route) ->
+ gen_server:call(?SERVER, {add_route, Name, Route}, infinity).
+
+%% @spec remove_route(hostmatchterm() | pathmatchterm()) -> ok
+%% @doc Removes a route from webmachine's default route table. The
+%% route must be properly formatted (only rules exactly equal to
+%% it, via /=, are removed).
+%% @see add_route/2
+remove_route(Route) ->
+ remove_route(default, Route).
+
+%% @doc Removes a route from the named route table.
+remove_route(Name, Route) ->
+ gen_server:call(?SERVER, {remove_route, Name, Route}, infinity).
+
+%% @spec remove_resource(atom()) -> ok
+%% @doc Drop, from the default table, every route whose resource
+%% module is the given module.
+remove_resource(Module) when is_atom(Module) ->
+ remove_resource(default, Module).
+
+%% @doc Drop every route in the named table that points at Module.
+remove_resource(Name, Module) when is_atom(Module) ->
+ gen_server:call(?SERVER, {remove_resource, Name, Module}, infinity).
+
+%% @spec get_routes() -> [{[], res, []}]
+%% @doc Retrieve a list of routes and resources set in webmachine's
+%% default route table. Reads go straight to the ETS table, with
+%% no round-trip through the gen_server.
+get_routes() ->
+ get_routes(default).
+
+%% @doc Retrieve the routes stored in the named route table.
+get_routes(Name) ->
+ get_dispatch_list(Name).
+
+%% @spec init_routes([hostmatchterm() | pathmatchterm()]) -> ok
+%% @doc Set the default routes, unless the routing table isn't empty.
+%% (The server uses ets:insert_new/2, so existing routes under the
+%% same table name are left untouched.)
+init_routes(DefaultRoutes) ->
+ init_routes(default, DefaultRoutes).
+
+%% @doc Set the initial routes for the named table, if not yet set.
+init_routes(Name, DefaultRoutes) ->
+ gen_server:call(?SERVER, {init_routes, Name, DefaultRoutes}, infinity).
+
+%% @spec start_link() -> {ok, pid()} | {error, any()}
+%% @doc Starts the webmachine_router gen_server.
+start_link() ->
+ %% We expect to only be called from webmachine_sup
+ %%
+ %% Set up the ETS configuration table. Note that ets:new/2 runs in
+ %% the *caller's* process (the supervisor), so the caller owns the
+ %% table and it outlives crashes of the router itself.
+ try ets:new(?MODULE, [named_table, public, set, {keypos, 1},
+ {read_concurrency, true}]) of
+ _Result ->
+ ok
+ catch
+ error:badarg ->
+ %% The table already exists, which is fine. The webmachine_router
+ %% probably crashed and this is a restart. Reusing the table is
+ %% what preserves the routes across restarts.
+ ok
+ end,
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% @private
+%% @doc gen_server init. All routing state lives in the ETS table set
+%% up by start_link/0, so the server state is simply 'undefined'.
+init([]) ->
+ {ok, undefined}.
+
+%% @private
+%% @doc Serialized write operations on the route table; reads happen
+%% directly against ETS in get_routes/1.
+handle_call({remove_resource, Name, Resource}, _From, State) ->
+ %% Drop every rule (including host-nested ones) targeting Resource.
+ DL = filter_by_resource(Resource, get_dispatch_list(Name)),
+ {reply, set_dispatch_list(Name, DL), State};
+
+handle_call({remove_route, Name, Route}, _From, State) ->
+ %% Keep only rules not exactly equal to Route.
+ DL = [D || D <- get_dispatch_list(Name),
+ D /= Route],
+ {reply, set_dispatch_list(Name, DL), State};
+
+handle_call({add_route, Name, Route}, _From, State) ->
+ %% Prepend Route, removing any existing duplicate so adds are
+ %% idempotent (see no_dupe_path_test below).
+ DL = [Route|[D || D <- get_dispatch_list(Name),
+ D /= Route]],
+ {reply, set_dispatch_list(Name, DL), State};
+
+handle_call({init_routes, Name, DefaultRoutes}, _From, State) ->
+ %% if the table lacks a dispatch_list row, set it
+ ets:insert_new(?MODULE, {Name, DefaultRoutes}),
+ {reply, ok, State};
+
+handle_call(_Request, _From, State) ->
+ {reply, ignore, State}.
+
+%% @private
+%% @doc No casts are part of this server's protocol; ignore them.
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%% @private
+%% @doc Drain unexpected messages so the mailbox cannot grow.
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% @private
+%% @doc Nothing to clean up: the ETS table is owned by the caller of
+%% start_link/0 and intentionally survives this server.
+terminate(_Reason, _State) ->
+ ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Internal functions
+
+%% @doc Remove any dispatch rule that directs requests to `Resource',
+%% recursing into host-based rules so their nested path lists are
+%% filtered as well.
+filter_by_resource(Resource, Dispatch) ->
+ lists:foldr(filter_by_resource(Resource), [], Dispatch).
+
+%% @doc Build the foldr fun that drops rules pointing at Resource.
+filter_by_resource(Resource) ->
+ fun({_, R, _}, Acc) when R == Resource -> % basic dispatch
+ Acc;
+ ({_, _, R, _}, Acc) when R == Resource -> % guarded dispatch
+ Acc;
+ ({Host, Disp}, Acc) -> % host-based dispatch: filter recursively
+ [{Host, filter_by_resource(Resource, Disp)}|Acc];
+ (Other, Acc) -> % dispatch not mentioning this resource
+ [Other|Acc]
+ end.
+
+%% @doc Look up the dispatch list for Name directly in ETS; an absent
+%% row is treated as an empty route table.
+get_dispatch_list(Name) ->
+ case ets:lookup(?MODULE, Name) of
+ [{Name, Dispatch}] ->
+ Dispatch;
+ [] ->
+ []
+ end.
+
+%% @doc Overwrite the dispatch list stored under Name. Returns ok.
+set_dispatch_list(Name, DispatchList) ->
+ true = ets:insert(?MODULE, {Name, DispatchList}),
+ ok.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Adding then removing a route should leave the table empty.
+add_remove_route_test() ->
+ {ok, Pid} = webmachine_router:start_link(),
+ unlink(Pid),
+ PathSpec = {["foo"], foo, []},
+ webmachine_router:add_route(PathSpec),
+ [PathSpec] = get_routes(),
+ webmachine_router:remove_route(PathSpec),
+ [] = get_routes(),
+ exit(Pid, kill).
+
+%% remove_resource/1 must strip basic, guarded (fun and {M,F}) and
+%% host-based rules that point at the module.
+add_remove_resource_test() ->
+ {ok, Pid} = webmachine_router:start_link(),
+ unlink(Pid),
+ PathSpec1 = {["foo"], foo, []},
+ PathSpec2 = {["bar"], foo, []},
+ PathSpec3 = {["baz"], bar, []},
+ PathSpec4 = {["foo"], fun(_) -> true end, foo, []},
+ PathSpec5 = {["foo"], {webmachine_router, test_guard}, foo, []},
+ webmachine_router:add_route(PathSpec1),
+ webmachine_router:add_route(PathSpec2),
+ webmachine_router:add_route(PathSpec3),
+ webmachine_router:remove_resource(foo),
+ [PathSpec3] = get_routes(),
+ webmachine_router:add_route(PathSpec4),
+ webmachine_router:remove_resource(foo),
+ [PathSpec3] = get_routes(),
+ webmachine_router:add_route(PathSpec5),
+ webmachine_router:remove_resource(foo),
+ [PathSpec3] = get_routes(),
+ webmachine_router:remove_route(PathSpec3),
+ [begin
+ PathSpec = {"localhost", [HostPath]},
+ webmachine_router:add_route(PathSpec),
+ webmachine_router:remove_resource(foo),
+ [{"localhost", []}] = get_routes(),
+ webmachine_router:remove_route({"localhost", []})
+ end || HostPath <- [PathSpec1, PathSpec4, PathSpec5]],
+ exit(Pid, kill).
+
+%% Adding the same route twice must not create duplicates.
+no_dupe_path_test() ->
+ {ok, Pid} = webmachine_router:start_link(),
+ unlink(Pid),
+ PathSpec = {["foo"], foo, []},
+ webmachine_router:add_route(PathSpec),
+ webmachine_router:add_route(PathSpec),
+ [PathSpec] = get_routes(),
+ exit(Pid, kill).
+
+%% Routes must survive a router restart (the ETS table is owned by
+%% the caller of start_link/0, not by the router process).
+supervisor_restart_keeps_routes_test() ->
+ {ok, Pid} = webmachine_router:start_link(),
+ unlink(Pid),
+ PathSpec = {["foo"], foo, []},
+ webmachine_router:add_route(PathSpec),
+ [PathSpec] = get_routes(),
+ OldRouter = whereis(webmachine_router),
+ exit(whereis(webmachine_router), kill),
+ timer:sleep(100),
+ NewRouter = whereis(webmachine_router),
+ ?assert(OldRouter /= NewRouter),
+ [PathSpec] = get_routes(),
+ exit(Pid, kill).
+
+-endif.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2008 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Supervisor for the webmachine application.
+
+-module(webmachine_sup).
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+-include("webmachine_logger.hrl").
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Add processes if necessary: children no longer present in the
+%% spec list are terminated and deleted, then every current spec is
+%% started (start_child results for already-running children are
+%% ignored, so they are left alone).
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+
+ Old = sets:from_list(
+ [Name || {Name, _, _, _} <- supervisor:which_children(?MODULE)]),
+ New = sets:from_list([Name || {Name, _, _, _, _, _} <- Specs]),
+ Kill = sets:subtract(Old, New),
+
+ sets:fold(fun (Id, ok) ->
+ supervisor:terminate_child(?MODULE, Id),
+ supervisor:delete_child(?MODULE, Id),
+ ok
+ end, ok, Kill),
+
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback. Children: the shared logger gen_event
+%% process, the logger watcher supervisor, and the route table
+%% server (webmachine_router).
+init([]) ->
+ Router = {webmachine_router,
+ {webmachine_router, start_link, []},
+ permanent, 5000, worker, [webmachine_router]},
+ LogHandler = [{webmachine_logger, {gen_event, start_link, [{local, ?EVENT_LOGGER}]},
+ permanent, 5000, worker, [dynamic]},
+ {webmachine_logger_watcher_sup, {webmachine_logger_watcher_sup, start_link, []},
+ permanent, 5000, supervisor, [webmachine_logger_watcher_sup]}],
+ {ok, {{one_for_one, 9, 10}, LogHandler ++ [Router]}}.
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2008 Basho Technologies
+%% (guess_mime/1 derived from code copyright 2007 Mochi Media, Inc.)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+%% @doc Utilities for parsing, quoting, and negotiation.
+
+-module(webmachine_util).
+-export([guess_mime/1]).
+-export([convert_request_date/1, compare_ims_dates/2]).
+-export([rfc1123_date/1]).
+-export([choose_media_type/2, format_content_type/1]).
+-export([choose_charset/2]).
+-export([choose_encoding/2]).
+-export([now_diff_milliseconds/2]).
+-export([media_type_to_detail/1,
+ quoted_string/1,
+ split_quoted_strings/1]).
+-export([parse_range/2]).
+
+-ifdef(TEST).
+-ifdef(EQC).
+-include_lib("eqc/include/eqc.hrl").
+-endif.
+-include_lib("eunit/include/eunit.hrl").
+-export([accept_header_to_media_types/1]).
+-endif.
+
+%% @doc Parse an HTTP request date string into a calendar:datetime()
+%% tuple via httpd_util, returning the atom 'bad_date' when the
+%% input cannot be parsed (e.g. a binary or malformed string makes
+%% httpd_util raise an error). The original wrapped the call in a
+%% redundant single-clause case; the try expression alone is
+%% equivalent.
+convert_request_date(Date) ->
+ try
+ httpd_util:convert_request_date(Date)
+ catch
+ error:_ -> bad_date
+ end.
+
+%% @doc Return true when datetime D1 is strictly later than D2 (used
+%% for If-Modified-Since comparisons). Both arguments are
+%% calendar:datetime() tuples.
+compare_ims_dates(D1, D2) ->
+ calendar:datetime_to_gregorian_seconds(D1) >
+ calendar:datetime_to_gregorian_seconds(D2).
+
+%% @doc Convert tuple style GMT datetime to RFC1123 style one, e.g.
+%% "Thu, 11 Jul 2013 04:33:19 GMT". The input must already be in
+%% UTC; no timezone conversion is performed here.
+rfc1123_date({{YYYY, MM, DD}, {Hour, Min, Sec}}) ->
+ DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
+ %% NOTE(review): the "~3.s" directive leans on io_lib field-width
+ %% handling for the month abbreviation -- confirm its output
+ %% against rfc1123_date_test below.
+ lists:flatten(io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [httpd_util:day(DayNumber), DD, httpd_util:month(MM),
+ YYYY, Hour, Min, Sec])).
+
+%% @spec guess_mime(string()) -> string()
+%% @doc Guess the mime type of a file by the extension of its filename.
+%% Unknown extensions fall through to "text/plain". Fixes three
+%% truncated MIME strings (.otf, .webm, .webp).
+guess_mime(File) ->
+ case filename:extension(File) of
+ ".bz2" ->
+ "application/x-bzip2";
+ ".css" ->
+ "text/css";
+ ".eot" ->
+ "application/vnd.ms-fontobject";
+ ".gif" ->
+ "image/gif";
+ ".gz" ->
+ "application/x-gzip";
+ ".htc" ->
+ "text/x-component";
+ ".html" ->
+ "text/html";
+ ".ico" ->
+ "image/x-icon";
+ ".jpeg" ->
+ "image/jpeg";
+ ".jpg" ->
+ "image/jpeg";
+ ".js" ->
+ "application/x-javascript";
+ ".less" ->
+ "text/css";
+ ".m4v" ->
+ "video/mp4";
+ ".manifest" ->
+ "text/cache-manifest";
+ ".mp4" ->
+ "video/mp4";
+ ".oga" ->
+ "audio/ogg";
+ ".ogg" ->
+ "audio/ogg";
+ ".ogv" ->
+ "video/ogg";
+ ".otf" ->
+ %% was "font/opentyp" -- truncated MIME type
+ "font/opentype";
+ ".png" ->
+ "image/png";
+ ".svg" ->
+ "image/svg+xml";
+ ".svgz" ->
+ "image/svg+xml";
+ ".swf" ->
+ "application/x-shockwave-flash";
+ ".tar" ->
+ "application/x-tar";
+ ".tgz" ->
+ "application/x-gzip";
+ ".ttc" ->
+ "application/x-font-ttf";
+ ".ttf" ->
+ "application/x-font-ttf";
+ ".vcf" ->
+ "text/x-vcard";
+ ".webm" ->
+ %% was "video/web" -- truncated MIME type
+ "video/webm";
+ ".webp" ->
+ %% was "image/web" -- truncated MIME type
+ "image/webp";
+ ".woff" ->
+ "application/x-font-woff";
+ ".xhtml" ->
+ "application/xhtml+xml";
+ ".xml" ->
+ "application/xml";
+ ".zip" ->
+ "application/zip";
+ _ ->
+ "text/plain"
+ end.
+
+%% @doc Choose the Content-Type to serve from the resource's Provided
+%% list, given the request's Accept header. Returns the atom
+%% 'none' when nothing matches.
+choose_media_type(Provided,AcceptHead) ->
+ % Return the Content-Type we will serve for a request.
+ % If there is no acceptable/available match, return the atom "none".
+ % AcceptHead is the value of the request's Accept header
+ % Provided is a list of media types the resource can provide.
+ % each is either a string e.g. -- "text/html"
+ % or a string and parameters e.g. -- {"text/html",[{level,1}]}
+ % (the plain string case with no parameters is much more common)
+ Requested = accept_header_to_media_types(AcceptHead),
+ Prov1 = normalize_provided(Provided),
+ choose_media_type1(Prov1,Requested).
+%% Walk the q-sorted requested types, returning the first provided
+%% match (formatted with its parameters); 'none' when exhausted.
+choose_media_type1(_Provided,[]) ->
+ none;
+choose_media_type1(Provided,[H|T]) ->
+ {_Pri,Type,Params} = H,
+ case media_match({Type,Params}, Provided) of
+ [] -> choose_media_type1(Provided,T);
+ [{CT_T,CT_P}|_] -> format_content_type(CT_T,CT_P)
+ end.
+
+%% media_match(Requested, Provided) -> provided entries that satisfy
+%% the requested {Type, Params}. A bare "*/*" request with no
+%% parameters short-circuits to the first provided type.
+media_match(_,[]) -> [];
+media_match({"*/*",[]},[H|_]) -> [H];
+media_match({Type,Params},Provided) ->
+ [{T1,P1} || {T1,P1} <- Provided,
+ media_type_match(Type,T1), media_params_match(Params,P1)].
+%% Match a requested type string against a provided one. Note that in
+%% the third clause Prov is already bound, so it only matches when
+%% the request equals the provided type exactly.
+media_type_match(Req,Prov) ->
+ case Req of
+ "*" -> % might as well not break for lame (Gomez) clients
+ true;
+ "*/*" ->
+ true;
+ Prov ->
+ true;
+ _ ->
+ % subtype wildcard: "text/*" matches any provided "text/..."
+ [R1|R2] = string:tokens(Req,"/"),
+ [P1,_P2] = string:tokens(Prov,"/"),
+ case R2 of
+ ["*"] ->
+ case R1 of
+ P1 -> true;
+ _ -> false
+ end;
+ _ -> false
+ end
+ end.
+%% Parameter lists match when they are equal as sets (order ignored).
+media_params_match(Req,Prov) ->
+ lists:sort(Req) =:= lists:sort(Prov).
+
+%% Convert a parsed accept entry {Type, Params} into {Priority, Type,
+%% RemainingParams}: the "q" parameter (default 1) becomes the
+%% priority and every other parameter is preserved.
+prioritize_media(TyParam) ->
+ {Type, Params} = TyParam,
+ prioritize_media(Type,Params,[]).
+prioritize_media(Type,Params,Acc) ->
+ case Params of
+ [] ->
+ {1, Type, Acc};
+ _ ->
+ [{Tok,Val}|Rest] = Params,
+ case Tok of
+ "q" ->
+ %% "1" and "0" are special-cased because
+ %% list_to_float/1 rejects integer-only strings.
+ QVal = case Val of
+ "1" ->
+ 1;
+ "0" ->
+ 0;
+ [$.|_] ->
+ %% handle strange FeedBurner Accept
+ list_to_float([$0|Val]);
+ _ -> list_to_float(Val)
+ end,
+ {QVal, Type, Rest ++ Acc};
+ _ ->
+ prioritize_media(Type,Rest,[{Tok,Val}|Acc])
+ end
+ end.
+
+%% Parse one media type string into {Type, Params} via mochiweb.
+media_type_to_detail(MType) ->
+ mochiweb_util:parse_header(MType).
+
+accept_header_to_media_types(HeadVal) ->
+ % given the value of an accept header, produce an ordered list
+ % based on the q-values. Results are [{Type,Params}] with the
+ % head of the list being the highest-priority requested type.
+ % A malformed header yields [] (deliberate best-effort catch).
+ try
+ lists:reverse(lists:keysort(1,
+ [prioritize_media(media_type_to_detail(MType)) ||
+ MType <- [string:strip(X) || X <- string:tokens(HeadVal, ",")]]))
+ catch _:_ -> []
+ end.
+
+%% Normalize the resource's provided types: bare strings become
+%% {Type, []}, and atom parameter keys are converted to strings.
+%% Note the parameter list comes out reversed relative to the
+%% input (harmless: media_params_match/2 sorts before comparing).
+normalize_provided(Provided) ->
+ lists:map(fun normalize_provided1/1, Provided).
+
+normalize_provided1(Type) when is_list(Type) ->
+ {Type, []};
+normalize_provided1({Type, Params}) ->
+ {Type, normalize_media_params(Params)}.
+
+normalize_media_params(Params) ->
+ lists:foldl(fun({K, V}, Acc) when is_atom(K) ->
+ [{atom_to_list(K), V} | Acc];
+ (Other, Acc) ->
+ [Other | Acc]
+ end, [], Params).
+
+
+%% @doc Render a media type (optionally with parameters) as a header
+%% value string, e.g. {"text/html",[{"level","1"}]} becomes
+%% "text/html; level=1". Atom parameter keys are stringified.
+format_content_type(Type) when is_list(Type) ->
+ Type;
+format_content_type({Type, Params}) ->
+ format_content_type(Type, Params).
+
+format_content_type(Type, []) ->
+ Type;
+format_content_type(Type, [{K, V} | Rest]) ->
+ Key = if is_atom(K) -> atom_to_list(K); true -> K end,
+ format_content_type(Type ++ "; " ++ Key ++ "=" ++ V, Rest).
+
+%% @doc Choose a charset from the Accept-Charset header; ISO-8859-1
+%% acts as the default choice.
+choose_charset(CSets, AccCharHdr) -> do_choose(CSets, AccCharHdr, "ISO-8859-1").
+
+%% @doc Choose a content encoding from the Accept-Encoding header;
+%% "identity" acts as the default choice.
+choose_encoding(Encs, AccEncHdr) -> do_choose(Encs, AccEncHdr, "identity").
+
+%% Generic conneg: decide whether the default and "*" are acceptable
+%% (an explicit q=0 vetoes them), then scan the accepted list.
+do_choose(Choices, Header, Default) ->
+ Accepted = build_conneg_list(string:tokens(Header, ",")),
+ DefaultPrio = [P || {P,C} <- Accepted, C =:= Default],
+ StarPrio = [P || {P,C} <- Accepted, C =:= "*"],
+ DefaultOkay = case DefaultPrio of
+ [] ->
+ case StarPrio of
+ [0.0] -> no;
+ _ -> yes
+ end;
+ [0.0] -> no;
+ _ -> yes
+ end,
+ AnyOkay = case StarPrio of
+ [] -> no;
+ [0.0] -> no;
+ _ -> yes
+ end,
+ do_choose(Default, DefaultOkay, AnyOkay, Choices, Accepted).
+%% Accepted list exhausted: fall back to "*" (first remaining choice)
+%% or the default if they were acceptable; otherwise none.
+do_choose(_Default, _DefaultOkay, _AnyOkay, [], []) ->
+ none;
+do_choose(_Default, _DefaultOkay, _AnyOkay, [], _Accepted) ->
+ none;
+do_choose(Default, DefaultOkay, AnyOkay, Choices, []) ->
+ case AnyOkay of
+ yes -> hd(Choices);
+ no ->
+ case DefaultOkay of
+ yes ->
+ case lists:member(Default, Choices) of
+ true -> Default;
+ _ -> none
+ end;
+ no -> none
+ end
+ end;
+do_choose(Default, DefaultOkay, AnyOkay, Choices, [AccPair|AccRest]) ->
+ {Prio, Acc} = AccPair,
+ case Prio of
+ 0.0 ->
+ %% q=0 forbids this token: remove it from the candidates.
+ do_choose(Default, DefaultOkay, AnyOkay,
+ lists:delete(Acc, Choices), AccRest);
+ _ ->
+ LAcc = string:to_lower(Acc),
+ LChoices = [string:to_lower(X) || X <- Choices],
+ % doing this a little more work than needed in
+ % order to be easily insensitive but preserving
+ case lists:member(LAcc, LChoices) of
+ true ->
+ hd([X || X <- Choices,
+ string:to_lower(X) =:= LAcc]);
+ false -> do_choose(Default, DefaultOkay, AnyOkay,
+ Choices, AccRest)
+ end
+ end.
+
+%% Parse conneg header tokens (e.g. "gzip;q=0.5") into a descending
+%% [{Priority, Choice}] list; a token without a q parameter gets
+%% priority 1.0.
+build_conneg_list(AccList) ->
+ build_conneg_list(AccList, []).
+build_conneg_list([], Result) -> lists:reverse(lists:sort(Result));
+build_conneg_list([Acc|AccRest], Result) ->
+ XPair = list_to_tuple([string:strip(X) || X <- string:tokens(Acc, ";")]),
+ Pair = case XPair of
+ {Choice, "q=" ++ PrioStr} ->
+ %% "0"/"1" are special-cased because list_to_float/1
+ %% rejects integer-only strings.
+ case PrioStr of
+ "0" -> {0.0, Choice};
+ "1" -> {1.0, Choice};
+ [$.|_] ->
+ %% handle strange FeedBurner Accept
+ {list_to_float([$0|PrioStr]), Choice};
+ _ -> {list_to_float(PrioStr), Choice}
+ end;
+ {Choice} ->
+ {1.0, Choice}
+ end,
+ build_conneg_list(AccRest,[Pair|Result]).
+
+
+%% @doc Return Str as an HTTP quoted-string: wrapped in double quotes
+%% with embedded quotes escaped. A string already starting with a
+%% quote is assumed to be quoted and is returned untouched.
+quoted_string([$" | _Rest] = Str) ->
+ Str;
+quoted_string(Str) ->
+ escape_quotes(Str, [$"]). % Initialize Acc with opening quote
+
+%% Accumulate the string in reverse, escaping bare quotes and passing
+%% already-escaped pairs through unchanged.
+escape_quotes([], Acc) ->
+ lists:reverse([$" | Acc]); % Append final quote
+escape_quotes([$\\, Char | Rest], Acc) ->
+ escape_quotes(Rest, [Char, $\\ | Acc]); % Any quoted char should be skipped
+escape_quotes([$" | Rest], Acc) ->
+ escape_quotes(Rest, [$", $\\ | Acc]); % Unquoted quotes should be escaped
+escape_quotes([Char | Rest], Acc) ->
+ escape_quotes(Rest, [Char | Acc]).
+
+%% @doc Extract the contents of every double-quoted string in Str,
+%% unescaping backslash-escaped characters; text outside quotes is
+%% discarded.
+split_quoted_strings(Str) ->
+ split_quoted_strings(Str, []).
+
+split_quoted_strings([], Acc) ->
+ lists:reverse(Acc);
+split_quoted_strings([$" | Rest], Acc) ->
+ {Str, Cont} = unescape_quoted_string(Rest, []),
+ split_quoted_strings(Cont, [Str | Acc]);
+split_quoted_strings([_Skip | Rest], Acc) ->
+ split_quoted_strings(Rest, Acc).
+
+%% Consume input up to the closing quote, returning {String, Rest};
+%% an unterminated string consumes everything remaining.
+unescape_quoted_string([], Acc) ->
+ {lists:reverse(Acc), []};
+unescape_quoted_string([$\\, Char | Rest], Acc) -> % Any quoted char should be unquoted
+ unescape_quoted_string(Rest, [Char | Acc]);
+unescape_quoted_string([$" | Rest], Acc) -> % Quote indicates end of this string
+ {lists:reverse(Acc), Rest};
+unescape_quoted_string([Char | Rest], Acc) ->
+ unescape_quoted_string(Rest, [Char | Acc]).
+
+
+%% @type now() = {MegaSecs, Secs, MicroSecs}
+
+%% This is faster than timer:now_diff() because it does not use bignums.
+%% But it returns *milliseconds* (timer:now_diff returns microseconds.)
+%% From http://www.erlang.org/ml-archive/erlang-questions/200205/msg00027.html
+
+%% @doc Compute the difference between two now() tuples, in milliseconds.
+%% An undefined first argument means "now" (os:timestamp/0); both
+%% undefined yields 0. The first tuple clause matches the common
+%% case where both stamps share the same MegaSecs, avoiding the
+%% megasecond multiplication entirely.
+%% @spec now_diff_milliseconds(now(), now()) -> integer()
+now_diff_milliseconds(undefined, undefined) ->
+ 0;
+now_diff_milliseconds(undefined, T2) ->
+ now_diff_milliseconds(os:timestamp(), T2);
+now_diff_milliseconds({M,S,U}, {M,S1,U1}) ->
+ ((S-S1) * 1000) + ((U-U1) div 1000);
+now_diff_milliseconds({M,S,U}, {M1,S1,U1}) ->
+ ((M-M1)*1000000+(S-S1))*1000 + ((U-U1) div 1000).
+
+%% @doc Expand a raw Range header into a list of absolute inclusive
+%% {Start, End} byte ranges for a resource of the given length,
+%% silently dropping specs mochiweb considers invalid.
+-spec parse_range(RawRange::string(), ResourceLength::non_neg_integer()) ->
+ [{Start::non_neg_integer(), End::non_neg_integer()}].
+parse_range(RawRange, ResourceLength) when is_list(RawRange) ->
+ parse_range(mochiweb_http:parse_range_request(RawRange), ResourceLength, []).
+
+parse_range([], _ResourceLength, Acc) ->
+ lists:reverse(Acc);
+parse_range([Spec | Rest], ResourceLength, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, ResourceLength) of
+ invalid_range ->
+ %% Skip bad specs rather than failing the whole request.
+ parse_range(Rest, ResourceLength, Acc);
+ {Skip, Length} ->
+ %% mochiweb returns {Skip, Length}; convert to inclusive end.
+ parse_range(Rest, ResourceLength, [{Skip, Skip + Length - 1} | Acc])
+ end.
+
+%%
+%% TEST
+%%
+-ifdef(TEST).
+
+%% EUnit (and optional EQC) coverage for content negotiation, date
+%% handling, MIME guessing and quoted-string helpers.
+choose_media_type_test() ->
+ Provided = "text/html",
+ ShouldMatch = ["*", "*/*", "text/*", "text/html"],
+ WantNone = ["foo", "text/xml", "application/*", "foo/bar/baz"],
+ [ ?assertEqual(Provided, choose_media_type([Provided], I))
+ || I <- ShouldMatch ],
+ [ ?assertEqual(none, choose_media_type([Provided], I))
+ || I <- WantNone ].
+
+choose_media_type_qval_test() ->
+ Provided = ["text/html", "image/jpeg"],
+ HtmlMatch = ["image/jpeg;q=0.5, text/html",
+ "text/html, image/jpeg; q=0.5",
+ "text/*; q=0.8, image/*;q=0.7",
+ "text/*;q=.8, image/*;q=.7"], %% strange FeedBurner format
+ JpgMatch = ["image/*;q=1, text/html;q=0.9",
+ "image/png, image/*;q=0.3"],
+ [ ?assertEqual("text/html", choose_media_type(Provided, I))
+ || I <- HtmlMatch ],
+ [ ?assertEqual("image/jpeg", choose_media_type(Provided, I))
+ || I <- JpgMatch ].
+
+accept_header_to_media_types_test() ->
+ Header1 = "text/html,application/xhtml+xml,application/xml,application/x-javascript,*/*;q=0.5",
+ Header2 = "audio/*; q=0, audio/basic",
+ OddHeader = "text/html,application/xhtml+xml,application/xml,application/x-javascript,*/*;q=0,5",
+ Result1 = accept_header_to_media_types(Header1),
+ Result2 = accept_header_to_media_types(Header2),
+ Result3 = accept_header_to_media_types(OddHeader),
+ ExpResult1 = [{1,"application/x-javascript", []},
+ {1,"application/xml",[]},
+ {1,"application/xhtml+xml",[]},
+ {1,"text/html",[]},
+ {0.5,"*/*",[]}],
+ ExpResult2 = [{1,"audio/basic",[]},{0,"audio/*",[]}],
+ %% "q=0,5" splits on the comma, so "5" appears as its own type.
+ ExpResult3 = [{1, "5", []},
+ {1,"application/x-javascript", []},
+ {1,"application/xml",[]},
+ {1,"application/xhtml+xml",[]},
+ {1,"text/html",[]},
+ {0,"*/*",[]}],
+ ?assertEqual(ExpResult1, Result1),
+ ?assertEqual(ExpResult2, Result2),
+ ?assertEqual(ExpResult3, Result3).
+
+media_type_extra_whitespace_test() ->
+ MType = "application/x-www-form-urlencoded ; charset = utf8",
+ ?assertEqual({"application/x-www-form-urlencoded",[{"charset","utf8"}]},
+ webmachine_util:media_type_to_detail(MType)).
+
+format_content_type_test() ->
+ Types = ["audio/vnd.wave; codec=31",
+ "text/x-okie; charset=iso-8859-1; declaration=<f950118.AEB0@XIson.com>"],
+ [?assertEqual(Type, format_content_type(
+ webmachine_util:media_type_to_detail(Type)))
+ || Type <- Types],
+ ?assertEqual(hd(Types), format_content_type("audio/vnd.wave", [{codec, "31"}])).
+
+convert_request_date_test() ->
+ ?assertMatch({{_,_,_},{_,_,_}},
+ convert_request_date("Wed, 30 Dec 2009 14:39:02 GMT")),
+ ?assertMatch(bad_date,
+ convert_request_date(<<"does not handle binaries">>)).
+
+compare_ims_dates_test() ->
+ Late = {{2009,12,30},{14,39,02}},
+ Early = {{2009,12,30},{13,39,02}},
+ ?assertEqual(true, compare_ims_dates(Late, Early)),
+ ?assertEqual(false, compare_ims_dates(Early, Late)).
+
+rfc1123_date_test() ->
+ ?assertEqual("Thu, 11 Jul 2013 04:33:19 GMT",
+ rfc1123_date({{2013, 7, 11}, {4, 33, 19}})).
+
+%% Only checks the top-level type prefix for each extension group.
+guess_mime_test() ->
+ TextTypes = [".html",".css",".htc",".manifest",".txt"],
+ AppTypes = [".xhtml",".xml",".js",".swf",".zip",".bz2",
+ ".gz",".tar",".tgz"],
+ ImgTypes = [".jpg",".jpeg",".gif",".png",".ico",".svg"],
+ ?assertEqual([], [ T || T <- TextTypes,
+ 1 /= string:str(guess_mime(T),"text/") ]),
+ ?assertEqual([], [ T || T <- AppTypes,
+ 1 /= string:str(guess_mime(T),"application/") ]),
+ ?assertEqual([], [ T || T <- ImgTypes,
+ 1 /= string:str(guess_mime(T),"image/") ]).
+
+
+now_diff_milliseconds_test() ->
+ Late = {10, 10, 10},
+ Early1 = {10, 9, 9},
+ Early2 = {9, 9, 9},
+ ?assertEqual(1000, now_diff_milliseconds(Late, Early1)),
+ ?assertEqual(1000001000, now_diff_milliseconds(Late, Early2)).
+
+-ifdef(EQC).
+
+-define(QC_OUT(P),
+ eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)).
+
+%% Property: quoting wraps in quotes and escapes all interior quotes;
+%% already-quoted input passes through unchanged.
+prop_quoted_string() ->
+ ?FORALL(String0, non_empty(list(oneof([char(), $", [$\\, char()]]))),
+ begin
+ String = lists:flatten(String0),
+
+ Quoted = quoted_string(String),
+ case String of
+ [$" | _] ->
+ ?assertEqual(String, Quoted),
+ true;
+ _ ->
+ %% Properties:
+ %% * strings must begin/end with quote
+ %% * All other quotes should be escaped
+ ?assertEqual($", hd(Quoted)),
+ ?assertEqual($", lists:last(Quoted)),
+ Partial = lists:reverse(tl(lists:reverse(tl(Quoted)))),
+ case check_quote(Partial) of
+ true ->
+ true;
+ false ->
+ io:format(user, "----\n", []),
+ io:format(user, "In: ~p\n", [[integer_to_list(C) || C <- String]]),
+ io:format(user, "Out: ~p\n", [[integer_to_list(C) || C <- Quoted]]),
+ false
+ end
+ end
+ end).
+
+%% Succeeds when every quote in the string is backslash-escaped.
+check_quote([]) ->
+ true;
+check_quote([$\\, _Any | Rest]) ->
+ check_quote(Rest);
+check_quote([$" | _Rest]) ->
+ false;
+check_quote([_Any | Rest]) ->
+ check_quote(Rest).
+
+prop_quoted_string_test() ->
+ ?assert(eqc:quickcheck(?QC_OUT(prop_quoted_string()))).
+
+-endif. % EQC
+-endif. % TEST
--- /dev/null
+%% @author Bryan Fink <bryan@basho.com>
+%% @doc Webmachine trace file interpreter.
+-module(wmtrace_resource).
+
+-export([add_dispatch_rule/2,
+ remove_dispatch_rules/0]).
+
+-export([ping/2,
+ init/1,
+ resource_exists/2,
+ content_types_provided/2,
+ produce_html/2,
+ produce_javascript/2,
+ produce_map/2,
+ produce_css/2]).
+
+-include("wm_reqdata.hrl").
+
+-record(ctx, {trace_dir, trace}).
+
+-define(MAP_EXTERNAL, "static/map.png").
+-define(MAP_INTERNAL, "http-headers-status-v3.png").
+-define(SCRIPT_EXTERNAL, "static/wmtrace.js").
+-define(SCRIPT_INTERNAL, "wmtrace.js").
+-define(STYLE_EXTERNAL, "static/wmtrace.css").
+-define(STYLE_INTERNAL, "wmtrace.css").
+
+%%
+%% Dispatch Modifiers
+%%
+
+%% @spec add_dispatch_rule(string(), string()) -> ok
+%% @doc Add a dispatch rule to point at wmtrace_resource.
+%% Example: to serve wmtrace_resource from
+%% http://yourhost/dev/wmtrace/
+%% with trace files on disk at
+%% priv/traces
+%% call:
+%% add_dispatch_rule("dev/wmtrace", "priv/traces")
+add_dispatch_rule(BasePath, TracePath) when is_list(BasePath),
+ is_list(TracePath) ->
+ %% '*' makes the route match everything under BasePath; the trace
+ %% directory reaches init/1 via the route options proplist.
+ Parts = string:tokens(BasePath, "/"),
+ webmachine_router:add_route({Parts ++ ['*'], ?MODULE, [{trace_dir, TracePath}]}).
+
+%% @spec remove_dispatch_rules() -> ok
+%% @doc Remove all dispatch rules pointing to wmtrace_resource.
+remove_dispatch_rules() ->
+ webmachine_router:remove_resource(?MODULE).
+
+%%
+%% Resource
+%%
+
+%% Liveness callback required of webmachine resources.
+ping(ReqData, State) ->
+ {pong, ReqData, State}.
+
+%% Resource init: the trace_dir option is mandatory and must name an
+%% existing directory. The tagged tuples make a failed assertion
+%% readable in the resulting crash report.
+init(Config) ->
+ {trace_dir, TraceDir} = proplists:lookup(trace_dir, Config),
+ {trace_dir_exists, true} = {trace_dir_exists, filelib:is_dir(TraceDir)},
+ {ok, #ctx{trace_dir=TraceDir}}.
+
+%% Dispatch on the extra path below the route: "" is the trace list
+%% (redirecting to add a trailing slash if missing), the static
+%% asset paths map to files under webmachine's priv dir, and any
+%% other token is treated as a trace file name inside trace_dir.
+resource_exists(RD, Ctx) ->
+ case wrq:disp_path(RD) of
+ [] ->
+ case lists:reverse(wrq:raw_path(RD)) of
+ [$/|_] ->
+ {true, RD, Ctx};
+ _ ->
+ %% Redirect so relative asset links resolve correctly.
+ {{halt, 303},
+ wrq:set_resp_header("Location",
+ wrq:raw_path(RD)++"/",
+ RD),
+ Ctx}
+ end;
+ ?MAP_EXTERNAL ->
+ {filelib:is_file(wm_path(?MAP_INTERNAL)), RD, Ctx};
+ ?SCRIPT_EXTERNAL ->
+ {filelib:is_file(wm_path(?SCRIPT_INTERNAL)), RD, Ctx};
+ ?STYLE_EXTERNAL ->
+ {filelib:is_file(wm_path(?STYLE_INTERNAL)), RD, Ctx};
+ TraceName ->
+ TracePath = filename:join([Ctx#ctx.trace_dir, TraceName]),
+ {filelib:is_file(TracePath), RD, Ctx#ctx{trace=TracePath}}
+ end.
+
+%% Resolve a static asset name inside webmachine's priv/trace dir.
+wm_path(File) ->
+ filename:join([code:priv_dir(webmachine), "trace", File]).
+
+%% Static assets get their specific media types; everything else (the
+%% trace list and individual traces) is rendered as HTML.
+content_types_provided(RD, Ctx) ->
+ case wrq:disp_path(RD) of
+ ?MAP_EXTERNAL ->
+ {[{"image/png", produce_map}], RD, Ctx};
+ ?SCRIPT_EXTERNAL ->
+ {[{"text/javascript", produce_javascript}], RD, Ctx};
+ ?STYLE_EXTERNAL ->
+ {[{"text/css", produce_css}], RD, Ctx};
+ _ ->
+ {[{"text/html", produce_html}], RD, Ctx}
+ end.
+
+%% With no specific trace selected, list the trace files in trace_dir
+%% in reverse name order; otherwise render the decoded trace.
+%% NOTE(review): "\." inside an Erlang string is just "."; the regex
+%% is effectively ".*.wmtrace" -- confirm this permissiveness is
+%% intentional.
+produce_html(RD, Ctx=#ctx{trace=undefined}) ->
+ Dir = filename:absname(Ctx#ctx.trace_dir),
+ Files = lists:reverse(
+ lists:sort(
+ filelib:fold_files(Dir,
+ ".*\.wmtrace",
+ false,
+ fun(F, Acc) ->
+ [filename:basename(F)|Acc]
+ end,
+ []))),
+ {trace_list_html(Dir, Files), RD, Ctx};
+produce_html(RD, Ctx) ->
+ %% Trace files are written as consultable Erlang terms.
+ Filename = filename:absname(Ctx#ctx.trace),
+ {ok, Data} = file:consult(Filename),
+ {trace_html(Filename, Data), RD, Ctx}.
+
+%% Render the trace-listing page: one relative link per trace file.
+trace_list_html(Dir, Files) ->
+ html([],
+ [head([],
+ title([], ["Webmachine Trace List for ",Dir])),
+ body([],
+ [h1([], ["Traces in ",Dir]),
+ ul([],
+ [ li([], a([{"href", F}], F)) || F <- Files ])
+ ])
+ ]).
+
+%% Render the single-trace viewer page: the decoded request, response
+%% and decision trace are embedded as JavaScript variables, and the
+%% static wmtrace.js/wmtrace.css assets drive the decision-map UI
+%% (canvas, preview and info panels below).
+trace_html(Filename, Data) ->
+ {Request, Response, Trace} = encode_trace(Data),
+ html([],
+ [head([],
+ [title([],["Webmachine Trace ",Filename]),
+ linkblock([{"rel", "stylesheet"},
+ {"type", "text/css"},
+ {"href", "static/wmtrace.css"}],
+ []),
+ script([{"type", "text/javascript"},
+ {"src", "static/wmtrace.js"}],
+ []),
+ script([{"type", "text/javascript"}],
+ mochiweb_html:escape(
+ lists:flatten(
+ ["var request=",Request,";\n"
+ "var response=",Response,";\n"
+ "var trace=",Trace,";"])))
+ ]),
+ body([],
+ [divblock([{"id", "zoompanel"}],
+ [button([{"id", "zoomout"}], ["zoom out"]),
+ button([{"id", "zoomin"}], ["zoom in"])
+ ]),
+ canvas([{"id", "v3map"},
+ {"width", "3138"},
+ {"height", "2184"}],
+ []),
+ divblock([{"id", "sizetest"}], []),
+ divblock([{"id", "preview"}],
+ [divblock([{"id", "previewid"}],[]),
+ ul([{"id", "previewcalls"}], [])
+ ]),
+ divblock([{"id", "infopanel"}],
+ [divblock([{"id", "infocontrols"}],
+ [divblock([{"id", "requesttab"},
+ {"class", "selectedtab"}],"Q"),
+ divblock([{"id", "responsetab"}], "R"),
+ divblock([{"id", "decisiontab"}], "D")
+ ]),
+ divblock([{"id", "requestdetail"}],
+ [divblock([],
+ [span([{"id", "requestmethod"}], []),
+ " ",
+ span([{"id", "requestpath"}], [])]),
+ ul([{"id", "requestheaders"}], []),
+ divblock([{"id", "requestbody"}],
+ [])
+ ]),
+ divblock([{"id", "responsedetail"}],
+ [divblock([{"id", "responsecode"}], []),
+ ul([{"id", "responseheaders"}], []),
+ divblock([{"id", "responsebody"}], [])
+ ]),
+ divblock([{"id", "decisiondetail"}],
+ [divblock([],
+ ["Decision: ",
+ select([{"id", "decisionid"}], [])
+ ]),
+ divblock([],
+ ["Calls:",
+ select([{"id", "decisioncalls"}], [])
+ ]),
+ divblock([], "Input:"),
+ pre([{"id", "callinput"}], []),
+ divblock([], "Output:"),
+ pre([{"id", "calloutput"}], [])
+ ])
+ ])
+ ])
+ ]).
+
+%% Serve the static JS/map/CSS assets from webmachine's priv dir.
+produce_javascript(RD, Ctx) ->
+ {ok, Script} = file:read_file(wm_path(?SCRIPT_INTERNAL)),
+ {Script, RD, Ctx}.
+
+produce_map(RD, Ctx) ->
+ {ok, Map} = file:read_file(wm_path(?MAP_INTERNAL)),
+ {Map, RD, Ctx}.
+
+produce_css(RD, Ctx) ->
+ {ok, Script} = file:read_file(wm_path(?STYLE_INTERNAL)),
+ {Script, RD, Ctx}.
+
+%%
+%% Trace Encoding
+%%
+
+%% Turn raw trace terms into three JSON strings: the initial request,
+%% the final response, and the per-decision call trace.
+encode_trace(Data) ->
+ {Request, Response, Trace} = aggregate_trace(Data),
+ {mochijson:encode(encode_request(Request)),
+ mochijson:encode(encode_response(Response)),
+ mochijson:encode({array, [ encode_trace_part(P) || P <- Trace ]})}.
+
+%% Fold the raw trace into {Request, Response, [{Decision, Calls}]};
+%% the response defaults to 500 if one is never captured.
+aggregate_trace(RawTrace) ->
+ {Request, Response, Trace} = lists:foldl(fun aggregate_trace_part/2,
+ {undefined, 500, []},
+ RawTrace),
+ {Request, Response, lists:reverse(Trace)}.
+
+%% Fold one trace event into the {Request, Response, Decisions}
+%% accumulator. Decisions and their calls accumulate in reverse.
+aggregate_trace_part({decision, Decision}, {Q, R, Acc}) ->
+ BDN = base_decision_name(Decision),
+ case Acc of
+ [{BDN,_}|_] -> {Q, R, Acc}; %% subdecision (ex. v3b13b)
+ _ ->
+ {Q, R, [{base_decision_name(Decision), []}|Acc]}
+ end;
+aggregate_trace_part({attempt, Module, Function, Args},
+ {Q, R, [{Decision,Calls}|Acc]}) ->
+ %% Record the attempt with a wmtrace_null placeholder result; a
+ %% later 'result' event for the same call fills it in.
+ {maybe_extract_request(Function, Args, Q),
+ R, [{Decision,[{Module, Function, Args, wmtrace_null}|Calls]}|Acc]};
+aggregate_trace_part({result, Module, Function, Result},
+ {Q, R, [{Decision,[{Module,Function,Args,_}|Calls]}|Acc]}) ->
+ {Q, maybe_extract_response(Function, Result, R),
+ [{Decision,[{Module, Function, Args, Result}|Calls]}|Acc]};
+aggregate_trace_part({not_exported, Module, Function, Args},
+ {Q, R, [{Decision,Calls}|Acc]}) ->
+ {Q, maybe_extract_response(Function, Args, R),
+ [{Decision,[{Module, Function, Args, wmtrace_not_exported}|Calls]}
+ |Acc]}.
+
+%% The ping callback's args carry the initial request data.
+maybe_extract_request(ping, [ReqData,_], _) ->
+ ReqData;
+maybe_extract_request(_, _, R) ->
+ R.
+
+%% finish_request carries the final response data, either in the
+%% callback args or in its result tuple.
+maybe_extract_response(finish_request, [ReqData,_], _) ->
+ ReqData;
+maybe_extract_response(finish_request, {_, ReqData, _}, _) ->
+ ReqData;
+maybe_extract_response(_, _, R) ->
+ R.
+
+%% Decision atoms look like 'v3b13' or 'v3b13b': strip the 'v3'
+%% prefix, then any trailing sub-step letter.
+base_decision_name(Decision) ->
+ [$v,$3|D] = atom_to_list(Decision), %% strip 'v3'
+ case lists:reverse(D) of
+ [A|RD] when A >= $a, A =< $z ->
+ lists:reverse(RD); %% strip 'b' off end of some
+ _ ->
+ D
+ end.
+
%% @doc Encode the traced request as a mochijson struct: method, raw
%% path, request headers, and body (when it was fetched).
encode_request(ReqData) when is_record(ReqData, wm_reqdata) ->
    {struct, [{"method", atom_to_list(
                           wrq:method(ReqData))},
              {"path", wrq:raw_path(ReqData)},
              {"headers", encode_headers(wrq:req_headers(ReqData))},
              {"body", case ReqData#wm_reqdata.req_body of
                           undefined -> [];
                           %% atom markers (e.g. not_fetched_yet) are
                           %% rendered as their name
                           Body when is_atom(Body) ->
                               atom_to_list(Body);
                           Body -> lists:flatten(io_lib:format("~s", [Body]))
                       end}]}.

%% @doc Encode the traced response as a mochijson struct: response code
%% (as a string), response headers, and flattened response body.
encode_response(ReqData) ->
    {struct, [{"code", integer_to_list(
                         wrq:response_code(ReqData))},
              {"headers", encode_headers(wrq:resp_headers(ReqData))},
              {"body", lists:flatten(io_lib:format("~s", [wrq:resp_body(ReqData)]))}]}.
+
%% @doc Render a header collection as a mochijson struct. Accepts
%% either a proplist of {Name, Value} pairs or a mochiweb_headers
%% structure (which is first converted to a proplist).
encode_headers(Headers) when is_list(Headers) ->
    {struct, [Pair || {_Name, _Value} = Pair <- Headers]};
encode_headers(Headers) ->
    encode_headers(mochiweb_headers:to_list(Headers)).
+
%% @doc Encode one {Decision, Calls} pair as a mochijson struct. Calls
%% were accumulated newest-first by aggregate_trace_part/2, so reverse
%% them back into chronological order here.
encode_trace_part({Decision, Calls}) ->
    {struct, [{"d", Decision},
              {"calls",
               {array, [ {struct,
                          [{"module", Module},
                           {"function", Function},
                           {"input", encode_trace_io(Input)},
                           {"output", encode_trace_io(Output)}]}
                         || {Module, Function, Input, Output}
                                <- lists:reverse(Calls) ]}}]}.
+
%% @doc JSON-friendly rendering of a call's input or output: the null
%% placeholder maps to JSON null, the not-exported marker to a fixed
%% string, and any other term to its ~p representation.
encode_trace_io(wmtrace_null) ->
    null;
encode_trace_io(wmtrace_not_exported) ->
    "wmtrace_not_exported";
encode_trace_io(Term) ->
    lists:flatten(io_lib:format("~p", [Term])).
+
+%%
+%% HTML Building
+%%
+
%% Define a 2-arity element-builder function T(Attrs, Content) that
%% delegates to tag/3. ??T is the preprocessor's stringify operator, so
%% ?TAG(head) expands to: head(Attrs, Content) -> tag("head", Attrs, Content).
-define(TAG(T), T(Attrs, Content) ->
                       tag(??T, Attrs, Content)).

%% One builder function per HTML element used by the trace page markup.
?TAG(head).
?TAG(script).
?TAG(title).
?TAG(body).
?TAG(h1).
?TAG(ul).
?TAG(li).
?TAG(a).
?TAG(canvas).
?TAG(select).
?TAG(pre).
?TAG(span).
?TAG(button).
+
%% @doc Wrap Content in a complete XHTML 1.0 Transitional document.
%% Attrs are ignored because the <html> open tag is fixed (doctype plus
%% XHTML namespace and language attributes).
html(_Attrs, Content) ->
    Doctype = <<"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">">>,
    OpenTag = <<"<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">">>,
    [Doctype, OpenTag, Content, <<"</html>">>].
+
%% div/2 and link/2 builders written out by hand instead of via ?TAG,
%% since those names cannot be used directly as function heads here.
divblock(Attrs, Content) ->
    tag("div", Attrs, Content). %% div is a reserved word

linkblock(Attrs, Content) ->
    tag("link", Attrs, Content). %% link is a reserved word
+
%% @doc Render an element as an iolist: attribute pairs become
%% key="value" strings, and the atom 'empty' as Content produces a
%% self-closing tag instead of an open/close pair.
tag(Name, Attrs, Content) ->
    Rendered = [ [" ", Key, "=\"", Val, "\""] || {Key, Val} <- Attrs ],
    Close = case Content of
                empty -> "/>";
                _ -> [">", Content, "</", Name, ">"]
            end,
    ["<", Name, Rendered, Close].
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @copyright 2007-2009 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+-module(wrq).
+-author('Justin Sheehy <justin@basho.com>').
+
+-export([create/4, create/5,load_dispatch_data/7]).
+-export([method/1,scheme/1,version/1,peer/1,disp_path/1,path/1,raw_path/1,
+ path_info/1,response_code/1,req_cookie/1,req_qs/1,req_headers/1,
+ req_body/1,stream_req_body/2,resp_redirect/1,resp_headers/1,
+ resp_body/1,app_root/1,path_tokens/1, host_tokens/1, port/1,
+ base_uri/1]).
+-export([path_info/2,get_req_header/2,do_redirect/2,fresh_resp_headers/2,
+ get_resp_header/2,set_resp_header/3,set_resp_headers/2,
+ set_disp_path/2,set_req_body/2,set_resp_body/2,set_response_code/2,
+ merge_resp_headers/2,remove_resp_header/2,
+ append_to_resp_body/2,append_to_response_body/2, set_resp_range/2,
+ max_recv_body/1,set_max_recv_body/2,
+ get_cookie_value/2,get_qs_value/2,get_qs_value/3,set_peer/2,
+ add_note/3, get_notes/1]).
+
+% @type reqdata(). The opaque data type used for req/resp data structures.
+-include("wm_reqdata.hrl").
+-include("wm_reqstate.hrl").
+
+
%% @doc Build a fresh #wm_reqdata{} for an incoming request. create/4
%% defaults the scheme to http.
create(Method,Version,RawPath,Headers) ->
    create(Method,http,Version,RawPath,Headers).
%% Fields initialized to defined_* atoms/strings are placeholders that
%% later stages overwrite (create/1, the wm_req_srv init, dispatch).
create(Method,Scheme,Version,RawPath,Headers) ->
    create(#wm_reqdata{method=Method,scheme=Scheme,version=Version,
                       raw_path=RawPath,req_headers=Headers,
                       wm_state=defined_on_call,
                       path="defined_in_create",
                       req_cookie=defined_in_create,
                       req_qs=defined_in_create,
                       peer="defined_in_wm_req_srv_init",
                       req_body=not_fetched_yet,
                       max_recv_body=(1024*(1024*1024)),
                       % Stolen from R13B03 inet_drv.c's TCP_MAX_PACKET_SIZE definition
                       max_recv_hunk=(64*(1024*1024)),
                       app_root="defined_in_load_dispatch_data",
                       path_info=orddict:new(),
                       path_tokens=defined_in_load_dispatch_data,
                       disp_path=defined_in_load_dispatch_data,
                       resp_redirect=false, resp_headers=mochiweb_headers:empty(),
                       resp_body = <<>>, response_code=500,
                       resp_range = follow_request,
                       notes=[]}).
%% Derive the raw-path-dependent fields: path, parsed cookie and parsed
%% query string.
create(RD = #wm_reqdata{raw_path=RawPath}) ->
    {Path, _, _} = mochiweb_util:urlsplit_path(RawPath),
    Cookie = case get_req_header("cookie", RD) of
                 undefined -> [];
                 Value -> mochiweb_cookies:parse_cookie(Value)
             end,
    {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
    ReqQS = mochiweb_util:parse_qs(QueryString),
    RD#wm_reqdata{path=Path,req_cookie=Cookie,req_qs=ReqQS}.
%% @doc Merge the results of dispatching into the request data: path
%% bindings, host/path tokens, port, app root and dispatched path.
load_dispatch_data(PathInfo, HostTokens, Port, PathTokens, AppRoot,
                   DispPath, RD) ->
    RD#wm_reqdata{path_info=PathInfo,host_tokens=HostTokens,
                  port=Port,path_tokens=PathTokens,
                  app_root=AppRoot,disp_path=DispPath}.
+
%% @doc The HTTP method of the request (an atom, e.g. 'GET').
method(_RD = #wm_reqdata{method=Method}) -> Method.

%% @doc The URI scheme (http or https; see create/4 for the default).
scheme(_RD = #wm_reqdata{scheme=Scheme}) -> Scheme.
+
%% @doc The HTTP version as a {Major, Minor} pair of integers. The
%% pattern and guards assert the stored shape, crashing with
%% function_clause on anything else (same effect as the previous
%% is_tuple/size/element guard chain, but without the discouraged
%% generic size/1 BIF).
version(_RD = #wm_reqdata{version={Major, Minor}=Version})
  when is_integer(Major), is_integer(Minor) -> Version.
+
%% The accessors below pattern-match the #wm_reqdata{} record; their
%% guards double as assertions that the field was initialized to the
%% expected type (callers crash with function_clause otherwise).

%% @doc The client address as a string.
peer(_RD = #wm_reqdata{peer=Peer}) when is_list(Peer) -> Peer.

app_root(_RD = #wm_reqdata{app_root=AR}) when is_list(AR) -> AR.

% all three paths below are strings
disp_path(_RD = #wm_reqdata{disp_path=DP}) when is_list(DP) -> DP.

path(_RD = #wm_reqdata{path=Path}) when is_list(Path) -> Path.

raw_path(_RD = #wm_reqdata{raw_path=RawPath}) when is_list(RawPath) -> RawPath.

path_info(_RD = #wm_reqdata{path_info=PathInfo}) -> PathInfo. % dict

path_tokens(_RD = #wm_reqdata{path_tokens=PathT}) -> PathT. % list of strings

host_tokens(_RD = #wm_reqdata{host_tokens=HostT}) -> HostT. % list of strings

port(_RD = #wm_reqdata{port=Port}) -> Port. % integer

%% The code may be stored bare or as {Code, ReasonPhrase} (see
%% set_response_code/2); only the integer code is returned either way.
response_code(_RD = #wm_reqdata{response_code={C,_ReasonPhrase}}) when is_integer(C) -> C;
response_code(_RD = #wm_reqdata{response_code=C}) when is_integer(C) -> C.

%% @doc The parsed cookie, as produced by mochiweb_cookies:parse_cookie/1.
req_cookie(_RD = #wm_reqdata{req_cookie=C}) when is_list(C) -> C. % string

%% @spec req_qs(reqdata()) -> [{Key, Value}]
req_qs(_RD = #wm_reqdata{req_qs=QS}) when is_list(QS) -> QS.

req_headers(_RD = #wm_reqdata{req_headers=ReqH}) -> ReqH. % mochiheaders
+
%% @doc Fetch the request body (at most max_recv_body bytes) via the
%% webmachine_request parameterized module. NOTE: the updated request
%% state is stashed in the process dictionary under tmp_reqstate for
%% the surrounding request machinery to pick up.
req_body(_RD = #wm_reqdata{wm_state=ReqState0,max_recv_body=MRB}) ->
    Req = webmachine_request:new(ReqState0),
    {ReqResp, ReqState} = Req:req_body(MRB),
    put(tmp_reqstate, ReqState),
    maybe_conflict_body(ReqResp).

%% @doc Stream the request body in hunks of at most MaxHunk bytes; has
%% the same tmp_reqstate side effect as req_body/1. Mixing this with
%% req_body/1 on one request is an error (see maybe_conflict_body/1).
stream_req_body(_RD = #wm_reqdata{wm_state=ReqState0}, MaxHunk) ->
    Req = webmachine_request:new(ReqState0),
    {ReqResp, ReqState} = Req:stream_req_body(MaxHunk),
    put(tmp_reqstate, ReqState),
    maybe_conflict_body(ReqResp).

%% Accessor/setter for the maximum body size req_body/1 will accept.
max_recv_body(_RD = #wm_reqdata{max_recv_body=X}) when is_integer(X) -> X.

set_max_recv_body(X, RD) when is_integer(X) -> RD#wm_reqdata{max_recv_body=X}.
+
%% @doc Pass a body-fetch result through, exiting on the two failure
%% markers: a req_body/stream_req_body mix-up on the same request, or a
%% body larger than the configured maximum.
maybe_conflict_body(stream_conflict) ->
    exit("wrq:req_body and wrq:stream_req_body conflict");
maybe_conflict_body({error, req_body_too_large}) ->
    exit("request body too large");
maybe_conflict_body(BodyResponse) ->
    BodyResponse.
+
%% @doc Whether this response was marked as a redirect (do_redirect/2).
resp_redirect(_RD = #wm_reqdata{resp_redirect=true}) -> true;
resp_redirect(_RD = #wm_reqdata{resp_redirect=false}) -> false.

resp_headers(_RD = #wm_reqdata{resp_headers=RespH}) -> RespH. % mochiheaders

%% @doc The response body. The stream/writer wrapper tuples are passed
%% through untouched; any other non-binary value (an iolist) is
%% normalized to a binary.
resp_body(_RD = #wm_reqdata{resp_body=undefined}) -> undefined;
resp_body(_RD = #wm_reqdata{resp_body={stream,X}}) -> {stream,X};
resp_body(_RD = #wm_reqdata{resp_body={known_length_stream,X,Y}}) -> {known_length_stream,X,Y};
resp_body(_RD = #wm_reqdata{resp_body={stream,X,Y}}) -> {stream,X,Y};
resp_body(_RD = #wm_reqdata{resp_body={writer,X}}) -> {writer,X};
resp_body(_RD = #wm_reqdata{resp_body=RespB}) when is_binary(RespB) -> RespB;
resp_body(_RD = #wm_reqdata{resp_body=RespB}) -> iolist_to_binary(RespB).

%% --

%% @doc Look up one dispatch binding by atom key. Returns 'undefined'
%% when the key is absent.
path_info(Key, RD) when is_atom(Key) ->
    case orddict:find(Key, path_info(RD)) of
        {ok, Value} when is_list(Value); is_integer(Value) ->
            Value; % string (for host or path match)
                   % or integer (for port match)
        error -> undefined
    end.
+
%% @doc Look up a request header value via mochiweb_headers.
get_req_header(HdrName, RD) -> % string->string
    mochiweb_headers:get_value(HdrName, req_headers(RD)).

%% Mark (or unmark) the response as a redirect; read by resp_redirect/1.
do_redirect(true, RD) -> RD#wm_reqdata{resp_redirect=true};
do_redirect(false, RD) -> RD#wm_reqdata{resp_redirect=false}.

set_peer(P, RD) when is_list(P) -> RD#wm_reqdata{peer=P}. % string

set_disp_path(P, RD) when is_list(P) -> RD#wm_reqdata{disp_path=P}. % string

set_req_body(Body, RD) -> RD#wm_reqdata{req_body=Body}.

set_resp_body(Body, RD) -> RD#wm_reqdata{resp_body=Body}.

%% Accepts either a bare integer code or a {Code, ReasonPhrase} pair;
%% response_code/1 extracts the integer in both cases.
set_response_code({Code, _ReasonPhrase}=CodeAndReason, RD) when is_integer(Code) ->
    RD#wm_reqdata{response_code=CodeAndReason};
set_response_code(Code, RD) when is_integer(Code) ->
    RD#wm_reqdata{response_code=Code}.

get_resp_header(HdrName, _RD=#wm_reqdata{resp_headers=RespH}) ->
    mochiweb_headers:get_value(HdrName, RespH).
%% Set a single response header (mochiweb_headers:enter/3).
set_resp_header(K, V, RD=#wm_reqdata{resp_headers=RespH})
  when is_list(K),is_list(V) ->
    RD#wm_reqdata{resp_headers=mochiweb_headers:enter(K, V, RespH)}.
%% Enter each of Hdrs into the existing response headers.
set_resp_headers(Hdrs, RD=#wm_reqdata{resp_headers=RespH}) ->
    F = fun({K, V}, Acc) -> mochiweb_headers:enter(K, V, Acc) end,
    RD#wm_reqdata{resp_headers=lists:foldl(F, RespH, Hdrs)}.
%% Discard all current response headers and replace them with Hdrs.
fresh_resp_headers(Hdrs, RD) ->
    F = fun({K, V}, Acc) -> mochiweb_headers:enter(K, V, Acc) end,
    RD#wm_reqdata{resp_headers=lists:foldl(F, mochiweb_headers:empty(), Hdrs)}.
%% Remove one response header by round-tripping through a proplist.
remove_resp_header(K, RD=#wm_reqdata{resp_headers=RespH}) when is_list(K) ->
    RD#wm_reqdata{resp_headers=mochiweb_headers:from_list(
                                 proplists:delete(K,
                                                  mochiweb_headers:to_list(RespH)))}.

%% Merge Hdrs into the response headers using mochiweb_headers:insert/3
%% (NOTE(review): presumably insert combines with existing values,
%% unlike enter above — confirm against mochiweb_headers).
merge_resp_headers(Hdrs, RD=#wm_reqdata{resp_headers=RespH}) ->
    F = fun({K, V}, Acc) -> mochiweb_headers:insert(K, V, Acc) end,
    NewHdrs = lists:foldl(F, RespH, Hdrs),
    RD#wm_reqdata{resp_headers=NewHdrs}.

%% Append Data (a binary, or an iolist which is first converted) to the
%% current binary response body.
append_to_resp_body(Data, RD) -> append_to_response_body(Data, RD).
append_to_response_body(Data, RD=#wm_reqdata{resp_body=RespB}) ->
    case is_binary(Data) of
        true ->
            Data0 = RespB,
            Data1 = <<Data0/binary,Data/binary>>,
            RD#wm_reqdata{resp_body=Data1};
        false -> % MUST BE an iolist! else, fail.
            append_to_response_body(iolist_to_binary(Data), RD)
    end.
+
-spec set_resp_range(follow_request | ignore_request, #wm_reqdata{}) -> #wm_reqdata{}.
%% follow_request : range response for range request, normal response for non-range one
%% ignore_request : normal response for either range request or non-range one
set_resp_range(RespRange, RD)
  when RespRange =:= follow_request orelse RespRange =:= ignore_request ->
    RD#wm_reqdata{resp_range = RespRange}.
+
%% @doc Value of the named cookie, or 'undefined' when absent.
get_cookie_value(Key, RD) when is_list(Key) -> % string
    case lists:keyfind(Key, 1, req_cookie(RD)) of
        false -> undefined;
        {Key, Value} -> Value
    end.

%% @doc Value of the named query-string parameter, or 'undefined'.
get_qs_value(Key, RD) when is_list(Key) -> % string
    case lists:keyfind(Key, 1, req_qs(RD)) of
        false -> undefined;
        {Key, Value} -> Value
    end.

%% @doc Like get_qs_value/2 but with a caller-supplied default.
get_qs_value(Key, Default, RD) when is_list(Key) ->
    case lists:keyfind(Key, 1, req_qs(RD)) of
        false -> Default;
        {Key, Value} -> Value
    end.

%% Notes are an application-defined {Key, Value} scratch list carried on
%% the request; newest note first.
add_note(K, V, RD) -> RD#wm_reqdata{notes=[{K, V} | RD#wm_reqdata.notes]}.

get_notes(RD) -> RD#wm_reqdata.notes.

%% @doc scheme://host[:port] for this request; the port suffix is
%% omitted for each scheme's default port (see port_string/2).
base_uri(RD) ->
    Scheme = erlang:atom_to_list(RD#wm_reqdata.scheme),
    Host = string:join(RD#wm_reqdata.host_tokens, "."),
    PortString = port_string(RD#wm_reqdata.scheme, RD#wm_reqdata.port),
    Scheme ++ "://" ++ Host ++ PortString.
+
%% @doc The ":Port" suffix for a base URI, or "" when Port is the
%% scheme's default (80 for http, 443 for https).
port_string(http, 80) ->
    "";
port_string(https, 443) ->
    "";
port_string(_Scheme, Port) ->
    ":" ++ erlang:integer_to_list(Port).
+
+%%
+%% Tests
+%%
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
%% Test helper: build an HTTP/1.1 request record; make_wrq/3 defaults
%% the scheme to http.
make_wrq(Method, RawPath, Headers) ->
    make_wrq(Method, http, RawPath, Headers).

make_wrq(Method, Scheme, RawPath, Headers) ->
    create(Method, Scheme, {1,1}, RawPath, mochiweb_headers:from_list(Headers)).

%% Exercise the simple accessors against a hand-built request.
accessor_test() ->
    R0 = make_wrq('GET', "/foo?a=1&b=2", [{"Cookie", "foo=bar"}]),
    R = set_peer("127.0.0.1", R0),
    ?assertEqual('GET', method(R)),
    ?assertEqual({1,1}, version(R)),
    ?assertEqual("/foo", path(R)),
    ?assertEqual("/foo?a=1&b=2", raw_path(R)),
    ?assertEqual([{"a", "1"}, {"b", "2"}], req_qs(R)),
    ?assertEqual({"1", "2"}, {get_qs_value("a", R), get_qs_value("b", R)}),
    ?assertEqual("3", get_qs_value("c", "3", R)),
    ?assertEqual([{"foo", "bar"}], req_cookie(R)),
    ?assertEqual("bar", get_cookie_value("foo", R)),
    ?assertEqual("127.0.0.1", peer(R)).
+
%% Dispatch a request through webmachine_dispatcher and check the
%% dispatch-derived fields (app root, port, base URI).
simple_dispatch_test() ->
    R0 = make_wrq('GET', "/foo?a=1&b=2", [{"Cookie", "foo=bar"}]),
    R1 = set_peer("127.0.0.1", R0),
    {_, _, HostTokens, Port, PathTokens, Bindings, AppRoot, StringPath} =
        webmachine_dispatcher:dispatch("127.0.0.1", "/foo",
                                       [{["foo"], foo_resource, []}], R1),
    R = load_dispatch_data(Bindings,
                           HostTokens,
                           Port,
                           PathTokens,
                           AppRoot,
                           StringPath,
                           R1),
    ?assertEqual(".", app_root(R)),
    ?assertEqual(80, port(R)),
    ?assertEqual("http://127.0.0.1", base_uri(R)).

%% base_uri/1 must omit each scheme's default port (80 http, 443 https)
%% and keep any explicit non-default port.
base_uri_test_() ->
    Make_req =
        fun(Scheme, Host) ->
                R0 = make_wrq('GET', Scheme, "/foo?a=1&b=2",
                              [{"Cookie", "foo=bar"}]),
                R1 = set_peer("127.0.0.1", R0),
                DispatchRule = {["foo"], foo_resource, []},
                {_, _, HostTokens, Port, PathTokens,
                 Bindings, AppRoot,StringPath} =
                    webmachine_dispatcher:dispatch(Host, "/foo", [DispatchRule],
                                                   R1),
                load_dispatch_data(Bindings,
                                   HostTokens,
                                   Port,
                                   PathTokens,
                                   AppRoot,
                                   StringPath,
                                   R1)
        end,
    Tests = [{{http, "somewhere.com:8080"}, "http://somewhere.com:8080"},
             {{https, "somewhere.com:8080"}, "https://somewhere.com:8080"},

             {{http, "somewhere.com"}, "http://somewhere.com"},
             {{https, "somewhere.com"}, "https://somewhere.com"},

             {{http, "somewhere.com:80"}, "http://somewhere.com"},
             {{https, "somewhere.com:443"}, "https://somewhere.com"},
             {{https, "somewhere.com:80"}, "https://somewhere.com:80"},
             {{http, "somewhere.com:443"}, "http://somewhere.com:443"}],
    [ ?_assertEqual(Expect, base_uri(Make_req(Scheme, Host)))
      || {{Scheme, Host}, Expect} <- Tests ].
+
+-endif.
--- /dev/null
#!/bin/sh
# Start an interactive dev node: SASL boot, code reloader, webmachine.
# Run from the script's own directory so the relative ebin paths resolve.
# dirname/$0 and $PWD are quoted so paths containing spaces survive word
# splitting; abort if the cd fails instead of launching from elsewhere.
cd "$(dirname "$0")" || exit 1
# The deps glob is intentionally left unquoted so each deps/*/ebin
# directory expands into its own -pa entry.
exec erl -pa "$PWD"/ebin "$PWD"/deps/*/ebin -boot start_sasl -s reloader -s webmachine
--- /dev/null
#!/bin/sh
# Start a webmachine node with the SASL boot script (no code reloader).
# Run from the script's own directory so the relative ebin paths resolve.
# dirname/$0 and $PWD are quoted so paths containing spaces survive word
# splitting; abort if the cd fails instead of launching from elsewhere.
cd "$(dirname "$0")" || exit 1
# The deps glob is intentionally left unquoted so each deps/*/ebin
# directory expands into its own -pa entry.
exec erl -pa "$PWD"/ebin "$PWD"/deps/*/ebin -boot start_sasl -s webmachine
--- /dev/null
+%% @author Justin Sheehy <justin@basho.com>
+%% @author Andy Gross <andy@basho.com>
+%% @copyright 2007-2010 Basho Technologies
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+-module(etag_test).
+
+
+-ifdef(EQC).
+
+-include("wm_reqdata.hrl").
+-include_lib("eqc/include/eqc.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(QC_OUT(P),
+ eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)).
+
%% @doc Remove duplicates from L (by =:= membership), keeping the first
%% occurrence of each element and preserving the original order.
unique(L) ->
    dedup(L, []).

%% Walk the list with an accumulator of already-seen elements kept in
%% reverse order; skip anything encountered before.
dedup([], Seen) ->
    lists:reverse(Seen);
dedup([H | T], Seen) ->
    case lists:member(H, Seen) of
        true -> dedup(T, Seen);
        false -> dedup(T, [H | Seen])
    end.
+
%% @doc The test's ETag for a binary: its CRC32 as a decimal string.
etag(Bin) ->
    integer_to_list(erlang:crc32(Bin)).

%% @doc Render a list of binaries as an If-(None-)Match header value:
%% comma-separated quoted etags, or the wildcard "*" for the empty list.
etag_list([]) ->
    "*";
etag_list(Bins) ->
    string:join([[$", etag(B), $"] || B <- Bins], ",").

%% @doc PUT NewVal to the test resource with the given conditional
%% header, retrying up to Count times when the server closes the socket
%% prematurely; returns {ok, Result}, or 'error' once retries run out.
%% NOTE(review): any httpc error other than socket_closed_remotely will
%% crash with case_clause — acceptable in a test helper, where the crash
%% fails the property.
http_request(_Match, _IfVals, _NewVal, 0) ->
    error;
http_request(Match, IfVals, NewVal, Count) ->
    case httpc:request(put, {"http://localhost:12000/etagtest/foo",
                             [{Match, etag_list(IfVals)}],
                             "binary/octet-stream",
                             NewVal},
                       [], []) of
        {ok, Result} ->
            {ok, Result};
        {error, socket_closed_remotely} ->
            io:format(user, "Retry!\n", []),
            http_request(Match, IfVals, NewVal, Count-1)
    end.
+
%% The QuickCheck property: for random current/new values and a random
%% set of If-Match / If-None-Match etags, a conditional PUT must return
%% the status code predicted by expected_response_code/3.
etag_prop() ->
    ?LET({AllVals, Match}, {non_empty(list(binary())), oneof(["If-Match", "If-None-Match"])},
         ?FORALL({IfVals0, CurVal, NewVal},
                 {list(oneof(AllVals)), oneof(AllVals), oneof(AllVals)},
                 begin
                     %% publish the resource's current etag via the ETS
                     %% table that generate_etag/2 reads
                     ets:insert(?MODULE, [{etag, etag(CurVal)}]),
                     IfVals = unique(IfVals0),
                     {ok, Result} = http_request(Match, IfVals, NewVal, 3),
                     %% Result is {StatusLine, Headers, Body}; the code
                     %% is the second element of the status line
                     Code = element(2, element(1, Result)),
                     ExpectedCode =
                         expected_response_code(Match,
                                                IfVals,
                                                lists:member(CurVal, IfVals)),
                     equals(ExpectedCode, Code)
                 end)).
+
+expected_response_code("If-Match", _, true) ->
+ 204;
+expected_response_code("If-Match", [], false) ->
+ 204;
+expected_response_code("If-Match", _, false) ->
+ 412;
+expected_response_code("If-None-Match", _, true) ->
+ 412;
+expected_response_code("If-None-Match", [], false) ->
+ 412;
+expected_response_code("If-None-Match", _, false) ->
+ 204.
+
%% EUnit wrapper: run the QuickCheck property for 10 seconds (12s EUnit
%% timeout) against a locally started webmachine instance.
etag_test_() ->
    {spawn,
     [{setup,
       fun setup/0,
       fun cleanup/1,
       [
        {timeout, 12,
         ?_assert(eqc:quickcheck(eqc:testing_time(10, ?QC_OUT(etag_prop()))))}
       ]}]}.

%% Create the public ETS table holding the current etag, then start a
%% webmachine listener on port 12000 dispatching /etagtest/* to this
%% module (which doubles as the resource under test).
setup() ->
    %% Setup ETS table to hold current etag value
    ets:new(?MODULE, [named_table, public]),

    %% Spin up webmachine
    WebConfig = [{ip, "0.0.0.0"}, {port, 12000},
                 {dispatch, [{["etagtest", '*'], ?MODULE, []}]}],
    {ok, Pid0} = webmachine_sup:start_link(),
    {ok, Pid1} = webmachine_mochiweb:start(WebConfig),
    link(Pid1),
    {Pid0, Pid1}.

%% Kill the processes started by setup/0 (unlink first so the killing
%% test process is not taken down with them).
cleanup({Pid0, Pid1}) ->
    %% clean up
    unlink(Pid0),
    exit(Pid0, kill),
    unlink(Pid1),
    exit(Pid1, kill).
+
%% webmachine resource callbacks — this test module is itself the
%% resource dispatched at /etagtest/*.

init([]) ->
    {ok, undefined}.

%% Only conditional PUTs are exercised by the property.
allowed_methods(ReqData, Context) ->
    {['PUT'], ReqData, Context}.

content_types_accepted(ReqData, Context) ->
    {[{"binary/octet-stream", on_put}], ReqData, Context}.

%% Accept the PUT body without storing anything.
on_put(ReqData, Context) ->
    {ok, ReqData, Context}.

%% The current ETag lives in the ETS table created by setup/0 and is
%% updated by etag_prop/0 before every request.
generate_etag(ReqData, Context) ->
    case ets:lookup(?MODULE, etag) of
        [] ->
            {undefined, ReqData, Context};
        [{etag, ETag}] ->
            {ETag, ReqData, Context}
    end.

ping(ReqData, State) ->
    {pong, ReqData, State}.
+
+-endif.
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine blogging" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>other writing about Webmachine</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>What else can I read?</h3>
+
+<p>
+In addition to the documentation on this site and the source code on bitbucket, some useful Webmachine writing can be found on blogs and elsewhere. A small sampling of these is here:
+</p>
+<p>
+The <a href="http://blog.therestfulway.com/">restful way</a> blog is almost entirely dedicated to Webmachine.
+</p>
+<p>
+The <a href="http://blog.beerriot.com/">BeerRiot blog</a> contains multiple topics, including a wealth of examples and discussion of Webmachine.
+</p>
+<p>
+The blog of <a href="http://blog.argv0.net/">Andy Gross</a> has some
+useful hints and tutorials.
+</p>
+<p>
+Paul Mineiro posted about
+<a href="http://dukesoferl.blogspot.com/2009/08/dynamically-loading-webmachine.html">dynamically loading webmachine resources</a>.
+</p>
+<p>
+<a href="http://weblog.hypotheticalabs.com/?page_id=413">Kevin Smith</a>
+teaches an excellent Erlang training class which often culminates with
+development of applications in Webmachine.
+</p>
+<p>
+There is a fairly low traffic
+<a href="http://lists.therestfulway.com/mailman/listinfo/webmachine_lists.therestfulway.com">mailing list</a>
+for technical discussion of Webmachine.
+</p>
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine contact info" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine contact information</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine contact information</h3>
+
+<p>
+Have questions about Webmachine that aren't answered here? We'd love
+to hear from you.
+</p>
+<p>
+Subscribers to the <a href="http://lists.therestfulway.com/mailman/listinfo/webmachine_lists.therestfulway.com">Webmachine mailing list</a> include both users and the core development team.
+</p>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+/* global reset */
*{ margin: 0; padding: 0; }
:focus, :active { outline: 0; }
+
+body { font: .9em Georgia, "Times New Roman", Sans-Serif; background: #fff url(../images/bg.gif) repeat-x; color: #333; }
+a { color: #3333FF; text-decoration: none; }
+img { border: 0; }
+h1 { float: left; margin: 20px 0 50px; font-size: 4em; color: #fff; }
+h2 { font-size: 2.4em; font-weight: normal; margin: 0 0 20px; }
+h2 a:hover { background: #3333FF; color: #fff; }
+.hr { color: #ccc; }
+p { margin: 5px 0 15px; line-height: 1.6em; }
+#content { margin: 0 auto; width: 900px; }
+#top { float: right; margin: 38px 0 30px 0;}
+ #top li { list-style: none; display: inline; }
+ #top li a { float: left; padding: 6px 20px; margin: 3px 2px 0 0; color: #3333FF; }
+ #top li a.current { color: #fff; background: #3333FF; }
+ #top li a:hover { background: #808080; color: #fff; }
+#intro { clear: both; padding: 15px 0 1px 20px; border: 1px solid #dedede; font-size: 1.3em; background: #eee; margin: 0 0 30px; }
+#left { float: left; width: 830px; margin: 0 0 15px; }
+#right { float: right; width: 0px; }
+#right h3 { border-bottom: 1px solid #ccc; margin: 0 0 10px; }
+#right img { margin: 0 3px 3px 0; border: 2px solid #eee; padding: 2px; }
+#right li { list-style: none; }
+ #right li a { display: block; border-bottom: 1px solid #ccc; padding: 5px 5px; }
+#footer { clear: both; padding: 15px 0; border-top: 1px solid #ccc; }
+ #r { float: right; }
+dt { font-weight:bold; }
+dd { margin: 0.5em 0 0.5em 1em; }
+
+/* table */
+
+.fwf {
+ font: 12px "Courier";
+ color: #111;
+}
+
+.lhcol {
+ width: 200px;
+}
+
+.x_check {
+ padding-left:3px;
+ font: 12px "Courier";
+}
+
+table {
+ width: 100%;
+ padding: 0;
+ border-spacing: 0px;
+ border-collapse: collapse;
+ margin: 5px;
+ margin-bottom: 15px;
+}
+
+th {
+ font: bold 12px "Georgia", Verdana, Arial, Helvetica, sans-serif;
+ /* color: #4f6b72; */
+ border: 1px solid #999;
+ letter-spacing: 2px;
+ text-transform: uppercase;
+ text-align: left;
+ padding: 6px 6px 6px 12px;
+ background: #ddd;
+}
+
+th.nobg {
+ border-top: 0;
+ border-left: 0;
+ border-right: 1px solid #999;
+ background: none;
+}
+
+td {
+ border: 1px solid #999;
+ background: #fff;
+ padding: 6px 6px 6px 12px;
+ vertical-align: top;
+ font: inherit;
+ color: #222;
+}
+
+
+td.alt {
+ background: #F5FAFA;
+ color: #797268;
+}
+
+th.spec {
+ border-left: 1px solid #999;
+ border-top: 0;
+ background: #fff url(images/bullet1.gif) no-repeat;
+ font: bold 10px "Trebuchet MS", Verdana, Arial, Helvetica, sans-serif;
+}
+
+th.specalt {
+ border-left: 1px solid #999;
+ border-top: 0;
+ background: #f5fafa url(images/bullet2.gif) no-repeat;
+ font: bold 10px "Trebuchet MS", Verdana, Arial, Helvetica, sans-serif;
+ color: #797268;
+}
+
--- /dev/null
+/* global reset */
*{ margin: 0; padding: 0; }
:focus, :active { outline: 0; }
+
+body { font: .9em Georgia, "Times New Roman", Sans-Serif; background: #fff url(../images/bg.gif) repeat-x; color: #333; }
+a { color: #3333FF; text-decoration: none; }
+img { border: 0; }
+h1 { float: left; margin: 20px 0 50px; font-size: 4em; color: #fff; }
+h2 { font-size: 2.4em; font-weight: normal; margin: 0 0 20px; }
+h2 a:hover { background: #3333FF; color: #fff; }
+.hr { color: #ccc; }
+p { margin: 5px 0 15px; line-height: 1.6em; }
+#content { margin: 0 auto; width: 900px; }
+#top { float: right; margin: 38px 0 30px 0;}
+ #top li { list-style: none; display: inline; }
+ #top li a { float: left; padding: 6px 20px; margin: 3px 2px 0 0; color: #3333FF; }
+ #top li a.current { color: #fff; background: #3333FF; }
+ #top li a:hover { background: #808080; color: #fff; }
+#intro { clear: both; padding: 15px 0 1px 20px; border: 1px solid #dedede; font-size: 1.3em; background: #eee; margin: 0 0 30px; }
+#left { float: left; width: 550px; margin: 0 0 15px; }
+#right { float: right; width: 280px; }
+#right h3 { border-bottom: 1px solid #ccc; margin: 0 0 10px; }
+#right img { margin: 0 3px 3px 0; border: 0 solid #eee; padding: 2px; }
+#right li { list-style: none; }
+ #right li a { display: block; border-bottom: 1px solid #ccc; padding: 5px 5px; }
+#footer { clear: both; padding: 15px 0; border-top: 1px solid #ccc; }
+ #r { float: right; }
+dt { font-weight:bold; }
+dd { margin: 0.5em 0 0.5em 1em; }
+
+/* table */
+
+.fwf {
+ font: 12px "Courier";
+ color: #111;
+}
+
+.lhcol {
+ width: 200px;
+}
+
+.x_check {
+ padding-left:3px;
+ font: 12px "Courier";
+}
+
+table {
+ width: 100%;
+ padding: 0;
+ border-spacing: 0px;
+ border-collapse: collapse;
+ margin: 5px;
+ margin-bottom: 15px;
+}
+
+th {
+ font: bold 12px "Georgia", Verdana, Arial, Helvetica, sans-serif;
+ /* color: #4f6b72; */
+ border: 1px solid #999;
+ letter-spacing: 2px;
+ text-transform: uppercase;
+ text-align: left;
+ padding: 6px 6px 6px 12px;
+ background: #ddd;
+}
+
+th.nobg {
+ border-top: 0;
+ border-left: 0;
+ border-right: 1px solid #999;
+ background: none;
+}
+
+td {
+ border: 1px solid #999;
+ background: #fff;
+ padding: 6px 6px 6px 12px;
+ vertical-align: top;
+ font: inherit;
+ color: #222;
+}
+
+
+td.alt {
+ background: #F5FAFA;
+ color: #797268;
+}
+
+th.spec {
+ border-left: 1px solid #999;
+ border-top: 0;
+ background: #fff url(images/bullet1.gif) no-repeat;
+ font: bold 10px "Trebuchet MS", Verdana, Arial, Helvetica, sans-serif;
+}
+
+th.specalt {
+ border-left: 1px solid #999;
+ border-top: 0;
+ background: #f5fafa url(images/bullet2.gif) no-repeat;
+ font: bold 10px "Trebuchet MS", Verdana, Arial, Helvetica, sans-serif;
+ color: #797268;
+}
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine debugging" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine debugging</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine debugging</h3>
+
+<p>
+Having trouble with your webmachine resource? Try debugging it with
+the webmachine trace utility!
+</p>
+
+<h2>Basic Use</h2>
+
+<p>
+To get started, first change your resource's <code>init/1</code>
+function to return <code>{trace, Path}</code> instead of
+<code>ok</code>. For example:
+</p>
+
+<pre>
+init(Config) ->
+ {{trace, "/tmp"}, Config}. %% debugging code
+ %%{ok, Config}. %% regular code
+</pre>
+
+<p>
+Rebuild and reload the module, then in your webmachine application's
+shell, type:
+</p>
+
+<pre>
+wmtrace_resource:add_dispatch_rule("wmtrace", "/tmp").
+</pre>
+
+<p>
+Now issue the HTTP request that you're trying to debug. Once it has
+finished, point your browser at <code>http://YOUR_HOST/wmtrace/</code>.
+You'll see one or more trace files available for inspection.
+Click on one of them to navigate to the trace inspection utility,
+which will look something like this:
+</p>
+
+<p><img src="images/basic-trace-labeled.png" alt="Basic Trace" title="Basic Trace" /></p>
+
+<p>
+The example above is a trace of a resource that responded to a GET of
+the root path (as you can see in the <em>Detail Panel</em>), and ran
+all the way to a 200 response code (as you can see in the <em>Decision
+Graph</em>).
+</p>
+
+<p>
+The graph starts small, so you can get a quick view of the path your
+resource took through it. You can zoom in and out of the <em>Decision
+Graph</em> by using the <em>Zoom Controls</em>.
+</p>
+
+<p>
+The path your resource took through the graph is highlighted with a
+dark grey line. Hovering your mouse over the outlined decision points
+along that line will pop up a tool tip with the name of the decision,
+and the names of the functions in your resource that were called at
+that point. Clicking on a decision will flip the <em>Detail
+Panel</em> to the <em>Decision Tab</em>, where information about that
+decision will be displayed.
+</p>
+<p>
+If your resource traversed the graph all the way to one of the
+standard, non-error return codes, the box for that return code will be
+outlined. If your resource instead returned a non-standard or error
+code, a red circle will be drawn next to the last decision point your
+resource reached. Clicking on either of these types of endpoints will
+flip the <em>Detail Panel</em> to the <em>Response Tab</em>, where
+information about the response will be displayed.
+</p>
+<p>
+The <em>Detail Panel</em> has three tabs: Request (labeled
+<em>Q</em>), Response (labeled <em>R</em>), and Decision (labeled
+<em>D</em>). Clicking each of these will show information about the
+request, response, or current decision, respectively.
+</p>
+
+<p><img src="images/basic-trace-request-tab.png" alt="Detail Panel Request Tab" title="Detail Panel Request Tab" /></p>
+
+<p>
+The <em>Request Tab</em> shows details about what the client
+requested. The method and path are displayed at the top, headers
+below that, and body (if available) at the bottom.
+</p>
+
+<p><img src="images/basic-trace-response-tab.png" alt="Detail Panel Response Tab" title="Detail Panel Response Tab" /></p>
+
+<p>
+The <em>Response Tab</em> shows details about how your resource
+responded. The response code is displayed at the top, headers below
+that, and body (if available) at the bottom.
+</p>
+
+<p><img src="images/basic-trace-decision-tab.png" alt="Detail Panel Decision Tab" title="Detail Panel Decision Tab" /></p>
+
+<p>
+The <em>Decision Tab</em> shows details about the currently selected
+decision. The decision's name is displayed in a dropdown at the top
+(changing this dropdown or clicking on a decision in the graph will
+change which decision is displayed). The list of the functions called
+in the resource's module is displayed in a dropdown below the
+decision. The arguments with which the function was called are
+displayed just below the function's name, and the return value of the
+function is displayed at the bottom of the panel.
+</p>
+<p>
+The <em>Detail Panel</em> can be resized by clicking and dragging the
+tabs or the dark grey border to the left or right. Clicking the
+border will toggle the panel's visibility.
+</p>
+
+<h2>Configuration Details</h2>
+
+<p>
+The Webmachine trace tool is divided into two parts: one produces the
+trace logs, while the other produces the visualization.
+</p>
+
+<h3>Trace Log Production Configuration</h3>
+
+<p>
+You may configure the path under which trace files are stored by
+specifying that path as the <code>Path</code> part of your resource
+module's <code>init/1</code> return value. Every time a request is
+made to that resource, a new trace file will be created in the
+specified directory.
+</p>
+<p>
+<strong><em>Warning</em></strong>: Trace files can be large. It is
+advised that you do not leave tracing enabled on a
+large-content-producing or high-hit-rate resource.
+</p>
+<p>
+The path may be either absolute:
+</p>
+<pre>
+init(Config) ->
+ {{trace, "/tmp/traces"}, Config}. %% absolute path /tmp/traces
+</pre>
+
+<p>or relative to your application's root:</p>
+
+<pre>
+init(Config) ->
+ {{trace, "traces"}, Config}. %% "traces" directory in application's root
+</pre>
+
+<h3>Trace Viewer Configuration</h3>
+
+<p>
+The viewer is configured by its entry in the
+<a href="dispatcher.html">dispatch list</a>.
+Two functions make modifying that entry easy:
+<code>wmtrace_resource:add_dispatch_rule/2</code>
+and <code>wmtrace_resource:remove_dispatch_rules/0</code>.
+</p>
+<p>
+Call <code>add_dispatch_rule/2</code> with the HTTP-exported path, and
+the path to the trace files. For example, to expose the viewer at
+<code>http://YOUR_HOST/dev/wmtrace/</code> and point it at the trace
+files in <code>/tmp/traces</code>, type in your application's erlang
+shell:
+</p>
+
+<pre>
+wmtrace_resource:add_dispatch_rule("dev/wmtrace", "/tmp/traces").
+</pre>
+
+<p>
+If you know that you always want the viewer enabled and configured in
+a specific way, you can also add a line like the following to your
+application's dispatch list:
+</p>
+
+<pre>
+{["dev", "wmtrace", '*'], wmtrace_resource, [{trace_dir, "/tmp/traces"}]}
+</pre>
+
+<p>
+To disable all trace viewer resources at any point, just execute
+<code>wmtrace_resource:remove_dispatch_rules/0</code> in your
+application's erlang shell.
+</p>
+
+<h2>Trace Log Format</h2>
+
+<p>
+The trace file is fairly straightforward, if you want to read it with
+<code>less</code>:
+</p>
+
+<ul><li><code> {decision, X}. </code> indicates that a decision point was reached
+
+</li><li><code> {attempt, Module, Function, Args}. </code> indicates that a call to <code> Module:Function(Args) </code> was made.
+</li><li><code> {result, Module, Function, Result}. </code> indicates that the call to <code> Module:Function(Args) </code> returned <code> Result </code>.
+
+</li><li><code> {not_exported, Module, Function, Args}. </code> indicates that <code> Module:Function(Args) </code> would have been called, but it was not exported (or not defined) by the module
+</li></ul>
+
+<p>
+The format should be such that a <code>file:consult/1</code> will
+give you a list of the lines as erlang terms.
+</p>
+
+<h2>Special Handling for Funs and Pids</h2>
+
+<p>
+Funs and Pids don't roundtrip through file serialization very well
+(<code>file:consult/1</code> will blow up on a fun or pid written to a
+file with <code>io:format("~p", [FunOrPid])</code>). To deal with
+this, the trace logger prints a recognizable tuple translation instead
+of the fun or pid.
+</p>
+
+<h3>Fun Translation</h3>
+
+<p>
+Funs you might see in Erlang source as <code>fun io:format/2</code>
+will appear in a trace log as:
+</p>
+
+<pre>
+{'WMTRACE_ESCAPED_FUN',[{module,io},
+ {name,format},
+ {arity,2},
+ {type,external}]}
+</pre>
+
+<p>
+Those that would be in Erlang source as <code> fun() -> ok end
+</code> will appear in a trace log as:
+</p>
+
+<pre>
+{'WMTRACE_ESCAPED_FUN',[{module,sampletrace_resource},
+ {name,'-to_html/2-fun-0-'},
+ {arity,0},
+ {type,local}]}
+</pre>
+
+<h3>Pid Translation</h3>
+
+<p>
+Pids are simply logged in a marked tuple, after being run through
+<code>erlang:pid_to_list/1</code>:
+</p>
+
+<pre>
+{'WMTRACE_ESCAPED_PID',"<0.74.0>"}
+</pre>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine decision flow" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine decision flow</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine decision flow</h3>
+
+<p>
+This diagram is illustrative of the flow of processing that a
+<a href="resources.html">webmachine resource</a> goes through
+from inception to response.
+</p>
+<p>
+Version 1 of this diagram, from Alan Dean, was the inspiration for
+webmachine_decision_core. Versions 2 and 3 were created in
+collaboration between Alan Dean and Justin Sheehy.
+</p>
+<p>
+A copy of v3 is found in the webmachine source tree for convenience.
+</p>
+<p>
+<img src="images/http-headers-status-v3.png" alt="http diagram v3" title="http diagram v3" />
+</p>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine request dispatching" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine request dispatching</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine request dispatching</h3>
+
+<p>
+This page describes the configuration of URI dispatch to resources
+in a webmachine application. The dispatch map data structure is a list
+of 3-tuples, where each entry is of the form {pathspec, resource,
+args}. The first pathspec in the list that matches the URI for a
+request will cause the corresponding resource to be used in handling
+that request.
+
+</p>
+<p>
+
+A <code>pathspec</code> is a list of pathterms. A pathterm is any of
+[string,atom,star] where star is just the atom of "*". The
+pathspec-matching is done by breaking up the request URI into tokens
+via the "/" separator and matching those tokens against the
+pathterms. A string pathterm will match a token if the token is equal
+to that string. A non-star atom will match any single token. The star
+atom (* in single quotes) will match any number of tokens, but may
+only be present as the last pathterm in a pathspec. If all tokens are
+matched and all pathterms are used, then the pathspec matches. The
+tokens used are available in <code>wrq:path_tokens(ReqData)</code>
+in the resource functions.
+
+</p>
+<p>
+
+Any atom pathterms that were used in a match will cause a binding in
+the path_info element of the request's
+<a href="reqdata.html">ReqData</a>. If
+there was a <code>foo</code> atom that matched the token
+<code>"bar"</code>, then <code>wrq:path_info(foo, ReqData)</code> will
+return <code>"bar"</code> inside the resource calls, and in any case
+<code>wrq:path_info(ReqData)</code> will return a Dict term with all
+the bindings, accessible via the <code>dict</code> standard library
+module. If there was a star pathterm in the pathspec, then
+<code>wrq:disp_path(ReqData)</code> in a resource function will return
+the URI portion that was matched by the star.
+
+</p>
+<p>
+
+The <code> resource </code> is an atom identifying a
+<a href="resources.html">resource</a> that
+should handle a matching request. It will have the <code>args</code>
+(which must be a list) passed to its init function before request
+handling begins.
+
+</p>
+<p>
+
+In the default directory structure for a new webmachine application,
+the dispatch terms will be in file:consult form in
+"priv/dispatch.conf" under the application root.
+
+</p>
+<h3 id="examples">Examples</h3>
+
+<p>
+
+The examples below are taken from
+<a href="http://www.erlang-factory.com/conference/SFBayAreaErlangFactory2009/speakers/justinsheehy">Justin Sheehy's slide at Erlang Factory 2009</a>
+</p>
+
+<table><tr><th>Dispatch Rule</th><th>URL</th><th>wrq:disp_path</th><th>wrq:path</th><th>wrq:path_info</th><th>wrq:path_tokens</th></tr>
+<tr><td>{["a"], some_resource, []}</td><td>/a</td><td>""</td><td>"/a"</td><td>[]</td><td>[]</td></tr>
+
+<tr><td>{["a", '*'], some_resource, []}</td><td>/a</td><td>""</td><td>"/a"</td><td>[]</td><td>[]</td></tr>
+<tr><td>{["a", '*'], some_resource, []}</td><td>/a/b/c</td><td>"b/c"</td><td>"/a/b/c"</td><td>[]</td><td>["b", "c"]</td></tr>
+<tr><td>{["a", foo], some_resource, []}</td><td>/a/b</td><td>""</td><td>"/a/b"</td><td>[{foo, "b"}]</td><td>[]</td></tr>
+
+<tr><td>{["a", foo, '*'], some_resource, []}</td><td>/a/b</td><td>""</td><td>"/a/b"</td><td>[{foo, "b"}]</td><td>[]</td></tr>
+<tr><td>{["a", foo, '*'], some_resource, []}</td><td>/a/b/c/d</td><td>"c/d"</td><td>"/a/b/c/d"</td><td>[{foo, "b"}]</td><td>["c", "d"]</td></tr>
+</table>
+
+<p>Query strings are easy too:</p>
+
+<ul><li>Given rule: {["a", foo, '*'], some_resource, []}
+</li><li>And URL: /a/b/c/d?fee=ah&amp;fie=ha
+</li><li>Then wrq:get_qs_value("fie",ReqData) -> "ha"
+</li></ul>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine documentation" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine documentation</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>documentation</h3>
+
+<p>
+There are a lot of places you can choose to start reading about
+Webmachine. A few of your options are:
+</p>
+
+<ul>
+ <li><a href="quickstart.html">get started right away</a></li>
+ <li><a href="example_resources.html">read some examples</a></li>
+ <li><a href="mechanics.html">understand Webmachine's execution model</a></li>
+ <li><a href="dispatcher.html">configure your URL dispatching</a></li>
+ <li><a href="resources.html">learn about the functions that can make up a resource</a></li>
+ <li><a href="reqdata.html">see how your resource can access the HTTP Request</a></li>
+ <li><a href="http://blog.therestfulway.com/2009/05/video-slideshow-introducing-webmachine.html">watch a video</a></li>
+ <li><a href="http://lists.therestfulway.com/mailman/listinfo/webmachine_lists.therestfulway.com">join the mailing list</a></li>
+ <li><a href="debugging.html">debug your application </a></li>
+ <li><a href="blogs.html">check out some blogs </a></li>
+</ul>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine examples" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine examples</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine examples</h3>
+
+<p>
+The simplest possible example is the one produced via the
+<a href="quickstart.html">quickstart</a>.
+</p>
+<p>
+For an example of a read/write filesystem server showing several
+interesting features and supporting GET, PUT, DELETE, and POST, see
+<a href="http://bitbucket.org/justin/webmachine/src/tip/demo/src/demo_fs_resource.erl">demo_fs_resource</a>.
+</p>
+<p>
+For a very simple resource demonstrating content negotiation, basic
+auth, and some caching headers, see
+<a href="http://bitbucket.org/justin/webmachine/src/tip/demo/src/webmachine_demo_resource.erl">webmachine_demo_resource</a>.
+</p>
+<p>
+Some example code based on webmachine_demo_resource follows.
+</p>
+<p>
+The simplest working resource could export only one function in
+addition to init/1:
+</p>
+
+<pre>
+-module(webmachine_demo_resource).
+-export([init/1, to_html/2]).
+-include_lib("webmachine/include/webmachine.hrl").
+
+init([]) -> {ok, undefined}.
+
+to_html(ReqData, Context) -> {"<html><body>Hello, new world</body></html>", ReqData, Context}.
+</pre>
+
+<p>
+That's really it -- a working webmachine resource. That resource will
+respond to all valid GET requests with the exact same response.
+</p>
+<p>
+Many interesting bits of HTTP are handled automatically by
+Webmachine. For instance, if a client sends a request to that trivial
+resource with an Accept header that does not allow for a text/html
+response, they will receive a 406 Not Acceptable.
+</p>
+<p>
+Suppose I wanted to serve a plaintext client as well. I could note
+that I provide more than just HTML:
+</p>
+
+<pre>
+content_types_provided(ReqData, Context) ->
+ {[{"text/html", to_html},{"text/plain",to_text}], ReqData, Context}.
+</pre>
+
+<p>
+I already have my HTML representation produced, so I add a text one:
+(and while I'm at it, I'll show that it's trivial to produce dynamic content as well)
+</p>
+
+<pre>
+to_text(ReqData, Context) ->
+ Path = wrq:disp_path(ReqData),
+ Body = io_lib:format("Hello ~s from webmachine.~n", [Path]),
+ {Body, ReqData, Context}.
+</pre>
+
+<p>
+Now that this resource provides multiple media types, it automatically performs conneg:
+</p>
+
+<pre>
+$ telnet localhost 8000
+Trying 127.0.0.1...
+Connected to localhost.
+Escape character is '^]'.
+GET /demo/a/resource/path HTTP/1.1
+Accept: text/plain
+
+HTTP/1.1 200 OK
+Vary: Accept
+Server: MochiWeb/1.1 WebMachine/0.97
+Date: Sun, 15 Mar 2009 02:54:02 GMT
+Content-Type: text/plain
+Content-Length: 39
+
+Hello a/resource/path from webmachine.
+</pre>
+
+<p>
+What about authorization? Webmachine resources default to assuming the
+client is authorized, but that can easily be overridden. Here's an
+overly simplistic but illustrative example:
+</p>
+
+<pre>
+is_authorized(ReqData, Context) ->
+ case wrq:disp_path(ReqData) of
+ "authdemo" ->
+ case wrq:get_req_header("authorization", ReqData) of
+ "Basic "++Base64 ->
+ Str = base64:mime_decode_to_string(Base64),
+ case string:tokens(Str, ":") of
+ ["authdemo", "demo1"] ->
+ {true, ReqData, Context};
+ _ ->
+ {"Basic realm=webmachine", ReqData, Context}
+ end;
+ _ ->
+ {"Basic realm=webmachine", ReqData, Context}
+ end;
+ _ -> {true, ReqData, Context}
+ end.
+</pre>
+
+<p>
+With that function in the resource, all paths except
+<code>/authdemo</code> from this resource's root are authorized.
+For that one path,
+the UA will be asked to do basic authorization with the user/pass of
+authdemo/demo1. It should go without saying that this isn't quite the
+same function that we use in our real apps, but it is nice and simple.
+</p>
+<p>
+If you've generated the application from the
+<a href="quickstart.html">quickstart</a>, make sure
+you've added this line to your dispatch.conf file:
+</p>
+<pre>
+{["demo", '*'], mywebdemo_resource, []}.
+</pre>
+<p>
+Now you can point your browser at
+<code>http://localhost:8000/demo/authdemo</code> with the demo app running:
+</p>
+
+<pre>
+$ curl -v http://localhost:8000/demo/authdemo
+> GET /demo/authdemo HTTP/1.1
+> Host: localhost:8000
+> Accept: */*
+>
+< HTTP/1.1 401 Unauthorized
+< WWW-Authenticate: Basic realm=webmachine
+< Server: MochiWeb/1.1 WebMachine/0.97
+< Date: Sun, 15 Mar 2009 02:57:43 GMT
+< Content-Length: 0
+
+<
+</pre>
+<p></p>
+<pre>
+$ curl -v -u authdemo:demo1 http://localhost:8000/demo/authdemo
+* Server auth using Basic with user 'authdemo'
+> GET /demo/authdemo HTTP/1.1
+> Authorization: Basic YXV0aGRlbW86ZGVtbzE=
+> Host: localhost:8000
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Vary: Accept
+< Server: MochiWeb/1.1 WebMachine/0.97
+< Date: Sun, 15 Mar 2009 02:59:02 GMT
+< Content-Type: text/html
+< Content-Length: 59
+<
+<html><body>Hello authdemo from webmachine.
+</body></html>
+</pre>
+
+<p>
+HTTP caching support is also quite easy, with functions allowing
+resources to define (e.g.) <code>last_modified</code>,
+<code>expires</code>, and <code>generate_etag.</code> For instance, since
+representations of this resource vary only by URI Path, I could use an
+extremely simple entity tag unfit for most real applications but
+sufficient for this example:
+</p>
+
+<pre>
+generate_etag(ReqData, Context) -> {wrq:raw_path(ReqData), ReqData, Context}.
+</pre>
+
+<p>Similarly, here's a trivial expires rule:</p>
+
+<pre>
+expires(ReqData, Context) -> {{{2021,1,1},{0,0,0}}, ReqData, Context}.
+</pre>
+
+<p>
+And now the response from our earlier request is appropriately tagged:
+</p>
+
+<pre>
+HTTP/1.1 200 OK
+Vary: Accept
+Server: MochiWeb/1.1 WebMachine/0.97
+Expires: Fri, 01 Jan 2021 00:00:00 GMT
+ETag: /demo/authdemo
+Date: Sun, 15 Mar 2009 02:59:02 GMT
+Content-Type: text/html
+Content-Length: 59
+
+<html><body>Hello authdemo from webmachine.
+</body></html>
+</pre>
+
+<p>
+For more details, read the source of the resources linked at the top
+of this page.
+</p>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine - software shaped like the Web - basho technologies" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style.css" type="text/css" />
+ <title>Webmachine - software shaped like the Web</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a class="current" href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+
+ <div id="intro">
+ <p>Webmachine is not much like the Web frameworks you're used to. You can call Webmachine a REST toolkit if you like, and we won't argue with you.</p>
+ </div>
+ <div id="left">
+<p>
+Webmachine is an application layer that adds HTTP semantic awareness on top of the excellent bit-pushing and HTTP syntax-management provided by
+<a href="http://code.google.com/p/mochiweb/">mochiweb</a>,
+and provides a simple and clean way to connect that to your
+application's behavior.
+</p>
+<p>
+A Webmachine application is a set of resources, each of which is a set of
+<a href="resources.html">functions</a> over the state of the resource. We really mean functions here, not object-methods, infinite-server-loops, or any other such construction. This aspect of Webmachine is one of the reasons why Webmachine applications are relatively easy to understand and extend.
+</p>
+<p>
+These functions give you a place to define the representations and other Web-relevant properties of your application's resources -- with the emphasis that the first-class things on the Web are resources and that their essential properties of interaction are already quite well defined and usefully constrained.
+</p>
+<p>
+For most Webmachine applications, most of the functions are quite small and isolated. One of the nice effects of this is that a quick reading of a resource will give you an understanding of the application, its Web behavior, and the relationship between them. Since these functions are usually <a href="reftrans.html">referentially transparent</a>, Webmachine applications can be quite easy to test. There's no need for mock objects, fake database connections, or any other wastes of time when you can write tests against each component of your application in terms of the input and output to various functions.
+</p>
+<p>
+We believe that by giving Web developers a
+<a href="mechanics.html">system</a> with conventions that
+<a href="diagram.html">directly map to HTTP</a>
+and REST, we help them to write and extend Web applications quickly while not dictating the shape of the rest of their application. The resulting applications are straightforward to examine and maintain, and have very easily understood HTTP semantics.
+</p>
+ </div>
+ <div id="right">
+ <h3>more information</h3>
+ <ul>
+ <li><a href="intros.html">introduction</a></li>
+ <li><a href="docs.html">documentation</a></li>
+ <li><a href="blogs.html">other writing</a></li>
+ </ul>
+ <a href="diagram.html"><img src="images/WM200-crop.png" alt="Webmachine Diagram" /></a>
+ <a href="http://www.basho.com"><img src="images/basho-landscape.gif" alt="Basho" /></a>
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine introductions" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>introductions to Webmachine</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>How do I get started?</h3>
+
+<p>
+If you want to jump in and start coding right away, the
+<a href="quickstart.html">quickstart</a> is the way to go.
+</p>
+<p>
+If you would prefer to watch a narrated slideshow introduction, this is roughly similar to the presentation that was given at
+<a href="http://www.erlang-factory.com/conference/SFBayAreaErlangFactory2009">Bay Area Erlang Factory 2009</a>:
+</p>
+
+<object width="512" height="322"><param name="movie" value="http://d.yimg.com/static.video.yahoo.com/yep/YV_YEP.swf?ver=2.2.40" /><param name="allowFullScreen" value="true" /><param name="AllowScriptAccess" VALUE="always" /><param name="bgcolor" value="#000000" /><param name="flashVars" value="id=13693397&amp;vid=5178506&amp;lang=en-us&amp;intl=us&amp;thumbUrl=http%3A//l.yimg.com/a/p/i/bcst/videosearch/9129/86376656.jpeg&amp;embed=1" /><embed src="http://d.yimg.com/static.video.yahoo.com/yep/YV_YEP.swf?ver=2.2.40" type="application/x-shockwave-flash" width="512" height="322" allowFullScreen="true" AllowScriptAccess="always" bgcolor="#000000" flashVars="id=13693397&amp;vid=5178506&amp;lang=en-us&amp;intl=us&amp;thumbUrl=http%3A//l.yimg.com/a/p/i/bcst/videosearch/9129/86376656.jpeg&amp;embed=1" ></embed></object>
+
+<p>
+Some <a href="blogs.html">blogs</a> also have posts that can serve as
+useful introductions to Webmachine, if you prefer to start that way.
+</p>
+
+<p>
+No matter how you get started, you'll probably want to come back and
+read <a href="docs.html">more documentation</a> once you get up and running.
+</p>
+
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine mechanics" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine mechanics</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>How does this Webmachine thing work, anyway?</h3>
+
+<p>
+This page describes the basic mechanics of Webmachine from the point
+of view of a single incoming HTTP Request, documenting the behavior of
+Webmachine through to the HTTP Response.
+</p>
+<p>
+(This is a bit different from what you might get with a "Web
+Framework" as we're not going to talk about MVC, ORMs, or anything
+else about the rest of the shape of your application. We believe that
+you know better than we do how to structure your own app --
+Webmachine's job is to help you make sure that your app's presence on
+the Web is well-behaved and well-structured.)
+</p>
+<p>
+When a request is initially received by Webmachine it is handled by the
+<a href="dispatcher.html">dispatcher</a>.
+If the dispatcher does not find a matching resource then it will
+immediately respond with a 404 Not Found. If a match is found then a
+<a href="reqdata.html">request data record</a>
+is created and the matching
+<a href="resources.html">resource</a> is
+kicked off via its <code>init/1</code> function.
+</p>
+<p>
+The resource then flows through the decision core, which is
+effectively just running the request through the
+<a href="diagram.html">HTTP flowchart</a>. At
+each decision point (diamond) in the diagram, Webmachine will
+determine which path to follow. In some cases it can determine the
+path purely from the request data -- for instance, the path from
+decision <code>C3</code> depends purely on whether the client sent
+an <code>Accept</code> header. In many cases, however, the decision
+is application-specific -- the path from <code>B10</code> depends on
+the value the
+<a href="resources.html">resource</a> module
+returns from <code>allowed_methods.</code> Eventually the chosen path
+will terminate at one of the rectangles on the diagram. At that point
+Webmachine will send an appropriate HTTP response, with the headers
+and body dependent on the path through the diagram and the values
+returned by the resource's functions.
+</p>
+<p>
+Most of the time you don't need to worry about this big diagram,
+though -- just define the
+<a href="resources.html">resource functions</a>
+relevant to your app and Webmachine will do the rest. A
+good understanding of this central mechanism in Webmachine is most
+useful when
+<a href="debugging.html">debugging your resources</a>.
+</p>
+<p>
+From the way that webmachine's decision core works, it follows that
+Webmachine's HTTP behavior is transactional. Each HTTP Request is
+fully received, and the resulting HTTP Response is then fully
+constructed before being returned. This means that while Webmachine
+is suitable for a great many Web applications it is not a good fit for
+an application that will gradually or continuously stream responses
+back to clients inside the context of a single HTTP Request.
+</p>
+<p>
+A useful way to build Webmachine applications is often just to write a
+single function such as <code>to_html</code> to provide the most
+basic of stubs; when that function exists (or any other returned by
+<code>content_types_provided</code>) you can produce <code>200 OK</code>
+responses. After that, you can easily extend your
+application's Web behavior simply by filling in the other
+<a href="resources.html">resource functions</a> as desired.
+</p>
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="getting started quickly with Webmachine" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>getting started quickly with Webmachine</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>getting started quickly with Webmachine</h3>
+
+
+<p>Make sure that you have a working Erlang/OTP release, R12B5 or later.</p>
+
+<p>Get the webmachine code:</p>
+
+<p><pre>
+hg clone http://bitbucket.org/justin/webmachine/ webmachine-read-only
+</pre></p>
+
+<p>Build webmachine:</p>
+
+<p><pre>
+cd webmachine-read-only
+make
+</pre></p>
+
+<p>Create, build, and start the skeleton resource:</p>
+
+<p><pre>
+./scripts/new_webmachine.sh mywebdemo /tmp
+cd /tmp/mywebdemo
+make
+./start.sh
+</pre></p>
+
+<p>Take a look! Point a web browser at <a href="http://localhost:8000/">http://localhost:8000/</a></p>
+
+<p>To make this resource handle URI paths other than /, add more
+<a href="dispatcher.html">dispatch</a> terms in
+/tmp/mywebdemo/priv/dispatch.conf; to make that resource do more
+interesting things, modify the
+<a href="resources.html">resource</a> itself
+at /tmp/mywebdemo/src/mywebdemo_resource.erl.</p>
+
+<p>To learn how to do more interesting things, check out <a href="example_resources.html">some examples</a> or read <a href="docs.html">more documentation</a>.</p>
+
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine's approach to resource functions and referential transparency" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine's approach to resource functions and referential transparency</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine's approach to resource functions and referential transparency</h3>
+
+<p>
+Webmachine goes to great lengths to help your <a href="resources.html">resource functions</a> to be as referentially transparent as possible. By "referentially transparent" we mean that given the same input <code> {ReqData, Context} </code> the function will return the same output <code> {Result, ReqData, Context} </code> and that side effects will be insignificant from the point of view of Webmachine's execution.
+</p>
+<p>
+We don't try to force you into pure referential transparency; we give you as big a hole as you want via <code>Context</code>. As that term is application-specific, you can put database handles, server process identifiers, or anything else you like in there and we won't try to stop you.
+</p>
+<p>
+However, all Webmachine really cares about is the rest of the terms. Since resource functions are generally referentially transparent at least with regard to those terms, many things are easier -- testing, <a href="debugging.html">debugging</a>, and even static analysis and reasoning about your Web application.
+</p>
+<p>
+Note that there is one important exception to this. The <a href="streambody.html">streamed body feature</a> exists to allow resources to consume or produce request/response bodies a hunk at a time without ever having the whole thing in memory. While the continuation-passing style used in the streaming API is friendly to general functional analysis, due to the necessary side-effect of reading or writing to sockets the stream bodies cannot be treated in quite the same way as other uses of the <code>ReqData</code> interface. Luckily, it is easy to inspect a <code>ReqData</code> to see if this is the case in any individual resource or request instance.
+</p>
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine request/response data" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine request/response data</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine request/response data</h3>
+
+<p>
+
+This is documentation of the Webmachine Request Data API as embodied
+by the <code>"wrq"</code> module. This module is the means by which
+resources access and manipulate the state of the request they are
+handling.
+
+</p>
+<p>
+
+Given that all webmachine resource functions have this signature:
+
+</p>
+
+<span class="fwf">f(ReqData, Context) -> {Result, ReqData, Context}</span>
+
+<p>
+
+we should explain in detail the <code>ReqData</code> input and output
+parameter. This is a data structure used to represent the request
+sent by the client as well as the response being built by the
+resource. The <code>wrq</code> module is used to access the values in
+the input parameter. Most functions in most resources have no need to
+modify the output <code>ReqData</code> and can simply pass along the
+one received as input. However, in some cases a resource will need to
+make some update to the response other than that implied by
+<code>Result</code> and in those cases it should use the
+<code>wrq</code> module to build a modified <code>ReqData</code> from
+the original one for the return value.
+
+</p>
+<p>
+
+A couple of nonstandard types are assumed here:
+
+</p>
+
+<table><tr><th>Type</th><th>Description</th></tr>
+<tr><td>string()</td><td>a list() with all elements in the ASCII range</td></tr>
+<tr><td>rd()</td><td>opaque record, used as the input/output <code>ReqData</code></td></tr>
+<tr><td>streambody()</td><td>A webmachine <a href="streambody.html">streamed body format</a></td></tr>
+<tr><td>mochiheaders()</td><td>a structure used in mochiweb for HTTP header storage</td></tr>
+</table>
+
+<p>The accessors are:</p>
+
+<table><tr><th>Function</th><th>Description</th></tr>
+<tr><td><code> method(rd()) -> 'DELETE' | 'GET' | 'HEAD' | 'OPTIONS' | 'POST' | 'PUT' | 'TRACE' </code></td><td>The HTTP method used by the client. (note that it is an <code> atom() </code>)</td></tr>
+
+<tr><td><code> version(rd()) -> {integer(),integer()} </code></td><td>The HTTP version used by the client. Most often <code> {1,1} </code>.</td></tr>
+<tr><td><code> peer(rd()) -> string() </code></td><td>The IP address of the client.</td></tr>
+<tr><td><code> disp_path(rd()) -> string() </code></td><td>The "local" path of the resource URI; the part after any prefix used in <a href="dispatcher.html">dispatch configuration</a>. Of the three path accessors, this is the one you usually want. This is also the one that will change after <code>create_path</code> is called in your <a href="resources.html">resource</a>.</td></tr>
+
+<tr><td><code> path(rd()) -> string() </code></td><td>The path part of the URI -- after the host and port, but not including any query string.</td></tr>
+<tr><td><code> raw_path(rd()) -> string() </code></td><td>The entire path part of the URI, including any query string present.</td></tr>
+<tr><td><code> path_info(atom(),rd()) -> 'undefined' | string() </code></td><td>Looks up a binding as described in <a href="dispatcher.html">dispatch configuration</a>.</td></tr>
+
+<tr><td><code> path_info(rd()) -> any() </code></td><td>The dictionary of bindings as described in <a href="dispatcher.html">dispatch configuration</a>.</td></tr>
+<tr><td><code> path_tokens(rd()) -> list() </code></td><td>This is a list of <code> string() </code> terms, the disp_path components split by "/".</td></tr>
+
+<tr><td><code> get_req_header(string(),rd()) -> 'undefined' | string() </code></td><td>Look up the value of an incoming request header.</td></tr>
+<tr><td><code> req_headers(rd()) -> mochiheaders() </code></td><td>The incoming HTTP headers. Generally, get_req_header is more useful.</td></tr>
+<tr><td><code> req_body(rd()) -> 'undefined' | binary() </code></td><td>The incoming request body, if any.</td></tr>
+
+<tr><td><code> stream_req_body(rd(),integer()) -> streambody() </code></td><td>The incoming request body in <a href="streambody.html">streamed</a> form, with hunks no bigger than the integer argument.</td></tr>
+<tr><td><code> get_cookie_value(string(),rd()) -> string() </code></td><td>Look up the named value in the incoming request cookie header.</td></tr>
+<tr><td><code> req_cookie(rd()) -> string() </code></td><td>The raw value of the cookie header. Note that get_cookie_value is often more useful.</td></tr>
+
+<tr><td><code> get_qs_value(string(),rd()) -> 'undefined' | string() </code></td><td>Given the name of a key, look up the corresponding value in the query string.</td></tr>
+<tr><td><code> get_qs_value(string(),string(),rd()) -> string() </code></td><td>Given the name of a key and a default value if not present, look up the corresponding value in the query string.</td></tr>
+<tr><td><code> req_qs(rd()) -> [{string(), string()}] </code></td><td>The parsed query string, if any. Note that get_qs_value is often more useful.</td></tr>
+
+<tr><td><code> get_resp_header(string(),rd()) -> string() </code></td><td>Look up the current value of an outgoing request header.</td></tr>
+<tr><td><code> resp_redirect(rd()) -> bool() </code></td><td>The last value passed to do_redirect, false otherwise -- if true, then some responses will be 303 instead of 2xx where applicable.</td></tr>
+<tr><td><code> resp_headers(rd()) -> mochiheaders() </code></td><td>The outgoing HTTP headers. Generally, get_resp_header is more useful.</td></tr>
+
+<tr><td><code> resp_body(rd()) -> 'undefined' | binary() </code></td><td>The outgoing response body, if one has been set. Usually, append_to_response_body is the best way to set this.</td></tr>
+<tr><td><code> app_root(rd()) -> string() </code></td><td>Indicates the "height" above the requested URI that this resource is dispatched from. Typical values are <code> "." </code>, <code> ".." </code>, <code> "../.." </code> and so on.</td></tr>
+
+</table>
+
+
+<p
+>The functions for (nondestructive) modification of <code> rd() </code> terms are:
+</p>
+
+
+<table><tr><th>Function</th><th>Description</th></tr>
+<tr><td><code> set_resp_header(string(),string(),rd()) -> rd() </code></td><td>Given a header name and value, set an outgoing request header to that value.</td></tr>
+<tr><td><code> append_to_response_body(binary(),rd()) -> rd() </code></td><td>Append the given value to the body of the outgoing response.</td></tr>
+
+<tr><td><code> do_redirect(bool(),rd()) -> rd() </code></td><td>see resp_redirect; this sets that value.</td></tr>
+<tr><td><code> set_disp_path(string(),rd()) -> rd() </code></td><td>The disp_path is the only path that can be changed during a request. This function will do so.</td></tr>
+<tr><td><code> set_req_body(binary(),rd()) -> rd() </code></td><td>Replace the incoming request body with this for the rest of the processing.</td></tr>
+
+<tr><td><code> set_resp_body(binary(),rd()) -> rd() </code></td><td>Set the outgoing response body to this value.</td></tr>
+<tr><td><code> set_resp_body(streambody(),rd()) -> rd() </code></td><td>Use this <a href="streambody.html">streamed body</a> to produce the outgoing response body on demand.</td></tr>
+<tr><td><code> set_resp_headers([{string(),string()}],rd()) -> rd() </code></td><td>Given a list of two-tuples of {headername,value}, set those outgoing response headers.</td></tr>
+
+<tr><td><code> remove_resp_header(string(),rd()) -> rd() </code></td><td>Remove the named outgoing response header.</td></tr>
+</table>
+
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine resource functions" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine resource functions</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine resource functions</h3>
+
+<p>All webmachine resources should include the webmachine resource library:</p>
+
+<p>
+<span class="fwf">
+-include_lib("webmachine/include/webmachine.hrl").
+</span>
+</p>
+
+<p>
+All webmachine resources should define and export <span class="fwf">init/1</span>, which will receive a configuration property list from the <a href="dispatcher.html">dispatcher</a> as its argument. This function should, if successful, return <span class="fwf">{ok, Context}</span>. Context is any term, and will be threaded through all of the other webmachine resource functions. Alternately, the resource can go into debugging mode by returning <span class="fwf">{{trace, Dir}, Context}</span> instead -- see <a href="debugging.html">the tracing documentation</a> for more information.</p>
+
+<p>All webmachine resource functions are of the signature:</p>
+
+<p>
+<span class="fwf">
+f(ReqData, Context) -> {Result, ReqData, Context}
+</span>
+</p>
+
+
+<p>
+<span class="fwf">Context</span> is an arbitrary <span class="fwf">term()</span> that is specific to your application. Webmachine will never do anything with this term other than threading it through the various functions of your resource. This is the means by which transient application-specific request state is passed along between functions.
+</p>
+<p>
+<span class="fwf">ReqData</span> is a <span class="fwf">#wm_reqdata{}</span> term, and is manipulated via the <a href="reqdata.html">wrq</a> interface. A resource function may access request data (such as header values) from the input value. If a resource function wishes to affect the response data in some way other than that implied by its return value (e.g. adding an X-Header) then it should modify the returned <span class="fwf">ReqData</span> term accordingly.
+</p>
+<p>
+The rest of this document is about the effects produced by different values in the <span class="fwf">Result</span> term.
+</p>
+<p>
+There are over 30 resource functions you can define, but any of them can be omitted as they have reasonable defaults.
+</p>
+<p>
+Each function is described below, showing the default and allowed values that may be in the <span class="fwf">Result</span> term. The default will be used if a resource does not export the function. If a function has an "X" in the "Halt" column, it also has the option of returning either of the two following special values for <span class="fwf">Result</span>:
+
+</p>
+
+<table><tr><th>Result</th><th>Effect</th></tr>
+<tr><td class="fwf lhcol">{error, Err::term()}</td><td>Immediately end processing of this request, returning a 500 response code. The response body will contain the <span class="fwf"> Err </span> term.</td></tr>
+<tr><td class="fwf lhcol">{halt, Code::integer()}</td><td>Immediately end processing of this request, returning response code Code. It is the responsibility of the resource to ensure that all necessary response header and body elements are filled in <span class="fwf"> ReqData </span> in order to make that response code valid.</td></tr>
+<tr><td class="fwf lhcol">{halt, {Code::integer(), ReasonPhrase::iolist()}}</td><td>Same as <span class="fwf"> {halt, Code::integer()} </span> but supply a custom reason phrase for the HTTP status code as well.</td></tr>
+</table>
+
+<p>
+Any function which has no description is optional and the effect of its return value should be evident from examining the <a href="diagram.html">diagram</a>.
+</p>
+
+<table>
+<tr><th>Function</th><th>Default</th><th>Halt</th><th>Allowed</th><th>Description</th></tr>
+
+<tr><td class="fwf">resource_exists</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td>Returning non-true values will result in 404 Not Found.</td></tr>
+
+<tr><td class="fwf">service_available</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">is_authorized</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> AuthHead</td><td>If this returns anything other than true, the response will be 401 Unauthorized. The AuthHead return value will be used as the value in the WWW-Authenticate header.</td></tr>
+
+<tr><td class="fwf">forbidden</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">allow_missing_post</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td>If the resource accepts POST requests to nonexistent resources, then this should return true.</td></tr>
+
+<tr><td class="fwf">malformed_request</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">uri_too_long</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">known_content_type</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+
+<tr><td class="fwf">valid_content_headers</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">valid_entity_length</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">options</td><td class="fwf">[]</td><td></td><td class="fwf">[Header]</td><td>If the OPTIONS method is supported and is used, the return value of this function is expected to be a list of pairs representing header names and values that should appear in the response.</td></tr>
+
+<tr><td class="fwf">allowed_methods</td><td class="fwf">['GET', 'HEAD']</td><td></td><td class="fwf">[Method]</td><td>If a Method not in this list is requested, then a 405 Method Not Allowed will be sent. Note that these are all-caps and are atoms. (single-quoted)</td></tr>
+<tr><td class="fwf">delete_resource</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td>This is called when a DELETE request should be enacted, and should return true if the deletion succeeded.</td></tr>
+<tr><td class="fwf">delete_completed</td><td class="fwf">true</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td>This is only called after a successful delete_resource call, and should return false if the deletion was accepted but cannot yet be guaranteed to have finished.</td></tr>
+
+<tr><td class="fwf">post_is_create</td><td class="fwf">false</td><td></td><td class="fwf">true <span class="fwf">|</span> false</td><td>If POST requests should be treated as a request to put content into a (potentially new) resource as opposed to being a generic submission for processing, then this function should return true. If it does return true, then create_path will be called and the rest of the request will be treated much like a PUT to the Path entry returned by that call.</td></tr>
+<tr><td class="fwf">create_path</td><td class="fwf">undefined</td><td></td><td class="fwf">Path</td><td>This will be called on a POST request if post_is_create returns true. It is an error for this function to not produce a Path if post_is_create returns true. The Path returned should be a valid URI part following the dispatcher prefix. That Path will replace the previous one in the return value of <span class="fwf"> wrq:disp_path(ReqData) </span> for all subsequent resource function calls in the course of this request.</td></tr>
+
+<tr><td class="fwf">process_post</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td>If post_is_create returns false, then this will be called to process any POST requests. If it succeeds, it should return true.</td></tr>
+<tr><td class="fwf">content_types_provided</td><td><span class="fwf"> [{"text/html", to_html}] </span></td><td></td><td><span class="fwf"> [{Mediatype, Handler}] </span></td><td>This should return a list of pairs where each pair is of the form <span class="fwf"> {Mediatype, Handler} </span> where <span class="fwf">Mediatype</span> is a string of content-type format and the <span class="fwf">Handler</span> is an atom naming the function which can provide a resource representation in that media type. Content negotiation is driven by this return value. For example, if a client request includes an Accept header with a value that does not appear as a first element in any of the return tuples, then a 406 Not Acceptable will be sent.</td></tr>
+
+<tr><td class="fwf">content_types_accepted</td><td><span class="fwf"> [] </span></td><td></td><td><span class="fwf"> [{Mediatype, Handler}] </span></td><td>This is used similarly to content_types_provided, except that it is for incoming resource representations -- for example, PUT requests. Handler functions usually want to use <span class="fwf"> wrq:req_body(ReqData) </span> to access the incoming request body.</td></tr>
+<tr><td class="fwf">charsets_provided</td><td class="fwf">no_charset</td><td></td><td>no_charset <span class="fwf">|</span> <span class="fwf"> [{Charset, CharsetConverter}] </span></td><td>If this is anything other than the atom no_charset, it must be a list of pairs where each pair is of the form {Charset, Converter} where Charset is a string naming a charset and Converter is a callable function in the resource which will be called on the produced body in a GET and ensure that it is in Charset.</td></tr>
+
+<tr><td class="fwf">encodings_provided</td><td><span class="fwf"> [{"identity", fun(X) -> X end}] </span></td><td></td><td><span class="fwf"> [{Encoding, Encoder}] </span></td><td>This must be a list of pairs where in each pair Encoding is a string naming a valid content encoding and Encoder is a callable function in the resource which will be called on the produced body in a GET and ensure that it is so encoded. One useful setting is to have the function check on method, and on GET requests return <span class="fwf"> [{"identity", fun(X) -> X end}, {"gzip", fun(X) -> zlib:gzip(X) end}] </span> as this is all that is needed to support gzip content encoding.</td></tr>
+
+<tr><td class="fwf">variances</td><td><span class="fwf"> [] </span></td><td></td><td><span class="fwf"> [HeaderName] </span></td><td>If this function is implemented, it should return a list of strings with header names that should be included in a given response's Vary header. The standard conneg headers (Accept, Accept-Encoding, Accept-Charset, Accept-Language) do not need to be specified here as Webmachine will add the correct elements of those automatically depending on resource behavior.</td></tr>
+<tr><td class="fwf">is_conflict</td><td class="fwf">false</td><td></td><td class="fwf">true <span class="fwf">|</span> false</td><td>If this returns true, the client will receive a 409 Conflict.</td></tr>
+<tr><td class="fwf">multiple_choices</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td>If this returns true, then it is assumed that multiple representations of the response are possible and a single one cannot be automatically chosen, so a 300 Multiple Choices will be sent instead of a 200.</td></tr>
+
+<tr><td class="fwf">previously_existed</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf">true <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">moved_permanently</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf"><span class="fwf"> {true, MovedURI} </span> <span class="fwf">|</span> false</td><td></td></tr>
+
+<tr><td class="fwf">moved_temporarily</td><td class="fwf">false</td><td align='center' class='x_check'>X</td><td class="fwf"><span class="fwf"> {true, MovedURI} </span> <span class="fwf">|</span> false</td><td></td></tr>
+<tr><td class="fwf">last_modified</td><td class="fwf">undefined</td><td></td><td class="fwf">undefined <span class="fwf">|</span> <span class="fwf"> {{YYYY,MM,DD}, {Hour,Min,Sec}} </span></td><td></td></tr>
+
+<tr><td class="fwf">expires</td><td class="fwf">undefined</td><td></td><td class="fwf">undefined <span class="fwf">|</span> <span class="fwf"> {{YYYY,MM,DD}, {Hour,Min,Sec}} </span></td><td></td></tr>
+<tr><td class="fwf">generate_etag</td><td class="fwf">undefined</td><td></td><td class="fwf">undefined <span class="fwf">|</span> ETag</td><td>If this returns a value, it will be used as the value of the ETag header and for comparison in conditional requests.</td></tr>
+<tr><td class="fwf">finish_request</td><td class="fwf">true</td><td></td><td class="fwf">true <span class="fwf">|</span> false</td><td>This function, if exported, is called just before the final response is constructed and sent. The <span class="fwf"> Result </span> is ignored, so any effect of this function must be by returning a modified <span class="fwf"> ReqData </span>.</td></tr>
+
+<tr><td class="fwf">body-producing function named as a Handler by content_types_provided</td><td></td><td align='center' class='x_check'>X</td><td class="fwf"><span class="fwf"> Body </span></td><td>The Body should be either an <span class="fwf">iolist()</span> or <a href="streambody.html">{stream,streambody()}</a></td></tr>
+<tr><td class="fwf">POST-processing function named as a Handler by content_types_accepted</td><td></td><td align='center' class='x_check'>X</td><td class="fwf"><span class="fwf"> true </span></td><td></td></tr>
+</table>
+
+<p>
+The above are all of the supported predefined resource functions. In addition to whichever of these a resource wishes to use, it also must export all of the functions named in the return values of the content_types_provided and content_types_accepted functions with behavior as described in the bottom two rows of the table.
+</p>
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta name="author" content="Basho Technologies" />
+ <meta name="description" content="Webmachine streamed bodies" />
+ <meta name="keywords" content="webmachine http rest web" />
+ <meta http-equiv="content-type" content="text/html;charset=utf-8" />
+ <link rel="stylesheet" href="css/style-1c.css" type="text/css" />
+ <title>Webmachine streamed bodies</title>
+</head>
+<body>
+ <div id="content">
+ <h1><span class="hr"></span><a href="/">webmachine</a></h1>
+ <ul id="top">
+ <li><a href="/">Home</a></li>
+ <li><a href="http://bitbucket.org/justin/webmachine/">Source Code</a></li>
+ <li><a href="contact.html">Contact</a></li>
+ </ul>
+ <div id="left">
+<h3>Webmachine streamed bodies</h3>
+
+<p>
+
+Webmachine allows the resource developer to handle request and
+response bodies as either whole units (binary or iolist) to be handed
+around at once, or else to choose to "stream" the body.
+
+</p>
+<p>
+
+The body-handling functions are:
+
+</p>
+
+<ul><li><code> wrq:req_body/1 </code>
+</li><li><code> wrq:stream_req_body/2 </code>
+</li><li><code> wrq:set_resp_body/2 </code>
+</li></ul>
+
+<p>
+
+The last of these, <code>wrq:set_resp_body/2</code>, is also called
+implicitly with the return value of any content-producing function
+such as <code>to_html/2</code>.
+
+</p>
+<p>
+
+The first of these (<code>req_body</code>) is the simplest. It will
+provide the whole incoming request body as a binary (unless the body
+is too large, as set by <code>wrq:set_max_recv_body/2</code> or
+defaulting to 50MB). For the majority of resources, this is the easiest
+way to handle the incoming request body.
+
+</p>
+<p>
+
+If a resource wants to handle the incoming request body a hunk at a
+time, it may call <code>wrq:stream_req_body/2</code> instead. Instead
+of a binary, this produces a <code>StreamBody</code> structure.
+
+</p>
+<p>
+
+(It is an error to call both <code>wrq:req_body/1</code> and
+<code>wrq:stream_req_body/2</code> in the execution of a single
+resource.)
+
+</p>
+<p>
+
+A <code>StreamBody</code> is a pair of the form
+<code>{Data,Next}</code> where <code>Data</code> is a binary and
+<code>Next</code> is either the atom <code>done</code> signifying the
+end of the body or else a 0-arity function that, when called, will
+produce the "next" <code>StreamBody</code> structure.
+
+</p>
+<p>
+
+The integer parameter to <code>wrq:stream_req_body/2</code> indicates
+the maximum size in bytes of any <code>Hunk</code> from the resulting
+<code>StreamBody</code>.
+
+</p>
+<p>
+
+When a resource provides a body to be sent in the response, it should
+use <code>wrq:set_resp_body/2</code>. The parameter to this function
+may be either an iolist, representing the entire body, or else a pair
+of the form <code>{stream, StreamBody}</code>.
+
+</p>
+<p>
+
+An example may make the usage of this API clearer. A complete and
+working resource module using this API in both directions:
+
+</p>
+<p>
+
+<pre>
+-module(mywebdemo_resource).
+-export([init/1, allowed_methods/2, process_post/2]).
+
+-include_lib("webmachine/include/webmachine.hrl").
+
+init([]) -> {ok, undefined}.
+
+allowed_methods(ReqData, State) -> {['POST'], ReqData, State}.
+
+process_post(ReqData, State) ->
+ Body = get_streamed_body(wrq:stream_req_body(ReqData, 3), []),
+ {true, wrq:set_resp_body({stream, send_streamed_body(Body,4)},ReqData), State}.
+
+send_streamed_body(Body, Max) ->
+ HunkLen=8*Max,
+ case Body of
+ <<A:HunkLen,Rest/binary>> ->
+ io:format("SENT ~p~n",[<<A:HunkLen>>]),
+ {<<A:HunkLen>>, fun() -> send_streamed_body(Rest,Max) end};
+ _ ->
+ io:format("SENT ~p~n",[Body]),
+ {Body, done}
+ end.
+
+get_streamed_body({Hunk,done},Acc) ->
+ io:format("RECEIVED ~p~n",[Hunk]),
+ iolist_to_binary(lists:reverse([Hunk|Acc]));
+get_streamed_body({Hunk,Next},Acc) ->
+ io:format("RECEIVED ~p~n",[Hunk]),
+ get_streamed_body(Next(),[Hunk|Acc]).
+</pre>
+
+</p>
+<p>
+
+If you use this resource in place of the file
+<code>/tmp/mywebdemo/src/mywebdemo_resource.erl</code> in the
+<a href="quickstart.html">quickstart</a> setup, you should then be able
+to issue <code>curl -d '1234567890' http://127.0.0.1:8000/</code> on
+the command line and the <code>io:format</code> calls will show you
+what is going on.
+
+</p>
+<p>
+
+Obviously, a realistic resource wouldn't use this API just to collect
+the whole binary into memory or break one up that is already present
+-- you'd use <code>req_body</code> and put a simple iolist into
+<code>set_resp_body</code> instead. Also, the choices of 3 and 4
+bytes as hunk size are far from optimal for most reasonable uses.
+This resource is intended only as a demonstration of the API, not as a
+real-world use of streaming request/response bodies.
+
+</p>
+
+ </div>
+ <div id="footer">
+
+ </div>
+ </div>
+
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-4979965-5");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+
+</body>
+</html>
+
--- /dev/null
+#!/bin/sh -e
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
+##
+
+## Default filesystem locations and Erlang boot-file names shared by
+## the rabbitmq-* launcher scripts. This file is sourced (not
+## executed) by rabbitmq-env, so it must only assign variables and
+## have no other side effects.
+
+### next line potentially updated in package install steps
+SYS_PREFIX=
+
+### next line will be updated when generating a standalone release
+ERL_DIR=
+
+# Erlang boot files: a minimal system vs. one that starts SASL.
+CLEAN_BOOT_FILE=start_clean
+SASL_BOOT_FILE=start_sasl
+
+## Set default values
+
+CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq
+LOG_BASE=${SYS_PREFIX}/var/log/rabbitmq
+MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
+ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
+
+# RABBITMQ_HOME is set by rabbitmq-env before this file is sourced.
+PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
+
+# Optional per-machine overrides, sourced later by rabbitmq-env.
+CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
--- /dev/null
+@echo off
+
+REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
+REM
+REM <rabbitmq_nodename> sname of the erlang node to connect to (required)
+REM
+REM Polls WMI (up to TIMEOUT attempts, using PING as a delay between
+REM tries) for an erl.exe process whose command line contains
+REM "-sname <rabbitmq_nodename>", then echoes its OS process id.
+REM Exits 0 on success, 1 on failure.
+
+setlocal
+
+if "%1"=="" goto fail
+
+:: set timeout vars ::
+set TIMEOUT=10
+set TIMER=1
+
+:: check that wmic exists ::
+set WMIC_PATH=%SYSTEMROOT%\System32\Wbem\wmic.exe
+if not exist "%WMIC_PATH%" (
+ goto fail
+)
+
+:: query WMI for the pid; "skip=1" drops the ProcessId column header ::
+:getpid
+for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%-sname %1%%'" get processid 2^>nul`) do (
+ set PID=%%P
+ goto echopid
+)
+
+:: NOTE(review): %TIMER% below is expanded when the block is parsed,
+:: i.e. before "set /a TIMER+=1" runs; the loop still terminates
+:: because the block is re-parsed on every pass via :getpid. ::
+:echopid
+:: check for pid not found ::
+if "%PID%" == "" (
+ PING 127.0.0.1 -n 2 > nul
+ set /a TIMER+=1
+ if %TIMEOUT%==%TIMER% goto fail
+ goto getpid
+)
+
+:: show pid ::
+echo %PID%
+
+:: all done ::
+:ok
+endlocal
+EXIT /B 0
+
+:: something went wrong ::
+:fail
+endlocal
+EXIT /B 1
+
+
--- /dev/null
+#!/bin/sh -e
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+## Resolves the real location of this script (following symlinks),
+## derives RABBITMQ_HOME and the default node name, loads the shared
+## defaults from rabbitmq-defaults, then any per-machine overrides
+## from ${CONF_ENV_FILE}. Sourced by the other rabbitmq-* scripts.
+
+# We set +e here since our test for "readlink -f" below needs to
+# be able to fail.
+set +e
+# Determine where this script is really located (if this script is
+# invoked from another script, this is the location of the caller)
+SCRIPT_PATH="$0"
+while [ -h "$SCRIPT_PATH" ] ; do
+    # Determine if readlink -f is supported at all. TODO clean this up.
+    # Quote the path so directories containing spaces do not word-split.
+    FULL_PATH=`readlink -f "$SCRIPT_PATH" 2>/dev/null`
+    if [ "$?" != "0" ]; then
+        REL_PATH=`readlink "$SCRIPT_PATH"`
+        if expr "$REL_PATH" : '/.*' > /dev/null; then
+            SCRIPT_PATH="$REL_PATH"
+        else
+            SCRIPT_PATH="`dirname "$SCRIPT_PATH"`/$REL_PATH"
+        fi
+    else
+        SCRIPT_PATH=$FULL_PATH
+    fi
+done
+set -e
+
+SCRIPT_DIR=`dirname "$SCRIPT_PATH"`
+RABBITMQ_HOME="${SCRIPT_DIR}/.."
+[ "x" = "x$HOSTNAME" ] && HOSTNAME=`env hostname`
+# Short node name: host part is the hostname up to the first dot.
+NODENAME=rabbit@${HOSTNAME%%.*}
+
+## Set defaults
+. "${SCRIPT_DIR}/rabbitmq-defaults"
+
+## Common defaults
+SERVER_ERL_ARGS="+K true +A30 +P 1048576 \
+  -kernel inet_default_connect_options [{nodelay,true}]"
+
+# warn about old rabbitmq.conf file, if no new one
+if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
+   [ ! -f "${CONF_ENV_FILE}" ] ; then
+    echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
+    echo "location has moved to ${CONF_ENV_FILE}"
+fi
+
+## Get configuration variables from the configure environment file
+## ("|| true" keeps "sh -e" from aborting when the file is absent).
+[ -f "${CONF_ENV_FILE}" ] && . "${CONF_ENV_FILE}" || true
--- /dev/null
+#!/bin/sh -e
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+# Load shared settings; RABBITMQ_<name> variables already set by the
+# user win over the defaults provided by rabbitmq-env.
+. `dirname $0`/rabbitmq-env
+
+##--- Fill in any RABBITMQ_<var_name> variables still left unset
+
+if [ -z "$RABBITMQ_ENABLED_PLUGINS_FILE" ]; then
+    RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
+fi
+if [ -z "$RABBITMQ_PLUGINS_DIR" ]; then
+    RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
+fi
+
+##--- End of overridden <var_name> variables
+
+# Run the plugin-management entry point on a throwaway hidden node
+# (name suffixed with our pid so concurrent invocations do not clash).
+exec ${ERL_DIR}erl \
+ -pa "${RABBITMQ_HOME}/ebin" \
+ -noinput \
+ -hidden \
+ -sname rabbitmq-plugins$$ \
+ -boot "${CLEAN_BOOT_FILE}" \
+ -s rabbit_plugins_main \
+ -enabled_plugins_file "$RABBITMQ_ENABLED_PLUGINS_FILE" \
+ -plugins_dist_dir "$RABBITMQ_PLUGINS_DIR" \
+ -extra "$@"
--- /dev/null
+@echo off
+REM The contents of this file are subject to the Mozilla Public License
+REM Version 1.1 (the "License"); you may not use this file except in
+REM compliance with the License. You may obtain a copy of the License
+REM at http://www.mozilla.org/MPL/
+REM
+REM Software distributed under the License is distributed on an "AS IS"
+REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+REM the License for the specific language governing rights and
+REM limitations under the License.
+REM
+REM The Original Code is RabbitMQ.
+REM
+REM The Initial Developer of the Original Code is GoPivotal, Inc.
+REM Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+REM
+
+rem Windows counterpart of the rabbitmq-plugins shell script: fills in
+rem defaults for unset RABBITMQ_* variables, then runs
+rem rabbit_plugins_main on a temporary hidden Erlang node.
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+if "!RABBITMQ_SERVICENAME!"=="" (
+ set RABBITMQ_SERVICENAME=RabbitMQ
+)
+
+if "!RABBITMQ_BASE!"=="" (
+ set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME!
+)
+
+rem Bail out with guidance if no usable Erlang installation is found.
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B
+)
+
+if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
+ set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+)
+
+if "!RABBITMQ_PLUGINS_DIR!"=="" (
+ set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
+)
+
+rem Node name is suffixed with RANDOM and sub-second time so
+rem concurrent invocations get distinct snames; "!VAR:\=/!" rewrites
+rem backslashes to forward slashes for the Erlang side.
+"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden -sname rabbitmq-plugins!RANDOM!!TIME:~9! -s rabbit_plugins_main -enabled_plugins_file "!RABBITMQ_ENABLED_PLUGINS_FILE!" -plugins_dist_dir "!RABBITMQ_PLUGINS_DIR:\=/!" -extra !STAR!
+
+endlocal
+endlocal
--- /dev/null
+#!/bin/sh -e
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+## Main broker launcher: fills in RABBITMQ_* defaults, records the pid,
+## runs a prelaunch check, then exec's the Erlang VM with the rabbit
+## application.
+
+# Get default settings with user overrides for (RABBITMQ_)<var_name>
+# Non-empty defaults should be set in rabbitmq-env
+. `dirname $0`/rabbitmq-env
+
+##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+DEFAULT_NODE_IP_ADDRESS=auto
+DEFAULT_NODE_PORT=5672
+[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
+[ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
+
+# If exactly one of address/port was given, default the other one.
+[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
+[ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
+
+# Erlang distribution port defaults to the node (AMQP) port + 20000.
+[ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
+[ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${DEFAULT_NODE_PORT} + 20000))
+[ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${RABBITMQ_NODE_PORT} + 20000))
+
+[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+[ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+[ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
+[ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
+[ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
+[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+
+[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
+[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+
+[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
+[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
+
+[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
+[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+
+[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
+
+[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
+
+## Log rotation
+[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
+[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
+[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
+[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
+
+##--- End of overridden <var_name> variables
+
+# By default the node runs non-interactively and boots the rabbit
+# application; RABBITMQ_ALLOW_INPUT / RABBITMQ_NODE_ONLY disable these.
+RABBITMQ_START_RABBIT=
+[ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT=" -noinput"
+[ "x" = "x$RABBITMQ_NODE_ONLY" ] && RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -s rabbit boot "
+
+# Record this shell's pid (which becomes the node's pid after exec).
+case "$(uname -s)" in
+ CYGWIN*) # we make no attempt to record the cygwin pid; rabbitmqctl wait
+ # will not be able to make sense of it anyway
+ ;;
+ *) # When -detached is passed, we don't write the pid, since it'd be the
+ # wrong one
+ detached=""
+ for opt in "$@"; do
+ if [ "$opt" = "-detached" ]; then
+ detached="true"
+ fi
+ done
+ if [ $detached ]; then
+ echo "Warning: PID file not written; -detached was passed." 1>&2
+ else
+ mkdir -p $(dirname ${RABBITMQ_PID_FILE});
+ echo $$ > ${RABBITMQ_PID_FILE}
+ fi
+esac
+
+RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin"
+
+# +e: we inspect the prelaunch exit code ourselves below.
+set +e
+
+RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
+RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \
+ ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \
+ -boot "${CLEAN_BOOT_FILE}" \
+ -noinput \
+ -hidden \
+ -s rabbit_prelaunch \
+ -sname rabbitmqprelaunch$$ \
+ -extra "${RABBITMQ_NODENAME}"
+
+# Prelaunch exit-code protocol: 0 = we should set the dist port,
+# 2 = dist port already configured, anything else = abort.
+PRELAUNCH_RESULT=$?
+if [ ${PRELAUNCH_RESULT} = 2 ] ; then
+ # dist port is mentioned in config, so do not set it
+ true
+elif [ ${PRELAUNCH_RESULT} = 0 ] ; then
+ # dist port is not mentioned in the config file, we can set it
+ RABBITMQ_DIST_ARG="-kernel inet_dist_listen_min ${RABBITMQ_DIST_PORT} -kernel inet_dist_listen_max ${RABBITMQ_DIST_PORT}"
+else
+ exit ${PRELAUNCH_RESULT}
+fi
+
+set -e
+
+# Only pass -config if the corresponding .config file actually exists.
+RABBITMQ_CONFIG_ARG=
+[ -f "${RABBITMQ_CONFIG_FILE}.config" ] && RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}"
+
+RABBITMQ_LISTEN_ARG=
+[ "x" != "x$RABBITMQ_NODE_PORT" ] && [ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_LISTEN_ARG="-rabbit tcp_listeners [{\""${RABBITMQ_NODE_IP_ADDRESS}"\","${RABBITMQ_NODE_PORT}"}]"
+
+# we need to turn off path expansion because some of the vars, notably
+# RABBITMQ_SERVER_ERL_ARGS, contain terms that look like globs and
+# there is no other way of preventing their expansion.
+set -f
+
+# Replace this shell with the broker node itself.
+exec ${ERL_DIR}erl \
+ -pa ${RABBITMQ_EBIN_ROOT} \
+ ${RABBITMQ_START_RABBIT} \
+ -sname ${RABBITMQ_NODENAME} \
+ -boot "${SASL_BOOT_FILE}" \
+ ${RABBITMQ_CONFIG_ARG} \
+ +W w \
+ ${RABBITMQ_SERVER_ERL_ARGS} \
+ ${RABBITMQ_LISTEN_ARG} \
+ -sasl errlog_type error \
+ -sasl sasl_error_logger false \
+ -rabbit error_logger '{file,"'${RABBITMQ_LOGS}'"}' \
+ -rabbit sasl_error_logger '{file,"'${RABBITMQ_SASL_LOGS}'"}' \
+ -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \
+ -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \
+ -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \
+ -os_mon start_cpu_sup false \
+ -os_mon start_disksup false \
+ -os_mon start_memsup false \
+ -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \
+ ${RABBITMQ_SERVER_START_ARGS} \
+ ${RABBITMQ_DIST_ARG} \
+ "$@"
--- /dev/null
+@echo off
+REM The contents of this file are subject to the Mozilla Public License
+REM Version 1.1 (the "License"); you may not use this file except in
+REM compliance with the License. You may obtain a copy of the License
+REM at http://www.mozilla.org/MPL/
+REM
+REM Software distributed under the License is distributed on an "AS IS"
+REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+REM the License for the specific language governing rights and
+REM limitations under the License.
+REM
+REM The Original Code is RabbitMQ.
+REM
+REM The Initial Developer of the Original Code is GoPivotal, Inc.
+REM Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+REM
+
+rem Windows counterpart of the rabbitmq-server shell script: fills in
+rem defaults for unset RABBITMQ_* variables, runs the prelaunch check,
+rem then starts the broker node in the foreground.
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+if "!RABBITMQ_BASE!"=="" (
+ set RABBITMQ_BASE=!APPDATA!\RabbitMQ
+)
+
+if "!COMPUTERNAME!"=="" (
+ set COMPUTERNAME=localhost
+)
+
+if "!RABBITMQ_NODENAME!"=="" (
+ set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
+)
+
+rem If exactly one of address/port was given, default the other one.
+if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+ if not "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_NODE_IP_ADDRESS=auto
+ )
+) else (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_NODE_PORT=5672
+ )
+)
+
+rem Distribution port defaults to the node (AMQP) port + 20000.
+if "!RABBITMQ_DIST_PORT!"=="" (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_DIST_PORT=25672
+ ) else (
+ set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
+ )
+)
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B
+)
+
+if "!RABBITMQ_MNESIA_BASE!"=="" (
+ set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!/db
+)
+if "!RABBITMQ_LOG_BASE!"=="" (
+ set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!/log
+)
+
+
+rem We save the previous logs in their respective backup
+rem Log management (rotation, filtering based on size...) is left as an exercise for the user.
+
+set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
+set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
+
+rem End of log management
+
+
+if "!RABBITMQ_MNESIA_DIR!"=="" (
+ set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
+)
+
+if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
+ set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
+)
+
+if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
+ set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+)
+
+if "!RABBITMQ_PLUGINS_DIR!"=="" (
+ set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
+)
+
+set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin
+
+rem Prelaunch check on a throwaway node; its exit code tells us whether
+rem we may set the distribution port ourselves.
+"!ERLANG_HOME!\bin\erl.exe" ^
+ -pa "!RABBITMQ_EBIN_ROOT!" ^
+ -noinput -hidden ^
+ -s rabbit_prelaunch ^
+ -sname rabbitmqprelaunch!RANDOM!!TIME:~9! ^
+ -extra "!RABBITMQ_NODENAME!"
+
+rem Exit code 2 = dist port already configured, 1 = failure,
+rem 0 = set the dist port here.
+if ERRORLEVEL 2 (
+ rem dist port mentioned in config, do not attempt to set it
+) else if ERRORLEVEL 1 (
+ exit /B 1
+) else (
+ set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
+)
+
+set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!"
+
+if "!RABBITMQ_CONFIG_FILE!"=="" (
+ set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
+)
+
+rem Only pass -config if the corresponding .config file exists.
+if exist "!RABBITMQ_CONFIG_FILE!.config" (
+ set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"
+) else (
+ set RABBITMQ_CONFIG_ARG=
+)
+
+set RABBITMQ_LISTEN_ARG=
+if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+ if not "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners [{\""!RABBITMQ_NODE_IP_ADDRESS!"\","!RABBITMQ_NODE_PORT!"}]
+ )
+)
+
+rem Start the broker node; "!VAR:\=/!" rewrites backslashes to forward
+rem slashes so the paths are valid Erlang strings.
+"!ERLANG_HOME!\bin\erl.exe" ^
+-pa "!RABBITMQ_EBIN_ROOT!" ^
+-noinput ^
+-boot start_sasl ^
+-s rabbit boot ^
+!RABBITMQ_CONFIG_ARG! ^
+-sname !RABBITMQ_NODENAME! ^
++W w ^
++A30 ^
++P 1048576 ^
+-kernel inet_default_connect_options "[{nodelay, true}]" ^
+!RABBITMQ_LISTEN_ARG! ^
+!RABBITMQ_SERVER_ERL_ARGS! ^
+-sasl errlog_type error ^
+-sasl sasl_error_logger false ^
+-rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
+-rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
+-rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
+-rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
+-rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
+-os_mon start_cpu_sup false ^
+-os_mon start_disksup false ^
+-os_mon start_memsup false ^
+-mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
+!RABBITMQ_SERVER_START_ARGS! ^
+!RABBITMQ_DIST_ARG! ^
+!STAR!
+
+endlocal
+endlocal
--- /dev/null
+@echo off
+REM The contents of this file are subject to the Mozilla Public License
+REM Version 1.1 (the "License"); you may not use this file except in
+REM compliance with the License. You may obtain a copy of the License
+REM at http://www.mozilla.org/MPL/
+REM
+REM Software distributed under the License is distributed on an "AS IS"
+REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+REM the License for the specific language governing rights and
+REM limitations under the License.
+REM
+REM The Original Code is RabbitMQ.
+REM
+REM The Initial Developer of the Original Code is GoPivotal, Inc.
+REM Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+REM
+
+rem Manages RabbitMQ as a Windows service via Erlang's erlsrv:
+rem "install" registers/updates the service with the full broker
+rem command line; start/stop/disable/enable/list/remove are forwarded
+rem to erlsrv; anything else prints usage.
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TN0=%~n0
+set TDP0=%~dp0
+set P1=%1
+setlocal enabledelayedexpansion
+
+rem Collect every argument after the first into STARVAR.
+set STARVAR=
+shift
+:loop1
+if "%1"=="" goto after_loop
+ set STARVAR=%STARVAR% %1
+ shift
+goto loop1
+:after_loop
+
+if "!RABBITMQ_SERVICENAME!"=="" (
+ set RABBITMQ_SERVICENAME=RabbitMQ
+)
+
+if "!RABBITMQ_BASE!"=="" (
+ set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME!
+)
+
+if "!COMPUTERNAME!"=="" (
+ set COMPUTERNAME=localhost
+)
+
+if "!RABBITMQ_NODENAME!"=="" (
+ set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
+)
+
+rem If exactly one of address/port was given, default the other one.
+if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+ if not "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_NODE_IP_ADDRESS=auto
+ )
+) else (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_NODE_PORT=5672
+ )
+)
+
+rem Distribution port defaults to the node (AMQP) port + 20000.
+if "!RABBITMQ_DIST_PORT!"=="" (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_DIST_PORT=25672
+ ) else (
+ set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
+ )
+)
+
+rem Locate erlsrv.exe under ERLANG_HOME unless the caller already
+rem provided ERLANG_SERVICE_MANAGER_PATH.
+if "!ERLANG_SERVICE_MANAGER_PATH!"=="" (
+ if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B
+ )
+ for /f "delims=" %%i in ('dir /ad/b "!ERLANG_HOME!"') do if exist "!ERLANG_HOME!\%%i\bin\erlsrv.exe" (
+ set ERLANG_SERVICE_MANAGER_PATH=!ERLANG_HOME!\%%i\bin
+ )
+)
+
+rem RABBITMQ_CONSOLE_LOG may only be "new" or "reuse"; anything else
+rem leaves the erlsrv -debugtype flag unset.
+set CONSOLE_FLAG=
+set CONSOLE_LOG_VALID=
+for %%i in (new reuse) do if "%%i" == "!RABBITMQ_CONSOLE_LOG!" set CONSOLE_LOG_VALID=TRUE
+if "!CONSOLE_LOG_VALID!" == "TRUE" (
+ set CONSOLE_FLAG=-debugtype !RABBITMQ_CONSOLE_LOG!
+)
+
+rem *** End of configuration ***
+
+if not exist "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" (
+ echo.
+ echo **********************************************
+ echo ERLANG_SERVICE_MANAGER_PATH not set correctly.
+ echo **********************************************
+ echo.
+ echo "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" not found
+ echo Please set ERLANG_SERVICE_MANAGER_PATH to the folder containing "erlsrv.exe".
+ echo.
+ exit /B 1
+)
+
+if "!RABBITMQ_MNESIA_BASE!"=="" (
+ set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!/db
+)
+if "!RABBITMQ_LOG_BASE!"=="" (
+ set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!/log
+)
+
+
+rem We save the previous logs in their respective backup
+rem Log management (rotation, filtering based on size...) is left as an exercise for the user.
+
+set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
+set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
+
+rem End of log management
+
+
+if "!RABBITMQ_MNESIA_DIR!"=="" (
+ set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
+)
+
+if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
+ set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
+)
+
+rem Dispatch on the first argument; unknown commands print usage.
+if "!P1!" == "install" goto INSTALL_SERVICE
+for %%i in (start stop disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE
+
+echo.
+echo *********************
+echo Service control usage
+echo *********************
+echo.
+echo !TN0! help - Display this help
+echo !TN0! install - Install the !RABBITMQ_SERVICENAME! service
+echo !TN0! remove - Remove the !RABBITMQ_SERVICENAME! service
+echo.
+echo The following actions can also be accomplished by using
+echo Windows Services Management Console (services.msc):
+echo.
+echo !TN0! start - Start the !RABBITMQ_SERVICENAME! service
+echo !TN0! stop - Stop the !RABBITMQ_SERVICENAME! service
+echo !TN0! disable - Disable the !RABBITMQ_SERVICENAME! service
+echo !TN0! enable - Enable the !RABBITMQ_SERVICENAME! service
+echo.
+exit /B
+
+
+:INSTALL_SERVICE
+
+if not exist "!RABBITMQ_BASE!" (
+ echo Creating base directory !RABBITMQ_BASE! & md "!RABBITMQ_BASE!"
+)
+
+rem Register the service only if erlsrv does not already know it.
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL
+if errorlevel 1 (
+ "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!
+) else (
+ echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters
+)
+
+if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
+ set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+)
+
+if "!RABBITMQ_PLUGINS_DIR!"=="" (
+ set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
+)
+
+set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin
+
+if "!RABBITMQ_CONFIG_FILE!"=="" (
+ set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
+)
+
+rem Prelaunch check; the exit code tells us whether we may set the
+rem distribution port ourselves.
+"!ERLANG_HOME!\bin\erl.exe" ^
+ -pa "!RABBITMQ_EBIN_ROOT!" ^
+ -noinput -hidden ^
+ -s rabbit_prelaunch ^
+ -sname rabbitmqprelaunch!RANDOM!!TIME:~9!
+
+if ERRORLEVEL 3 (
+ rem "if ERRORLEVEL n" is true for exit codes of n and above, so every
+ rem failure code not handled below is caught here
+ exit /B 1
+) else if ERRORLEVEL 2 (
+ rem dist port mentioned in config, do not attempt to set it
+) else if ERRORLEVEL 1 (
+ exit /B 1
+) else (
+ set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
+)
+
+rem Only pass -config if the corresponding .config file exists.
+if exist "!RABBITMQ_CONFIG_FILE!.config" (
+ set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"
+) else (
+ set RABBITMQ_CONFIG_ARG=
+)
+
+set RABBITMQ_LISTEN_ARG=
+if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+ if not "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners "[{\"!RABBITMQ_NODE_IP_ADDRESS!\", !RABBITMQ_NODE_PORT!}]"
+ )
+)
+
+rem Full node command line handed to erlsrv; "!VAR:\=/!" rewrites
+rem backslashes to forward slashes for the Erlang side.
+set ERLANG_SERVICE_ARGUMENTS= ^
+-pa "!RABBITMQ_EBIN_ROOT!" ^
+-boot start_sasl ^
+-s rabbit boot ^
+!RABBITMQ_CONFIG_ARG! ^
++W w ^
++A30 ^
++P 1048576 ^
+-kernel inet_default_connect_options "[{nodelay,true}]" ^
+!RABBITMQ_LISTEN_ARG! ^
+!RABBITMQ_SERVER_ERL_ARGS! ^
+-sasl errlog_type error ^
+-sasl sasl_error_logger false ^
+-rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
+-rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
+-rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
+-rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
+-rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
+-rabbit windows_service_config \""!RABBITMQ_CONFIG_FILE:\=/!"\" ^
+-os_mon start_cpu_sup false ^
+-os_mon start_disksup false ^
+-os_mon start_memsup false ^
+-mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
+!RABBITMQ_SERVER_START_ARGS! ^
+!RABBITMQ_DIST_ARG! ^
+!STARVAR!
+
+rem Escape backslashes and quotes so the argument string survives
+rem being embedded in the erlsrv -args parameter.
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!
+
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^
+-machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^
+-env ERL_CRASH_DUMP="!RABBITMQ_BASE:\=/!/erl_crash.dump" ^
+-workdir "!RABBITMQ_BASE!" ^
+-stopaction "rabbit:stop_and_halt()." ^
+-sname !RABBITMQ_NODENAME! ^
+!CONSOLE_FLAG! ^
+-comment "A robust and scalable messaging broker" ^
+-args "!ERLANG_SERVICE_ARGUMENTS!" > NUL
+
+goto END
+
+
+:MODIFY_SERVICE
+
+rem start/stop/disable/enable/list/remove map directly onto erlsrv.
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" !P1! !RABBITMQ_SERVICENAME!
+goto END
+
+
+:END
+
+endlocal
+endlocal
--- /dev/null
+#!/bin/sh -e
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at http://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+##
+
+## Broker control tool: runs rabbit_control_main on a temporary hidden
+## node (sname suffixed with this shell's pid), passing the target
+## node via -nodename and the user's command line via -extra.
+
+# Get default settings with user overrides for (RABBITMQ_)<var_name>
+# Non-empty defaults should be set in rabbitmq-env
+. `dirname $0`/rabbitmq-env
+
+##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
+
+##--- End of overridden <var_name> variables
+
+exec ${ERL_DIR}erl \
+ -pa "${RABBITMQ_HOME}/ebin" \
+ -noinput \
+ -hidden \
+ ${RABBITMQ_CTL_ERL_ARGS} \
+ -sname rabbitmqctl$$ \
+ -boot "${CLEAN_BOOT_FILE}" \
+ -s rabbit_control_main \
+ -nodename $RABBITMQ_NODENAME \
+ -extra "$@"
--- /dev/null
+@echo off
+REM The contents of this file are subject to the Mozilla Public License
+REM Version 1.1 (the "License"); you may not use this file except in
+REM compliance with the License. You may obtain a copy of the License
+REM at http://www.mozilla.org/MPL/
+REM
+REM Software distributed under the License is distributed on an "AS IS"
+REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+REM the License for the specific language governing rights and
+REM limitations under the License.
+REM
+REM The Original Code is RabbitMQ.
+REM
+REM The Initial Developer of the Original Code is GoPivotal, Inc.
+REM Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+REM
+
+REM Windows counterpart of the rabbitmqctl shell script: starts an
+REM Erlang VM running rabbit_control_main against the target node.
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+rem Default the target node name to rabbit@<this machine>.
+if "!COMPUTERNAME!"=="" (
+    set COMPUTERNAME=localhost
+)
+
+if "!RABBITMQ_NODENAME!"=="" (
+    set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
+)
+
+rem Fail early, with guidance, if the Erlang runtime cannot be found.
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+    echo.
+    echo ******************************
+    echo ERLANG_HOME not set correctly.
+    echo ******************************
+    echo.
+    echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+    echo RabbitMQ server distribution in the Erlang lib folder.
+    echo.
+    exit /B
+)
+
+rem -sname rabbitmqctl!RANDOM!!TIME:~9! gives a quasi-unique node name
+rem so concurrent invocations do not clash; -extra forwards the
+rem original command line (captured in STAR above) to
+rem rabbit_control_main.
+"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden !RABBITMQ_CTL_ERL_ARGS! -sname rabbitmqctl!RANDOM!!TIME:~9! -s rabbit_control_main -nodename !RABBITMQ_NODENAME! -extra !STAR!
+
+endlocal
+endlocal
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+-module(app_utils).
+
+-export([load_applications/1, start_applications/1, start_applications/2,
+ stop_applications/1, stop_applications/2, app_dependency_order/2,
+ wait_for_applications/1]).
+
+-ifdef(use_specs).
+
+-type error_handler() :: fun((atom(), any()) -> 'ok').
+
+-spec load_applications([atom()]) -> 'ok'.
+-spec start_applications([atom()]) -> 'ok'.
+-spec stop_applications([atom()]) -> 'ok'.
+-spec start_applications([atom()], error_handler()) -> 'ok'.
+-spec stop_applications([atom()], error_handler()) -> 'ok'.
+-spec wait_for_applications([atom()]) -> 'ok'.
+-spec app_dependency_order([atom()], boolean()) -> [digraph:vertex()].
+
+-endif.
+
+%%---------------------------------------------------------------------------
+%% Public API
+
+%% Load each application in Apps plus, transitively, everything it
+%% depends on (via each app's 'applications' key); applications that
+%% are already loaded are tolerated.
+load_applications(Apps) ->
+    load_applications(queue:from_list(Apps), sets:new()),
+    ok.
+
+%% Start Apps in list order; throws {error,
+%% {cannot_start_application, App, Reason}} on the first failure.
+start_applications(Apps) ->
+    start_applications(
+      Apps, fun (App, Reason) ->
+                    throw({error, {cannot_start_application, App, Reason}})
+            end).
+
+%% Stop Apps (in reverse list order -- see stop_applications/2);
+%% throws {error, {cannot_stop_application, App, Reason}} on failure.
+stop_applications(Apps) ->
+    stop_applications(
+      Apps, fun (App, Reason) ->
+                    throw({error, {cannot_stop_application, App, Reason}})
+            end).
+
+%% As start_applications/1, but errors are reported by calling
+%% ErrorHandler(App, Reason). Already-started apps are skipped; on
+%% error, apps started so far are stopped again (see
+%% manage_applications/6).
+start_applications(Apps, ErrorHandler) ->
+    manage_applications(fun lists:foldl/3,
+                        fun application:start/1,
+                        fun application:stop/1,
+                        already_started,
+                        ErrorHandler,
+                        Apps).
+
+%% As stop_applications/1, but errors are reported by calling
+%% ErrorHandler(App, Reason). Not-running apps are skipped; on error,
+%% apps stopped so far are started again. foldr stops in reverse of
+%% the start order.
+stop_applications(Apps, ErrorHandler) ->
+    manage_applications(fun lists:foldr/3,
+                        fun application:stop/1,
+                        fun application:start/1,
+                        not_started,
+                        ErrorHandler,
+                        Apps).
+
+
+%% Block until every application in Apps shows up in the running
+%% application list, polling once per second (wait_for_application/1).
+wait_for_applications(Apps) ->
+    [wait_for_application(App) || App <- Apps], ok.
+
+%% Return the loaded applications sorted in dependency order
+%% (dependencies before dependents), derived from each loaded app's
+%% 'applications' key. When StripUnreachable is true, apps not
+%% reachable from RootApps are removed from the result. The digraph is
+%% always deleted afterwards ('after' clause), since digraphs are
+%% backed by ETS tables that would otherwise leak.
+app_dependency_order(RootApps, StripUnreachable) ->
+    {ok, G} = rabbit_misc:build_acyclic_graph(
+                fun (App, _Deps) -> [{App, App}] end,
+                fun (App, Deps) -> [{Dep, App} || Dep <- Deps] end,
+                [{App, app_dependencies(App)} ||
+                    {App, _Desc, _Vsn} <- application:loaded_applications()]),
+    try
+        case StripUnreachable of
+            true -> digraph:del_vertices(G, digraph:vertices(G) --
+                        digraph_utils:reachable(RootApps, G));
+            false -> ok
+        end,
+        digraph_utils:topsort(G)
+    after
+        true = digraph:delete(G)
+    end.
+
+%%---------------------------------------------------------------------------
+%% Private API
+
+%% Poll once a second until Application appears in the running
+%% applications reported by rabbit_misc:which_applications/0.
+wait_for_application(Application) ->
+    Running = rabbit_misc:which_applications(),
+    case lists:keymember(Application, 1, Running) of
+        true  -> ok;
+        false -> timer:sleep(1000),
+                 wait_for_application(Application)
+    end.
+
+%% Worklist-driven traversal of the application dependency graph: pop
+%% an app off the queue, load it (tolerating already_loaded, throwing
+%% any other error), and enqueue its dependencies. Loaded is the set
+%% of apps already processed, which prevents re-processing shared
+%% dependencies and guarantees termination.
+load_applications(Worklist, Loaded) ->
+    case queue:out(Worklist) of
+        {empty, _WorkList} ->
+            ok;
+        {{value, App}, Worklist1} ->
+            case sets:is_element(App, Loaded) of
+                true -> load_applications(Worklist1, Loaded);
+                false -> case application:load(App) of
+                             ok -> ok;
+                             {error, {already_loaded, App}} -> ok;
+                             Error -> throw(Error)
+                         end,
+                         load_applications(
+                           queue:join(Worklist1,
+                                      queue:from_list(app_dependencies(App))),
+                           sets:add_element(App, Loaded))
+            end
+    end.
+
+%% The applications App declares in its 'applications' key, or [] when
+%% the app (or the key) is unknown to the application controller.
+app_dependencies(App) ->
+    case application:get_key(App, applications) of
+        {ok, Deps} -> Deps;
+        undefined  -> []
+    end.
+
+%% Apply Do to each app in Apps via Iterate (foldl when starting,
+%% foldr when stopping), accumulating the apps successfully acted
+%% upon. An {error, {SkipError, _}} result (already_started /
+%% not_started) is silently skipped. Any other error first rolls back
+%% by applying Undo to everything done so far, then reports the
+%% failure through ErrorHandler(App, Reason).
+manage_applications(Iterate, Do, Undo, SkipError, ErrorHandler, Apps) ->
+    Iterate(fun (App, Acc) ->
+                    case Do(App) of
+                        ok -> [App | Acc];
+                        {error, {SkipError, _}} -> Acc;
+                        {error, Reason} ->
+                            lists:foreach(Undo, Acc),
+                            ErrorHandler(App, Reason)
+                    end
+            end, [], Apps),
+    ok.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(background_gc).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, run/0]).
+-export([gc/0]). %% For run_interval only
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(MAX_RATIO, 0.01).
+-define(IDEAL_INTERVAL, 60000).
+
+-record(state, {last_interval}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(run/0 :: () -> 'ok').
+-spec(gc/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the background GC server, registered locally as ?MODULE.
+start_link() -> gen_server2:start_link({local, ?MODULE}, ?MODULE, [],
+                                       [{timeout, infinity}]).
+
+%% Request an immediate, asynchronous GC round (does not affect the
+%% timer-driven schedule).
+run() -> gen_server2:cast(?MODULE, run).
+
+%%----------------------------------------------------------------------------
+
+%% Run the first GC round immediately, seeding the adaptive interval
+%% with ?IDEAL_INTERVAL.
+init([]) -> {ok, interval_gc(#state{last_interval = ?IDEAL_INTERVAL})}.
+
+%% No synchronous calls are part of this server's protocol.
+handle_call(Msg, _From, State) ->
+    {stop, {unexpected_call, Msg}, {unexpected_call, Msg}, State}.
+
+%% 'run' (from run/0): perform an extra GC round now, leaving the
+%% scheduled timer untouched.
+handle_cast(run, State) -> gc(), {noreply, State};
+
+handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
+
+%% 'run' (from the timer armed in interval_gc/1): GC and reschedule.
+handle_info(run, State) -> {noreply, interval_gc(State)};
+
+handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%% The return value of terminate/2 is ignored by gen_server; return
+%% the conventional 'ok' rather than the state (which misleadingly
+%% suggested the return mattered).
+terminate(_Reason, _State) -> ok.
+
+%%----------------------------------------------------------------------------
+
+%% Run gc/0 through rabbit_misc:interval_operation/4, which chooses
+%% the next interval so that GC work stays below ?MAX_RATIO of elapsed
+%% time (aiming for ?IDEAL_INTERVAL), then arm a timer for the next
+%% 'run' message and remember the chosen interval.
+interval_gc(State = #state{last_interval = LastInterval}) ->
+    {ok, Interval} = rabbit_misc:interval_operation(
+                       {?MODULE, gc, []},
+                       ?MAX_RATIO, ?IDEAL_INTERVAL, LastInterval),
+    erlang:send_after(Interval, self(), run),
+    State#state{last_interval = Interval}.
+
+%% Garbage-collect every process currently in the 'waiting' status,
+%% then this process itself (which, being the one running, can never
+%% be in the waiting set).
+gc() ->
+    Waiting = [P || P <- processes(),
+                    process_info(P, status) =:= {status, waiting}],
+    lists:foreach(fun garbage_collect/1, Waiting),
+    garbage_collect(), %% since we will never be waiting...
+    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(credit_flow).
+
+%% Credit flow is controlled by a credit specification - a
+%% {InitialCredit, MoreCreditAfter} tuple. For the message sender,
+%% credit starts at InitialCredit and is decremented with every
+%% message sent. The message receiver grants more credit to the sender
+%% by sending it a {bump_credit, ...} control message after receiving
+%% MoreCreditAfter messages. The sender should pass this message in to
+%% handle_bump_msg/1. The sender should block when it goes below 0
+%% (check by invoking blocked/0). If a process is both a sender and a
+%% receiver it will not grant any more credit to its senders when it
+%% is itself blocked - thus the only processes that need to check
+%% blocked/0 are ones that read from network sockets.
+
+-define(DEFAULT_CREDIT, {200, 50}).
+
+-export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0]).
+-export([peer_down/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-opaque(bump_msg() :: {pid(), non_neg_integer()}).
+-type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
+
+-spec(send/1 :: (pid()) -> 'ok').
+-spec(send/2 :: (pid(), credit_spec()) -> 'ok').
+-spec(ack/1 :: (pid()) -> 'ok').
+-spec(ack/2 :: (pid(), credit_spec()) -> 'ok').
+-spec(handle_bump_msg/1 :: (bump_msg()) -> 'ok').
+-spec(blocked/0 :: () -> boolean()).
+-spec(peer_down/1 :: (pid()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% process dict update macro - eliminates the performance-hurting
+%% closure creation a HOF would introduce
+-define(UPDATE(Key, Default, Var, Expr),
+ begin
+ %% We deliberately allow Var to escape from the case here
+ %% to be used in Expr. Any temporary var we introduced
+ %% would also escape, and might conflict.
+ case get(Key) of
+ undefined -> Var = Default;
+ Var -> ok
+ end,
+ put(Key, Expr)
+ end).
+
+%%----------------------------------------------------------------------------
+
+%% There are two "flows" here; of messages and of credit, going in
+%% opposite directions. The variable names "From" and "To" refer to
+%% the flow of credit, but the function names refer to the flow of
+%% messages. This is the clearest I can make it (since the function
+%% names form the API and want to make sense externally, while the
+%% variable names are used in credit bookkeeping and want to make
+%% sense internally).
+
+%% For any given pair of processes, ack/2 and send/2 must always be
+%% called with the same credit_spec().
+
+%% Record that a message was sent to us by peer From, default spec.
+send(From) -> send(From, ?DEFAULT_CREDIT).
+
+%% Decrement the credit held for From (initialised to InitialCredit on
+%% first use); on the transition to zero (C == 1 here), mark ourselves
+%% blocked on From.
+send(From, {InitialCredit, _MoreCreditAfter}) ->
+    ?UPDATE({credit_from, From}, InitialCredit, C,
+            if C == 1 -> block(From),
+                         0;
+               true -> C - 1
+            end).
+
+%% Record receipt/processing of a message from peer To, default spec.
+ack(To) -> ack(To, ?DEFAULT_CREDIT).
+
+%% Count down from MoreCreditAfter; when the counter runs out (C == 1
+%% here), grant To another MoreCreditAfter credits and restart the
+%% countdown.
+ack(To, {_InitialCredit, MoreCreditAfter}) ->
+    ?UPDATE({credit_to, To}, MoreCreditAfter, C,
+            if C == 1 -> grant(To, MoreCreditAfter),
+                         MoreCreditAfter;
+               true -> C - 1
+            end).
+
+%% Process a {bump_credit, ...} message from peer From: add the
+%% granted credit, unblocking if this moves us from =< 0 back above
+%% zero.
+handle_bump_msg({From, MoreCredit}) ->
+    ?UPDATE({credit_from, From}, 0, C,
+            if C =< 0 andalso C + MoreCredit > 0 -> unblock(From),
+                                                    C + MoreCredit;
+               true -> C + MoreCredit
+            end).
+
+%% Are we currently blocked on at least one peer? (credit_blocked
+%% holds the list of peers we are blocked on.)
+blocked() -> case get(credit_blocked) of
+                 undefined -> false;
+                 [] -> false;
+                 _ -> true
+             end.
+
+%% 'flow' if we are blocked now or were blocked within the last five
+%% seconds (per credit_blocked_at), 'running' otherwise.
+%% NOTE(review): uses erlang:now/0, deprecated in later OTP releases;
+%% fine for the OTP versions this code targets -- confirm before
+%% porting.
+state() -> case blocked() of
+               true -> flow;
+               false -> case get(credit_blocked_at) of
+                            undefined -> running;
+                            B -> Diff = timer:now_diff(erlang:now(), B),
+                                 case Diff < 5000000 of
+                                     true -> flow;
+                                     false -> running
+                                 end
+                        end
+           end.
+
+%% Forget all credit state involving Peer (it has died): unblock and
+%% erase both per-peer counters.
+peer_down(Peer) ->
+    %% In theory we could also remove it from credit_deferred here, but it
+    %% doesn't really matter; at some point later we will drain
+    %% credit_deferred and thus send messages into the void...
+    unblock(Peer),
+    erase({credit_from, Peer}),
+    erase({credit_to, Peer}),
+    ok.
+
+%% --------------------------------------------------------------------------
+
+%% Send a bump_credit message to To, or -- while we are ourselves
+%% blocked -- defer it onto the credit_deferred list (a blocked
+%% process must not grant credit to its senders; see module header).
+grant(To, Quantity) ->
+    Msg = {bump_credit, {self(), Quantity}},
+    case blocked() of
+        false -> To ! Msg;
+        true -> ?UPDATE(credit_deferred, [], Deferred, [{To, Msg} | Deferred])
+    end.
+
+%% Add From to the set of peers we are blocked on, recording the time
+%% blocking started (only on the unblocked->blocked edge; consumed by
+%% state/0).
+block(From) ->
+    case blocked() of
+        false -> put(credit_blocked_at, erlang:now());
+        true -> ok
+    end,
+    ?UPDATE(credit_blocked, [], Blocks, [From | Blocks]).
+
+%% Remove From from the blocked set; if that leaves us fully
+%% unblocked, flush any credit grants deferred while blocked.
+unblock(From) ->
+    ?UPDATE(credit_blocked, [], Blocks, Blocks -- [From]),
+    case blocked() of
+        false -> case erase(credit_deferred) of
+                     undefined -> ok;
+                     Credits -> [To ! Msg || {To, Msg} <- Credits]
+                 end;
+        true -> ok
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(delegate).
+
+-behaviour(gen_server2).
+
+-export([start_link/1, invoke_no_result/2, invoke/2,
+ monitor/2, demonitor/1, call/2, cast/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {node, monitors, name}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([monitor_ref/0]).
+
+-type(monitor_ref() :: reference() | {atom(), pid()}).
+-type(fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}).
+
+-spec(start_link/1 ::
+ (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}).
+-spec(invoke/2 :: ( pid(), fun_or_mfa(A)) -> A;
+ ([pid()], fun_or_mfa(A)) -> {[{pid(), A}],
+ [{pid(), term()}]}).
+-spec(invoke_no_result/2 :: (pid() | [pid()], fun_or_mfa(any())) -> 'ok').
+-spec(monitor/2 :: ('process', pid()) -> monitor_ref()).
+-spec(demonitor/1 :: (monitor_ref()) -> 'true').
+
+-spec(call/2 ::
+ ( pid(), any()) -> any();
+ ([pid()], any()) -> {[{pid(), any()}], [{pid(), term()}]}).
+-spec(cast/2 :: (pid() | [pid()], any()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+%% Start delegate process number Num, registered as delegate_<Num>.
+start_link(Num) ->
+    Name = delegate_name(Num),
+    gen_server2:start_link({local, Name}, ?MODULE, [Name], []).
+
+%% Apply FunOrMFA to one pid or a list of pids. Local pids are invoked
+%% directly in the caller's process; remote pids are grouped by node
+%% and invoked via the delegate process on each node. For a single pid
+%% the result is returned (or the exception re-raised) as-is; for a
+%% list the result is {Good, Bad} with Good = [{Pid, Result}] and
+%% Bad = [{Pid, {Class, Reason, StackTrace}}]. Pids on unreachable
+%% nodes appear in Bad as {exit, {nodedown, Node}, []}.
+invoke(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
+    apply1(FunOrMFA, Pid);
+invoke(Pid, FunOrMFA) when is_pid(Pid) ->
+    case invoke([Pid], FunOrMFA) of
+        {[{Pid, Result}], []} ->
+            Result;
+        {[], [{Pid, {Class, Reason, StackTrace}}]} ->
+            erlang:raise(Class, Reason, StackTrace)
+    end;
+
+invoke([], _FunOrMFA) -> %% optimisation
+    {[], []};
+invoke([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation
+    case safe_invoke(Pid, FunOrMFA) of
+        {ok, _, Result} -> {[{Pid, Result}], []};
+        {error, _, Error} -> {[], [{Pid, Error}]}
+    end;
+invoke(Pids, FunOrMFA) when is_list(Pids) ->
+    {LocalPids, Grouped} = group_pids_by_node(Pids),
+    %% The use of multi_call is only safe because the timeout is
+    %% infinity, and thus there is no process spawned in order to do
+    %% the sending. Thus calls can't overtake preceding calls/casts.
+    {Replies, BadNodes} =
+        case orddict:fetch_keys(Grouped) of
+            [] -> {[], []};
+            RemoteNodes -> gen_server2:multi_call(
+                             RemoteNodes, delegate(self(), RemoteNodes),
+                             {invoke, FunOrMFA, Grouped}, infinity)
+        end,
+    BadPids = [{Pid, {exit, {nodedown, BadNode}, []}} ||
+                  BadNode <- BadNodes,
+                  Pid <- orddict:fetch(BadNode, Grouped)],
+    ResultsNoNode = lists:append([safe_invoke(LocalPids, FunOrMFA) |
+                                  [Results || {_Node, Results} <- Replies]]),
+    %% Partition the tagged per-pid results into successes/failures.
+    lists:foldl(
+      fun ({ok, Pid, Result}, {Good, Bad}) -> {[{Pid, Result} | Good], Bad};
+          ({error, Pid, Error}, {Good, Bad}) -> {Good, [{Pid, Error} | Bad]}
+      end, {[], BadPids}, ResultsNoNode).
+
+%% As invoke/2 but fire-and-forget: results and errors are discarded,
+%% and remote work is submitted with abcast (no reply, no blocking).
+invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
+    safe_invoke(Pid, FunOrMFA), %% we don't care about any error
+    ok;
+invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) ->
+    invoke_no_result([Pid], FunOrMFA);
+
+invoke_no_result([], _FunOrMFA) -> %% optimisation
+    ok;
+invoke_no_result([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation
+    safe_invoke(Pid, FunOrMFA), %% must not die
+    ok;
+invoke_no_result(Pids, FunOrMFA) when is_list(Pids) ->
+    {LocalPids, Grouped} = group_pids_by_node(Pids),
+    case orddict:fetch_keys(Grouped) of
+        [] -> ok;
+        RemoteNodes -> gen_server2:abcast(
+                         RemoteNodes, delegate(self(), RemoteNodes),
+                         {invoke, FunOrMFA, Grouped})
+    end,
+    safe_invoke(LocalPids, FunOrMFA), %% must not die
+    ok.
+
+%% Monitor a process. Local pids get a plain erlang:monitor/2
+%% reference; for remote pids the monitoring is done by that node's
+%% delegate and the returned ref is {DelegateName, Pid} (the matching
+%% 'DOWN' is forwarded by the delegate with that tuple as its ref).
+monitor(process, Pid) when node(Pid) =:= node() ->
+    erlang:monitor(process, Pid);
+monitor(process, Pid) ->
+    Name = delegate(Pid, [node(Pid)]),
+    gen_server2:cast(Name, {monitor, self(), Pid}),
+    {Name, Pid}.
+
+%% Undo monitor/2 for either kind of monitor ref.
+demonitor(Ref) when is_reference(Ref) ->
+    erlang:demonitor(Ref);
+demonitor({Name, Pid}) ->
+    gen_server2:cast(Name, {demonitor, self(), Pid}).
+
+%% gen_server2:call with infinity timeout on one or many pids via the
+%% delegate mechanism; result shape follows invoke/2.
+call(PidOrPids, Msg) ->
+    invoke(PidOrPids, {gen_server2, call, [Msg, infinity]}).
+
+%% gen_server2:cast on one or many pids via the delegate mechanism.
+cast(PidOrPids, Msg) ->
+    invoke_no_result(PidOrPids, {gen_server2, cast, [Msg]}).
+
+%%----------------------------------------------------------------------------
+
+%% Split Pids into {LocalPids, RemoteDict}: the pids on this node (in
+%% reverse order of appearance) and an orddict mapping each remote
+%% node to its pids (each list also reversed).
+group_pids_by_node(Pids) ->
+    Here = node(),
+    Add = fun (Pid, {Local, Remote}) ->
+                  case node(Pid) of
+                      Here -> {[Pid | Local], Remote};
+                      Node -> Prepend = fun (Ps) -> [Pid | Ps] end,
+                              {Local, orddict:update(Node, Prepend,
+                                                     [Pid], Remote)}
+                  end
+          end,
+    lists:foldl(Add, {[], orddict:new()}, Pids).
+
+%% Registered name of delegate process number Hash.
+delegate_name(Hash) ->
+    list_to_atom("delegate_" ++ integer_to_list(Hash)).
+
+%% Pick the delegate to route through: hash Pid over the number of
+%% delegates (delegate_sup:count/1), caching the chosen name in the
+%% process dictionary so subsequent calls from this process reuse the
+%% same delegate.
+delegate(Pid, RemoteNodes) ->
+    case get(delegate) of
+        undefined -> Name = delegate_name(
+                              erlang:phash2(Pid,
+                                            delegate_sup:count(RemoteNodes))),
+                     put(delegate, Name),
+                     Name;
+        Name -> Name
+    end.
+
+%% Apply FunOrMFA to each pid / the pid, capturing any exception:
+%% returns {ok, Pid, Result} or {error, Pid, {Class, Reason,
+%% StackTrace}} (a list thereof for a list of pids) -- never throws.
+%% NOTE(review): erlang:get_stacktrace/0 is removed in modern OTP;
+%% appropriate for the OTP versions this code targets.
+safe_invoke(Pids, FunOrMFA) when is_list(Pids) ->
+    [safe_invoke(Pid, FunOrMFA) || Pid <- Pids];
+safe_invoke(Pid, FunOrMFA) when is_pid(Pid) ->
+    try
+        {ok, Pid, apply1(FunOrMFA, Pid)}
+    catch Class:Reason ->
+            {error, Pid, {Class, Reason, erlang:get_stacktrace()}}
+    end.
+
+%% Apply an {M, F, A} with Arg prepended to A, or a fun to Arg.
+apply1({M, F, A}, Arg) -> apply(M, F, [Arg | A]);
+apply1(Fun, Arg) -> Fun(Arg).
+
+%%----------------------------------------------------------------------------
+
+%% State: this node, a dict Pid -> {MonitorRef, gb_set of watchers},
+%% and our registered name (used to tag forwarded 'DOWN's). The server
+%% hibernates aggressively (backoff) as it is often idle.
+init([Name]) ->
+    {ok, #state{node = node(), monitors = dict:new(), name = Name}, hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Synchronous remote invocation (from invoke/2 via multi_call): run
+%% FunOrMFA against the pids grouped under this node and reply with
+%% the tagged per-pid results.
+handle_call({invoke, FunOrMFA, Grouped}, _From, State = #state{node = Node}) ->
+    {reply, safe_invoke(orddict:fetch(Node, Grouped), FunOrMFA), State,
+     hibernate}.
+
+%% Monitor Pid on behalf of MonitoringPid. Only one real monitor per
+%% target is kept; further watchers are added to the gb_set.
+handle_cast({monitor, MonitoringPid, Pid},
+            State = #state{monitors = Monitors}) ->
+    Monitors1 = case dict:find(Pid, Monitors) of
+                    {ok, {Ref, Pids}} ->
+                        Pids1 = gb_sets:add_element(MonitoringPid, Pids),
+                        dict:store(Pid, {Ref, Pids1}, Monitors);
+                    error ->
+                        Ref = erlang:monitor(process, Pid),
+                        Pids = gb_sets:singleton(MonitoringPid),
+                        dict:store(Pid, {Ref, Pids}, Monitors)
+                end,
+    {noreply, State#state{monitors = Monitors1}, hibernate};
+
+%% Drop MonitoringPid's interest in Pid, demonitoring for real once no
+%% watchers remain.
+handle_cast({demonitor, MonitoringPid, Pid},
+            State = #state{monitors = Monitors}) ->
+    Monitors1 = case dict:find(Pid, Monitors) of
+                    {ok, {Ref, Pids}} ->
+                        Pids1 = gb_sets:del_element(MonitoringPid, Pids),
+                        case gb_sets:is_empty(Pids1) of
+                            true -> erlang:demonitor(Ref),
+                                    dict:erase(Pid, Monitors);
+                            false -> dict:store(Pid, {Ref, Pids1}, Monitors)
+                        end;
+                    error ->
+                        Monitors
+                end,
+    {noreply, State#state{monitors = Monitors1}, hibernate};
+
+%% Asynchronous remote invocation (from invoke_no_result/2 via
+%% abcast): run FunOrMFA for this node's pids, discarding results.
+handle_cast({invoke, FunOrMFA, Grouped}, State = #state{node = Node}) ->
+    safe_invoke(orddict:fetch(Node, Grouped), FunOrMFA),
+    {noreply, State, hibernate}.
+
+%% A real 'DOWN' for a monitored pid: forward a translated 'DOWN'
+%% (ref = {Name, Pid}, matching what monitor/2 returned to callers) to
+%% every watcher, then drop the entry.
+handle_info({'DOWN', Ref, process, Pid, Info},
+            State = #state{monitors = Monitors, name = Name}) ->
+    {noreply,
+     case dict:find(Pid, Monitors) of
+         {ok, {Ref, Pids}} ->
+             Msg = {'DOWN', {Name, Pid}, process, Pid, Info},
+             gb_sets:fold(fun (MonitoringPid, _) -> MonitoringPid ! Msg end,
+                          none, Pids),
+             State#state{monitors = dict:erase(Pid, Monitors)};
+         error ->
+             State
+     end, hibernate};
+
+handle_info(_Info, State) ->
+    {noreply, State, hibernate}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(delegate_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/1, count/1]).
+
+-export([init/1]).
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 :: (integer()) -> rabbit_types:ok_pid_or_error()).
+-spec(count/1 :: ([node()]) -> integer()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the delegate supervisor with Count delegate workers.
+start_link(Count) ->
+    supervisor:start_link({local, ?SERVER}, ?MODULE, [Count]).
+
+%% Number of delegate processes available: ask each node in turn for
+%% its delegate_sup child count, skipping nodes that are down or whose
+%% supervisor is not running, and defaulting to 1 when no node
+%% answers.
+count([]) ->
+    1;
+count([Node | Nodes]) ->
+    try
+        length(supervisor:which_children({?SERVER, Node}))
+    catch exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
+            count(Nodes);
+          exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown;
+                           R =:= nodedown ->
+            count(Nodes)
+    end.
+
+%%----------------------------------------------------------------------------
+
+%% One transient delegate worker per index 0..Count-1, restarted
+%% one_for_one (at most 10 restarts in 10 seconds).
+init([Count]) ->
+    {ok, {{one_for_one, 10, 10},
+          [{Num, {delegate, start_link, [Num]},
+            transient, 16#ffffffff, worker, [delegate]} ||
+              Num <- lists:seq(0, Count - 1)]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% A dual-index tree.
+%%
+%% Entries have the following shape:
+%%
+%% +----+--------------------+---+
+%% | PK | SK1, SK2, ..., SKN | V |
+%% +----+--------------------+---+
+%%
+%% i.e. a primary key, set of secondary keys, and a value.
+%%
+%% There can be only one entry per primary key, but secondary keys may
+%% appear in multiple entries.
+%%
+%% The set of secondary keys must be non-empty. Or, to put it another
+%% way, entries only exist while their secondary key set is non-empty.
+
+-module(dtree).
+
+-export([empty/0, insert/4, take/3, take/2, take_all/2, drop/2,
+ is_defined/2, is_empty/1, smallest/1, size/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([?MODULE/0]).
+
+-opaque(?MODULE() :: {gb_tree(), gb_tree()}).
+
+-type(pk() :: any()).
+-type(sk() :: any()).
+-type(val() :: any()).
+-type(kv() :: {pk(), val()}).
+
+-spec(empty/0 :: () -> ?MODULE()).
+-spec(insert/4 :: (pk(), [sk()], val(), ?MODULE()) -> ?MODULE()).
+-spec(take/3 :: ([pk()], sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
+-spec(take/2 :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
+-spec(take_all/2 :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
+-spec(drop/2 :: (pk(), ?MODULE()) -> ?MODULE()).
+-spec(is_defined/2 :: (sk(), ?MODULE()) -> boolean()).
+-spec(is_empty/1 :: (?MODULE()) -> boolean()).
+-spec(smallest/1 :: (?MODULE()) -> kv()).
+-spec(size/1 :: (?MODULE()) -> non_neg_integer()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% An empty dtree: {primary gb_tree PK -> {SK set, V},
+%%                  secondary gb_tree SK -> PK set}.
+empty() -> {gb_trees:empty(), gb_trees:empty()}.
+
+%% Insert an entry. Fails if there already is an entry with the given
+%% primary key.
+insert(PK, [], V, {P, S}) ->
+    %% dummy insert to force error if PK exists
+    %% (the insert's result is deliberately discarded: an entry with no
+    %% secondary keys does not exist, per the module doc)
+    gb_trees:insert(PK, {gb_sets:empty(), V}, P),
+    {P, S};
+insert(PK, SKs, V, {P, S}) ->
+    %% Add the entry to the primary tree and register PK under each of
+    %% its secondary keys in the secondary index.
+    {gb_trees:insert(PK, {gb_sets:from_list(SKs), V}, P),
+     lists:foldl(fun (SK, S0) ->
+                         case gb_trees:lookup(SK, S0) of
+                             {value, PKS} -> PKS1 = gb_sets:insert(PK, PKS),
+                                             gb_trees:update(SK, PKS1, S0);
+                             none -> PKS = gb_sets:singleton(PK),
+                                     gb_trees:insert(SK, PKS, S0)
+                         end
+                 end, S, SKs)}.
+
+%% Remove the given secondary key from the entries of the given
+%% primary keys, returning the primary-key/value pairs of any entries
+%% that were dropped as the result (i.e. due to their secondary key
+%% set becoming empty). It is ok for the given primary keys and/or
+%% secondary key to not exist.
+take(PKs, SK, {P, S}) ->
+    case gb_trees:lookup(SK, S) of
+        none -> {[], {P, S}};
+        %% Only the requested PKs that actually carry SK are touched
+        %% (PKSInter); the rest stay registered under SK (PKSDiff).
+        {value, PKS} -> TakenPKS = gb_sets:from_list(PKs),
+                        PKSInter = gb_sets:intersection(PKS, TakenPKS),
+                        PKSDiff = gb_sets_difference (PKS, PKSInter),
+                        {KVs, P1} = take2(PKSInter, SK, P),
+                        {KVs, {P1, case gb_sets:is_empty(PKSDiff) of
+                                       true -> gb_trees:delete(SK, S);
+                                       false -> gb_trees:update(SK, PKSDiff, S)
+                                   end}}
+    end.
+
+%% Remove the given secondary key from all entries, returning the
+%% primary-key/value pairs of any entries that were dropped as the
+%% result (i.e. due to their secondary key set becoming empty). It is
+%% ok for the given secondary key to not exist.
+take(SK, {P, S}) ->
+    case gb_trees:lookup(SK, S) of
+        none -> {[], {P, S}};
+        {value, PKS} -> {KVs, P1} = take2(PKS, SK, P),
+                        {KVs, {P1, gb_trees:delete(SK, S)}}
+    end.
+
+%% Drop all entries which contain the given secondary key, returning
+%% the primary-key/value pairs of these entries. It is ok for the
+%% given secondary key to not exist.
+take_all(SK, {P, S}) ->
+    case gb_trees:lookup(SK, S) of
+        none -> {[], {P, S}};
+        %% SKS is the union of the dropped entries' other secondary
+        %% keys; prune removes the dropped PKs from those index rows.
+        {value, PKS} -> {KVs, SKS, P1} = take_all2(PKS, P),
+                        {KVs, {P1, prune(SKS, PKS, S)}}
+    end.
+
+%% Drop all entries for the given primary key (which does not have to exist).
+drop(PK, {P, S}) ->
+    case gb_trees:lookup(PK, P) of
+        none -> {P, S};
+        {value, {SKS, _V}} -> {gb_trees:delete(PK, P),
+                               prune(SKS, gb_sets:singleton(PK), S)}
+    end.
+
+%% Is SK a secondary key of at least one entry?
+is_defined(SK, {_P, S}) -> gb_trees:is_defined(SK, S).
+
+%% Has the tree no entries at all?
+is_empty({P, _S}) -> gb_trees:is_empty(P).
+
+%% {PK, V} of the entry with the smallest primary key.
+smallest({P, _S}) -> {K, {_SKS, V}} = gb_trees:smallest(P),
+                     {K, V}.
+
+%% Number of entries (primary keys).
+size({P, _S}) -> gb_trees:size(P).
+
+%%----------------------------------------------------------------------------
+
+%% Remove SK from each of the PKS entries in the primary tree P,
+%% deleting (and returning as {PK, V}) entries whose secondary key set
+%% becomes empty.
+take2(PKS, SK, P) ->
+    gb_sets:fold(fun (PK, {KVs, P0}) ->
+                         {SKS, V} = gb_trees:get(PK, P0),
+                         SKS1 = gb_sets:delete(SK, SKS),
+                         case gb_sets:is_empty(SKS1) of
+                             true -> KVs1 = [{PK, V} | KVs],
+                                     {KVs1, gb_trees:delete(PK, P0)};
+                             false -> {KVs, gb_trees:update(PK, {SKS1, V}, P0)}
+                         end
+                 end, {[], P}, PKS).
+
+%% Delete every PKS entry from the primary tree P, returning the
+%% dropped {PK, V} pairs, the union of the dropped entries' secondary
+%% key sets, and the updated tree.
+take_all2(PKS, P) ->
+    gb_sets:fold(fun (PK, {KVs, SKS0, P0}) ->
+                         {SKS, V} = gb_trees:get(PK, P0),
+                         {[{PK, V} | KVs], gb_sets:union(SKS, SKS0),
+                          gb_trees:delete(PK, P0)}
+                 end, {[], gb_sets:empty(), P}, PKS).
+
+%% Remove the PKS primary keys from each SKS row of the secondary
+%% index S, deleting rows that become empty.
+prune(SKS, PKS, S) ->
+    gb_sets:fold(fun (SK0, S0) ->
+                         PKS1 = gb_trees:get(SK0, S0),
+                         PKS2 = gb_sets_difference(PKS1, PKS),
+                         case gb_sets:is_empty(PKS2) of
+                             true -> gb_trees:delete(SK0, S0);
+                             false -> gb_trees:update(SK0, PKS2, S0)
+                         end
+                 end, S, SKS).
+
+%% S1 minus S2, computed by deleting each element of S2 from S1 --
+%% cheap when S2 is small relative to S1.
+gb_sets_difference(S1, S2) ->
+    Del = fun (E, Acc) -> gb_sets:delete_any(E, Acc) end,
+    gb_sets:fold(Del, S1, S2).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(file_handle_cache).
+
+%% A File Handle Cache
+%%
+%% This extends a subset of the functionality of the Erlang file
+%% module. In the below, we use "file handle" to specifically refer to
+%% file handles, and "file descriptor" to refer to descriptors which
+%% are not file handles, e.g. sockets.
+%%
+%% Some constraints
+%% 1) This supports one writer, multiple readers per file. Nothing
+%% else.
+%% 2) Do not open the same file from different processes. Bad things
+%% may happen, especially for writes.
+%% 3) Writes are all appends. You cannot write to the middle of a
+%% file, although you can truncate and then append if you want.
+%% 4) Although there is a write buffer, there is no read buffer. Feel
+%% free to use the read_ahead mode, but beware of the interaction
+%% between that buffer and the write buffer.
+%%
+%% Some benefits
+%% 1) You do not have to remember to call sync before close
+%% 2) Buffering is much more flexible than with the plain file module,
+%% and you can control when the buffer gets flushed out. This means
+%% that you can rely on reads-after-writes working, without having to
+%% call the expensive sync.
+%% 3) Unnecessary calls to position and sync get optimised out.
+%% 4) You can find out what your 'real' offset is, and what your
+%% 'virtual' offset is (i.e. where the hdl really is, and where it
+%% would be after the write buffer is written out).
+%%
+%% There is also a server component which serves to limit the number
+%% of open file descriptors. This is a hard limit: the server
+%% component will ensure that clients do not have more file
+%% descriptors open than it's configured to allow.
+%%
+%% On open, the client requests permission from the server to open the
+%% required number of file handles. The server may ask the client to
+%% close other file handles that it has open, or it may queue the
+%% request and ask other clients to close file handles they have open
+%% in order to satisfy the request. Requests are always satisfied in
+%% the order they arrive, even if a later request (for a small number
+%% of file handles) can be satisfied before an earlier request (for a
+%% larger number of file handles). On close, the client sends a
+%% message to the server. These messages allow the server to keep
+%% track of the number of open handles. The client also keeps a
+%% gb_tree which is updated on every use of a file handle, mapping the
+%% time at which the file handle was last used (timestamp) to the
+%% handle. Thus the smallest key in this tree maps to the file handle
+%% that has not been used for the longest amount of time. This
+%% smallest key is included in the messages to the server. As such,
+%% the server keeps track of when the least recently used file handle
+%% was used *at the point of the most recent open or close* by each
+%% client.
+%%
+%% Note that this data can go very out of date, by the client using
+%% the least recently used handle.
+%%
+%% When the limit is exceeded (i.e. the number of open file handles is
+%% at the limit and there are pending 'open' requests), the server
+%% calculates the average age of the last reported least recently used
+%% file handle of all the clients. It then tells all the clients to
+%% close any handles not used for longer than this average, by
+%% invoking the callback the client registered. The client should
+%% receive this message and pass it into
+%% set_maximum_since_use/1. However, it is highly possible this age
+%% will be greater than the ages of all the handles the client knows
+%% of because the client has used its file handles in the mean
+%% time. Thus at this point the client reports to the server the
+%% current timestamp at which its least recently used file handle was
+%% last used. The server will check two seconds later that either it
+%% is back under the limit, in which case all is well again, or if
+%% not, it will calculate a new average age. Its data will be much
+%% more recent now, and so it is very likely that when this is
+%% communicated to the clients, the clients will close file handles.
+%% (In extreme cases, where it's very likely that all clients have
+%% used their open handles since they last sent in an update, which
+%% would mean that the average will never cause any file handles to
+%% be closed, the server can send out an average age of 0, resulting
+%% in all available clients closing all their file handles.)
+%%
+%% Care is taken to ensure that (a) processes which are blocked
+%% waiting for file descriptors to become available are not sent
+%% requests to close file handles; and (b) given it is known how many
+%% file handles a process has open, when the average age is forced to
+%% 0, close messages are only sent to enough processes to release the
+%% correct number of file handles and the list of processes is
+%% randomly shuffled. This ensures we don't cause processes to
+%% needlessly close file handles, and ensures that we don't always
+%% make such requests of the same processes.
+%%
+%% The advantage of this scheme is that there is only communication
+%% from the client to the server on open, close, and when in the
+%% process of trying to reduce file handle usage. There is no
+%% communication from the client to the server on normal file handle
+%% operations. This scheme forms a feed-back loop - the server does
+%% not care which file handles are closed, just that some are, and it
+%% checks this repeatedly when over the limit.
+%%
+%% Handles which are closed as a result of the server are put into a
+%% "soft-closed" state in which the handle is closed (data flushed out
+%% and sync'd first) but the state is maintained. The handle will be
+%% fully reopened again as soon as needed, thus users of this library
+%% do not need to worry about their handles being closed by the server
+%% - reopening them when necessary is handled transparently.
+%%
+%% The server also supports obtain, release and transfer. obtain/{0,1}
+%% blocks until a file descriptor is available, at which point the
+%% requesting process is considered to 'own' more descriptor(s).
+%% release/{0,1} is the inverse operation and releases previously obtained
+%% descriptor(s). transfer/{1,2} transfers ownership of file descriptor(s)
+%% between processes. It is non-blocking. Obtain has a
+%% lower limit, set by the ?OBTAIN_LIMIT/1 macro. File handles can use
+%% the entire limit, but will be evicted by obtain calls up to the
+%% point at which no more obtain calls can be satisfied by the obtain
+%% limit. Thus there will always be some capacity available for file
+%% handles. Processes that use obtain are never asked to return them,
+%% and they are not managed in any way by the server. It is simply a
+%% mechanism to ensure that processes that need file descriptors such
+%% as sockets can do so in such a way that the overall number of open
+%% file descriptors is managed.
+%%
+%% The callers of register_callback/3, obtain, and the argument of
+%% transfer are monitored, reducing the count of handles in use
+%% appropriately when the processes terminate.
+
+%% gen_server2 is RabbitMQ's extended gen_server (adds, amongst other
+%% things, the message prioritisation used by prioritise_cast/3 below).
+-behaviour(gen_server2).
+
+%% Client-side API: runs in the caller's process; per-handle state
+%% lives in the caller's process dictionary.
+-export([register_callback/3]).
+-export([open/3, close/1, read/2, append/2, needs_sync/1, sync/1, position/2,
+ truncate/1, current_virtual_offset/1, current_raw_offset/1, flush/1,
+ copy/3, set_maximum_since_use/1, delete/1, clear/1]).
+%% Descriptor-accounting API (obtain/release/transfer/with_handle) and
+%% server queries.
+-export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2,
+ set_limit/1, get_limit/0, info_keys/0, with_handle/1, with_handle/2,
+ info/0, info/1]).
+-export([ulimit/0]).
+
+%% gen_server2 callbacks.
+-export([start_link/0, start_link/2, init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2, code_change/3, prioritise_cast/3]).
+
+-define(SERVER, ?MODULE).
+%% Headroom subtracted from ulimit() for descriptors used elsewhere in
+%% the VM (see init/1).
+-define(RESERVED_FOR_OTHERS, 100).
+
+%% Fallback limit used when ulimit() is unknown (see init/1).
+-define(FILE_HANDLES_LIMIT_OTHER, 1024).
+%% NOTE(review): presumably the "two seconds later" re-check interval
+%% described in the module header; the timer is armed outside this chunk.
+-define(FILE_HANDLES_CHECK_INTERVAL, 2000).
+
+%% Sockets (obtains) may use at most 90% of the limit, less 2, so file
+%% handles always retain some capacity (see module header).
+-define(OBTAIN_LIMIT(LIMIT), trunc((LIMIT * 0.9) - 2)).
+-define(CLIENT_ETS_TABLE, file_handle_cache_client).
+-define(ELDERS_ETS_TABLE, file_handle_cache_elders).
+
+%%----------------------------------------------------------------------------
+
+%% Per-path bookkeeping, stored in the client's process dictionary
+%% under {Path, fhc_file}: how many readers have the path open and
+%% whether a writer exists (at most one writer is allowed; see open/3).
+-record(file,
+ { reader_count,
+ has_writer
+ }).
+
+%% Per-ref handle state, stored in the client's process dictionary
+%% under {Ref, fhc_handle}. hdl is the prim_file handle, or the atom
+%% 'closed' when soft-closed; offset is the real fd position;
+%% write_buffer is a reversed iolist of pending appends totalling
+%% write_buffer_size bytes (limit: write_buffer_size_limit, which may
+%% be 0 = unbuffered or 'infinity'); last_used_at keys this handle in
+%% the client's age tree (undefined while closed).
+-record(handle,
+ { hdl,
+ offset,
+ is_dirty,
+ write_buffer_size,
+ write_buffer_size_limit,
+ write_buffer,
+ at_eof,
+ path,
+ mode,
+ options,
+ is_write,
+ is_read,
+ last_used_at
+ }).
+
+%% Server state. limit caps total descriptors; obtain_limit caps
+%% socket obtains only (see ?OBTAIN_LIMIT). Counts and pending queues
+%% are kept separately per obtain type (file vs socket). elders and
+%% clients are private ETS tables created in init/1; alarm_set and
+%% alarm_clear are the funs invoked by adjust_alarm/2.
+-record(fhc_state,
+ { elders,
+ limit,
+ open_count,
+ open_pending,
+ obtain_limit, %%socket
+ obtain_count_socket,
+ obtain_count_file,
+ obtain_pending_socket,
+ obtain_pending_file,
+ clients,
+ timer_ref,
+ alarm_set,
+ alarm_clear
+ }).
+
+%% Per-client row in ?CLIENT_ETS_TABLE, keyed by pid. blocked is true
+%% while the client has a queued (unsatisfied) request;
+%% pending_closes counts 'close' notifications the server awaits.
+-record(cstate,
+ { pid,
+ callback,
+ opened,
+ obtained_socket,
+ obtained_file,
+ blocked,
+ pending_closes
+ }).
+
+%% One queued request (kind: open | {obtain, Type}); from is the
+%% gen_server2 reply tag of the blocked caller.
+-record(pending,
+ { kind,
+ pid,
+ requested,
+ from
+ }).
+
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+
+%% Dialyzer specs, conditional on the legacy use_specs flag (old-OTP
+%% compatibility idiom).
+-ifdef(use_specs).
+
+-type(ref() :: any()).
+-type(ok_or_error() :: 'ok' | {'error', any()}).
+-type(val_or_error(T) :: {'ok', T} | {'error', any()}).
+-type(position() :: ('bof' | 'eof' | non_neg_integer() |
+ {('bof' |'eof'), non_neg_integer()} |
+ {'cur', integer()})).
+-type(offset() :: non_neg_integer()).
+
+-spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok').
+-spec(open/3 ::
+ (file:filename(), [any()],
+ [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')}])
+ -> val_or_error(ref())).
+-spec(close/1 :: (ref()) -> ok_or_error()).
+-spec(read/2 :: (ref(), non_neg_integer()) ->
+ val_or_error([char()] | binary()) | 'eof').
+-spec(append/2 :: (ref(), iodata()) -> ok_or_error()).
+-spec(sync/1 :: (ref()) -> ok_or_error()).
+-spec(position/2 :: (ref(), position()) -> val_or_error(offset())).
+-spec(truncate/1 :: (ref()) -> ok_or_error()).
+-spec(current_virtual_offset/1 :: (ref()) -> val_or_error(offset())).
+-spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())).
+-spec(flush/1 :: (ref()) -> ok_or_error()).
+-spec(copy/3 :: (ref(), ref(), non_neg_integer()) ->
+ val_or_error(non_neg_integer())).
+-spec(delete/1 :: (ref()) -> ok_or_error()).
+-spec(clear/1 :: (ref()) -> ok_or_error()).
+-spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok').
+-spec(obtain/0 :: () -> 'ok').
+-spec(obtain/1 :: (non_neg_integer()) -> 'ok').
+-spec(release/0 :: () -> 'ok').
+-spec(release/1 :: (non_neg_integer()) -> 'ok').
+-spec(transfer/1 :: (pid()) -> 'ok').
+-spec(transfer/2 :: (pid(), non_neg_integer()) -> 'ok').
+-spec(with_handle/1 :: (fun(() -> A)) -> A).
+-spec(with_handle/2 :: (non_neg_integer(), fun(() -> A)) -> A).
+-spec(set_limit/1 :: (non_neg_integer()) -> 'ok').
+-spec(get_limit/0 :: () -> non_neg_integer()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/0 :: () -> rabbit_types:infos()).
+-spec(info/1 :: ([atom()]) -> rabbit_types:infos()).
+-spec(ulimit/0 :: () -> 'unknown' | non_neg_integer()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Items reported by info/0,1; projected by i/2 below.
+-define(INFO_KEYS, [total_limit, total_used, sockets_limit, sockets_used]).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Starts the FHC server registered locally as ?SERVER, with the
+%% default alarm_handler set/clear funs for the descriptor alarm.
+start_link() ->
+ start_link(fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
+
+%% AlarmSet/AlarmClear are 1-arity funs fired on transitions of the
+%% socket (obtain) limit; see adjust_alarm/2.
+start_link(AlarmSet, AlarmClear) ->
+ gen_server2:start_link({local, ?SERVER}, ?MODULE, [AlarmSet, AlarmClear],
+ [{timeout, infinity}]).
+
+%% Registers {M, F, A} as the callback the server uses to ask this
+%% process to close file handles (stored in this client's #cstate;
+%% per the module header it is invoked when the server needs handles
+%% returned).
+register_callback(M, F, A)
+ when is_atom(M) andalso is_atom(F) andalso is_list(A) ->
+ gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}).
+
+%% Opens Path (made absolute) with Mode/Options, enforcing at most one
+%% writer per path within this process. Returns {ok, Ref} or
+%% {error, writer_exists | _}. A fresh handle starts 'closed' and is
+%% materialised via get_or_reopen/1, which asks the server for
+%% permission; on failure the handle record is erased again.
+open(Path, Mode, Options) ->
+ Path1 = filename:absname(Path),
+ File1 = #file { reader_count = RCount, has_writer = HasWriter } =
+ case get({Path1, fhc_file}) of
+ File = #file {} -> File;
+ undefined -> #file { reader_count = 0,
+ has_writer = false }
+ end,
+ Mode1 = append_to_write(Mode),
+ IsWriter = is_writer(Mode1),
+ case IsWriter andalso HasWriter of
+ true -> {error, writer_exists};
+ false -> {ok, Ref} = new_closed_handle(Path1, Mode1, Options),
+ case get_or_reopen([{Ref, new}]) of
+ {ok, [_Handle1]} ->
+ RCount1 = case is_reader(Mode1) of
+ true -> RCount + 1;
+ false -> RCount
+ end,
+ HasWriter1 = HasWriter orelse IsWriter,
+ put({Path1, fhc_file},
+ File1 #file { reader_count = RCount1,
+ has_writer = HasWriter1 }),
+ {ok, Ref};
+ Error ->
+ erase({Ref, fhc_handle}),
+ Error
+ end
+ end.
+
+%% Fully closes Ref: flush, sync, close and drop the per-path
+%% accounting (see hard_close/1). Unknown refs are a no-op; on error
+%% the handle is stored back so the caller may retry.
+close(Ref) ->
+ case erase({Ref, fhc_handle}) of
+ undefined -> ok;
+ Handle -> case hard_close(Handle) of
+ ok -> ok;
+ {Error, Handle1} -> put_handle(Ref, Handle1),
+ Error
+ end
+ end.
+
+%% Reads up to Count bytes at the current offset, flushing the write
+%% buffer first (so reads-after-writes work). Returns {ok, Data},
+%% eof (also marking the handle at_eof), or {error, _}.
+read(Ref, Count) ->
+ with_flushed_handles(
+ [Ref],
+ fun ([#handle { is_read = false }]) ->
+ {error, not_open_for_reading};
+ ([Handle = #handle { hdl = Hdl, offset = Offset }]) ->
+ case prim_file:read(Hdl, Count) of
+ {ok, Data} = Obj -> Offset1 = Offset + iolist_size(Data),
+ {Obj,
+ [Handle #handle { offset = Offset1 }]};
+ eof -> {eof, [Handle #handle { at_eof = true }]};
+ Error -> {Error, [Handle]}
+ end
+ end).
+
+%% Appends Data at eof (seeking there first if necessary). With an
+%% unbuffered handle (limit 0) the data is written through
+%% immediately; otherwise it is prepended to the (reversed) write
+%% buffer and the buffer is flushed once it exceeds its size limit.
+append(Ref, Data) ->
+ with_handles(
+ [Ref],
+ fun ([#handle { is_write = false }]) ->
+ {error, not_open_for_writing};
+ ([Handle]) ->
+ case maybe_seek(eof, Handle) of
+ {{ok, _Offset}, #handle { hdl = Hdl, offset = Offset,
+ write_buffer_size_limit = 0,
+ at_eof = true } = Handle1} ->
+ Offset1 = Offset + iolist_size(Data),
+ {prim_file:write(Hdl, Data),
+ [Handle1 #handle { is_dirty = true, offset = Offset1 }]};
+ {{ok, _Offset}, #handle { write_buffer = WriteBuffer,
+ write_buffer_size = Size,
+ write_buffer_size_limit = Limit,
+ at_eof = true } = Handle1} ->
+ WriteBuffer1 = [Data | WriteBuffer],
+ Size1 = Size + iolist_size(Data),
+ Handle2 = Handle1 #handle { write_buffer = WriteBuffer1,
+ write_buffer_size = Size1 },
+ case Limit =/= infinity andalso Size1 > Limit of
+ true -> {Result, Handle3} = write_buffer(Handle2),
+ {Result, [Handle3]};
+ false -> {ok, [Handle2]}
+ end;
+ {{error, _} = Error, Handle1} ->
+ {Error, [Handle1]}
+ end
+ end).
+
+%% fsyncs the handle iff it has unsynced writes; the write buffer has
+%% already been flushed by with_flushed_handles (hence the
+%% write_buffer = [] matches).
+sync(Ref) ->
+ with_flushed_handles(
+ [Ref],
+ fun ([#handle { is_dirty = false, write_buffer = [] }]) ->
+ ok;
+ ([Handle = #handle { hdl = Hdl,
+ is_dirty = true, write_buffer = [] }]) ->
+ case prim_file:sync(Hdl) of
+ ok -> {ok, [Handle #handle { is_dirty = false }]};
+ Error -> {Error, [Handle]}
+ end
+ end).
+
+%% True iff the handle has dirty (unsynced) or still-buffered data.
+%% Reads the process dictionary directly; crashes on an unknown Ref.
+needs_sync(Ref) ->
+ %% This must *not* use with_handles/2; see bug 25052
+ case get({Ref, fhc_handle}) of
+ #handle { is_dirty = false, write_buffer = [] } -> false;
+ #handle {} -> true
+ end.
+
+%% Moves the offset (flushing the write buffer first); the actual
+%% prim_file seek may be optimised away by maybe_seek/2.
+position(Ref, NewOffset) ->
+ with_flushed_handles(
+ [Ref],
+ fun ([Handle]) -> {Result, Handle1} = maybe_seek(NewOffset, Handle),
+ {Result, [Handle1]}
+ end).
+
+%% Truncates the file at the current offset (after flushing); we are
+%% then by definition at eof.
+truncate(Ref) ->
+ with_flushed_handles(
+ [Ref],
+ fun ([Handle1 = #handle { hdl = Hdl }]) ->
+ case prim_file:truncate(Hdl) of
+ ok -> {ok, [Handle1 #handle { at_eof = true }]};
+ Error -> {Error, [Handle1]}
+ end
+ end).
+
+%% Virtual offset: raw offset plus any unflushed buffered bytes. The
+%% buffer only ever holds appends at eof of a writable handle, hence
+%% the first clause's at_eof/is_write match.
+current_virtual_offset(Ref) ->
+ with_handles([Ref], fun ([#handle { at_eof = true, is_write = true,
+ offset = Offset,
+ write_buffer_size = Size }]) ->
+ {ok, Offset + Size};
+ ([#handle { offset = Offset }]) ->
+ {ok, Offset}
+ end).
+
+%% Raw offset: where the underlying fd actually is, ignoring any
+%% unflushed write buffer.
+current_raw_offset(Ref) ->
+ with_handles([Ref], fun ([Handle]) -> {ok, Handle #handle.offset} end).
+
+%% Forces the write buffer out to the fd (no sync).
+flush(Ref) ->
+ with_flushed_handles([Ref], fun ([Handle]) -> {ok, [Handle]} end).
+
+%% Copies Count bytes from Src (must be readable) to Dest (must be
+%% writable) via prim_file:copy/3, advancing both offsets by the
+%% number of bytes actually copied and dirtying the destination.
+copy(Src, Dest, Count) ->
+ with_flushed_handles(
+ [Src, Dest],
+ fun ([SHandle = #handle { is_read = true, hdl = SHdl, offset = SOffset },
+ DHandle = #handle { is_write = true, hdl = DHdl, offset = DOffset }]
+ ) ->
+ case prim_file:copy(SHdl, DHdl, Count) of
+ {ok, Count1} = Result1 ->
+ {Result1,
+ [SHandle #handle { offset = SOffset + Count1 },
+ DHandle #handle { offset = DOffset + Count1,
+ is_dirty = true }]};
+ Error ->
+ {Error, [SHandle, DHandle]}
+ end;
+ (_Handles) ->
+ {error, incorrect_handle_modes}
+ end).
+
+%% Closes Ref (deliberately discarding any buffered/dirty writes -
+%% the file is about to go away) and deletes the file from disk.
+delete(Ref) ->
+ case erase({Ref, fhc_handle}) of
+ undefined ->
+ ok;
+ Handle = #handle { path = Path } ->
+ case hard_close(Handle #handle { is_dirty = false,
+ write_buffer = [] }) of
+ ok -> prim_file:delete(Path);
+ {Error, Handle1} -> put_handle(Ref, Handle1),
+ Error
+ end
+ end.
+
+%% Empties the file: drop the write buffer, seek to bof and truncate.
+%% A no-op if already empty (at eof, offset 0, nothing buffered).
+clear(Ref) ->
+ with_handles(
+ [Ref],
+ fun ([#handle { at_eof = true, write_buffer_size = 0, offset = 0 }]) ->
+ ok;
+ ([Handle]) ->
+ case maybe_seek(bof, Handle #handle { write_buffer = [],
+ write_buffer_size = 0 }) of
+ {{ok, 0}, Handle1 = #handle { hdl = Hdl }} ->
+ case prim_file:truncate(Hdl) of
+ ok -> {ok, [Handle1 #handle { at_eof = true }]};
+ Error -> {Error, [Handle1]}
+ end;
+ {{error, _} = Error, Handle1} ->
+ {Error, [Handle1]}
+ end
+ end).
+
+%% Server-requested eviction: soft-close every open handle in this
+%% process not used within MaximumAge microseconds (timer:now_diff
+%% units). If nothing was closed, still report our current eldest to
+%% the server via age_tree_change/0 so its data is refreshed.
+%% NOTE(review): now/0 is deprecated on modern OTP; retained here
+%% because the age tree is keyed by now/0 values throughout.
+set_maximum_since_use(MaximumAge) ->
+ Now = now(),
+ case lists:foldl(
+ fun ({{Ref, fhc_handle},
+ Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) ->
+ case Hdl =/= closed andalso
+ timer:now_diff(Now, Then) >= MaximumAge of
+ true -> soft_close(Ref, Handle) orelse Rep;
+ false -> Rep
+ end;
+ (_KeyValuePair, Rep) ->
+ Rep
+ end, false, get()) of
+ false -> age_tree_change(), ok;
+ true -> ok
+ end.
+
+%% Convenience arities: counts default to 1; obtain/1 and release/1
+%% default to the 'socket' type.
+obtain() -> obtain(1).
+release() -> release(1).
+transfer(Pid) -> transfer(Pid, 1).
+
+obtain(Count) -> obtain(Count, socket).
+release(Count) -> release(Count, socket).
+
+%% Runs Fun while holding N (default 1) descriptors of type 'file',
+%% guaranteeing release even if Fun throws.
+with_handle(Fun) ->
+ with_handle(1, Fun).
+
+with_handle(N, Fun) ->
+ ok = obtain(N, file),
+ try Fun()
+ after ok = release(N, file)
+ end.
+
+%% Blocks until the server grants Count descriptors of Type
+%% (file | socket); succeeds immediately if the server is down.
+obtain(Count, Type) when Count > 0 ->
+ %% If the FHC isn't running, obtains succeed immediately.
+ case whereis(?SERVER) of
+ undefined -> ok;
+ _ -> gen_server2:call(
+ ?SERVER, {obtain, Count, Type, self()}, infinity)
+ end.
+
+%% Returns previously obtained descriptors (asynchronous).
+release(Count, Type) when Count > 0 ->
+ gen_server2:cast(?SERVER, {release, Count, Type, self()}).
+
+%% Transfers Count obtained descriptors from self() to Pid
+%% (asynchronous; counted against the socket type - see handle_cast).
+transfer(Pid, Count) when Count > 0 ->
+ gen_server2:cast(?SERVER, {transfer, Count, self(), Pid}).
+
+%% Changes the total descriptor limit at runtime.
+set_limit(Limit) ->
+ gen_server2:call(?SERVER, {set_limit, Limit}, infinity).
+
+get_limit() ->
+ gen_server2:call(?SERVER, get_limit, infinity).
+
+info_keys() -> ?INFO_KEYS.
+
+info() -> info(?INFO_KEYS).
+info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+
+%% True iff the mode list requests read access.
+is_reader(Mode) -> lists:any(fun (M) -> M =:= read end, Mode).
+
+%% True iff the mode list requests write access.
+is_writer(Mode) -> lists:any(fun (M) -> M =:= write end, Mode).
+
+%% Normalise a mode list containing 'append' into one headed by
+%% 'write' (appends are implemented by seeking to eof; see append/2).
+%% '--' removes only the first occurrence of each element, exactly as
+%% before.
+append_to_write(Mode) ->
+    HasAppend = lists:member(append, Mode),
+    if
+        HasAppend -> [write | Mode -- [append, write]];
+        true      -> Mode
+    end.
+
+%% Fetches (reopening if soft-closed) the handles for Refs and runs
+%% Fun on them. If Fun returns {Result, Handles1}, the updated
+%% handles are stored back (re-stamping last_used_at via
+%% put_handle/2); a bare Result stores nothing.
+with_handles(Refs, Fun) ->
+ case get_or_reopen([{Ref, reopen} || Ref <- Refs]) of
+ {ok, Handles} ->
+ case Fun(Handles) of
+ {Result, Handles1} when is_list(Handles1) ->
+ lists:zipwith(fun put_handle/2, Refs, Handles1),
+ Result;
+ Result ->
+ Result
+ end;
+ Error ->
+ Error
+ end.
+
+%% As with_handles/2, but writes out each handle's buffer first. On a
+%% flush error Fun is not run; the error and the (partially flushed)
+%% handles are returned so they are still stored back.
+with_flushed_handles(Refs, Fun) ->
+ with_handles(
+ Refs,
+ fun (Handles) ->
+ case lists:foldl(
+ fun (Handle, {ok, HandlesAcc}) ->
+ {Res, Handle1} = write_buffer(Handle),
+ {Res, [Handle1 | HandlesAcc]};
+ (Handle, {Error, HandlesAcc}) ->
+ {Error, [Handle | HandlesAcc]}
+ end, {ok, []}, Handles) of
+ {ok, Handles1} ->
+ Fun(lists:reverse(Handles1));
+ {Error, Handles1} ->
+ {Error, lists:reverse(Handles1)}
+ end
+ end).
+
+%% Ensures every requested handle is open. Closed ones need server
+%% permission (the {open, ...} call, passing our eldest timestamp); a
+%% 'close' reply means the server wants handles back first, so we
+%% soft-close everything open in this process and retry.
+get_or_reopen(RefNewOrReopens) ->
+ case partition_handles(RefNewOrReopens) of
+ {OpenHdls, []} ->
+ {ok, [Handle || {_Ref, Handle} <- OpenHdls]};
+ {OpenHdls, ClosedHdls} ->
+ Oldest = oldest(get_age_tree(), fun () -> now() end),
+ case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls),
+ Oldest}, infinity) of
+ ok ->
+ case reopen(ClosedHdls) of
+ {ok, RefHdls} -> sort_handles(RefNewOrReopens,
+ OpenHdls, RefHdls, []);
+ Error -> Error
+ end;
+ close ->
+ [soft_close(Ref, Handle) ||
+ {{Ref, fhc_handle}, Handle = #handle { hdl = Hdl }} <-
+ get(),
+ Hdl =/= closed],
+ get_or_reopen(RefNewOrReopens)
+ end
+ end.
+
+%% Opens each closed handle in turn, inserting it into the age tree.
+%% Reopens add 'read' to the mode so the previous offset can be
+%% restored with maybe_seek (a pure-write reopen could not seek back
+%% over existing data safely otherwise). On failure we inform the
+%% server of one close per handle we did not open, since it already
+%% counted all of them when granting the open request.
+reopen(ClosedHdls) -> reopen(ClosedHdls, get_age_tree(), []).
+
+reopen([], Tree, RefHdls) ->
+ put_age_tree(Tree),
+ {ok, lists:reverse(RefHdls)};
+reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed,
+ path = Path,
+ mode = Mode,
+ offset = Offset,
+ last_used_at = undefined }} |
+ RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) ->
+ case prim_file:open(Path, case NewOrReopen of
+ new -> Mode;
+ reopen -> [read | Mode]
+ end) of
+ {ok, Hdl} ->
+ Now = now(),
+ {{ok, _Offset}, Handle1} =
+ maybe_seek(Offset, Handle #handle { hdl = Hdl,
+ offset = 0,
+ last_used_at = Now }),
+ put({Ref, fhc_handle}, Handle1),
+ reopen(RefNewOrReopenHdls, gb_trees:insert(Now, Ref, Tree),
+ [{Ref, Handle1} | RefHdls]);
+ Error ->
+ %% NB: none of the handles in ToOpen are in the age tree
+ Oldest = oldest(Tree, fun () -> undefined end),
+ [gen_server2:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen],
+ put_age_tree(Tree),
+ Error
+ end.
+
+%% Splits {Ref, NewOrReopen} requests into {Open, Closed} according
+%% to each handle's current state, preserving relative order (foldr).
+partition_handles(RefNewOrReopens) ->
+ lists:foldr(
+ fun ({Ref, NewOrReopen}, {Open, Closed}) ->
+ case get({Ref, fhc_handle}) of
+ #handle { hdl = closed } = Handle ->
+ {Open, [{Ref, NewOrReopen, Handle} | Closed]};
+ #handle {} = Handle ->
+ {[{Ref, Handle} | Open], Closed}
+ end
+ end, {[], []}, RefNewOrReopens).
+
+%% Re-assembles handles into the order the refs were requested in:
+%% each ref's handle is taken from either the already-open list or
+%% the just-reopened list, both of which preserve request order.
+sort_handles([], [], [], Acc) ->
+    {ok, lists:reverse(Acc)};
+sort_handles([{Ref, _} | RefsRest], [{Ref, Hdl} | OpenRest], Reopened, Acc) ->
+    sort_handles(RefsRest, OpenRest, Reopened, [Hdl | Acc]);
+sort_handles([{Ref, _} | RefsRest], Open, [{Ref, Hdl} | ReopenedRest], Acc) ->
+    sort_handles(RefsRest, Open, ReopenedRest, [Hdl | Acc]).
+
+%% Stores an updated handle back into the process dictionary,
+%% stamping last_used_at and re-keying the ref in the age tree.
+put_handle(Ref, Handle = #handle { last_used_at = Then }) ->
+ Now = now(),
+ age_tree_update(Then, Now, Ref),
+ put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }).
+
+%% The per-process age tree (a gb_tree mapping last-used timestamp to
+%% handle ref) lives in the process dictionary under fhc_age_tree.
+with_age_tree(Fun) -> put_age_tree(Fun(get_age_tree())).
+
+get_age_tree() ->
+ case get(fhc_age_tree) of
+ undefined -> gb_trees:empty();
+ AgeTree -> AgeTree
+ end.
+
+put_age_tree(Tree) -> put(fhc_age_tree, Tree).
+
+%% Re-keys Ref from timestamp Then to Now (Then may be absent, e.g.
+%% for a handle that was closed - hence delete_any).
+age_tree_update(Then, Now, Ref) ->
+ with_age_tree(
+ fun (Tree) ->
+ gb_trees:insert(Now, Ref, gb_trees:delete_any(Then, Tree))
+ end).
+
+%% Removes timestamp Then and notifies the server of a close,
+%% reporting our new eldest (or undefined if none remain).
+age_tree_delete(Then) ->
+ with_age_tree(
+ fun (Tree) ->
+ Tree1 = gb_trees:delete_any(Then, Tree),
+ Oldest = oldest(Tree1, fun () -> undefined end),
+ gen_server2:cast(?SERVER, {close, self(), Oldest}),
+ Tree1
+ end).
+
+%% Reports our current eldest timestamp to the server (used when an
+%% eviction request closed nothing, so the server gets fresh data);
+%% sends nothing if we hold no open handles.
+age_tree_change() ->
+ with_age_tree(
+ fun (Tree) ->
+ case gb_trees:is_empty(Tree) of
+ true -> Tree;
+ false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
+ gen_server2:cast(?SERVER, {update, self(), Oldest})
+ end,
+ Tree
+ end).
+
+%% Smallest (i.e. least-recently-used) key of the age tree, or
+%% DefaultFun() when the tree is empty.
+oldest(Tree, DefaultFun) ->
+    case gb_trees:size(Tree) of
+        0 -> DefaultFun();
+        _ -> {LeastRecent, _Ref} = gb_trees:smallest(Tree),
+             LeastRecent
+    end.
+
+%% Creates a fresh handle record in the 'closed' state (no fd yet) in
+%% the process dictionary; buffering comes from the write_buffer
+%% option: unbuffered (0 bytes) | infinity | a byte count.
+new_closed_handle(Path, Mode, Options) ->
+ WriteBufferSize =
+ case proplists:get_value(write_buffer, Options, unbuffered) of
+ unbuffered -> 0;
+ infinity -> infinity;
+ N when is_integer(N) -> N
+ end,
+ Ref = make_ref(),
+ put({Ref, fhc_handle}, #handle { hdl = closed,
+ offset = 0,
+ is_dirty = false,
+ write_buffer_size = 0,
+ write_buffer_size_limit = WriteBufferSize,
+ write_buffer = [],
+ at_eof = false,
+ path = Path,
+ mode = Mode,
+ options = Options,
+ is_write = is_writer(Mode),
+ is_read = is_reader(Mode),
+ last_used_at = undefined }),
+ {ok, Ref}.
+
+%% Soft-closes Ref's handle in place; returns true on success, false
+%% on failure (in which case the handle is stored back re-stamped as
+%% just used, keeping it out of the next eviction round).
+soft_close(Ref, Handle) ->
+ {Res, Handle1} = soft_close(Handle),
+ case Res of
+ ok -> put({Ref, fhc_handle}, Handle1),
+ true;
+ _ -> put_handle(Ref, Handle1),
+ false
+ end.
+
+%% Flush + (if dirty) sync + close the fd, but keep the handle record
+%% so it can be transparently reopened later; removes the handle from
+%% the age tree and notifies the server (via age_tree_delete/1).
+soft_close(Handle = #handle { hdl = closed }) ->
+ {ok, Handle};
+soft_close(Handle) ->
+ case write_buffer(Handle) of
+ {ok, #handle { hdl = Hdl,
+ is_dirty = IsDirty,
+ last_used_at = Then } = Handle1 } ->
+ ok = case IsDirty of
+ true -> prim_file:sync(Hdl);
+ false -> ok
+ end,
+ ok = prim_file:close(Hdl),
+ age_tree_delete(Then),
+ {ok, Handle1 #handle { hdl = closed,
+ is_dirty = false,
+ last_used_at = undefined }};
+ {_Error, _Handle} = Result ->
+ Result
+ end.
+
+%% Soft-close plus removal of the per-path accounting: decrements the
+%% reader count / clears the writer flag, erasing the #file entry
+%% once the path is completely unused by this process.
+hard_close(Handle) ->
+ case soft_close(Handle) of
+ {ok, #handle { path = Path,
+ is_read = IsReader, is_write = IsWriter }} ->
+ #file { reader_count = RCount, has_writer = HasWriter } = File =
+ get({Path, fhc_file}),
+ RCount1 = case IsReader of
+ true -> RCount - 1;
+ false -> RCount
+ end,
+ HasWriter1 = HasWriter andalso not IsWriter,
+ case RCount1 =:= 0 andalso not HasWriter1 of
+ true -> erase({Path, fhc_file});
+ false -> put({Path, fhc_file},
+ File #file { reader_count = RCount1,
+ has_writer = HasWriter1 })
+ end,
+ ok;
+ {_Error, _Handle} = Result ->
+ Result
+ end.
+
+%% Seeks only when needs_seek/3 says the fd must actually move;
+%% returns {{ok, NewRawOffset} | {error, _}, Handle1} with the
+%% handle's offset and at_eof flag updated on success.
+maybe_seek(NewOffset, Handle = #handle { hdl = Hdl, offset = Offset,
+ at_eof = AtEoF }) ->
+ {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Offset, NewOffset),
+ case (case NeedsSeek of
+ true -> prim_file:position(Hdl, NewOffset);
+ false -> {ok, Offset}
+ end) of
+ {ok, Offset1} = Result ->
+ {Result, Handle #handle { offset = Offset1, at_eof = AtEoF1 }};
+ {error, _} = Error ->
+ {Error, Handle}
+ end.
+
+%% Decides whether a real prim_file:position/2 call is required to
+%% move from CurOffset to DesiredOffset, and what the at-eof flag
+%% becomes: returns {AtEoF1, NeedsSeek}. Same decision table as
+%% before, expressed as a single case: no-op moves keep the current
+%% flag; seeking forwards from eof stays at eof; everything else
+%% conservatively clears the flag, because we cannot track file size
+%% and could be at eof without knowing it.
+needs_seek(AtEoF, CurOffset, DesiredOffset) ->
+    case {AtEoF, CurOffset, DesiredOffset} of
+        {_, _, cur}          -> {AtEoF, false};
+        {_, _, {cur, 0}}     -> {AtEoF, false};
+        {true,  _, eof}      -> {true, false};
+        {true,  _, {eof, 0}} -> {true, false};
+        {false, _, eof}      -> {true, true};
+        {false, _, {eof, 0}} -> {true, true};
+        {_, 0, bof}          -> {AtEoF, false};
+        {_, 0, {bof, 0}}     -> {AtEoF, false};
+        {_, Same, Same}      -> {AtEoF, false};
+        {true, _, {bof, D}} when D >= CurOffset -> {true, true};
+        {true, _, {cur, D}} when D > 0          -> {true, true};
+        %% a bare integer offset means the same as {bof, Offset}
+        {true, _, D} when is_integer(D), D >= CurOffset -> {true, true};
+        _ -> {false, true}
+    end.
+
+%% Writes the accumulated (reversed) buffer out in a single
+%% prim_file:write and advances the offset; no-op for an empty
+%% buffer. A non-empty buffer implies at_eof (the buffer only ever
+%% holds appends), hence the at_eof = true match.
+write_buffer(Handle = #handle { write_buffer = [] }) ->
+ {ok, Handle};
+write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
+ write_buffer = WriteBuffer,
+ write_buffer_size = DataSize,
+ at_eof = true }) ->
+ case prim_file:write(Hdl, lists:reverse(WriteBuffer)) of
+ ok ->
+ Offset1 = Offset + DataSize,
+ {ok, Handle #handle { offset = Offset1, is_dirty = true,
+ write_buffer = [], write_buffer_size = 0 }};
+ {error, _} = Error ->
+ {Error, Handle}
+ end.
+
+%% Projects the requested ?INFO_KEYS items out of the server state.
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(total_limit, #fhc_state{limit = Limit}) -> Limit;
+i(total_used, State) -> used(State);
+i(sockets_limit, #fhc_state{obtain_limit = Limit}) -> Limit;
+i(sockets_used, #fhc_state{obtain_count_socket = Count}) -> Count;
+i(Item, _) -> throw({bad_argument, Item}).
+
+%% Total descriptors in use: opens plus both obtain types.
+used(#fhc_state{open_count = C1,
+ obtain_count_socket = C2,
+ obtain_count_file = C3}) -> C1 + C2 + C3.
+
+%%----------------------------------------------------------------------------
+%% gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+%% The limit comes from the file_handles_high_watermark app env if
+%% set to a positive integer, otherwise ulimit() minus
+%% ?RESERVED_FOR_OTHERS (floored at 2), falling back to a fixed
+%% constant when ulimit() is unknown. clients (keyed on #cstate.pid)
+%% and elders are private ETS tables; all counts start at zero with
+%% empty pending queues.
+init([AlarmSet, AlarmClear]) ->
+ Limit = case application:get_env(file_handles_high_watermark) of
+ {ok, Watermark} when (is_integer(Watermark) andalso
+ Watermark > 0) ->
+ Watermark;
+ _ ->
+ case ulimit() of
+ unknown -> ?FILE_HANDLES_LIMIT_OTHER;
+ Lim -> lists:max([2, Lim - ?RESERVED_FOR_OTHERS])
+ end
+ end,
+ ObtainLimit = obtain_limit(Limit),
+ error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n",
+ [Limit, ObtainLimit]),
+ Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
+ Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
+ {ok, #fhc_state { elders = Elders,
+ limit = Limit,
+ open_count = 0,
+ open_pending = pending_new(),
+ obtain_limit = ObtainLimit,
+ obtain_count_file = 0,
+ obtain_pending_file = pending_new(),
+ obtain_count_socket = 0,
+ obtain_pending_socket = pending_new(),
+ clients = Clients,
+ timer_ref = undefined,
+ alarm_set = AlarmSet,
+ alarm_clear = AlarmClear }}.
+
+%% Handle 'release' casts ahead of other messages so freed capacity
+%% is accounted for before further requests are processed.
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {release, _, _, _} -> 5;
+ _ -> 0
+ end.
+
+%% open: record the client's eldest-handle timestamp, then either run
+%% the request now, or - when over the limit - queue it (marking the
+%% client blocked) and start reducing; if the client already has
+%% handles open we instead reply 'close', telling it to soft-close
+%% them and retry (see get_or_reopen/1).
+handle_call({open, Pid, Requested, EldestUnusedSince}, From,
+ State = #fhc_state { open_count = Count,
+ open_pending = Pending,
+ elders = Elders,
+ clients = Clients })
+ when EldestUnusedSince =/= undefined ->
+ true = ets:insert(Elders, {Pid, EldestUnusedSince}),
+ Item = #pending { kind = open,
+ pid = Pid,
+ requested = Requested,
+ from = From },
+ ok = track_client(Pid, Clients),
+ case needs_reduce(State #fhc_state { open_count = Count + Requested }) of
+ true -> case ets:lookup(Clients, Pid) of
+ [#cstate { opened = 0 }] ->
+ true = ets:update_element(
+ Clients, Pid, {#cstate.blocked, true}),
+ {noreply,
+ reduce(State #fhc_state {
+ open_pending = pending_in(Item, Pending) })};
+ [#cstate { opened = Opened }] ->
+ true = ets:update_element(
+ Clients, Pid,
+ {#cstate.pending_closes, Opened}),
+ {reply, close, State}
+ end;
+ false -> {noreply, run_pending_item(Item, State)}
+ end;
+
+%% obtain: queue (blocking the client) when the relevant limit is
+%% reached; otherwise run it, reducing or adjusting the alarm as
+%% required.
+handle_call({obtain, N, Type, Pid}, From,
+ State = #fhc_state { clients = Clients }) ->
+ Count = obtain_state(Type, count, State),
+ Pending = obtain_state(Type, pending, State),
+ ok = track_client(Pid, Clients),
+ Item = #pending { kind = {obtain, Type}, pid = Pid,
+ requested = N, from = From },
+ Enqueue = fun () ->
+ true = ets:update_element(Clients, Pid,
+ {#cstate.blocked, true}),
+ set_obtain_state(Type, pending,
+ pending_in(Item, Pending), State)
+ end,
+ {noreply,
+ case obtain_limit_reached(Type, State) of
+ true -> Enqueue();
+ false -> case needs_reduce(
+ set_obtain_state(Type, count, Count + 1, State)) of
+ true -> reduce(Enqueue());
+ false -> adjust_alarm(
+ State, run_pending_item(Item, State))
+ end
+ end};
+
+%% set_limit: re-derive the obtain limit, run anything now
+%% satisfiable and reduce/re-alarm as appropriate.
+handle_call({set_limit, Limit}, _From, State) ->
+ {reply, ok, adjust_alarm(
+ State, maybe_reduce(
+ process_pending(
+ State #fhc_state {
+ limit = Limit,
+ obtain_limit = obtain_limit(Limit) })))};
+
+handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) ->
+ {reply, Limit, State};
+
+handle_call({info, Items}, _From, State) ->
+ {reply, infos(Items, State), State}.
+
+%% Stash the MFA used to ask this client to close handles.
+handle_cast({register_callback, Pid, MFA},
+ State = #fhc_state { clients = Clients }) ->
+ ok = track_client(Pid, Clients),
+ true = ets:update_element(Clients, Pid, {#cstate.callback, MFA}),
+ {noreply, State};
+
+%% A client refreshing its eldest-handle timestamp.
+handle_cast({update, Pid, EldestUnusedSince},
+ State = #fhc_state { elders = Elders })
+ when EldestUnusedSince =/= undefined ->
+ true = ets:insert(Elders, {Pid, EldestUnusedSince}),
+ %% don't call maybe_reduce from here otherwise we can create a
+ %% storm of messages
+ {noreply, State};
+
+%% Give back obtained descriptors and run any now-satisfiable work.
+handle_cast({release, N, Type, Pid}, State) ->
+ State1 = process_pending(update_counts({obtain, Type}, Pid, -N, State)),
+ {noreply, adjust_alarm(State, State1)};
+
+%% A client closed a handle: refresh (or clear) its elders entry,
+%% decrement its pending-close counter (floored at 0 by the
+%% update_counter threshold) and its open count.
+handle_cast({close, Pid, EldestUnusedSince},
+ State = #fhc_state { elders = Elders, clients = Clients }) ->
+ true = case EldestUnusedSince of
+ undefined -> ets:delete(Elders, Pid);
+ _ -> ets:insert(Elders, {Pid, EldestUnusedSince})
+ end,
+ ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}),
+ {noreply, adjust_alarm(State, process_pending(
+ update_counts(open, Pid, -1, State)))};
+
+%% Move obtained (socket) descriptors from one client to another;
+%% the recipient becomes tracked/monitored too.
+handle_cast({transfer, N, FromPid, ToPid}, State) ->
+ ok = track_client(ToPid, State#fhc_state.clients),
+ {noreply, process_pending(
+ update_counts({obtain, socket}, ToPid, +N,
+ update_counts({obtain, socket}, FromPid, -N,
+ State)))}.
+
+%% Deferred re-check of the limit; clears the timer ref so a new
+%% check can be scheduled.
+handle_info(check_counts, State) ->
+ {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })};
+
+%% A monitored client died: drop its ETS rows, return everything it
+%% held to the pools and discard its queued requests, then run any
+%% now-satisfiable pending work.
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+ State = #fhc_state { elders = Elders,
+ open_count = OpenCount,
+ open_pending = OpenPending,
+ obtain_count_file = ObtainCountF,
+ obtain_count_socket = ObtainCountS,
+ obtain_pending_file = ObtainPendingF,
+ obtain_pending_socket = ObtainPendingS,
+ clients = Clients }) ->
+ [#cstate { opened = Opened,
+ obtained_file = ObtainedFile,
+ obtained_socket = ObtainedSocket}] =
+ ets:lookup(Clients, Pid),
+ true = ets:delete(Clients, Pid),
+ true = ets:delete(Elders, Pid),
+ Fun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end,
+ State1 = process_pending(
+ State #fhc_state {
+ open_count = OpenCount - Opened,
+ open_pending = filter_pending(Fun, OpenPending),
+ obtain_count_file = ObtainCountF - ObtainedFile,
+ obtain_count_socket = ObtainCountS - ObtainedSocket,
+ obtain_pending_file = filter_pending(Fun, ObtainPendingF),
+ obtain_pending_socket = filter_pending(Fun, ObtainPendingS) }),
+ {noreply, adjust_alarm(State, State1)}.
+
+%% Tidy up the ETS tables; the return value of terminate/2 is
+%% ignored by the gen_server machinery.
+terminate(_Reason, State = #fhc_state { clients = Clients,
+ elders = Elders }) ->
+ ets:delete(Clients),
+ ets:delete(Elders),
+ State.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% pending queue abstraction helpers
+%%----------------------------------------------------------------------------
+
+%% Folds Fun over the queue front-to-back - the same order a
+%% queue:out/1 loop would visit. queue:to_list/1 yields the items in
+%% exactly that order, so a plain lists:foldl is equivalent to the
+%% recursive dequeue loop.
+queue_fold(Fun, Init, Q) ->
+    lists:foldl(Fun, Init, queue:to_list(Q)).
+
+%% Drops queue items rejected by Fun, decrementing the pending
+%% requested-total by each dropped item's 'requested' count.
+filter_pending(Fun, {Count, Queue}) ->
+ {Delta, Queue1} =
+ queue_fold(
+ fun (Item = #pending { requested = Requested }, {DeltaN, QueueN}) ->
+ case Fun(Item) of
+ true -> {DeltaN, queue:in(Item, QueueN)};
+ false -> {DeltaN - Requested, QueueN}
+ end
+ end, {0, queue:new()}, Queue),
+ {Count + Delta, Queue1}.
+
+%% A pending queue is {TotalRequested, queue-of-#pending}: the count
+%% is the sum of the 'requested' fields, maintained on in/out.
+pending_new() ->
+ {0, queue:new()}.
+
+pending_in(Item = #pending { requested = Requested }, {Count, Queue}) ->
+ {Count + Requested, queue:in(Item, Queue)}.
+
+%% FIFO dequeue; {empty, Pending} when nothing is queued.
+pending_out({0, _Queue} = Pending) ->
+ {empty, Pending};
+pending_out({N, Queue}) ->
+ {{value, #pending { requested = Requested }} = Result, Queue1} =
+ queue:out(Queue),
+ {Result, {N - Requested, Queue1}}.
+
+pending_count({Count, _Queue}) ->
+ Count.
+
+pending_is_empty({0, _Queue}) ->
+ true;
+pending_is_empty({_N, _Queue}) ->
+ false.
+
+%%----------------------------------------------------------------------------
+%% server helpers
+%%----------------------------------------------------------------------------
+
+obtain_limit(infinity) -> infinity;
+obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of
+ OLimit when OLimit < 0 -> 0;
+ OLimit -> OLimit
+ end.
+
+obtain_limit_reached(socket, State) -> obtain_limit_reached(State);
+obtain_limit_reached(file, State) -> needs_reduce(State).
+
+%% True when the socket-obtain cap is finite and has been hit.
+obtain_limit_reached(#fhc_state{obtain_limit = Limit,
+                                obtain_count_socket = Count}) ->
+    Limit =/= infinity andalso Count >= Limit.
+
+%% Read the count/pending field for the given obtain resource type.
+obtain_state(file, count, #fhc_state{obtain_count_file = N}) -> N;
+obtain_state(socket, count, #fhc_state{obtain_count_socket = N}) -> N;
+obtain_state(file, pending, #fhc_state{obtain_pending_file = N}) -> N;
+obtain_state(socket, pending, #fhc_state{obtain_pending_socket = N}) -> N.
+
+%% Write back the count/pending field for the given obtain resource type.
+set_obtain_state(file, count, N, S) -> S#fhc_state{obtain_count_file = N};
+set_obtain_state(socket, count, N, S) -> S#fhc_state{obtain_count_socket = N};
+set_obtain_state(file, pending, N, S) -> S#fhc_state{obtain_pending_file = N};
+set_obtain_state(socket, pending, N, S) -> S#fhc_state{obtain_pending_socket = N}.
+
+%% Raise or clear the file_descriptor_limit alarm when the old and new
+%% states differ on whether the socket-obtain cap is reached. Returns
+%% NewState unchanged.
+adjust_alarm(OldState = #fhc_state { alarm_set = AlarmSet,
+                                     alarm_clear = AlarmClear }, NewState) ->
+    case {obtain_limit_reached(OldState), obtain_limit_reached(NewState)} of
+        {false, true} -> AlarmSet({file_descriptor_limit, []});
+        {true, false} -> AlarmClear(file_descriptor_limit);
+        _ -> ok
+    end,
+    NewState.
+
+%% With no limit configured nothing ever queues; otherwise service the
+%% queues: file obtains first, then socket obtains, then opens.
+process_pending(State = #fhc_state { limit = infinity }) ->
+    State;
+process_pending(State) ->
+    process_open(process_obtain(socket, process_obtain(file, State))).
+
+%% Grant queued open requests while used handles stay within the limit.
+process_open(State = #fhc_state { limit = Limit,
+                                  open_pending = Pending}) ->
+    {Pending1, State1} = process_pending(Pending, Limit - used(State), State),
+    State1 #fhc_state { open_pending = Pending1 }.
+
+%% Grant queued obtain requests of the given type. Files are bounded
+%% only by overall headroom under the limit; sockets additionally by
+%% the remaining socket-obtain allowance.
+process_obtain(Type, State = #fhc_state { limit = Limit,
+                                          obtain_limit = ObtainLimit }) ->
+    Count    = obtain_state(Type, count, State),
+    Pending  = obtain_state(Type, pending, State),
+    Headroom = Limit - used(State),
+    Quota    = case Type of
+                   file   -> Headroom;
+                   socket -> erlang:min(ObtainLimit - Count, Headroom)
+               end,
+    {Pending1, State1} = process_pending(Pending, Quota, State),
+    set_obtain_state(Type, pending, Pending1, State1).
+
+%% Dequeue and run pending items while quota remains. Stops at the
+%% first item whose request exceeds the remaining quota — strict FIFO,
+%% no reordering past a too-large request.
+process_pending(Pending, Quota, State) when Quota =< 0 ->
+    {Pending, State};
+process_pending(Pending, Quota, State) ->
+    case pending_out(Pending) of
+        {empty, _Pending} ->
+            {Pending, State};
+        {{value, #pending { requested = Requested }}, _Pending1}
+          when Requested > Quota ->
+            {Pending, State};
+        {{value, #pending { requested = Requested } = Item}, Pending1} ->
+            process_pending(Pending1, Quota - Requested,
+                            run_pending_item(Item, State))
+    end.
+
+%% Grant one blocked request: reply to the waiting caller, mark the
+%% client unblocked in ETS, and account for the newly used handles.
+run_pending_item(#pending { kind = Kind,
+                            pid = Pid,
+                            requested = Requested,
+                            from = From },
+                 State = #fhc_state { clients = Clients }) ->
+    gen_server2:reply(From, ok),
+    true = ets:update_element(Clients, Pid, {#cstate.blocked, false}),
+    update_counts(Kind, Pid, Requested, State).
+
+%% Apply a handle-count delta of the given kind both to the per-client
+%% ETS record and to the aggregate counters in the server state.
+update_counts(Kind, Pid, Delta,
+              State = #fhc_state { open_count = OpenCount,
+                                   obtain_count_file = ObtainCountF,
+                                   obtain_count_socket = ObtainCountS,
+                                   clients = Clients }) ->
+    {OpenDelta, ObtainDeltaF, ObtainDeltaS} =
+        update_counts1(Kind, Pid, Delta, Clients),
+    State #fhc_state { open_count = OpenCount + OpenDelta,
+                       obtain_count_file = ObtainCountF + ObtainDeltaF,
+                       obtain_count_socket = ObtainCountS + ObtainDeltaS }.
+
+%% Bump the matching per-client counter; returns the delta triple
+%% {OpenDelta, ObtainFileDelta, ObtainSocketDelta} for the aggregates.
+update_counts1(open, Pid, Delta, Clients) ->
+    ets:update_counter(Clients, Pid, {#cstate.opened, Delta}),
+    {Delta, 0, 0};
+update_counts1({obtain, file}, Pid, Delta, Clients) ->
+    ets:update_counter(Clients, Pid, {#cstate.obtained_file, Delta}),
+    {0, Delta, 0};
+update_counts1({obtain, socket}, Pid, Delta, Clients) ->
+    ets:update_counter(Clients, Pid, {#cstate.obtained_socket, Delta}),
+    {0, 0, Delta}.
+
+%% Run a reduction pass only when the state says one is needed.
+maybe_reduce(State) ->
+    case needs_reduce(State) of
+        true  -> reduce(State);
+        false -> State
+    end.
+
+%% True when handle usage must shrink: over the limit, or opens/file
+%% obtains are queued, or socket obtains are queued while still under
+%% their dedicated cap (so freeing handles would unblock them).
+needs_reduce(State = #fhc_state { limit = Limit,
+                                  open_pending = OpenPending,
+                                  obtain_limit = ObtainLimit,
+                                  obtain_count_socket = ObtainCountS,
+                                  obtain_pending_file = ObtainPendingF,
+                                  obtain_pending_socket = ObtainPendingS }) ->
+    Limit =/= infinity
+        andalso ((used(State) > Limit)
+                 orelse (not pending_is_empty(OpenPending))
+                 orelse (not pending_is_empty(ObtainPendingF))
+                 orelse (ObtainCountS < ObtainLimit
+                         andalso not pending_is_empty(ObtainPendingS))).
+
+%% Ask clients to close handles so usage can fall back under the limit.
+%% Unblocked clients with closable handles are averaged by the age of
+%% their eldest handle: if the mean age exceeds the check interval each
+%% notifiable client is told the average age; otherwise clients are
+%% asked, starting from a random rotation point, to close enough handles
+%% to cover all queued requests. Schedules a recheck timer if one is not
+%% already running.
+reduce(State = #fhc_state { open_pending = OpenPending,
+                            obtain_pending_file = ObtainPendingFile,
+                            obtain_pending_socket = ObtainPendingSocket,
+                            elders = Elders,
+                            clients = Clients,
+                            timer_ref = TRef }) ->
+    %% os:timestamp/0 rather than the deprecated erlang:now/0: we only
+    %% need a timestamp for timer:now_diff/2 below, not now/0's
+    %% uniqueness/monotonicity guarantee.
+    Now = os:timestamp(),
+    {CStates, Sum, ClientCount} =
+        ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
+                          [#cstate { pending_closes = PendingCloses,
+                                     opened = Opened,
+                                     blocked = Blocked } = CState] =
+                              ets:lookup(Clients, Pid),
+                          %% skip clients that are blocked or have already
+                          %% been asked to close everything they have open
+                          case Blocked orelse PendingCloses =:= Opened of
+                              true  -> Accs;
+                              false -> {[CState | CStatesAcc],
+                                        SumAcc + timer:now_diff(Now, Eldest),
+                                        CountAcc + 1}
+                          end
+                  end, {[], 0, 0}, Elders),
+    case CStates of
+        [] -> ok;
+        _  -> case (Sum / ClientCount) -
+                  (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of
+                  AverageAge when AverageAge > 0 ->
+                      notify_age(CStates, AverageAge);
+                  _ ->
+                      notify_age0(Clients, CStates,
+                                  pending_count(OpenPending) +
+                                      pending_count(ObtainPendingFile) +
+                                      pending_count(ObtainPendingSocket))
+              end
+    end,
+    case TRef of
+        undefined -> TRef1 = erlang:send_after(
+                               ?FILE_HANDLES_CHECK_INTERVAL, ?SERVER,
+                               check_counts),
+                     State #fhc_state { timer_ref = TRef1 };
+        _         -> State
+    end.
+
+%% Tell each client that registered an {M,F,A} callback the average age
+%% of the eldest handles; clients without a callback are skipped.
+notify_age(CStates, AverageAge) ->
+    Notify = fun (#cstate { callback = undefined }) -> ok;
+                 (#cstate { callback = {M, F, A} }) ->
+                     apply(M, F, A ++ [AverageAge])
+             end,
+    lists:foreach(Notify, CStates).
+
+%% Ask clients with callbacks — rotated from a random starting point so
+%% the burden is spread — to close handles until Required is covered.
+%% NOTE(review): the 'random' module is deprecated in modern OTP in
+%% favour of 'rand'; presumably kept for older-release compatibility.
+notify_age0(Clients, CStates, Required) ->
+    case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of
+        [] -> ok;
+        Notifications -> S = random:uniform(length(Notifications)),
+                         {L1, L2} = lists:split(S, Notifications),
+                         notify(Clients, Required, L2 ++ L1)
+    end.
+
+%% Walk the rotated client list, asking each to close all of its opened
+%% handles (callback argument 0) and recording that request in ETS,
+%% until the required number of closes is covered or the list ends.
+notify(_Clients, _Required, []) ->
+    ok;
+notify(_Clients, Required, _Notifications) when Required =< 0 ->
+    ok;
+notify(Clients, Required, [#cstate{ pid = Pid,
+                                    callback = {M, F, A},
+                                    opened = Opened } | Notifications]) ->
+    apply(M, F, A ++ [0]),
+    ets:update_element(Clients, Pid, {#cstate.pending_closes, Opened}),
+    notify(Clients, Required - Opened, Notifications).
+
+%% Ensure Pid has a #cstate{} row; on first sight, monitor the process
+%% so its handles can be reclaimed when it dies.
+track_client(Pid, Clients) ->
+    case ets:insert_new(Clients, #cstate { pid = Pid,
+                                           callback = undefined,
+                                           opened = 0,
+                                           obtained_file = 0,
+                                           obtained_socket = 0,
+                                           blocked = false,
+                                           pending_closes = 0 }) of
+        true -> _MRef = erlang:monitor(process, Pid),
+                ok;
+        false -> ok
+    end.
+
+
+%% To increase the number of file descriptors: on Windows set ERL_MAX_PORTS
+%% environment variable, on Linux set `ulimit -n`.
+%% Returns the usable file descriptor count for this VM, or 'unknown'
+%% when the emulator does not report a sensible max_fds.
+ulimit() ->
+    case proplists:get_value(max_fds, erlang:system_info(check_io)) of
+        MaxFds when is_integer(MaxFds) andalso MaxFds > 1 ->
+            case os:type() of
+                {win32, _OsName} ->
+                    %% On Windows max_fds is twice the number of open files:
+                    %% https://github.com/yrashk/erlang/blob/e1282325ed75e52a98d5/erts/emulator/sys/win32/sys.c#L2459-2466
+                    MaxFds div 2;
+                _Any ->
+                    %% For other operating systems trust Erlang.
+                    MaxFds
+            end;
+        _ ->
+            unknown
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(gatherer).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, stop/1, fork/1, finish/1, in/2, sync_in/2, out/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(stop/1 :: (pid()) -> 'ok').
+-spec(fork/1 :: (pid()) -> 'ok').
+-spec(finish/1 :: (pid()) -> 'ok').
+-spec(in/2 :: (pid(), any()) -> 'ok').
+-spec(sync_in/2 :: (pid(), any()) -> 'ok').
+-spec(out/1 :: (pid()) -> {'value', any()} | 'empty').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+-record(gstate, { forks, values, blocked }).
+
+%%----------------------------------------------------------------------------
+
+%% Start an anonymous gatherer.
+start_link() ->
+    gen_server2:start_link(?MODULE, [], [{timeout, infinity}]).
+
+%% Synchronously stop the gatherer.
+stop(Pid) ->
+    gen_server2:call(Pid, stop, infinity).
+
+%% Register one more producer; out/1 only reports 'empty' once every
+%% fork has been matched by a finish/1.
+fork(Pid) ->
+    gen_server2:call(Pid, fork, infinity).
+
+%% Signal that one producer has no more values to contribute.
+finish(Pid) ->
+    gen_server2:cast(Pid, finish).
+
+%% Contribute a value asynchronously.
+in(Pid, Value) ->
+    gen_server2:cast(Pid, {in, Value}).
+
+%% Contribute a value, blocking until a consumer has taken it.
+sync_in(Pid, Value) ->
+    gen_server2:call(Pid, {in, Value}, infinity).
+
+%% Take a value: returns {value, V}; blocks while producers remain but
+%% no value is buffered; returns 'empty' once all producers finished.
+out(Pid) ->
+    gen_server2:call(Pid, out, infinity).
+
+%%----------------------------------------------------------------------------
+
+%% No producers, no buffered values, no blocked consumers; hibernate
+%% between bursts of activity using the backoff parameters above.
+init([]) ->
+    {ok, #gstate { forks = 0, values = queue:new(), blocked = queue:new() },
+     hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Shutdown requested via stop/1.
+handle_call(stop, _From, State) ->
+    {stop, normal, ok, State};
+
+%% One more producer registered via fork/1.
+handle_call(fork, _From, State = #gstate { forks = Forks }) ->
+    {reply, ok, State #gstate { forks = Forks + 1 }, hibernate};
+
+%% sync_in/2: the reply to the producer is deferred inside in/3 until a
+%% consumer takes the value.
+handle_call({in, Value}, From, State) ->
+    {noreply, in(Value, From, State), hibernate};
+
+handle_call(out, From, State = #gstate { forks = Forks,
+                                         values = Values,
+                                         blocked = Blocked }) ->
+    case queue:out(Values) of
+        %% no values and no producers left: nothing more will come
+        {empty, _} when Forks == 0 ->
+            {reply, empty, State, hibernate};
+        %% no values yet: park this consumer until in/finish
+        {empty, _} ->
+            {noreply, State #gstate { blocked = queue:in(From, Blocked) },
+             hibernate};
+        %% value ready: ack a deferred sync_in producer (if any) and
+        %% hand the value over
+        {{value, {PendingIn, Value}}, NewValues} ->
+            reply(PendingIn, ok),
+            {reply, {value, Value}, State #gstate { values = NewValues },
+             hibernate}
+    end;
+
+handle_call(Msg, _From, State) ->
+    {stop, {unexpected_call, Msg}, State}.
+
+%% A producer finished. When the last one goes, every blocked consumer
+%% is released with 'empty'.
+handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) ->
+    NewForks = Forks - 1,
+    NewBlocked = case NewForks of
+                     0 -> [gen_server2:reply(From, empty) ||
+                              From <- queue:to_list(Blocked)],
+                          queue:new();
+                     _ -> Blocked
+                 end,
+    {noreply, State #gstate { forks = NewForks, blocked = NewBlocked },
+     hibernate};
+
+%% in/2: asynchronous contribution, no producer to ack ('undefined').
+handle_cast({in, Value}, State) ->
+    {noreply, in(Value, undefined, State), hibernate};
+
+handle_cast(Msg, State) ->
+    {stop, {unexpected_cast, Msg}, State}.
+
+%% Any raw message is unexpected and stops the gatherer.
+handle_info(Msg, State) ->
+    {stop, {unexpected_info, Msg}, State}.
+
+%% No state transformation is required across code upgrades.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% Nothing to clean up; returns the final state.
+terminate(_Reason, State) ->
+    State.
+
+%%----------------------------------------------------------------------------
+
+%% Deliver or buffer a value. If a consumer is waiting, ack the producer
+%% (when synchronous) and hand the value straight over; otherwise buffer
+%% the value together with the producer's From ('undefined' for in/2).
+in(Value, From, State = #gstate { values = Values, blocked = Blocked }) ->
+    case queue:out(Blocked) of
+        {empty, _} ->
+            State #gstate { values = queue:in({From, Value}, Values) };
+        {{value, PendingOut}, NewBlocked} ->
+            reply(From, ok),
+            gen_server2:reply(PendingOut, {value, Value}),
+            State #gstate { blocked = NewBlocked }
+    end.
+
+%% Reply helper tolerating 'undefined' From (asynchronous in/2).
+reply(undefined, _Reply) -> ok;
+reply(From, Reply) -> gen_server2:reply(From, Reply).
--- /dev/null
+%% This file is a copy of gen_server.erl from the R13B-1 Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) the module name is gen_server2
+%%
+%% 2) more efficient handling of selective receives in callbacks
+%% gen_server2 processes drain their message queue into an internal
+%% buffer before invoking any callback module functions. Messages are
+%% dequeued from the buffer for processing. Thus the effective message
+%% queue of a gen_server2 process is the concatenation of the internal
+%% buffer and the real message queue.
+%% As a result of the draining, any selective receive invoked inside a
+%% callback is less likely to have to scan a large message queue.
+%%
+%% 3) gen_server2:cast is guaranteed to be order-preserving
+%% The original code could reorder messages when communicating with a
+%% process on a remote node that was not currently connected.
+%%
+%% 4) The callback module can optionally implement prioritise_call/4,
+%% prioritise_cast/3 and prioritise_info/3. These functions take
+%% Message, From, Length and State or just Message, Length and State
+%% (where Length is the current number of messages waiting to be
+%% processed) and return a single integer representing the priority
+%% attached to the message, or 'drop' to ignore it (for
+%% prioritise_cast/3 and prioritise_info/3 only). Messages with
+%% higher priorities are processed before requests with lower
+%% priorities. The default priority is 0.
+%%
+%% 5) The callback module can optionally implement
+%% handle_pre_hibernate/1 and handle_post_hibernate/1. These will be
+%% called immediately prior to and post hibernation, respectively. If
+%% handle_pre_hibernate returns {hibernate, NewState} then the process
+%% will hibernate. If the module does not implement
+%% handle_pre_hibernate/1 then the default action is to hibernate.
+%%
+%% 6) init can return a 4th arg, {backoff, InitialTimeout,
+%% MinimumTimeout, DesiredHibernatePeriod} (all in milliseconds,
+%% 'infinity' does not make sense here). Then, on all callbacks which
+%% can return a timeout (including init), timeout can be
+%% 'hibernate'. When this is the case, the current timeout value will
+%% be used (initially, the InitialTimeout supplied from init). After
+%% this timeout has occurred, hibernation will occur as normal. Upon
+%% awaking, a new current timeout value will be calculated.
+%%
+%% The purpose is that the gen_server2 takes care of adjusting the
+%% current timeout value such that the process will increase the
+%% timeout value repeatedly if it is unable to sleep for the
+%% DesiredHibernatePeriod. If it is able to sleep for the
+%% DesiredHibernatePeriod it will decrease the current timeout down to
+%% the MinimumTimeout, so that the process is put to sleep sooner (and
+%% hopefully stays asleep for longer). In short, should a process
+%% using this receive a burst of messages, it should not hibernate
+%% between those messages, but as the messages become less frequent,
+%% the process will not only hibernate, it will do so sooner after
+%% each message.
+%%
+%% When using this backoff mechanism, normal timeout values (i.e. not
+%% 'hibernate') can still be used, and if they are used then the
+%% handle_info(timeout, State) will be called as normal. In this case,
+%% returning 'hibernate' from handle_info(timeout, State) will not
+%% hibernate the process immediately, as it would if backoff wasn't
+%% being used. Instead it'll wait for the current timeout as described
+%% above.
+%%
+%% 7) The callback module can return from any of the handle_*
+%% functions, a {become, Module, State} triple, or a {become, Module,
+%% State, Timeout} quadruple. This allows the gen_server to
+%% dynamically change the callback module. The State is the new state
+%% which will be passed into any of the callback functions in the new
+%% module. Note there is no form also encompassing a reply, thus if
+%% you wish to reply in handle_call/3 and change the callback module,
+%% you need to use gen_server2:reply/2 to issue the reply manually.
+%%
+%% 8) The callback module can optionally implement
+%% format_message_queue/2 which is the equivalent of format_status/2
+%% but where the second argument is specifically the priority_queue
+%% which contains the prioritised message_queue.
+%%
+%% 9) The function with_state/2 can be used to debug a process with
+%% heavyweight state (without needing to copy the entire state out of
+%% process as sys:get_status/1 would). Pass through a function which
+%% can be invoked on the state, get back the result. The state is not
+%% modified.
+%%
+%% 10) an mcall/1 function has been added for performing multiple
+%% call/3 in parallel. Unlike multi_call, which sends the same request
+%% to same-named processes residing on a supplied list of nodes, it
+%% operates on name/request pairs, where name is anything accepted by
+%% call/3, i.e. a pid, global name, local name, or local name on a
+%% particular node.
+%%
+
+%% All modifications are (C) 2009-2013 GoPivotal, Inc.
+
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(gen_server2).
+
+%%% ---------------------------------------------------
+%%%
+%%% The idea behind THIS server is that the user module
+%%% provides (different) functions to handle different
+%%% kind of inputs.
+%%% If the Parent process terminates the Module:terminate/2
+%%% function is called.
+%%%
+%%% The user module should export:
+%%%
+%%% init(Args)
+%%% ==> {ok, State}
+%%% {ok, State, Timeout}
+%%% {ok, State, Timeout, Backoff}
+%%% ignore
+%%% {stop, Reason}
+%%%
+%%% handle_call(Msg, {From, Tag}, State)
+%%%
+%%% ==> {reply, Reply, State}
+%%% {reply, Reply, State, Timeout}
+%%% {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, Reply, State}
+%%% Reason = normal | shutdown | Term terminate(State) is called
+%%%
+%%% handle_cast(Msg, State)
+%%%
+%%% ==> {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term terminate(State) is called
+%%%
+%%% handle_info(Info, State) Info is e.g. {'EXIT', P, R}, {nodedown, N}, ...
+%%%
+%%% ==> {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% terminate(Reason, State) Let the user module clean up
+%%% Reason = normal | shutdown | {shutdown, Term} | Term
+%%% always called when server terminates
+%%%
+%%% ==> ok | Term
+%%%
+%%% handle_pre_hibernate(State)
+%%%
+%%% ==> {hibernate, State}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% handle_post_hibernate(State)
+%%%
+%%% ==> {noreply, State}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% The work flow (of the server) can be described as follows:
+%%%
+%%% User module Generic
+%%% ----------- -------
+%%% start -----> start
+%%% init <----- .
+%%%
+%%% loop
+%%% handle_call <----- .
+%%% -----> reply
+%%%
+%%% handle_cast <----- .
+%%%
+%%% handle_info <----- .
+%%%
+%%% terminate <----- .
+%%%
+%%% -----> reply
+%%%
+%%%
+%%% ---------------------------------------------------
+
+%% API
+-export([start/3, start/4,
+ start_link/3, start_link/4,
+ call/2, call/3,
+ cast/2, reply/2,
+ abcast/2, abcast/3,
+ multi_call/2, multi_call/3, multi_call/4,
+ mcall/1,
+ with_state/2,
+ enter_loop/3, enter_loop/4, enter_loop/5, enter_loop/6, wake_hib/1]).
+
+%% System exports
+-export([system_continue/3,
+ system_terminate/4,
+ system_code_change/4,
+ format_status/2]).
+
+%% Internal exports
+-export([init_it/6]).
+
+-import(error_logger, [format/2]).
+
+%% State record
+-record(gs2_state, {parent, name, state, mod, time,
+ timeout_state, queue, debug, prioritisers}).
+
+-ifdef(use_specs).
+
+%%%=========================================================================
+%%% Specs. These exist only to shut up dialyzer's warnings
+%%%=========================================================================
+
+-type(gs2_state() :: #gs2_state{}).
+
+-spec(handle_common_termination/3 ::
+ (any(), atom(), gs2_state()) -> no_return()).
+-spec(hibernate/1 :: (gs2_state()) -> no_return()).
+-spec(pre_hibernate/1 :: (gs2_state()) -> no_return()).
+-spec(system_terminate/4 :: (_, _, _, gs2_state()) -> no_return()).
+
+-type(millis() :: non_neg_integer()).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-callback init(Args :: term()) ->
+ {ok, State :: term()} |
+ {ok, State :: term(), timeout() | hibernate} |
+ {ok, State :: term(), timeout() | hibernate,
+ {backoff, millis(), millis(), millis()}} |
+ ignore |
+ {stop, Reason :: term()}.
+-callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
+ State :: term()) ->
+ {reply, Reply :: term(), NewState :: term()} |
+ {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} |
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(),
+ Reply :: term(), NewState :: term()}.
+-callback handle_cast(Request :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback handle_info(Info :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
+ State :: term()) ->
+ ok | term().
+-callback code_change(OldVsn :: (term() | {down, term()}), State :: term(),
+ Extra :: term()) ->
+ {ok, NewState :: term()} | {error, Reason :: term()}.
+
+%% It's not possible to define "optional" -callbacks, so putting specs
+%% for handle_pre_hibernate/1 and handle_post_hibernate/1 will result
+%% in warnings (the same applied for the behaviour_info before).
+
+-else.
+
+-export([behaviour_info/1]).
+
+%% Pre-R15 style callback listing, used when -callback specs are not
+%% available (the -else branch of the use_specs conditional).
+behaviour_info(callbacks) ->
+    [{init,1},{handle_call,3},{handle_cast,2},{handle_info,2},
+     {terminate,2},{code_change,3}];
+behaviour_info(_Other) ->
+    undefined.
+
+-endif.
+
+%%% -----------------------------------------------------------------
+%%% Starts a generic server.
+%%% start(Mod, Args, Options)
+%%% start(Name, Mod, Args, Options)
+%%% start_link(Mod, Args, Options)
+%%% start_link(Name, Mod, Args, Options) where:
+%%% Name ::= {local, atom()} | {global, atom()}
+%%% Mod ::= atom(), callback module implementing the 'real' server
+%%% Args ::= term(), init arguments (to Mod:init/1)
+%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
+%%% Flag ::= trace | log | {logfile, File} | statistics | debug
+%%% (debug == log && statistics)
+%%% Returns: {ok, Pid} |
+%%% {error, {already_started, Pid}} |
+%%% {error, Reason}
+%%% -----------------------------------------------------------------
+%% All four delegate to the generic 'gen' starter; they differ only in
+%% link mode (nolink vs link) and the optional registered Name.
+start(Mod, Args, Options) ->
+    gen:start(?MODULE, nolink, Mod, Args, Options).
+
+start(Name, Mod, Args, Options) ->
+    gen:start(?MODULE, nolink, Name, Mod, Args, Options).
+
+start_link(Mod, Args, Options) ->
+    gen:start(?MODULE, link, Mod, Args, Options).
+
+start_link(Name, Mod, Args, Options) ->
+    gen:start(?MODULE, link, Name, Mod, Args, Options).
+
+
+%% -----------------------------------------------------------------
+%% Make a call to a generic server.
+%% If the server is located at another node, that node will
+%% be monitored.
+%% If the client is trapping exits and is linked server termination
+%% is handled here (? Shall we do that here (or rely on timeouts) ?).
+%% -----------------------------------------------------------------
+%% Synchronous call. On failure (server death, or elapsed Timeout for
+%% call/3) the caller is exited with the reason, mirroring gen_server.
+call(Name, Request) ->
+    case catch gen:call(Name, '$gen_call', Request) of
+        {ok,Res} ->
+            Res;
+        {'EXIT',Reason} ->
+            exit({Reason, {?MODULE, call, [Name, Request]}})
+    end.
+
+call(Name, Request, Timeout) ->
+    case catch gen:call(Name, '$gen_call', Request, Timeout) of
+        {ok,Res} ->
+            Res;
+        {'EXIT',Reason} ->
+            exit({Reason, {?MODULE, call, [Name, Request, Timeout]}})
+    end.
+
+%% -----------------------------------------------------------------
+%% Make a cast to a generic server.
+%% -----------------------------------------------------------------
+%% Fire-and-forget: wrap Request as '$gen_cast' and send it; always
+%% returns ok, even when the destination does not exist.
+cast({global,Name}, Request) ->
+    catch global:send(Name, cast_msg(Request)),
+    ok;
+cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) ->
+    do_cast(Dest, Request);
+cast(Dest, Request) when is_atom(Dest) ->
+    do_cast(Dest, Request);
+cast(Dest, Request) when is_pid(Dest) ->
+    do_cast(Dest, Request).
+
+do_cast(Dest, Request) ->
+    do_send(Dest, cast_msg(Request)),
+    ok.
+
+cast_msg(Request) -> {'$gen_cast',Request}.
+
+%% -----------------------------------------------------------------
+%% Send a reply to the client.
+%% -----------------------------------------------------------------
+%% Send a reply to a client From = {To, Tag} captured in handle_call.
+reply({To, Tag}, Reply) ->
+    catch To ! {Tag, Reply}.
+
+%% -----------------------------------------------------------------
+%% Asynchronous broadcast, returns nothing, it's just send'n pray
+%% -----------------------------------------------------------------
+%% Broadcast a cast to the registered Name on every connected node
+%% (abcast/2) or on the explicitly supplied Nodes (abcast/3).
+abcast(Name, Request) when is_atom(Name) ->
+    do_abcast([node() | nodes()], Name, cast_msg(Request)).
+
+abcast(Nodes, Name, Request) when is_list(Nodes), is_atom(Name) ->
+    do_abcast(Nodes, Name, cast_msg(Request)).
+
+do_abcast([Node|Nodes], Name, Msg) when is_atom(Node) ->
+    do_send({Name,Node},Msg),
+    do_abcast(Nodes, Name, Msg);
+do_abcast([], _,_) -> abcast.
+
+%%% -----------------------------------------------------------------
+%%% Make a call to servers at several nodes.
+%%% Returns: {[Replies],[BadNodes]}
+%%% A Timeout can be given
+%%%
+%%% A middleman process is used in case late answers arrive after
+%%% the timeout. If they were allowed to clog the caller's message
+%%% queue, it would probably become confused. Late answers will
+%%% now arrive at the terminated middleman and so be discarded.
+%%% -----------------------------------------------------------------
+%% Call Name on many nodes; Timeout is 'infinity' or a non-negative
+%% integer. All forms delegate to do_multi_call/4 (defined elsewhere).
+multi_call(Name, Req)
+  when is_atom(Name) ->
+    do_multi_call([node() | nodes()], Name, Req, infinity).
+
+multi_call(Nodes, Name, Req)
+  when is_list(Nodes), is_atom(Name) ->
+    do_multi_call(Nodes, Name, Req, infinity).
+
+multi_call(Nodes, Name, Req, infinity) ->
+    do_multi_call(Nodes, Name, Req, infinity);
+multi_call(Nodes, Name, Req, Timeout)
+  when is_list(Nodes), is_atom(Name), is_integer(Timeout), Timeout >= 0 ->
+    do_multi_call(Nodes, Name, Req, Timeout).
+
+%%% -----------------------------------------------------------------
+%%% Make multiple calls to multiple servers, given pairs of servers
+%%% and messages.
+%%% Returns: {[{Dest, Reply}], [{Dest, Error}]}
+%%%
+%%% Dest can be pid() | RegName :: atom() |
+%%% {Name :: atom(), Node :: atom()} | {global, Name :: atom()}
+%%%
+%%% A middleman process is used to avoid clogging up the callers
+%%% message queue.
+%%% -----------------------------------------------------------------
+%% Issue all CallSpecs ({Dest, Request} pairs) in parallel from a
+%% monitored middleman process; the middleman's exit reason carries
+%% {Tag, Result} back, so late replies land in the (dead) middleman
+%% rather than the caller's mailbox.
+mcall(CallSpecs) ->
+    Tag = make_ref(),
+    {_, MRef} = spawn_monitor(
+                  fun() ->
+                          Refs = lists:foldl(
+                                   fun ({Dest, _Request}=S, Dict) ->
+                                           dict:store(do_mcall(S), Dest, Dict)
+                                   end, dict:new(), CallSpecs),
+                          collect_replies(Tag, Refs, [], [])
+                  end),
+    receive
+        {'DOWN', MRef, _, _, {Tag, Result}} -> Result;
+        {'DOWN', MRef, _, _, Reason}        -> exit(Reason)
+    end.
+
+%% Issue one monitored call; returns the monitor reference keyed in the
+%% caller's dict (or a fabricated ref whose DOWN is self-sent when a
+%% global name cannot be resolved).
+do_mcall({{global,Name}=Dest, Request}) ->
+    %% whereis_name is simply an ets lookup, and is precisely what
+    %% global:send/2 does, yet we need a Ref to put in the call to the
+    %% server, so invoking whereis_name makes a lot more sense here.
+    case global:whereis_name(Name) of
+        Pid when is_pid(Pid) ->
+            MRef = erlang:monitor(process, Pid),
+            catch msend(Pid, MRef, Request),
+            MRef;
+        undefined ->
+            Ref = make_ref(),
+            self() ! {'DOWN', Ref, process, Dest, noproc},
+            Ref
+    end;
+do_mcall({{Name,Node}=Dest, Request}) when is_atom(Name), is_atom(Node) ->
+    {_Node, MRef} = start_monitor(Node, Name), %% NB: we don't handle R6
+    catch msend(Dest, MRef, Request),
+    MRef;
+do_mcall({Dest, Request}) when is_atom(Dest); is_pid(Dest) ->
+    MRef = erlang:monitor(process, Dest),
+    catch msend(Dest, MRef, Request),
+    MRef.
+
+%% Send a '$gen_call' message without auto-connecting to remote nodes.
+msend(Dest, MRef, Request) ->
+    erlang:send(Dest, {'$gen_call', {self(), MRef}, Request}, [noconnect]).
+
+%% Gather replies and DOWNs until every outstanding Ref is resolved,
+%% then exit with the tagged result for mcall/1's receive to pick up.
+collect_replies(Tag, Refs, Replies, Errors) ->
+    case dict:size(Refs) of
+        0 -> exit({Tag, {Replies, Errors}});
+        _ -> receive
+                 {MRef, Reply} ->
+                     {Refs1, Replies1} = handle_call_result(MRef, Reply,
+                                                            Refs, Replies),
+                     collect_replies(Tag, Refs1, Replies1, Errors);
+                 {'DOWN', MRef, _, _, Reason} ->
+                     Reason1 = case Reason of
+                                   noconnection -> nodedown;
+                                   _            -> Reason
+                               end,
+                     {Refs1, Errors1} = handle_call_result(MRef, Reason1,
+                                                           Refs, Errors),
+                     collect_replies(Tag, Refs1, Replies, Errors1)
+             end
+    end.
+
+%% Resolve MRef to its Dest and accumulate the result — unless the ref
+%% was already erased (a duplicate reply/DOWN), which is ignored.
+handle_call_result(MRef, Result, Refs, AccList) ->
+    %% we avoid the mailbox scanning cost of a call to erlang:demonitor/{1,2}
+    %% here, so we must cope with MRefs that we've already seen and erased
+    case dict:find(MRef, Refs) of
+        {ok, Pid} -> {dict:erase(MRef, Refs), [{Pid, Result}|AccList]};
+        _         -> {Refs, AccList}
+    end.
+
+%% -----------------------------------------------------------------
+%% Apply a function to a generic server's state.
+%% -----------------------------------------------------------------
+%% Run Fun against the server's state inside the server process, so the
+%% state need not be copied out; exits on failure like call/2.
+with_state(Name, Fun) ->
+    case catch gen:call(Name, '$with_state', Fun, infinity) of
+        {ok,Res} ->
+            Res;
+        {'EXIT',Reason} ->
+            exit({Reason, {?MODULE, with_state, [Name, Fun]}})
+    end.
+
+%%-----------------------------------------------------------------
+%% enter_loop(Mod, Options, State, <ServerName>, <TimeOut>, <Backoff>) ->_
+%%
+%% Description: Makes an existing process into a gen_server.
+%% The calling process will enter the gen_server receive
+%% loop and become a gen_server process.
+%% The process *must* have been started using one of the
+%% start functions in proc_lib, see proc_lib(3).
+%% The user is responsible for any initialization of the
+%% process, including registering a name for it.
+%%-----------------------------------------------------------------
+%% Turn the calling (proc_lib-started) process into a gen_server2.
+%% The 4- and 5-argument forms disambiguate on the shape of the extra
+%% argument: a {backoff,_,_,_} tuple, a {_,_} server name, or a timeout.
+enter_loop(Mod, Options, State) ->
+    enter_loop(Mod, Options, State, self(), infinity, undefined).
+
+enter_loop(Mod, Options, State, Backoff = {backoff, _, _ , _}) ->
+    enter_loop(Mod, Options, State, self(), infinity, Backoff);
+
+enter_loop(Mod, Options, State, ServerName = {_, _}) ->
+    enter_loop(Mod, Options, State, ServerName, infinity, undefined);
+
+enter_loop(Mod, Options, State, Timeout) ->
+    enter_loop(Mod, Options, State, self(), Timeout, undefined).
+
+enter_loop(Mod, Options, State, ServerName, Backoff = {backoff, _, _, _}) ->
+    enter_loop(Mod, Options, State, ServerName, infinity, Backoff);
+
+enter_loop(Mod, Options, State, ServerName, Timeout) ->
+    enter_loop(Mod, Options, State, ServerName, Timeout, undefined).
+
+%% Build the #gs2_state{} and enter the main loop; never returns.
+enter_loop(Mod, Options, State, ServerName, Timeout, Backoff) ->
+    Name = get_proc_name(ServerName),
+    Parent = get_parent(),
+    Debug = debug_options(Name, Options),
+    Queue = priority_queue:new(),
+    Backoff1 = extend_backoff(Backoff),
+    loop(find_prioritisers(
+           #gs2_state { parent = Parent, name = Name, state = State,
+                        mod = Mod, time = Timeout, timeout_state = Backoff1,
+                        queue = Queue, debug = Debug })).
+
+%%%========================================================================
+%%% Gen-callback functions
+%%%========================================================================
+
+%%% ---------------------------------------------------
+%%% Initiate the new process.
+%%% Register the name using the Rfunc function
+%%% Calls the Mod:init/Args function.
+%%% Finally an acknowledge is sent to Parent and the main
+%%% loop is entered.
+%%% ---------------------------------------------------
+%% Invoked by 'gen' via proc_lib: run Mod:init/1 and enter the main
+%% loop, acking the starter with success, error or ignore as
+%% appropriate. An old-style catch captures init crashes as {'EXIT', R}.
+init_it(Starter, self, Name, Mod, Args, Options) ->
+    init_it(Starter, self(), Name, Mod, Args, Options);
+init_it(Starter, Parent, Name0, Mod, Args, Options) ->
+    Name = name(Name0),
+    Debug = debug_options(Name, Options),
+    Queue = priority_queue:new(),
+    GS2State = find_prioritisers(
+                 #gs2_state { parent = Parent,
+                              name = Name,
+                              mod = Mod,
+                              queue = Queue,
+                              debug = Debug }),
+    case catch Mod:init(Args) of
+        {ok, State} ->
+            proc_lib:init_ack(Starter, {ok, self()}),
+            loop(GS2State #gs2_state { state = State,
+                                       time = infinity,
+                                       timeout_state = undefined });
+        {ok, State, Timeout} ->
+            proc_lib:init_ack(Starter, {ok, self()}),
+            loop(GS2State #gs2_state { state = State,
+                                       time = Timeout,
+                                       timeout_state = undefined });
+        {ok, State, Timeout, Backoff = {backoff, _, _, _}} ->
+            Backoff1 = extend_backoff(Backoff),
+            proc_lib:init_ack(Starter, {ok, self()}),
+            loop(GS2State #gs2_state { state = State,
+                                       time = Timeout,
+                                       timeout_state = Backoff1 });
+        {stop, Reason} ->
+            %% For consistency, we must make sure that the
+            %% registered name (if any) is unregistered before
+            %% the parent process is notified about the failure.
+            %% (Otherwise, the parent process could get
+            %% an 'already_started' error if it immediately
+            %% tried starting the process again.)
+            unregister_name(Name0),
+            proc_lib:init_ack(Starter, {error, Reason}),
+            exit(Reason);
+        ignore ->
+            unregister_name(Name0),
+            proc_lib:init_ack(Starter, ignore),
+            exit(normal);
+        {'EXIT', Reason} ->
+            unregister_name(Name0),
+            proc_lib:init_ack(Starter, {error, Reason}),
+            exit(Reason);
+        Else ->
+            Error = {bad_return_value, Else},
+            proc_lib:init_ack(Starter, {error, Error}),
+            exit(Error)
+    end.
+
%% Reduce a server-name specification to the bare name used for debug
%% output and error reports. The catch-all exists for R12 compat,
%% where a plain pid/term may arrive instead of a tagged tuple;
%% once R12 support goes away it should become
%% name(Pid) when is_pid(Pid) -> Pid.
name({local, N})  -> N;
name({global, N}) -> N;
name(Other)       -> Other.
+
%% Best-effort removal of the server's registered name. The catch on
%% unregister/1 tolerates a name that was never registered. A bare
%% pid needs no unregistration; any other single term (an R12-style
%% name) is silently ignored — on R13+ names always arrive as
%% {local,_} or {global,_} tuples.
unregister_name({global, GName}) ->
    _ = global:unregister_name(GName);
unregister_name({local, LName}) ->
    _ = (catch unregister(LName));
unregister_name(Pid) when is_pid(Pid) ->
    Pid;
unregister_name(_Other) ->
    ok.
+
%% Turn a user-supplied {backoff, Initial, Min, DesiredHib} spec into
%% the internal 5-tuple carrying a now()-based seed for the RNG state
%% used by adjust_timeout_state/3. 'undefined' passes through.
extend_backoff(undefined) ->
    undefined;
extend_backoff({backoff, InitTO, MinTO, DesiredHib}) ->
    {backoff, InitTO, MinTO, DesiredHib, now()}.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%%% ---------------------------------------------------
+%%% The MAIN loop.
+%%% ---------------------------------------------------
%% Main loop entry. Time = 'hibernate' with no backoff state means
%% hibernate immediately; otherwise drain the mailbox into the
%% priority queue and process the next queued message.
loop(GS2State = #gs2_state { time = hibernate,
                             timeout_state = undefined }) ->
    pre_hibernate(GS2State);
loop(GS2State) ->
    process_next_msg(drain(GS2State)).
+
%% Move every message currently sitting in the process mailbox into
%% the priority queue (classified by in/2), without blocking.
drain(GS2State) ->
    receive
        Input -> drain(in(Input, GS2State))
    after 0 -> GS2State
    end.
+
%% Dequeue and handle the highest-priority message, or block waiting
%% for new input. When the queue is empty, Time1 is the real receive
%% timeout: a backoff in 'hibernate' mode yields its current timeout
%% (and hibernation on expiry); a bare 'hibernate' waits infinitely.
process_next_msg(GS2State = #gs2_state { time = Time,
                                         timeout_state = TimeoutState,
                                         queue = Queue }) ->
    case priority_queue:out(Queue) of
        {{value, Msg}, Queue1} ->
            process_msg(Msg, GS2State #gs2_state { queue = Queue1 });
        {empty, Queue1} ->
            {Time1, HibOnTimeout}
                = case {Time, TimeoutState} of
                      {hibernate, {backoff, Current, _Min, _Desired, _RSt}} ->
                          {Current, true};
                      {hibernate, _} ->
                          %% wake_hib/7 will set Time to hibernate. If
                          %% we were woken and didn't receive a msg
                          %% then we will get here and need a sensible
                          %% value for Time1, otherwise we crash.
                          %% R13B1 always waits infinitely when waking
                          %% from hibernation, so that's what we do
                          %% here too.
                          {infinity, false};
                      _ -> {Time, false}
                  end,
            receive
                Input ->
                    %% Time could be 'hibernate' here, so *don't* call loop
                    process_next_msg(
                      drain(in(Input, GS2State #gs2_state { queue = Queue1 })))
            after Time1 ->
                    case HibOnTimeout of
                        true ->
                            pre_hibernate(
                              GS2State #gs2_state { queue = Queue1 });
                        false ->
                            %% A plain callback timeout: deliver the
                            %% 'timeout' message to the module.
                            process_msg(timeout,
                                        GS2State #gs2_state { queue = Queue1 })
                    end
            end
    end.
+
%% Continuation invoked by proc_lib:hibernate/3 when a message
%% arrives. Recomputes the backoff from how long we actually slept
%% (state was stashed as {SleptAt, Backoff} by hibernate/1), drains
%% the mailbox and runs the post-hibernate callback.
wake_hib(GS2State = #gs2_state { timeout_state = TS }) ->
    TimeoutState1 = case TS of
                        undefined ->
                            undefined;
                        {SleptAt, TimeoutState} ->
                            adjust_timeout_state(SleptAt, now(), TimeoutState)
                    end,
    post_hibernate(
      drain(GS2State #gs2_state { timeout_state = TimeoutState1 })).
+
%% Enter hibernation via proc_lib, recording the sleep start time
%% alongside any backoff state so wake_hib/1 can measure the nap.
hibernate(GS2State = #gs2_state { timeout_state = TimeoutState }) ->
    TS = case TimeoutState of
             undefined -> undefined;
             {backoff, _, _, _, _} -> {now(), TimeoutState}
         end,
    proc_lib:hibernate(?MODULE, wake_hib,
                       [GS2State #gs2_state { timeout_state = TS }]).
+
%% Give the callback module a chance to veto/prepare for hibernation
%% via the optional handle_pre_hibernate/1; any result other than
%% {hibernate, NState} is treated as a termination request.
pre_hibernate(GS2State = #gs2_state { state = State,
                                      mod = Mod }) ->
    case erlang:function_exported(Mod, handle_pre_hibernate, 1) of
        true ->
            case catch Mod:handle_pre_hibernate(State) of
                {hibernate, NState} ->
                    hibernate(GS2State #gs2_state { state = NState } );
                Reply ->
                    handle_common_termination(Reply, pre_hibernate, GS2State)
            end;
        false ->
            hibernate(GS2State)
    end.
+
%% Run the optional handle_post_hibernate/1 callback after waking.
%% Without the callback, Time stays 'hibernate' (see comment below);
%% with it, the callback dictates the new timeout.
post_hibernate(GS2State = #gs2_state { state = State,
                                       mod = Mod }) ->
    case erlang:function_exported(Mod, handle_post_hibernate, 1) of
        true ->
            case catch Mod:handle_post_hibernate(State) of
                {noreply, NState} ->
                    process_next_msg(GS2State #gs2_state { state = NState,
                                                           time = infinity });
                {noreply, NState, Time} ->
                    process_next_msg(GS2State #gs2_state { state = NState,
                                                           time = Time });
                Reply ->
                    handle_common_termination(Reply, post_hibernate, GS2State)
            end;
        false ->
            %% use hibernate here, not infinity. This matches
            %% R13B. The key is that we should be able to get through
            %% to process_msg calling sys:handle_system_msg with Time
            %% still set to hibernate, iff that msg is the very msg
            %% that woke us up (or the first msg we receive after
            %% waking up).
            process_next_msg(GS2State #gs2_state { time = hibernate })
    end.
+
%% Recompute the backoff timeout after a hibernation nap. All
%% arithmetic is in microseconds (timeouts are stored in ms, hence
%% the * 1000). The new timeout is Base plus a random jitter in
%% 1..Base drawn from the threaded random state, so repeated wakes
%% do not synchronise. Left on the deprecated now()/random API on
%% purpose — changing it would alter the RNG state threading.
adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
                                        DesiredHibPeriod, RandomState}) ->
    NapLengthMicros = timer:now_diff(AwokeAt, SleptAt),
    CurrentMicros = CurrentTO * 1000,
    MinimumMicros = MinimumTO * 1000,
    DesiredHibMicros = DesiredHibPeriod * 1000,
    GapBetweenMessagesMicros = NapLengthMicros + CurrentMicros,
    Base =
        %% If enough time has passed between the last two messages then we
        %% should consider sleeping sooner. Otherwise stay awake longer.
        case GapBetweenMessagesMicros > (MinimumMicros + DesiredHibMicros) of
            true -> lists:max([MinimumTO, CurrentTO div 2]);
            false -> CurrentTO
        end,
    {Extra, RandomState1} = random:uniform_s(Base, RandomState),
    CurrentTO1 = Base + Extra,
    {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
+
%% Classify an incoming raw message and enqueue it at a priority:
%% casts and calls are priced by the module's prioritisers; with_state
%% requests get priority 0; parent exits and system messages get
%% 'infinity' (highest). Clause order matters — the final clause's
%% info prioritiser is the catch-all.
in({'$gen_cast', Msg} = Input,
   GS2State = #gs2_state { prioritisers = {_, F, _} }) ->
    in(Input, F(Msg, GS2State), GS2State);
in({'$gen_call', From, Msg} = Input,
   GS2State = #gs2_state { prioritisers = {F, _, _} }) ->
    in(Input, F(Msg, From, GS2State), GS2State);
in({'$with_state', _From, _Fun} = Input, GS2State) ->
    in(Input, 0, GS2State);
in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) ->
    in(Input, infinity, GS2State);
in({system, _From, _Req} = Input, GS2State) ->
    in(Input, infinity, GS2State);
in(Input, GS2State = #gs2_state { prioritisers = {_, _, F} }) ->
    in(Input, F(Input, GS2State), GS2State).
+
%% Enqueue a message at the given priority; a prioritiser verdict of
%% 'drop' discards the message outright.
in(_Input, drop, GS2State) ->
    GS2State;
in(Input, Priority, GS2State = #gs2_state { queue = Queue }) ->
    Queue1 = priority_queue:in(Input, Priority, Queue),
    GS2State #gs2_state { queue = Queue1 }.
+
%% Handle one dequeued message: system messages go to sys, with_state
%% requests are evaluated against the current state (errors are
%% replied, not raised), a parent exit terminates the server, and
%% everything else goes to handle_msg/2 (with a debug trace event
%% when debugging is on).
process_msg({system, From, Req},
            GS2State = #gs2_state { parent = Parent, debug = Debug }) ->
    %% gen_server puts Hib on the end as the 7th arg, but that version
    %% of the fun seems not to be documented so leaving out for now.
    sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State);
process_msg({'$with_state', From, Fun},
            GS2State = #gs2_state{state = State}) ->
    reply(From, catch Fun(State)),
    loop(GS2State);
process_msg({'EXIT', Parent, Reason} = Msg,
            GS2State = #gs2_state { parent = Parent }) ->
    terminate(Reason, Msg, GS2State);
process_msg(Msg, GS2State = #gs2_state { debug = [] }) ->
    handle_msg(Msg, GS2State);
process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) ->
    Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}),
    handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }).
+
%%% ---------------------------------------------------
%%% Send/receive functions
%%% ---------------------------------------------------
%% Fire-and-forget send; the catch swallows the badarg raised when
%% Dest is a registered name that does not exist (locally or on a
%% remote node).
do_send(Dest, Msg) ->
    catch erlang:send(Dest, Msg).
+
%% Multi-node call. With an infinite timeout, send and collect
%% directly. With a finite timeout, delegate to a monitored middleman
%% process so the caller's mailbox is not polluted by late replies
%% and the whole operation can be abandoned as a unit.
do_multi_call(Nodes, Name, Req, infinity) ->
    Tag = make_ref(),
    Monitors = send_nodes(Nodes, Name, Tag, Req),
    rec_nodes(Tag, Monitors, Name, undefined);
do_multi_call(Nodes, Name, Req, Timeout) ->
    Tag = make_ref(),
    Caller = self(),
    Receiver =
        spawn(
          fun () ->
                  %% Middleman process. Should be insensitive to regular
                  %% exit signals. The synchronization is needed in case
                  %% the receiver would exit before the caller started
                  %% the monitor.
                  process_flag(trap_exit, true),
                  Mref = erlang:monitor(process, Caller),
                  receive
                      {Caller,Tag} ->
                          Monitors = send_nodes(Nodes, Name, Tag, Req),
                          TimerId = erlang:start_timer(Timeout, self(), ok),
                          Result = rec_nodes(Tag, Monitors, Name, TimerId),
                          %% Deliver the result via our own exit reason,
                          %% which the caller's monitor reports.
                          exit({self(),Tag,Result});
                      {'DOWN',Mref,_,_,_} ->
                          %% Caller died before sending us the go-ahead.
                          %% Give up silently.
                          exit(normal)
                  end
          end),
    Mref = erlang:monitor(process, Receiver),
    Receiver ! {self(),Tag},
    receive
        {'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
            Result;
        {'DOWN',Mref,_,_,Reason} ->
            %% The middleman code failed. Or someone did
            %% exit(_, kill) on the middleman process => Reason==killed
            exit(Reason)
    end.
+
%% Send Req to {Name, Node} on every node, returning the list of
%% monitors created (see send_nodes/5).
send_nodes(Nodes, Name, Tag, Req) ->
    send_nodes(Nodes, Name, Tag, Req, []).
+
%% Send Req as a '$gen_call' tagged {Tag, Node} to {Name, Node} on
%% each atom node, prepending one monitor per node onto the
%% accumulator; non-atom entries are skipped. Returns the monitors.
send_nodes(Nodes, Name, Tag, Req, Monitors) ->
    lists:foldl(
      fun (Node, MonAcc) when is_atom(Node) ->
              Mon = start_monitor(Node, Name),
              %% Non-existing names are handled in rec_nodes.
              catch {Name, Node} ! {'$gen_call', {self(), {Tag, Node}}, Req},
              [Mon | MonAcc];
          (_BadNode, MonAcc) ->
              %% Skip non-atom entries.
              MonAcc
      end, Monitors, Nodes).
+
+%% Against old nodes:
+%% If no reply has been delivered within 2 secs. (per node) check that
+%% the server really exists and wait for ever for the answer.
+%%
+%% Against contemporary nodes:
+%% Wait for reply, server 'DOWN', or timeout from TimerId.
+
%% Collect replies from all monitored nodes; 2000 ms is the per-node
%% liveness-check interval used for old (R6) nodes.
rec_nodes(Tag, Nodes, Name, TimerId) ->
    rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId).
+
%% Walk the monitor list collecting one reply per node. Monitored
%% nodes are {N, Ref} pairs; a bare atom N is an old (R6) node being
%% watched with monitor_node/2 instead. On overall timeout (TimerId
%% fires) switch to rec_nodes_rest/5, which only picks up replies
%% that have already arrived. Returns {Replies, Badnodes}.
rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) ->
    receive
        {'DOWN', R, _, _, _} ->
            rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId);
        {{Tag, N}, Reply} ->  %% Tag is bound !!!
            unmonitor(R),
            rec_nodes(Tag, Tail, Name, Badnodes,
                      [{N,Reply}|Replies], Time, TimerId);
        {timeout, TimerId, _} ->
            unmonitor(R),
            %% Collect all replies that already have arrived
            rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
    end;
rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) ->
    %% R6 node
    receive
        {nodedown, N} ->
            monitor_node(N, false),
            rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId);
        {{Tag, N}, Reply} ->  %% Tag is bound !!!
            %% Drain any stale nodedown before turning monitoring off.
            receive {nodedown, N} -> ok after 0 -> ok end,
            monitor_node(N, false),
            rec_nodes(Tag, Tail, Name, Badnodes,
                      [{N,Reply}|Replies], 2000, TimerId);
        {timeout, TimerId, _} ->
            receive {nodedown, N} -> ok after 0 -> ok end,
            monitor_node(N, false),
            %% Collect all replies that already have arrived
            rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies)
    after Time ->
            %% No reply within the probe interval: check the server
            %% actually exists on N before waiting indefinitely.
            case rpc:call(N, erlang, whereis, [Name]) of
                Pid when is_pid(Pid) -> % It exists try again.
                    rec_nodes(Tag, [N|Tail], Name, Badnodes,
                              Replies, infinity, TimerId);
                _ -> % badnode
                    receive {nodedown, N} -> ok after 0 -> ok end,
                    monitor_node(N, false),
                    rec_nodes(Tag, Tail, Name, [N|Badnodes],
                              Replies, 2000, TimerId)
            end
    end;
rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) ->
    %% All nodes accounted for: cancel the overall timer and mop up a
    %% timeout message that may already have been delivered.
    case catch erlang:cancel_timer(TimerId) of
        false ->  % It has already sent it's message
            receive
                {timeout, TimerId, _} -> ok
            after 0 ->
                    ok
            end;
        _ -> % Timer was cancelled, or TimerId was 'undefined'
            ok
    end,
    {Replies, Badnodes}.
+
+%% Collect all replies that already have arrived
%% Post-timeout sweep: for each remaining node, take a reply or DOWN
%% that is already in the mailbox ('after 0' — never block); anything
%% still outstanding is classified as a bad node. Returns
%% {Replies, Badnodes}.
rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) ->
    receive
        {'DOWN', R, _, _, _} ->
            rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
        {{Tag, N}, Reply} ->  %% Tag is bound !!!
            unmonitor(R),
            rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
    after 0 ->
            unmonitor(R),
            rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
    end;
rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) ->
    %% R6 node
    receive
        {nodedown, N} ->
            monitor_node(N, false),
            rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
        {{Tag, N}, Reply} ->  %% Tag is bound !!!
            receive {nodedown, N} -> ok after 0 -> ok end,
            monitor_node(N, false),
            rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
    after 0 ->
            receive {nodedown, N} -> ok after 0 -> ok end,
            monitor_node(N, false),
            rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
    end;
rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) ->
    {Replies, Badnodes}.
+
+
+%%% ---------------------------------------------------
+%%% Monitor functions
+%%% ---------------------------------------------------
+
%% Monitor {Name, Node}. If this node is not alive (not distributed)
%% but the target is remote, fake an immediate 'noconnection' DOWN.
%% If erlang:monitor/2 fails the remote node is an old (R6) node, so
%% fall back to monitor_node/2 and return the bare node name as the
%% marker rec_nodes/7 uses to pick the R6 code path.
start_monitor(Node, Name) when is_atom(Node), is_atom(Name) ->
    if node() =:= nonode@nohost, Node =/= nonode@nohost ->
            Ref = make_ref(),
            self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
            {Node, Ref};
       true ->
            case catch erlang:monitor(process, {Name, Node}) of
                {'EXIT', _} ->
                    %% Remote node is R6
                    monitor_node(Node, true),
                    Node;
                Ref when is_reference(Ref) ->
                    {Node, Ref}
            end
    end.
+
%% Cancels a monitor started with Ref = erlang:monitor(_, _), and
%% removes any already-delivered {'DOWN', Ref, ...} message from the
%% mailbox. demonitor/2 with the 'flush' option is the built-in,
%% race-free equivalent of the old demonitor/1 + selective-receive
%% dance, and returns true just as the original did.
unmonitor(Ref) when is_reference(Ref) ->
    true = erlang:demonitor(Ref, [flush]).
+
+%%% ---------------------------------------------------
+%%% Message handling functions
+%%% ---------------------------------------------------
+
%% Route a non-call message to the right callback: casts go to
%% Mod:handle_cast/2, everything else to Mod:handle_info/2.
dispatch(Msg, Mod, State) ->
    case Msg of
        {'$gen_cast', Cast} -> Mod:handle_cast(Cast, State);
        Info                -> Mod:handle_info(Info, State)
    end.
+
%% Send Reply to From and, when debugging is on, record an 'out'
%% debug event. Returns the (possibly updated) debug structure.
common_reply(_Name, From, Reply, _NState, [] = _Debug) ->
    reply(From, Reply),
    [];
common_reply(Name, {To, _Tag} = From, Reply, NState, Debug) ->
    reply(From, Reply),
    sys:handle_debug(Debug, fun print_event/3, Name, {out, Reply, To, NState}).
+
%% Record a {noreply, State} debug event unless debugging is off.
%% Returns the (possibly updated) debug structure.
common_noreply(_Name, _NState, []) ->
    [];
common_noreply(Name, NState, Debug) ->
    sys:handle_debug(Debug, fun print_event/3, Name, {noreply, NState}).
+
%% Record a {become, Mod, State} debug event unless debugging is off.
%% Returns the (possibly updated) debug structure.
common_become(_Name, _Mod, _NState, []) ->
    [];
common_become(Name, Mod, NState, Debug) ->
    sys:handle_debug(Debug, fun print_event/3, Name, {become, Mod, NState}).
+
%% Dispatch one message to the callback module. Calls go through
%% Mod:handle_call/3 here; casts/info go via dispatch/3. Note the
%% {stop, ...} call case: terminate/3 is run (under catch, so its
%% exit reason is captured) *before* the reply is sent, then the
%% process exits with that reason.
handle_msg({'$gen_call', From, Msg}, GS2State = #gs2_state { mod = Mod,
                                                             state = State,
                                                             name = Name,
                                                             debug = Debug }) ->
    case catch Mod:handle_call(Msg, From, State) of
        {reply, Reply, NState} ->
            Debug1 = common_reply(Name, From, Reply, NState, Debug),
            loop(GS2State #gs2_state { state = NState,
                                       time = infinity,
                                       debug = Debug1 });
        {reply, Reply, NState, Time1} ->
            Debug1 = common_reply(Name, From, Reply, NState, Debug),
            loop(GS2State #gs2_state { state = NState,
                                       time = Time1,
                                       debug = Debug1});
        {stop, Reason, Reply, NState} ->
            {'EXIT', R} =
                (catch terminate(Reason, Msg,
                                 GS2State #gs2_state { state = NState })),
            common_reply(Name, From, Reply, NState, Debug),
            exit(R);
        Other ->
            handle_common_reply(Other, Msg, GS2State)
    end;
handle_msg(Msg, GS2State = #gs2_state { mod = Mod, state = State }) ->
    Reply = (catch dispatch(Msg, Mod, State)),
    handle_common_reply(Reply, Msg, GS2State).
+
%% Interpret a callback result shared by cast/info (and the fallback
%% call path): noreply continues the loop; the gen_server2-specific
%% 'become' swaps the callback module (re-resolving prioritisers);
%% anything else is a termination.
handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name,
                                                        debug = Debug}) ->
    case Reply of
        {noreply, NState} ->
            Debug1 = common_noreply(Name, NState, Debug),
            loop(GS2State #gs2_state {state = NState,
                                      time = infinity,
                                      debug = Debug1});
        {noreply, NState, Time1} ->
            Debug1 = common_noreply(Name, NState, Debug),
            loop(GS2State #gs2_state {state = NState,
                                      time = Time1,
                                      debug = Debug1});
        {become, Mod, NState} ->
            Debug1 = common_become(Name, Mod, NState, Debug),
            loop(find_prioritisers(
                   GS2State #gs2_state { mod = Mod,
                                         state = NState,
                                         time = infinity,
                                         debug = Debug1 }));
        {become, Mod, NState, Time1} ->
            Debug1 = common_become(Name, Mod, NState, Debug),
            loop(find_prioritisers(
                   GS2State #gs2_state { mod = Mod,
                                         state = NState,
                                         time = Time1,
                                         debug = Debug1 }));
        _ ->
            handle_common_termination(Reply, Msg, GS2State)
    end.
+
%% Shared end-of-life handling for callback results that are not
%% normal continuations: an explicit {stop, ...}, a crash caught as
%% {'EXIT', ...}, or any unrecognised return value.
handle_common_termination({stop, Reason, NState}, Msg, GS2State) ->
    terminate(Reason, Msg, GS2State #gs2_state { state = NState });
handle_common_termination({'EXIT', What}, Msg, GS2State) ->
    terminate(What, Msg, GS2State);
handle_common_termination(BadReply, Msg, GS2State) ->
    terminate({bad_return_value, BadReply}, Msg, GS2State).
+
+%%-----------------------------------------------------------------
+%% Callback functions for system messages handling.
+%%-----------------------------------------------------------------
%% sys callback: resume the main loop after system message handling.
system_continue(Parent, Debug, GS2State) ->
    loop(GS2State #gs2_state { parent = Parent, debug = Debug }).
+
%% sys callback: terminate on request from the system ([] as the
%% "last message" since there is none).
system_terminate(Reason, _Parent, Debug, GS2State) ->
    terminate(Reason, [], GS2State #gs2_state { debug = Debug }).
+
%% sys callback for hot code upgrade: run Mod:code_change/3 and, on
%% success, re-resolve the prioritiser funs (the new module version
%% may export different prioritise_* callbacks). Any failure value is
%% passed back to sys unchanged.
system_code_change(GS2State = #gs2_state { mod = Mod,
                                           state = State },
                   _Module, OldVsn, Extra) ->
    case catch Mod:code_change(OldVsn, State, Extra) of
        {ok, NewState} ->
            GS2State1 = GS2State #gs2_state { state = NewState },
            {ok, [find_prioritisers(GS2State1)]};
        Failure ->
            Failure
    end.
+
+%%-----------------------------------------------------------------
+%% Format debug messages. Print them as the call-back module sees
+%% them, not as the real erlang messages. Use trace for that.
+%%-----------------------------------------------------------------
%% sys debug formatter: print events as the callback module sees
%% them, not as raw erlang messages (use tracing for that).
print_event(Dev, {in, {'$gen_call', {From, _Tag}, Call}}, Name) ->
    io:format(Dev, "*DBG* ~p got call ~p from ~w~n",
              [Name, Call, From]);
print_event(Dev, {in, {'$gen_cast', Cast}}, Name) ->
    io:format(Dev, "*DBG* ~p got cast ~p~n",
              [Name, Cast]);
print_event(Dev, {in, Msg}, Name) ->
    io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg]);
print_event(Dev, {out, Msg, To, State}, Name) ->
    io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n",
              [Name, Msg, To, State]);
print_event(Dev, {noreply, State}, Name) ->
    io:format(Dev, "*DBG* ~p new state ~w~n", [Name, State]);
print_event(Dev, Event, Name) ->
    io:format(Dev, "*DBG* ~p dbg ~p~n", [Name, Event]).
+
+
+%%% ---------------------------------------------------
+%%% Terminate the server.
+%%% ---------------------------------------------------
+
%% Run Mod:terminate/2 (under catch) and exit. A crash in terminate
%% is reported and becomes the exit reason. Otherwise normal,
%% shutdown and {shutdown, _} exit silently; any other reason is
%% logged as a server crash first.
terminate(Reason, Msg, #gs2_state { name  = Name,
                                    mod   = Mod,
                                    state = State,
                                    debug = Debug }) ->
    case catch Mod:terminate(Reason, State) of
        {'EXIT', R} ->
            error_info(R, Reason, Name, Msg, State, Debug),
            exit(R);
        _ ->
            case Reason of
                normal ->
                    exit(normal);
                shutdown ->
                    exit(shutdown);
                {shutdown,_}=Shutdown ->
                    exit(Shutdown);
                _ ->
                    error_info(Reason, undefined, Name, Msg, State, Debug),
                    exit(Reason)
            end
    end.
+
%% Emit the standard "Generic server terminating" report, plus the
%% terminate-callback root cause when there is one, then flush the
%% sys debug log.
error_info(_Reason, _RootCause, application_controller, _Msg, _State, _Debug) ->
    %% OTP-5811 Don't send an error report if it's the system process
    %% application_controller which is terminating - let init take care
    %% of it instead
    ok;
error_info(Reason, RootCause, Name, Msg, State, Debug) ->
    Reason1 = error_reason(Reason),
    Fmt =
        "** Generic server ~p terminating~n"
        "** Last message in was ~p~n"
        "** When Server state == ~p~n"
        "** Reason for termination == ~n** ~p~n",
    case RootCause of
        undefined -> format(Fmt, [Name, Msg, State, Reason1]);
        _         -> format(Fmt ++ "** In 'terminate' callback "
                            "with reason ==~n** ~p~n",
                            [Name, Msg, State, Reason1,
                             error_reason(RootCause)])
    end,
    sys:print_log(Debug),
    ok.
+
%% Make an 'undef' crash reason more informative by distinguishing a
%% module that failed to load from a function that is not exported;
%% all other reasons pass through untouched.
error_reason({undef, [{M, F, A} | _] = Stack} = Reason) ->
    case code:is_loaded(M) of
        false ->
            {'module could not be loaded', Stack};
        _ ->
            case erlang:function_exported(M, F, length(A)) of
                true  -> Reason;
                false -> {'function not exported', Stack}
            end
    end;
error_reason(Other) ->
    Other.
+
+%%% ---------------------------------------------------
+%%% Misc. functions.
+%%% ---------------------------------------------------
+
%% Look up option Op in a list of {Op, Value} pairs. Returns
%% {ok, Value} for the first match, 'false' when absent; entries of
%% any other shape are skipped.
opt(_Op, []) ->
    false;
opt(Op, [{Op, Value} | _Rest]) ->
    {ok, Value};
opt(Op, [_Other | Rest]) ->
    opt(Op, Rest).
+
%% Extract the 'debug' option from the start options and turn it into
%% a sys debug structure; when absent, fall back to the defaults
%% chosen by dbg_options/2.
debug_options(Name, Opts) ->
    Selected = case opt(debug, Opts) of
                   {ok, Options} -> Options;
                   _             -> []
               end,
    dbg_options(Name, Selected).
+
%% With no explicit debug options, honour the -generic_debug emulator
%% flag by enabling log + statistics; otherwise use what was given.
dbg_options(Name, []) ->
    Defaults = case init:get_argument(generic_debug) of
                   error -> [];
                   _     -> [log, statistics]
               end,
    dbg_opts(Name, Defaults);
dbg_options(Name, Opts) ->
    dbg_opts(Name, Opts).
+
%% Convert an option list into a sys debug structure; malformed
%% options are reported and silently dropped (empty debug). The
%% old-style catch deliberately tolerates both errors and throws
%% from sys:debug_options/1.
dbg_opts(Name, Opts) ->
    case catch sys:debug_options(Opts) of
        {'EXIT',_} ->
            format("~p: ignoring erroneous debug options - ~p~n",
                   [Name, Opts]),
            [];
        Dbg ->
            Dbg
    end.
+
%% For enter_loop: check that the claimed registration actually
%% belongs to the calling process, returning the name (or pid) to use
%% in reports, and exiting otherwise.
get_proc_name(Pid) when is_pid(Pid) ->
    Pid;
get_proc_name({local, Name}) ->
    case process_info(self(), registered_name) of
        {registered_name, Name} ->
            Name;
        {registered_name, _Name} ->
            %% Registered under some *other* name.
            exit(process_not_registered);
        [] ->
            exit(process_not_registered)
    end;
get_proc_name({global, Name}) ->
    case whereis_name(Name) of
        undefined ->
            exit(process_not_registered_globally);
        Pid when Pid =:= self() ->
            Name;
        _Pid ->
            %% Globally registered, but to a different process.
            exit(process_not_registered_globally)
    end.
+
%% Recover the proc_lib parent from the '$ancestors' process
%% dictionary entry (set by the proc_lib start functions); exits if
%% this process was not started via proc_lib.
get_parent() ->
    case get('$ancestors') of
        [Parent | _] when is_pid(Parent) ->
            Parent;
        [Parent | _] when is_atom(Parent) ->
            name_to_pid(Parent);
        _ ->
            exit(process_was_not_started_by_proc_lib)
    end.
+
%% Resolve a registered name to a pid, trying the local registry
%% first and falling back to the global one. (The exit reason atom
%% below is historical, misspelling included — callers may match it.)
name_to_pid(Name) ->
    case whereis(Name) of
        LocalPid when is_pid(LocalPid) ->
            LocalPid;
        undefined ->
            case whereis_name(Name) of
                undefined -> exit(could_not_find_registerd_name);
                GlobalPid -> GlobalPid
            end
    end.
+
%% Resolve a globally registered name to a pid, or 'undefined'.
%% NOTE(review): this reads global's internal 'global_names' ets
%% table directly instead of calling global:whereis_name/1 —
%% presumably to avoid a trip through the global server; confirm the
%% 5-tuple record shape against the targeted OTP releases. For a
%% local pid an explicit liveness check avoids returning a dead pid.
whereis_name(Name) ->
    case ets:lookup(global_names, Name) of
        [{_Name, Pid, _Method, _RPid, _Ref}] ->
            if node(Pid) == node() ->
                    case is_process_alive(Pid) of
                        true  -> Pid;
                        false -> undefined
                    end;
               true ->
                    Pid
            end;
        [] -> undefined
    end.
+
%% Resolve Mod's optional prioritise_call/4, prioritise_cast/3 and
%% prioritise_info/3 callbacks into wrapper funs (priority 0 when a
%% callback is not exported) and cache them in the state record.
find_prioritisers(GS2State = #gs2_state { mod = Mod }) ->
    DefaultTwo   = fun (_Msg, _State) -> 0 end,
    DefaultThree = fun (_Msg, _From, _State) -> 0 end,
    CallP = function_exported_or_default(Mod, 'prioritise_call', 4,
                                         DefaultThree),
    CastP = function_exported_or_default(Mod, 'prioritise_cast', 3,
                                         DefaultTwo),
    InfoP = function_exported_or_default(Mod, 'prioritise_info', 3,
                                         DefaultTwo),
    GS2State #gs2_state { prioritisers = {CallP, CastP, InfoP} }.
+
%% Wrap Mod:Fun (a prioritise_* callback of the given Arity) in a fun
%% that supplies the current queue length, or return Default when not
%% exported. The wrapper takes one argument fewer than the callback.
%% 'drop' is only honoured for arity-3 (cast/info) callbacks; any
%% non-integer result terminates the server via
%% handle_common_termination/3.
function_exported_or_default(Mod, Fun, Arity, Default) ->
    case erlang:function_exported(Mod, Fun, Arity) of
        true -> case Arity of
                    3 -> fun (Msg, GS2State = #gs2_state { queue = Queue,
                                                           state = State }) ->
                                 Length = priority_queue:len(Queue),
                                 case catch Mod:Fun(Msg, Length, State) of
                                     drop ->
                                         drop;
                                     Res when is_integer(Res) ->
                                         Res;
                                     Err ->
                                         handle_common_termination(Err, Msg, GS2State)
                                 end
                         end;
                    4 -> fun (Msg, From, GS2State = #gs2_state { queue = Queue,
                                                                 state = State }) ->
                                 Length = priority_queue:len(Queue),
                                 case catch Mod:Fun(Msg, From, Length, State) of
                                     Res when is_integer(Res) ->
                                         Res;
                                     Err ->
                                         handle_common_termination(Err, Msg, GS2State)
                                 end
                         end
                end;
        false -> Default
    end.
+
+%%-----------------------------------------------------------------
+%% Status information
+%%-----------------------------------------------------------------
%% sys:get_status/2 callback: build the standard status sections,
%% delegating module-specific state rendering to Mod:format_status/2
%% and queue rendering to Mod:format_message_queue/2 when exported.
format_status(Opt, StatusData) ->
    [PDict, SysState, Parent, Debug,
     #gs2_state{name = Name, state = State, mod = Mod, queue = Queue}] =
        StatusData,
    NameTag = if is_pid(Name)  -> pid_to_list(Name);
                 is_atom(Name) -> Name
              end,
    Header = lists:concat(["Status for generic server ", NameTag]),
    Log = sys:get_debug(log, Debug, []),
    Specific = callback(Mod, format_status, [Opt, [PDict, State]],
                        fun () -> [{data, [{"State", State}]}] end),
    Messages = callback(Mod, format_message_queue, [Opt, Queue],
                        fun () -> priority_queue:to_list(Queue) end),
    [{header, Header},
     {data, [{"Status",          SysState},
             {"Parent",          Parent},
             {"Logged events",   Log},
             {"Queued messages", Messages}]} |
     Specific].
+
%% Call Mod:FunName(Args...) when exported, falling back to
%% DefaultThunk() when it is absent or crashes.
callback(Mod, FunName, Args, DefaultThunk) ->
    Arity = length(Args),
    case erlang:function_exported(Mod, FunName, Arity) of
        false ->
            DefaultThunk();
        true ->
            case catch apply(Mod, FunName, Args) of
                {'EXIT', _} -> DefaultThunk();
                Result      -> Result
            end
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(gm).
+
+%% Guaranteed Multicast
+%% ====================
+%%
+%% This module provides the ability to create named groups of
+%% processes to which members can be dynamically added and removed,
+%% and for messages to be broadcast within the group that are
+%% guaranteed to reach all members of the group during the lifetime of
+%% the message. The lifetime of a message is defined as being, at a
+%% minimum, the time from which the message is first sent to any
+%% member of the group, up until the time at which it is known by the
+%% member who published the message that the message has reached all
+%% group members.
+%%
+%% The guarantee given is that provided a message, once sent, makes it
+%% to members who do not all leave the group, the message will
+%% continue to propagate to all group members.
+%%
+%% Another way of stating the guarantee is that if member P publishes
+%% messages m and m', then for all members P', if P' is a member of
+%% the group prior to the publication of m, and P' receives m', then
+%% P' will receive m.
+%%
+%% Note that only local-ordering is enforced: i.e. if member P sends
+%% message m and then message m', then for-all members P', if P'
+%% receives m and m', then they will receive m' after m. Causality
+%% ordering is _not_ enforced. I.e. if member P receives message m
+%% and as a result publishes message m', there is no guarantee that
+%% other members P' will receive m before m'.
+%%
+%%
+%% API Use
+%% -------
+%%
+%% Mnesia must be started. Use the idempotent create_tables/0 function
+%% to create the tables required.
+%%
+%% start_link/3
+%% Provide the group name, the callback module name, and any arguments
+%% you wish to be passed into the callback module's functions. The
+%% joined/2 function will be called when we have joined the group,
+%% with the arguments passed to start_link and a list of the current
+%% members of the group. See the callbacks specs and the comments
+%% below for further details of the callback functions.
+%%
+%% leave/1
+%% Provide the Pid. Removes the Pid from the group. The callback
+%% terminate/2 function will be called.
+%%
+%% broadcast/2
+%% Provide the Pid and a Message. The message will be sent to all
+%% members of the group as per the guarantees given above. This is a
+%% cast and the function call will return immediately. There is no
+%% guarantee that the message will reach any member of the group.
+%%
+%% confirmed_broadcast/2
+%% Provide the Pid and a Message. As per broadcast/2 except that this
+%% is a call, not a cast, and only returns 'ok' once the Message has
+%% reached every member of the group. Do not call
+%% confirmed_broadcast/2 directly from the callback module otherwise
+%% you will deadlock the entire group.
+%%
+%% info/1
+%% Provide the Pid. Returns a proplist with various facts, including
+%% the group name and the current group members.
+%%
+%% validate_members/2
+%% Check whether a given member list agrees with the chosen member's
+%% view. Any differences will be communicated via the members_changed
+%% callback. If there are no differences then there will be no reply.
+%% Note that members will not necessarily share the same view.
+%%
+%% forget_group/1
+%% Provide the group name. Removes its mnesia record. Makes no attempt
+%% to ensure the group is empty.
+%%
+%% Implementation Overview
+%% -----------------------
+%%
+%% One possible means of implementation would be a fan-out from the
+%% sender to every member of the group. This would require that the
+%% group is fully connected, and, in the event that the original
+%% sender of the message disappears from the group before the message
+%% has made it to every member of the group, raises questions as to
+%% who is responsible for sending on the message to new group members.
+%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] -
+%% if the sender dies part way through, who is responsible for
+%% ensuring that the remaining Members receive the Msg? In the event
+%% that within the group, messages sent are broadcast from a subset of
+%% the members, the fan-out arrangement has the potential to
+%% substantially impact the CPU and network workload of such members,
+%% as such members would have to accommodate the cost of sending each
+%% message to every group member.
+%%
+%% Instead, if the members of the group are arranged in a chain, then
+%% it becomes easier to reason about who within the group has received
+%% each message and who has not. It eases issues of responsibility: in
+%% the event of a group member disappearing, the nearest upstream
+%% member of the chain is responsible for ensuring that messages
+%% continue to propagate down the chain. It also results in equal
+%% distribution of sending and receiving workload, even if all
+%% messages are being sent from just a single group member. This
+%% configuration has the further advantage that it is not necessary
+%% for every group member to know of every other group member, and
+%% even that a group member does not have to be accessible from all
+%% other group members.
+%%
+%% Performance is kept high by permitting pipelining and all
+%% communication between joined group members is asynchronous. In the
+%% chain A -> B -> C -> D, if A sends a message to the group, it will
+%% not directly contact C or D. However, it must know that D receives
+%% the message (in addition to B and C) before it can consider the
+%% message fully sent. A simplistic implementation would require that
+%% D replies to C, C replies to B and B then replies to A. This would
+%% result in a propagation delay of twice the length of the chain. It
+%% would also require, in the event of the failure of C, that D knows
+%% to directly contact B and issue the necessary replies. Instead, the
+%% chain forms a ring: D sends the message on to A: D does not
+%% distinguish A as the sender, merely as the next member (downstream)
+%% within the chain (which has now become a ring). When A receives
+%% from D messages that A sent, it knows that all members have
+%% received the message. However, the message is not dead yet: if C
+%% died as B was sending to C, then B would need to detect the death
+%% of C and forward the message on to D instead: thus every node has
+%% to remember every message published until it is told that it can
+%% forget about the message. This is essential not just for dealing
+%% with failure of members, but also for the addition of new members.
+%%
+%% Thus once A receives the message back again, it then sends to B an
+%% acknowledgement for the message, indicating that B can now forget
+%% about the message. B does so, and forwards the ack to C. C forgets
+%% the message, and forwards the ack to D, which forgets the message
+%% and finally forwards the ack back to A. At this point, A takes no
+%% further action: the message and its acknowledgement have made it to
+%% every member of the group. The message is now dead, and any new
+%% member joining the group at this point will not receive the
+%% message.
+%%
+%% We therefore have two roles:
+%%
+%% 1. The sender, who upon receiving their own messages back, must
+%% then send out acknowledgements, and upon receiving their own
+%% acknowledgements back perform no further action.
+%%
+%% 2. The other group members who upon receiving messages and
+%% acknowledgements must update their own internal state accordingly
+%% (the sending member must also do this in order to be able to
+%% accommodate failures), and forward messages on to their downstream
+%% neighbours.
+%%
+%%
+%% Implementation: It gets trickier
+%% --------------------------------
+%%
+%% Chain A -> B -> C -> D
+%%
+%% A publishes a message which B receives. A now dies. B and D will
+%% detect the death of A, and will link up, thus the chain is now B ->
+%% C -> D. B forwards A's message on to C, who forwards it to D, who
+%% forwards it to B. Thus B is now responsible for A's messages - both
+%% publications and acknowledgements that were in flight at the point
+%% at which A died. Even worse is that this is transitive: after B
+%% forwards A's message to C, B dies as well. Now C is not only
+%% responsible for B's in-flight messages, but is also responsible for
+%% A's in-flight messages.
+%%
+%% Lemma 1: A member can only determine which dead members they have
+%% inherited responsibility for if there is a total ordering on the
+%% conflicting additions and subtractions of members from the group.
+%%
+%% Consider the simultaneous death of B and addition of B' that
+%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or
+%% C is responsible for in-flight messages from B. It is easy to
+%% ensure that at least one of them thinks they have inherited B, but
+%% if we do not ensure that exactly one of them inherits B, then we
+%% could have B' converting publishes to acks, which then will crash C
+%% as C does not believe it has issued acks for those messages.
+%%
+%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E
+%% becoming A -> C' -> E. Who has inherited which of B, C and D?
+%%
+%% However, for non-conflicting membership changes, only a partial
+%% ordering is required. For example, A -> B -> C becoming A -> A' ->
+%% B. The addition of A', between A and B can have no conflicts with
+%% the death of C: it is clear that A has inherited C's messages.
+%%
+%% For ease of implementation, we adopt the simple solution, of
+%% imposing a total order on all membership changes.
+%%
+%% On the death of a member, it is ensured the dead member's
+%% neighbours become aware of the death, and the upstream neighbour
+%% now sends to its new downstream neighbour its state, including the
+%% messages pending acknowledgement. The downstream neighbour can then
+%% use this to calculate which publishes and acknowledgements it has
+%% missed out on, due to the death of its old upstream. Thus the
+%% downstream can catch up, and continues the propagation of messages
+%% through the group.
+%%
+%% Lemma 2: When a member is joining, it must synchronously
+%% communicate with its upstream member in order to receive its
+%% starting state atomically with its addition to the group.
+%%
+%% New members must start with the same state as their nearest
+%% upstream neighbour. This ensures that it is not surprised by
+%% acknowledgements they are sent, and that should their downstream
+%% neighbour die, they are able to send the correct state to their new
+%% downstream neighbour to ensure it can catch up. Thus in the
+%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' ->
+%% C, A' must start with the state of A, so that it can send C the
+%% correct state when B dies, allowing C to detect any missed
+%% messages.
+%%
+%% If A' starts by adding itself to the group membership, A could then
+%% die, without A' having received the necessary state from A. This
+%% would leave A' responsible for in-flight messages from A, but
+%% having the least knowledge of all, of those messages. Thus A' must
+%% start by synchronously calling A, which then immediately sends A'
+%% back its state. A then adds A' to the group. If A dies at this
+%% point then A' will be able to see this (as A' will fail to appear
+%% in the group membership), and thus A' will ignore the state it
+%% receives from A, and will simply repeat the process, trying to now
+%% join downstream from some other member. This ensures that should
+%% the upstream die as soon as the new member has been joined, the new
+%% member is guaranteed to receive the correct state, allowing it to
+%% correctly process messages inherited due to the death of its
+%% upstream neighbour.
+%%
+%% The canonical definition of the group membership is held by a
+%% distributed database. Whilst this allows the total ordering of
+%% changes to be achieved, it is nevertheless undesirable to have to
+%% query this database for the current view, upon receiving each
+%% message. Instead, we wish for members to be able to cache a view of
+%% the group membership, which then requires a cache invalidation
+%% mechanism. Each member maintains its own view of the group
+%% membership. Thus when the group's membership changes, members may
+%% need to become aware of such changes in order to be able to
+%% accurately process messages they receive. Because of the
+%% requirement of a total ordering of conflicting membership changes,
+%% it is not possible to use the guaranteed broadcast mechanism to
+%% communicate these changes: to achieve the necessary ordering, it
+%% would be necessary for such messages to be published by exactly one
+%% member, which can not be guaranteed given that such a member could
+%% die.
+%%
+%% The total ordering we enforce on membership changes gives rise to a
+%% view version number: every change to the membership creates a
+%% different view, and the total ordering permits a simple
+%% monotonically increasing view version number.
+%%
+%% Lemma 3: If a message is sent from a member that holds view version
+%% N, it can be correctly processed by any member receiving the
+%% message with a view version >= N.
+%%
+%% Initially, let us suppose that each view contains the ordering of
+%% every member that was ever part of the group. Dead members are
+%% marked as such. Thus we have a ring of members, some of which are
+%% dead, and are thus inherited by the nearest alive downstream
+%% member.
+%%
+%% In the chain A -> B -> C, all three members initially have view
+%% version 1, which reflects reality. B publishes a message, which is
+%% forwarded by C to A. B now dies, which A notices very quickly. Thus A
+%% updates the view, creating version 2. It now forwards B's
+%% publication, sending that message to its new downstream neighbour,
+%% C. This happens before C is aware of the death of B. C must become
+%% aware of the view change before it interprets the message it has
+%% received, otherwise it will fail to learn of the death of B, and
+%% thus will not realise it has inherited B's messages (and will
+%% likely crash).
+%%
+%% Thus very simply, we have that each subsequent view contains more
+%% information than the preceding view.
+%%
+%% However, to avoid the views growing indefinitely, we need to be
+%% able to delete members which have died _and_ for which no messages
+%% are in-flight. This requires that upon inheriting a dead member, we
+%% know the last publication sent by the dead member (this is easy: we
+%% inherit a member because we are the nearest downstream member which
+%% implies that we know at least as much as everyone else about the
+%% publications of the dead member), and we know the earliest message
+%% for which the acknowledgement is still in flight.
+%%
+%% In the chain A -> B -> C, when B dies, A will send to C its state
+%% (as C is the new downstream from A), allowing C to calculate which
+%% messages it has missed out on (described above). At this point, C
+%% also inherits B's messages. If that state from A also includes the
+%% last message published by B for which an acknowledgement has been
+%% seen, then C knows exactly which further acknowledgements it must
+%% receive (also including issuing acknowledgements for publications
+%% still in-flight that it receives), after which it is known there
+%% are no more messages in flight for B, thus all evidence that B was
+%% ever part of the group can be safely removed from the canonical
+%% group membership.
+%%
+%% Thus, for every message that a member sends, it includes with that
+%% message its view version. When a member receives a message it will
+%% update its view from the canonical copy, should its view be older
+%% than the view version included in the message it has received.
+%%
+%% The state held by each member therefore includes the messages from
+%% each publisher pending acknowledgement, the last publication seen
+%% from that publisher, and the last acknowledgement from that
+%% publisher. In the case of the member's own publications or
+%% inherited members, this last acknowledgement seen state indicates
+%% the last acknowledgement retired, rather than sent.
+%%
+%%
+%% Proof sketch
+%% ------------
+%%
+%% We need to prove that with the provided operational semantics, we
+%% can never reach a state that is not well formed from a well-formed
+%% starting state.
+%%
+%% Operational semantics (small step): straight-forward message
+%% sending, process monitoring, state updates.
+%%
+%% Well formed state: dead members inherited by exactly one non-dead
+%% member; for every entry in anyone's pending-acks, either (the
+%% publication of the message is in-flight downstream from the member
+%% and upstream from the publisher) or (the acknowledgement of the
+%% message is in-flight downstream from the publisher and upstream
+%% from the member).
+%%
+%% Proof by induction on the applicable operational semantics.
+%%
+%%
+%% Related work
+%% ------------
+%%
+%% The ring configuration and double traversal of messages around the
+%% ring is similar (though developed independently) to the LCR
+%% protocol by [Levy 2008]. However, LCR differs in several
+%% ways. Firstly, by using vector clocks, it enforces a total order of
+%% message delivery, which is unnecessary for our purposes. More
+%% significantly, it is built on top of a "group communication system"
+%% which performs the group management functions, taking
+%% responsibility away from the protocol as to how to cope with safely
+%% adding and removing members. When membership changes do occur, the
+%% protocol stipulates that every member must perform communication
+%% with every other member of the group, to ensure all outstanding
+%% deliveries complete, before the entire group transitions to the new
+%% view. This, in total, requires two sets of all-to-all synchronous
+%% communications.
+%%
+%% This is not only rather inefficient, but also does not explain what
+%% happens upon the failure of a member during this process. It does
+%% though entirely avoid the need for inheritance of responsibility of
+%% dead members that our protocol incorporates.
+%%
+%% In [Marandi et al 2010], a Paxos-based protocol is described. This
+%% work explicitly focuses on the efficiency of communication. LCR
+%% (and our protocol too) are more efficient, but at the cost of
+%% higher latency. The Ring-Paxos protocol is itself built on top of
+%% IP-multicast, which rules it out for many applications where
+%% point-to-point communication is all that can be required. They also
+%% have an excellent related work section which I really ought to
+%% read...
+%%
+%%
+%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008.
+%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast
+%% Protocol
+
+
+-behaviour(gen_server2).
+
+-export([create_tables/0, start_link/4, leave/1, broadcast/2, broadcast/3,
+ confirmed_broadcast/2, info/1, validate_members/2, forget_group/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_info/3]).
+
+-ifndef(use_specs).
+-export([behaviour_info/1]).
+-endif.
+
+-export([table_definitions/0]).
+
+%% Tunables and implementation aliases. BROADCAST_TIMER is the flush
+%% interval (ms) for the broadcast buffer; HIBERNATE_AFTER_MIN and
+%% DESIRED_HIBERNATE are gen_server2 backoff parameters (ms);
+%% VERSION_START is the initial group version number; SETS/DICT select
+%% the set and dict implementations used throughout this module.
+-define(GROUP_TABLE, gm_group).
+-define(MAX_BUFFER_SIZE, 100000000). %% 100MB
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+-define(BROADCAST_TIMER, 25).
+-define(VERSION_START, 0).
+-define(SETS, ordsets).
+-define(DICT, orddict).
+
+%% Per-process state of a group member:
+%%   self          - this member's id; a {Version, Pid} pair (see the
+%%                   left-neighbour match in prioritise_info/3)
+%%   left / right  - upstream/downstream neighbour as {Member, MonitorRef},
+%%                   or {Self, undefined} when we are alone in the ring
+%%   group_name    - key of the canonical #gm_group{} row in Mnesia
+%%   module        - user callback module; callback_args its arguments
+%%   view          - cached {Version, Dict} view of the membership
+%%   pub_count     - sequence number of our last publication (-1 = none yet)
+%%   members_state - per-publisher #member{} records; 'undefined' until joined
+%%   confirms      - queue of {PubCount, From} awaiting confirmed_broadcast
+%%                   replies
+%%   broadcast_buffer / broadcast_buffer_sz - buffered publications and
+%%                   their accumulated size hint (see MAX_BUFFER_SIZE note)
+%%   broadcast_timer - timer forcing a periodic buffer flush
+%%   txn_executor  - fun used to run Mnesia transactions
+-record(state,
+ { self,
+ left,
+ right,
+ group_name,
+ module,
+ view,
+ pub_count,
+ members_state,
+ callback_args,
+ confirms,
+ broadcast_buffer,
+ broadcast_buffer_sz,
+ broadcast_timer,
+ txn_executor
+ }).
+
+%% Canonical group record stored in Mnesia: the ordered member list plus
+%% a monotonically increasing version (the "view version" described in
+%% the header commentary).
+-record(gm_group, { name, version, members }).
+
+%% One entry in the cached view: a member's id, the set of dead members
+%% it has inherited (aliases), and its left/right ring neighbours.
+-record(view_member, { id, aliases, left, right }).
+
+%% Per-publisher bookkeeping: publications pending acknowledgement, and
+%% the last publication / acknowledgement numbers seen from them.
+-record(member, { pending_ack, last_pub, last_ack }).
+
+-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group},
+ {attributes, record_info(fields, gm_group)}]}).
+-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}).
+
+%% Tag prepended to inter-member casts (see handle_cast({?TAG, ...})).
+-define(TAG, '$gm').
+
+-ifdef(use_specs).
+
+-export_type([group_name/0]).
+
+-type(group_name() :: any()).
+-type(txn_fun() :: fun((fun(() -> any())) -> any())).
+
+-spec(create_tables/0 :: () -> 'ok' | {'aborted', any()}).
+-spec(start_link/4 :: (group_name(), atom(), any(), txn_fun()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(leave/1 :: (pid()) -> 'ok').
+-spec(broadcast/2 :: (pid(), any()) -> 'ok').
+-spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok').
+-spec(info/1 :: (pid()) -> rabbit_types:infos()).
+-spec(validate_members/2 :: (pid(), [pid()]) -> 'ok').
+-spec(forget_group/1 :: (group_name()) -> 'ok').
+
+%% The joined, members_changed and handle_msg callbacks can all return
+%% any of the following terms:
+%%
+%% 'ok' - the callback function returns normally
+%%
+%% {'stop', Reason} - the callback indicates the member should stop
+%% with reason Reason and should leave the group.
+%%
+%% {'become', Module, Args} - the callback indicates that the callback
+%% module should be changed to Module and that the callback functions
+%% should now be passed the arguments Args. This allows the callback
+%% module to be dynamically changed.
+
+%% Called when we've successfully joined the group. Supplied with Args
+%% provided in start_link, plus current group members.
+-callback joined(Args :: term(), Members :: [pid()]) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Supplied with Args provided in start_link, the list of new members
+%% and the list of members previously known to us that have since
+%% died. Note that if a member joins and dies very quickly, it's
+%% possible that we will never see that member appear in either births
+%% or deaths. However we are guaranteed that (1) we will see a member
+%% joining either in the births here, or in the members passed to
+%% joined/2 before receiving any messages from it; and (2) we will not
+%% see members die that we have not seen born (or supplied in the
+%% members to joined/2).
+-callback members_changed(Args :: term(),
+ Births :: [pid()], Deaths :: [pid()]) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Supplied with Args provided in start_link, the sender, and the
+%% message. This does get called for messages injected by this member,
+%% however, in such cases, there is no special significance of this
+%% invocation: it does not indicate that the message has made it to
+%% any other members, let alone all other members.
+-callback handle_msg(Args :: term(), From :: pid(), Message :: term()) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Called on gm member termination as per rules in gen_server, with
+%% the Args provided in start_link plus the termination Reason.
+-callback terminate(Args :: term(), Reason :: term()) ->
+ ok | term().
+
+-else.
+
+%% Pre-Dialyzer fallback (compiled when use_specs is not defined):
+%% enumerate the callbacks a gm behaviour module must export.
+behaviour_info(callbacks) ->
+ [{joined, 2}, {members_changed, 3}, {handle_msg, 3}, {terminate, 2}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
+
+%% Create the Mnesia table(s) gm relies on. Idempotent: a table that
+%% already exists counts as success. Returns 'ok', or the first
+%% {aborted, Reason} result from mnesia:create_table/2, at which point
+%% no further tables are attempted.
+create_tables() ->
+ create_tables([?TABLE]).
+
+create_tables([{Table, Attributes} | Rest]) ->
+ Result = mnesia:create_table(Table, Attributes),
+ case Result of
+ {atomic, ok} -> create_tables(Rest);
+ {aborted, {already_exists, Table}} -> create_tables(Rest);
+ Err -> Err
+ end;
+create_tables([]) ->
+ ok.
+
+%% Expose the table definition (name plus attributes, prefixed with the
+%% match spec) for external table-management code.
+table_definitions() ->
+ {Name, Attributes} = ?TABLE,
+ [{Name, [?TABLE_MATCH | Attributes]}].
+
+%% Public API: thin wrappers dispatching to the gen_server2 process.
+
+%% Start a new group member for GroupName, using callback Module with
+%% Args, and TxnFun to execute Mnesia transactions.
+start_link(GroupName, Module, Args, TxnFun) ->
+ gen_server2:start_link(?MODULE, [GroupName, Module, Args, TxnFun], []).
+
+%% Ask the member to leave the group and stop (asynchronous).
+leave(Server) ->
+ gen_server2:cast(Server, leave).
+
+%% Broadcast Msg to the group (asynchronous). broadcast/2 supplies a
+%% size hint of 0; see the MAX_BUFFER_SIZE commentary for what the
+%% hint is used for.
+broadcast(Server, Msg) -> broadcast(Server, Msg, 0).
+
+broadcast(Server, Msg, SizeHint) ->
+ gen_server2:cast(Server, {broadcast, Msg, SizeHint}).
+
+%% Broadcast Msg and block until it has travelled the whole ring
+%% (i.e. every member has received it).
+confirmed_broadcast(Server, Msg) ->
+ gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity).
+
+%% Return an infos proplist (group name, module, live members).
+info(Server) ->
+ gen_server2:call(Server, info, infinity).
+
+%% Ask the member to diff OldMembers against its current view and fire
+%% the members_changed callback if they differ (asynchronous).
+validate_members(Server, Members) ->
+ gen_server2:cast(Server, {validate_members, Members}).
+
+%% Remove the canonical record for GroupName from the group table.
+%% Crashes with a badmatch if the transaction does not commit.
+forget_group(GroupName) ->
+ Delete = fun () -> mnesia:delete({?GROUP_TABLE, GroupName}) end,
+ {atomic, ok} = mnesia:sync_transaction(Delete),
+ ok.
+
+%% gen_server2 init callback. Seeds the per-process RNG (used when
+%% picking a member to join behind) and casts 'join' to ourselves so
+%% that the potentially slow join protocol (see Lemma 2 above) runs
+%% after init/1 has returned. view and members_state stay 'undefined'
+%% until the join completes.
+init([GroupName, Module, Args, TxnFun]) ->
+ put(process_name, {?MODULE, GroupName}),
+ %% os:timestamp/0 rather than the deprecated erlang:now/0: now/0
+ %% forces VM-wide monotonicity (a scalability bottleneck), which is
+ %% unnecessary when the value is only used to seed an RNG.
+ {MegaSecs, Secs, MicroSecs} = os:timestamp(),
+ random:seed(MegaSecs, Secs, MicroSecs),
+ Self = make_member(GroupName),
+ gen_server2:cast(self(), join),
+ {ok, #state { self = Self,
+ left = {Self, undefined},
+ right = {Self, undefined},
+ group_name = GroupName,
+ module = Module,
+ view = undefined,
+ pub_count = -1,
+ members_state = undefined,
+ callback_args = Args,
+ confirms = queue:new(),
+ broadcast_buffer = [],
+ broadcast_buffer_sz = 0,
+ broadcast_timer = undefined,
+ txn_executor = TxnFun }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+
+%% Not yet joined: cannot broadcast, tell the caller so.
+handle_call({confirmed_broadcast, _Msg}, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_joined, State);
+
+%% Singleton ring (our right neighbour is ourselves): no other member
+%% can receive the message, so deliver it locally via the callback and
+%% confirm immediately.
+handle_call({confirmed_broadcast, Msg}, _From,
+ State = #state { self = Self,
+ right = {Self, undefined},
+ module = Module,
+ callback_args = Args }) ->
+ handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
+ ok, State});
+
+%% Normal case: publish, remember the caller against the publication
+%% number, and flush so the message leaves immediately. The caller is
+%% replied to later, when our own publication comes back round the ring
+%% (see maybe_confirm).
+handle_call({confirmed_broadcast, Msg}, From, State) ->
+ {Result, State1 = #state { pub_count = PubCount, confirms = Confirms }} =
+ internal_broadcast(Msg, 0, State),
+ Confirms1 = queue:in({PubCount, From}, Confirms),
+ handle_callback_result({Result, flush_broadcast_buffer(
+ State1 #state { confirms = Confirms1 })});
+
+handle_call(info, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_joined, State);
+
+handle_call(info, _From, State = #state { group_name = GroupName,
+ module = Module,
+ view = View }) ->
+ reply([{group_name, GroupName},
+ {module, Module},
+ {group_members, get_pids(alive_view_members(View))}], State);
+
+%% A prospective new member asks to join downstream of us before we
+%% have finished our own catchup: refuse, it will retry elsewhere.
+handle_call({add_on_right, _NewMember}, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_ready, State);
+
+%% A prospective new member joins downstream of us: record it in the
+%% canonical group (total order via TxnFun), then synchronously send it
+%% our state as a catchup - this is the Lemma 2 requirement that a new
+%% member receives its starting state atomically with its addition.
+handle_call({add_on_right, NewMember}, _From,
+ State = #state { self = Self,
+ group_name = GroupName,
+ members_state = MembersState,
+ txn_executor = TxnFun }) ->
+ Group = record_new_member_in_group(NewMember, Self, GroupName, TxnFun),
+ View1 = group_to_view(Group),
+ MembersState1 = remove_erased_members(MembersState, View1),
+ ok = send_right(NewMember, View1,
+ {catchup, Self, prepare_members_state(MembersState1)}),
+ {Result, State1} = change_view(View1, State #state {
+ members_state = MembersState1 }),
+ handle_callback_result({Result, {ok, Group}, State1}).
+
+%% Inter-member message tagged with the sender's view version. If the
+%% sender's view is newer than ours, refresh our cached view from the
+%% canonical Mnesia copy before interpreting the message (Lemma 3:
+%% a message is only correctly processed with a view >= the sender's).
+handle_cast({?TAG, ReqVer, Msg},
+ State = #state { view = View,
+ members_state = MembersState,
+ group_name = GroupName }) ->
+ {Result, State1} =
+ case needs_view_update(ReqVer, View) of
+ true -> View1 = group_to_view(dirty_read_group(GroupName)),
+ MemberState1 = remove_erased_members(MembersState, View1),
+ change_view(View1, State #state {
+ members_state = MemberState1 });
+ false -> {ok, State}
+ end,
+ handle_callback_result(
+ if_callback_success(
+ Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1));
+
+%% Not yet joined: silently drop the (unconfirmed) broadcast.
+handle_cast({broadcast, _Msg, _SizeHint},
+ State = #state { members_state = undefined }) ->
+ noreply(State);
+
+%% Singleton ring: deliver locally via the callback only.
+handle_cast({broadcast, Msg, _SizeHint},
+ State = #state { self = Self,
+ right = {Self, undefined},
+ module = Module,
+ callback_args = Args }) ->
+ handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
+ State});
+
+%% Normal broadcast: buffer the publication; only flush if the buffer
+%% has grown past MAX_BUFFER_SIZE (otherwise the timer/timeout flushes).
+handle_cast({broadcast, Msg, SizeHint}, State) ->
+ {Result, State1} = internal_broadcast(Msg, SizeHint, State),
+ handle_callback_result({Result, maybe_flush_broadcast_buffer(State1)});
+
+%% Deferred from init/1: join the group. If we are the only live member
+%% we can start with a blank members_state; otherwise it stays
+%% 'undefined' until the catchup from our new left neighbour arrives.
+handle_cast(join, State = #state { self = Self,
+ group_name = GroupName,
+ members_state = undefined,
+ module = Module,
+ callback_args = Args,
+ txn_executor = TxnFun }) ->
+ View = join_group(Self, GroupName, TxnFun),
+ MembersState =
+ case alive_view_members(View) of
+ [Self] -> blank_member_state();
+ _ -> undefined
+ end,
+ State1 = check_neighbours(State #state { view = View,
+ members_state = MembersState }),
+ handle_callback_result(
+ {Module:joined(Args, get_pids(all_known_members(View))), State1});
+
+%% Compare the caller's member list against our view and report any
+%% discrepancy through the members_changed callback.
+handle_cast({validate_members, OldMembers},
+ State = #state { view = View,
+ module = Module,
+ callback_args = Args }) ->
+ NewMembers = get_pids(all_known_members(View)),
+ Births = NewMembers -- OldMembers,
+ Deaths = OldMembers -- NewMembers,
+ case {Births, Deaths} of
+ {[], []} -> noreply(State);
+ _ -> Result = Module:members_changed(Args, Births, Deaths),
+ handle_callback_result({Result, State})
+ end;
+
+handle_cast(leave, State) ->
+ {stop, normal, State}.
+
+
+%% Timer-driven flush of the broadcast buffer (timer set by
+%% ensure_broadcast_timer); clear the timer ref so a new one is armed.
+handle_info(flush, State) ->
+ noreply(
+ flush_broadcast_buffer(State #state { broadcast_timer = undefined }));
+
+%% gen_server2 timeout (0 is returned by flush_timeout/1 whenever the
+%% buffer is non-empty): flush now.
+handle_info(timeout, State) ->
+ noreply(flush_broadcast_buffer(State));
+
+%% A monitored neighbour died. Identify which neighbour by matching the
+%% monitor ref; a ref matching neither (e.g. from a previous neighbour)
+%% is ignored, as is a deliberate ring shutdown. Otherwise record the
+%% death in the canonical group (total order via TxnFun) and adopt the
+%% resulting view; if we are now the only live member, start afresh
+%% with a blank members_state and fail all pending confirms.
+handle_info({'DOWN', MRef, process, _Pid, Reason},
+ State = #state { self = Self,
+ left = Left,
+ right = Right,
+ group_name = GroupName,
+ confirms = Confirms,
+ txn_executor = TxnFun }) ->
+ Member = case {Left, Right} of
+ {{Member1, MRef}, _} -> Member1;
+ {_, {Member1, MRef}} -> Member1;
+ _ -> undefined
+ end,
+ case {Member, Reason} of
+ {undefined, _} ->
+ noreply(State);
+ {_, {shutdown, ring_shutdown}} ->
+ noreply(State);
+ _ ->
+ View1 = group_to_view(record_dead_member_in_group(
+ Member, GroupName, TxnFun)),
+ handle_callback_result(
+ case alive_view_members(View1) of
+ [Self] -> maybe_erase_aliases(
+ State #state {
+ members_state = blank_member_state(),
+ confirms = purge_confirms(Confirms) },
+ View1);
+ _ -> change_view(View1, State)
+ end)
+ end.
+
+
+%% On shutdown, push out any buffered publications (the returned state
+%% is deliberately discarded - we are stopping), then let the callback
+%% module clean up.
+terminate(Reason, State = #state { module = Module,
+ callback_args = Args }) ->
+ flush_broadcast_buffer(State),
+ Module:terminate(Args, Reason).
+
+
+%% No state transformation needed across code upgrades.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% gen_server2 message prioritisation. Priority 1 beats the default 0.
+%% Flushes jump the queue so the broadcast buffer drains promptly.
+prioritise_info(flush, _Len, _State) ->
+ 1;
+%% DOWN messages should not overtake initial catchups; if they do we
+%% will receive a DOWN we do not know what to do with.
+prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len,
+ #state { members_state = undefined }) ->
+ 0;
+%% We should not prioritise DOWN messages from our left since
+%% otherwise the DOWN can overtake any last activity from the left,
+%% causing that activity to be lost.
+prioritise_info({'DOWN', _MRef, process, LeftPid, _Reason}, _Len,
+ #state { left = {{_LeftVer, LeftPid}, _MRef2} }) ->
+ 0;
+%% But prioritise all other DOWNs - we want to make sure we are not
+%% sending activity into the void for too long because our right is
+%% down but we don't know it.
+prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len, _State) ->
+ 1;
+prioritise_info(_, _Len, _State) ->
+ 0.
+
+
+%% Core protocol message handling (invoked via handle_msg_true/3 after
+%% any required view update).
+handle_msg(check_neighbours, State) ->
+ %% no-op - it's already been done by the calling handle_cast
+ {ok, State};
+
+%% Catchup from our left neighbour while we have no members_state yet:
+%% this is our starting state (Lemma 2). Forward the catchup to our
+%% right first, so downstream members that were also waiting can
+%% catch up too, then adopt the state as our own.
+handle_msg({catchup, Left, MembersStateLeft},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ right = {Right, _MRefR},
+ view = View,
+ members_state = undefined }) ->
+ ok = send_right(Right, View, {catchup, Self, MembersStateLeft}),
+ MembersStateLeft1 = build_members_state(MembersStateLeft),
+ {ok, State #state { members_state = MembersStateLeft1 }};
+
+%% Catchup from a (new) left neighbour when we already have state: diff
+%% the two states per publisher and synthesise the activity we missed
+%% due to the death of our old upstream. For publishers we are an alias
+%% of (i.e. we inherited them), the pubs present upstream but not here
+%% are re-issued; for others, the difference yields both missed pubs
+%% and missed acks. The synthesised activity is then processed as if it
+%% had arrived normally.
+handle_msg({catchup, Left, MembersStateLeft},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ view = View,
+ members_state = MembersState })
+ when MembersState =/= undefined ->
+ MembersStateLeft1 = build_members_state(MembersStateLeft),
+ AllMembers = lists:usort(?DICT:fetch_keys(MembersState) ++
+ ?DICT:fetch_keys(MembersStateLeft1)),
+ {MembersState1, Activity} =
+ lists:foldl(
+ fun (Id, MembersStateActivity) ->
+ #member { pending_ack = PALeft, last_ack = LA } =
+ find_member_or_blank(Id, MembersStateLeft1),
+ with_member_acc(
+ fun (#member { pending_ack = PA } = Member, Activity1) ->
+ case is_member_alias(Id, Self, View) of
+ true ->
+ {_AcksInFlight, Pubs, _PA1} =
+ find_prefix_common_suffix(PALeft, PA),
+ {Member #member { last_ack = LA },
+ activity_cons(Id, pubs_from_queue(Pubs),
+ [], Activity1)};
+ false ->
+ {Acks, _Common, Pubs} =
+ find_prefix_common_suffix(PA, PALeft),
+ {Member,
+ activity_cons(Id, pubs_from_queue(Pubs),
+ acks_from_queue(Acks),
+ Activity1)}
+ end
+ end, Id, MembersStateActivity)
+ end, {MembersState, activity_nil()}, AllMembers),
+ handle_msg({activity, Left, activity_finalise(Activity)},
+ State #state { members_state = MembersState1 });
+
+%% Catchup from someone who is no longer our left neighbour: stale,
+%% ignore it.
+handle_msg({catchup, _NotLeft, _MembersState}, State) ->
+ {ok, State};
+
+%% Activity (pubs and acks, grouped by publisher) from our left
+%% neighbour. For publishers we are an alias of, returning pubs are
+%% converted into acks (they have completed the ring) and any pending
+%% confirmed_broadcast callers are replied to; for all other
+%% publishers, pubs are appended to pending_ack and acks retire
+%% entries, and the activity is recorded for forwarding downstream.
+%% Afterwards, forward the resulting activity to our right, and erase
+%% any dead aliases for which no messages remain in flight.
+handle_msg({activity, Left, Activity},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ view = View,
+ members_state = MembersState,
+ confirms = Confirms })
+ when MembersState =/= undefined ->
+ {MembersState1, {Confirms1, Activity1}} =
+ lists:foldl(
+ fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
+ with_member_acc(
+ fun (Member = #member { pending_ack = PA,
+ last_pub = LP,
+ last_ack = LA },
+ {Confirms2, Activity2}) ->
+ case is_member_alias(Id, Self, View) of
+ true ->
+ {ToAck, PA1} =
+ find_common(queue_from_pubs(Pubs), PA,
+ queue:new()),
+ LA1 = last_ack(Acks, LA),
+ AckNums = acks_from_queue(ToAck),
+ Confirms3 = maybe_confirm(
+ Self, Id, Confirms2, AckNums),
+ {Member #member { pending_ack = PA1,
+ last_ack = LA1 },
+ {Confirms3,
+ activity_cons(
+ Id, [], AckNums, Activity2)}};
+ false ->
+ PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
+ LA1 = last_ack(Acks, LA),
+ LP1 = last_pub(Pubs, LP),
+ {Member #member { pending_ack = PA1,
+ last_pub = LP1,
+ last_ack = LA1 },
+ {Confirms2,
+ activity_cons(Id, Pubs, Acks, Activity2)}}
+ end
+ end, Id, MembersStateConfirmsActivity)
+ end, {MembersState, {Confirms, activity_nil()}}, Activity),
+ State1 = State #state { members_state = MembersState1,
+ confirms = Confirms1 },
+ Activity3 = activity_finalise(Activity1),
+ ok = maybe_send_activity(Activity3, State1),
+ {Result, State2} = maybe_erase_aliases(State1, View),
+ if_callback_success(
+ Result, fun activity_true/3, fun activity_false/3, Activity3, State2);
+
+%% Activity from someone who is no longer our left neighbour: stale,
+%% ignore it.
+handle_msg({activity, _NotLeft, _Activity}, State) ->
+ {ok, State}.
+
+
+%% gen_server2 return helpers: always re-arm (or cancel) the broadcast
+%% timer, and use a 0 timeout when the buffer is non-empty so that the
+%% 'timeout' info message flushes it as soon as the mailbox is empty.
+noreply(State) ->
+ {noreply, ensure_broadcast_timer(State), flush_timeout(State)}.
+
+reply(Reply, State) ->
+ {reply, Reply, ensure_broadcast_timer(State), flush_timeout(State)}.
+
+flush_timeout(#state{broadcast_buffer = []}) -> hibernate;
+flush_timeout(_) -> 0.
+
+%% Keep the broadcast flush timer consistent with the buffer: no timer
+%% when the buffer is empty, a BROADCAST_TIMER-ms timer when it is not.
+%% Note the cancel does not flush an already-delivered 'flush' message;
+%% that is harmless as flush_broadcast_buffer/1 is a no-op on an empty
+%% buffer.
+ensure_broadcast_timer(State = #state { broadcast_buffer = [],
+ broadcast_timer = undefined }) ->
+ State;
+ensure_broadcast_timer(State = #state { broadcast_buffer = [],
+ broadcast_timer = TRef }) ->
+ erlang:cancel_timer(TRef),
+ State #state { broadcast_timer = undefined };
+ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
+ TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
+ State #state { broadcast_timer = TRef };
+ensure_broadcast_timer(State) ->
+ State.
+
+%% Assign the next publication number to Msg, deliver it locally via
+%% the callback, and prepend it to the broadcast buffer (buffer is kept
+%% newest-first; flush_broadcast_buffer reverses it). SizeHint
+%% accumulates into broadcast_buffer_sz for the MAX_BUFFER_SIZE check.
+%% Returns {CallbackResult, NewState}.
+internal_broadcast(Msg, SizeHint,
+ State = #state { self = Self,
+ pub_count = PubCount,
+ module = Module,
+ callback_args = Args,
+ broadcast_buffer = Buffer,
+ broadcast_buffer_sz = BufferSize }) ->
+ PubCount1 = PubCount + 1,
+ {Module:handle_msg(Args, get_pid(Self), Msg),
+ State #state { pub_count = PubCount1,
+ broadcast_buffer = [{PubCount1, Msg} | Buffer],
+ broadcast_buffer_sz = BufferSize + SizeHint}}.
+
+%% The Erlang distribution mechanism has an interesting quirk - it
+%% will kill the VM cold with "Absurdly large distribution output data
+%% buffer" if you attempt to send a message which serialises out to
+%% more than 2^31 bytes in size. It's therefore a very good idea to
+%% make sure that we don't exceed that size!
+%%
+%% Now, we could figure out the size of messages as they come in using
+%% size(term_to_binary(Msg)) or similar. The trouble is, that requires
+%% us to serialise the message only to throw the serialised form
+%% away. Hard to believe that's a sensible thing to do. So instead we
+%% accept a size hint from the application, via broadcast/3. This size
+%% hint can be the size of anything in the message which we expect
+%% could be large, and we just ignore the size of any small bits of
+%% the message term. Therefore MAX_BUFFER_SIZE is set somewhat
+%% conservatively at 100MB - but the buffer is only to allow us to
+%% buffer tiny messages anyway, so 100MB is plenty.
+
+%% Flush the broadcast buffer once the accumulated size hints exceed
+%% MAX_BUFFER_SIZE; otherwise leave the state untouched.
+maybe_flush_broadcast_buffer(State = #state{broadcast_buffer_sz = Size})
+ when Size > ?MAX_BUFFER_SIZE ->
+ flush_broadcast_buffer(State);
+maybe_flush_broadcast_buffer(State) ->
+ State.
+
+%% Send all buffered publications as a single activity to our right
+%% neighbour, append them to our own pending_ack queue (we must also
+%% track our own pubs in order to cope with neighbour failure - see
+%% the header commentary), and reset the buffer.
+flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) ->
+ State;
+flush_broadcast_buffer(State = #state { self = Self,
+ members_state = MembersState,
+ broadcast_buffer = Buffer,
+ pub_count = PubCount }) ->
+ [{PubCount, _Msg}|_] = Buffer, %% ASSERTION match on PubCount
+ Pubs = lists:reverse(Buffer),
+ Activity = activity_cons(Self, Pubs, [], activity_nil()),
+ ok = maybe_send_activity(activity_finalise(Activity), State),
+ MembersState1 = with_member(
+ fun (Member = #member { pending_ack = PA }) ->
+ PA1 = queue:join(PA, queue:from_list(Pubs)),
+ Member #member { pending_ack = PA1,
+ last_pub = PubCount }
+ end, Self, MembersState),
+ State #state { members_state = MembersState1,
+ broadcast_buffer = [],
+ broadcast_buffer_sz = 0}.
+
+
+%% ---------------------------------------------------------------------------
+%% View construction and inspection
+%% ---------------------------------------------------------------------------
+
+%% A view is {Version, Dict-of-#view_member{}}; these helpers wrap that
+%% representation.
+
+%% True when a message stamped ReqVer requires us to refresh our view.
+needs_view_update(ReqVer, {Ver, _View}) -> Ver < ReqVer.
+
+view_version({Ver, _View}) -> Ver.
+
+%% Members in the canonical list are either plain ids (alive) or
+%% {dead, Id} tombstones.
+is_member_alive({dead, _Member}) -> false;
+is_member_alive(_) -> true.
+
+%% Is Member either Self, or a dead member that Self has inherited?
+is_member_alias(Self, Self, _View) ->
+ true;
+is_member_alias(Member, Self, View) ->
+ ?SETS:is_element(Member,
+ ((fetch_view_member(Self, View)) #view_member.aliases)).
+
+dead_member_id({dead, Member}) -> Member.
+
+store_view_member(VMember = #view_member { id = Id }, {Ver, View}) ->
+ {Ver, ?DICT:store(Id, VMember, View)}.
+
+%% Apply Fun to the #view_member{} for Id and store the result back.
+with_view_member(Fun, View, Id) ->
+ store_view_member(Fun(fetch_view_member(Id, View)), View).
+
+fetch_view_member(Id, {_Ver, View}) -> ?DICT:fetch(Id, View).
+
+find_view_member(Id, {_Ver, View}) -> ?DICT:find(Id, View).
+
+blank_view(Ver) -> {Ver, ?DICT:new()}.
+
+%% Keys of the view dict are the live members only (dead members exist
+%% solely as aliases of their inheritor).
+alive_view_members({_Ver, View}) -> ?DICT:fetch_keys(View).
+
+%% All members, live and dead: each live member plus its aliases.
+all_known_members({_Ver, View}) ->
+ ?DICT:fold(
+ fun (Member, #view_member { aliases = Aliases }, Acc) ->
+ ?SETS:to_list(Aliases) ++ [Member | Acc]
+ end, [], View).
+
+%% Build the cached view from the canonical group record. The alive
+%% list is tripled so that link_view's sliding [Left, Middle, Right]
+%% window wraps around the ring - every member, including those at the
+%% ends of the list, sees its true left and right neighbours (link_view
+%% stops once it revisits a member it has already stored).
+group_to_view(#gm_group { members = Members, version = Ver }) ->
+ Alive = lists:filter(fun is_member_alive/1, Members),
+ [_|_] = Alive, %% ASSERTION - can't have all dead members
+ add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members).
+
+link_view([Left, Middle, Right | Rest], View) ->
+ case find_view_member(Middle, View) of
+ error ->
+ link_view(
+ [Middle, Right | Rest],
+ store_view_member(#view_member { id = Middle,
+ aliases = ?SETS:new(),
+ left = Left,
+ right = Right }, View));
+ {ok, _} ->
+ View
+ end;
+link_view(_, View) ->
+ View.
+
+add_aliases(View, Members) ->
+ Members1 = ensure_alive_suffix(Members),
+ {EmptyDeadSet, View1} =
+ lists:foldl(
+ fun (Member, {DeadAcc, ViewAcc}) ->
+ case is_member_alive(Member) of
+ true ->
+ {?SETS:new(),
+ with_view_member(
+ fun (VMember =
+ #view_member { aliases = Aliases }) ->
+ VMember #view_member {
+ aliases = ?SETS:union(Aliases, DeadAcc) }
+ end, ViewAcc, Member)};
+ false ->
+ {?SETS:add_element(dead_member_id(Member), DeadAcc),
+ ViewAcc}
+ end
+ end, {?SETS:new(), View}, Members1),
+ 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION
+ View1.
+
+ensure_alive_suffix(Members) ->
+ queue:to_list(ensure_alive_suffix1(queue:from_list(Members))).
+
+ensure_alive_suffix1(MembersQ) ->
+ {{value, Member}, MembersQ1} = queue:out_r(MembersQ),
+ case is_member_alive(Member) of
+ true -> MembersQ;
+ false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1))
+ end.
+
+
+%% ---------------------------------------------------------------------------
+%% View modification
+%% ---------------------------------------------------------------------------
+
+%% Join (or create) the group, returning the resulting view. Retries
+%% through the various race conditions: group missing, all members
+%% dead, or the chosen left neighbour dying mid-handshake.
+join_group(Self, GroupName, TxnFun) ->
+ join_group(Self, GroupName, dirty_read_group(GroupName), TxnFun).
+
+join_group(Self, GroupName, {error, not_found}, TxnFun) ->
+ join_group(Self, GroupName,
+ prune_or_create_group(Self, GroupName, TxnFun), TxnFun);
+join_group(Self, _GroupName, #gm_group { members = [Self] } = Group, _TxnFun) ->
+ %% We are the sole member already - nothing to negotiate.
+ group_to_view(Group);
+join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) ->
+ case lists:member(Self, Members) of
+ true ->
+ group_to_view(Group);
+ false ->
+ case lists:filter(fun is_member_alive/1, Members) of
+ [] ->
+ join_group(Self, GroupName,
+ prune_or_create_group(Self, GroupName, TxnFun),
+ TxnFun);
+ Alive ->
+ %% Pick a random live member to be our left neighbour
+ %% and ask it to splice us in on its right.
+ Left = lists:nth(random:uniform(length(Alive)), Alive),
+ Handler =
+ fun () ->
+ %% The chosen neighbour died: record that
+ %% and retry against the updated group.
+ join_group(
+ Self, GroupName,
+ record_dead_member_in_group(
+ Left, GroupName, TxnFun),
+ TxnFun)
+ end,
+ try
+ case neighbour_call(Left, {add_on_right, Self}) of
+ {ok, Group1} -> group_to_view(Group1);
+ not_ready -> join_group(Self, GroupName, TxnFun)
+ end
+ catch
+ exit:{R, _}
+ when R =:= noproc; R =:= normal; R =:= shutdown ->
+ Handler();
+ exit:{{R, _}, _}
+ when R =:= nodedown; R =:= shutdown ->
+ Handler()
+ end
+ end
+ end.
+
+%% Dirty read outside a transaction; returns the record or
+%% {error, not_found}.
+dirty_read_group(GroupName) ->
+ case mnesia:dirty_read(?GROUP_TABLE, GroupName) of
+ [] -> {error, not_found};
+ [Group] -> Group
+ end.
+
+%% Transactional read - must run inside a TxnFun.
+read_group(GroupName) ->
+ case mnesia:read({?GROUP_TABLE, GroupName}) of
+ [] -> {error, not_found};
+ [Group] -> Group
+ end.
+
+%% Write and return the record, for easy chaining.
+write_group(Group) -> mnesia:write(?GROUP_TABLE, Group, write), Group.
+
+%% Create the group with Self as sole member, or - if it exists but
+%% every recorded member is dead - overwrite it. A group with any live
+%% member is returned untouched.
+prune_or_create_group(Self, GroupName, TxnFun) ->
+ TxnFun(
+ fun () ->
+ GroupNew = #gm_group { name = GroupName,
+ members = [Self],
+ version = get_version(Self) },
+ case read_group(GroupName) of
+ {error, not_found} ->
+ write_group(GroupNew);
+ Group = #gm_group { members = Members } ->
+ case lists:any(fun is_member_alive/1, Members) of
+ true -> Group;
+ false -> write_group(GroupNew)
+ end
+ end
+ end).
+
+%% Mark Member as {dead, Member} in place, bumping the group version.
+%% Idempotent: if the member is already recorded dead (not found in
+%% its live form) the group is returned unchanged.
+record_dead_member_in_group(Member, GroupName, TxnFun) ->
+ TxnFun(
+ fun () ->
+ Group = #gm_group { members = Members, version = Ver } =
+ read_group(GroupName),
+ case lists:splitwith(
+ fun (Member1) -> Member1 =/= Member end, Members) of
+ {_Members1, []} -> %% not found - already recorded dead
+ Group;
+ {Members1, [Member | Members2]} ->
+ Members3 = Members1 ++ [{dead, Member} | Members2],
+ write_group(Group #gm_group { members = Members3,
+ version = Ver + 1 })
+ end
+ end).
+
+%% Insert NewMember immediately to the right of Left, bumping the
+%% version. Left must currently be in the group (asserted by the
+%% match on the splitwith result).
+record_new_member_in_group(NewMember, Left, GroupName, TxnFun) ->
+ TxnFun(
+ fun () ->
+ Group = #gm_group { members = Members, version = Ver } =
+ read_group(GroupName),
+ {Prefix, [Left | Suffix]} =
+ lists:splitwith(fun (M) -> M =/= Left end, Members),
+ write_group(Group #gm_group {
+ members = Prefix ++ [Left, NewMember | Suffix],
+ version = Ver + 1 })
+ end).
+
+%% Remove the given (dead) member ids from the group entirely. No-op
+%% (no version bump) when none of them are present.
+erase_members_in_group(Members, GroupName, TxnFun) ->
+ DeadMembers = [{dead, Id} || Id <- Members],
+ TxnFun(
+ fun () ->
+ Group = #gm_group { members = [_|_] = Members1, version = Ver } =
+ read_group(GroupName),
+ case Members1 -- DeadMembers of
+ Members1 -> Group;
+ Members2 -> write_group(
+ Group #gm_group { members = Members2,
+ version = Ver + 1 })
+ end
+ end).
+
+%% Drop any of our dead aliases whose publications have all been acked
+%% (last_pub == last_ack): erase them from members state and from the
+%% persistent group, then switch to the resulting view.
+maybe_erase_aliases(State = #state { self = Self,
+ group_name = GroupName,
+ members_state = MembersState,
+ txn_executor = TxnFun }, View) ->
+ #view_member { aliases = Aliases } = fetch_view_member(Self, View),
+ {Erasable, MembersState1}
+ = ?SETS:fold(
+ fun (Id, {ErasableAcc, MembersStateAcc} = Acc) ->
+ #member { last_pub = LP, last_ack = LA } =
+ find_member_or_blank(Id, MembersState),
+ case can_erase_view_member(Self, Id, LA, LP) of
+ true -> {[Id | ErasableAcc],
+ erase_member(Id, MembersStateAcc)};
+ false -> Acc
+ end
+ end, {[], MembersState}, Aliases),
+ View1 = case Erasable of
+ [] -> View;
+ _ -> group_to_view(
+ erase_members_in_group(Erasable, GroupName, TxnFun))
+ end,
+ change_view(View1, State #state { members_state = MembersState1 }).
+
+%% Never erase ourselves; otherwise erasable only when all pubs acked.
+can_erase_view_member(Self, Self, _LA, _LP) -> false;
+can_erase_view_member(_Self, _Id, N, N) -> true;
+can_erase_view_member(_Self, _Id, _LA, _LP) -> false.
+
+%% Neighbours are {Version, Pid} ids; talk to the pid.
+neighbour_cast(N, Msg) -> gen_server2:cast(get_pid(N), Msg).
+neighbour_call(N, Msg) -> gen_server2:call(get_pid(N), Msg, infinity).
+
+%% ---------------------------------------------------------------------------
+%% View monitoring and maintenance
+%% ---------------------------------------------------------------------------
+
+%% Reconcile a stored neighbour {Pid, MRef} with the view's choice.
+%% Cases: still ourselves; gaining a real neighbour; unchanged; or
+%% changed - in which case drop the old monitor, nudge both the old
+%% and new neighbours to re-check their own neighbours, and monitor
+%% the new one.
+ensure_neighbour(_Ver, Self, {Self, undefined}, Self) ->
+ {Self, undefined};
+ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) ->
+ ok = neighbour_cast(RealNeighbour, {?TAG, Ver, check_neighbours}),
+ {RealNeighbour, maybe_monitor(RealNeighbour, Self)};
+ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) ->
+ {RealNeighbour, MRef};
+ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
+ true = erlang:demonitor(MRef),
+ Msg = {?TAG, Ver, check_neighbours},
+ ok = neighbour_cast(RealNeighbour, Msg),
+ ok = case Neighbour of
+ Self -> ok;
+ _ -> neighbour_cast(Neighbour, Msg)
+ end,
+ {Neighbour, maybe_monitor(Neighbour, Self)}.
+
+%% No monitor needed on ourselves.
+maybe_monitor( Self, Self) -> undefined;
+maybe_monitor(Other, _Self) -> erlang:monitor(process, get_pid(Other)).
+
+%% Re-derive left/right neighbours from the current view, drop the
+%% broadcast buffer if we have become a singleton, and send a catchup
+%% to the right neighbour if it changed.
+check_neighbours(State = #state { self = Self,
+ left = Left,
+ right = Right,
+ view = View,
+ broadcast_buffer = Buffer }) ->
+ #view_member { left = VLeft, right = VRight }
+ = fetch_view_member(Self, View),
+ Ver = view_version(View),
+ Left1 = ensure_neighbour(Ver, Self, Left, VLeft),
+ Right1 = ensure_neighbour(Ver, Self, Right, VRight),
+ Buffer1 = case Right1 of
+ {Self, undefined} -> [];
+ _ -> Buffer
+ end,
+ State1 = State #state { left = Left1, right = Right1,
+ broadcast_buffer = Buffer1 },
+ %% Note: passes the OLD right neighbour so the first clause below
+ %% can detect "unchanged" and skip the catchup.
+ ok = maybe_send_catchup(Right, State1),
+ State1.
+
+%% Only send a catchup when the right neighbour actually changed, we
+%% are not a singleton, and we are ourselves caught up.
+maybe_send_catchup(Right, #state { right = Right }) ->
+ ok;
+maybe_send_catchup(_Right, #state { self = Self,
+ right = {Self, undefined} }) ->
+ ok;
+maybe_send_catchup(_Right, #state { members_state = undefined }) ->
+ ok;
+maybe_send_catchup(_Right, #state { self = Self,
+ right = {Right, _MRef},
+ view = View,
+ members_state = MembersState }) ->
+ send_right(Right, View,
+ {catchup, Self, prepare_members_state(MembersState)}).
+
+
+%% ---------------------------------------------------------------------------
+%% Catch_up delta detection
+%% ---------------------------------------------------------------------------
+
+%% Split two pub queues into {Prefix, Common, Suffix} where Prefix is
+%% what A has before B starts, Common is the shared run, and Suffix is
+%% what B has beyond A. Elements are {PubNum, Msg} ordered by PubNum.
+find_prefix_common_suffix(A, B) ->
+ {Prefix, A1} = find_prefix(A, B, queue:new()),
+ {Common, Suffix} = find_common(A1, B, queue:new()),
+ {Prefix, Common, Suffix}.
+
+%% Returns the elements of A that occur before the first element of B,
+%% plus the remainder of A.
+find_prefix(A, B, Prefix) ->
+ case {queue:out(A), queue:out(B)} of
+ {{{value, Val}, _A1}, {{value, Val}, _B1}} ->
+ {Prefix, A};
+ {{empty, A1}, {{value, _A}, _B1}} ->
+ {Prefix, A1};
+ {{{value, {NumA, _MsgA} = Val}, A1},
+ {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB ->
+ find_prefix(A1, B, queue:in(Val, Prefix));
+ {_, {empty, _B1}} ->
+ %% B exhausted: all of A is "before B", remainder is empty.
+ {A, Prefix} %% Prefix will be empty here
+ end.
+
+%% A should be a prefix of B. Returns the commonality plus the
+%% remainder of B.
+find_common(A, B, Common) ->
+ case {queue:out(A), queue:out(B)} of
+ {{{value, Val}, A1}, {{value, Val}, B1}} ->
+ find_common(A1, B1, queue:in(Val, Common));
+ {{empty, _A}, _} ->
+ {Common, B}
+ end.
+
+
+%% ---------------------------------------------------------------------------
+%% Members helpers
+%% ---------------------------------------------------------------------------
+
+%% Apply Fun to member Id (a blank member if absent) and store back.
+with_member(Fun, Id, MembersState) ->
+ store_member(
+ Id, Fun(find_member_or_blank(Id, MembersState)), MembersState).
+
+%% As with_member/3 but Fun also threads an accumulator.
+with_member_acc(Fun, Id, {MembersState, Acc}) ->
+ {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc),
+ {store_member(Id, MemberState, MembersState), Acc1}.
+
+find_member_or_blank(Id, MembersState) ->
+ case ?DICT:find(Id, MembersState) of
+ {ok, Result} -> Result;
+ error -> blank_member()
+ end.
+
+erase_member(Id, MembersState) -> ?DICT:erase(Id, MembersState).
+
+%% -1 sentinels: any real pub/ack number (>= 0) will exceed them.
+blank_member() ->
+ #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }.
+
+blank_member_state() -> ?DICT:new().
+
+store_member(Id, MemberState, MembersState) ->
+ ?DICT:store(Id, MemberState, MembersState).
+
+%% Members state crosses process boundaries as a plain list.
+prepare_members_state(MembersState) -> ?DICT:to_list(MembersState).
+
+build_members_state(MembersStateList) -> ?DICT:from_list(MembersStateList).
+
+%% A member id is {GroupVersion, Pid}; version distinguishes
+%% reincarnations of the same pid's role in the group.
+make_member(GroupName) ->
+ {case dirty_read_group(GroupName) of
+ #gm_group { version = Version } -> Version;
+ {error, not_found} -> ?VERSION_START
+ end, self()}.
+
+%% Keep only entries for members the view still knows about.
+remove_erased_members(MembersState, View) ->
+ lists:foldl(fun (Id, MembersState1) ->
+ store_member(Id, find_member_or_blank(Id, MembersState),
+ MembersState1)
+ end, blank_member_state(), all_known_members(View)).
+
+get_version({Version, _Pid}) -> Version.
+
+get_pid({_Version, Pid}) -> Pid.
+
+get_pids(Ids) -> [Pid || {_Version, Pid} <- Ids].
+
+%% ---------------------------------------------------------------------------
+%% Activity assembly
+%% ---------------------------------------------------------------------------
+
+%% An activity is a queue of {Sender, Pubs, Acks} triples, finalised
+%% to a list before being sent around the ring.
+activity_nil() -> queue:new().
+
+%% Empty pubs AND acks add nothing; otherwise append the triple.
+activity_cons( _Id, [], [], Tail) -> Tail;
+activity_cons(Sender, Pubs, Acks, Tail) -> queue:in({Sender, Pubs, Acks}, Tail).
+
+activity_finalise(Activity) -> queue:to_list(Activity).
+
+%% Send non-empty activity to our right-hand neighbour.
+maybe_send_activity([], _State) ->
+ ok;
+maybe_send_activity(Activity, #state { self = Self,
+ right = {Right, _MRefR},
+ view = View }) ->
+ send_right(Right, View, {activity, Self, Activity}).
+
+%% All ring traffic is tagged with the sender's view version.
+send_right(Right, View, Msg) ->
+ ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}).
+
+%% Deliver every publication in Activity to the callback module's
+%% handle_msg/3, threading {Args, Module} so a {become, M, A} reply
+%% swaps the callback mid-stream. Returns ok, {become, M, A} (if the
+%% callback changed) or {stop, Reason} (which short-circuits).
+callback(Args, Module, Activity) ->
+ Result =
+ lists:foldl(
+ fun ({Id, Pubs, _Acks}, {Args1, Module1, ok}) ->
+ lists:foldl(fun ({_PubNum, Pub}, Acc = {Args2, Module2, ok}) ->
+ case Module2:handle_msg(
+ Args2, get_pid(Id), Pub) of
+ ok ->
+ Acc;
+ {become, Module3, Args3} ->
+ {Args3, Module3, ok};
+ {stop, _Reason} = Error ->
+ Error
+ end;
+ (_, Error = {stop, _Reason}) ->
+ Error
+ end, {Args1, Module1, ok}, Pubs);
+ (_, Error = {stop, _Reason}) ->
+ Error
+ end, {Args, Module, ok}, Activity),
+ case Result of
+ %% Matching the original Args/Module means nothing changed.
+ {Args, Module, ok} -> ok;
+ {Args1, Module1, ok} -> {become, Module1, Args1};
+ {stop, _Reason} = Error -> Error
+ end.
+
+%% Install a new view, notifying the callback of births/deaths (by
+%% diffing known members) and re-establishing neighbours.
+change_view(View, State = #state { view = View0,
+ module = Module,
+ callback_args = Args }) ->
+ OldMembers = all_known_members(View0),
+ NewMembers = all_known_members(View),
+ Births = NewMembers -- OldMembers,
+ Deaths = OldMembers -- NewMembers,
+ Result = case {Births, Deaths} of
+ {[], []} -> ok;
+ _ -> Module:members_changed(
+ Args, get_pids(Births), get_pids(Deaths))
+ end,
+ {Result, check_neighbours(State #state { view = View })}.
+
+%% Convert a {CallbackResult, [Reply,] State} pair into the
+%% appropriate gen_server2 return, stopping if the callback said stop.
+handle_callback_result({Result, State}) ->
+ if_callback_success(
+ Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State);
+handle_callback_result({Result, Reply, State}) ->
+ if_callback_success(
+ Result, fun reply_true/3, fun reply_false/3, Reply, State).
+
+%% Success/failure continuations for the noreply and reply shapes.
+no_reply_true (_Result, _Undefined, State) -> noreply(State).
+no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}.
+
+reply_true (_Result, Reply, State) -> reply(Reply, State).
+reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}.
+
+%% Continuations used when a callback result gates further msg handling.
+handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State).
+handle_msg_false(Result, _Msg, State) -> {Result, State}.
+
+%% Continuations used when a callback result gates activity delivery.
+activity_true(_Result, Activity, State = #state { module = Module,
+ callback_args = Args }) ->
+ {callback(Args, Module, Activity), State}.
+activity_false(Result, _Activity, State) ->
+ {Result, State}.
+
+%% Dispatch on a callback result: ok and {become,_,_} take the True
+%% branch ({become,_,_} also swapping the stored callback); {stop,_}
+%% takes the False branch.
+if_callback_success(ok, True, _False, Arg, State) ->
+ True(ok, Arg, State);
+if_callback_success(
+ {become, Module, Args} = Result, True, _False, Arg, State) ->
+ True(Result, Arg, State #state { module = Module,
+ callback_args = Args });
+if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) ->
+ False(Result, Arg, State).
+
+%% Reply 'ok' to callers of confirmed_broadcast whose pub numbers have
+%% now been acked. Confirms is a queue of {PubNum, From} in ascending
+%% PubNum order; PubNums is the (ascending) list of acked numbers.
+%% Only acks for our own pubs (Id =:= Self) confirm anything.
+maybe_confirm(_Self, _Id, Confirms, []) ->
+ Confirms;
+maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) ->
+ case queue:out(Confirms) of
+ {empty, _Confirms} ->
+ Confirms;
+ {{value, {PubNum, From}}, Confirms1} ->
+ gen_server2:reply(From, ok),
+ maybe_confirm(Self, Self, Confirms1, PubNums)
+ %% Ack for a pub we have no waiter for: skip that ack.
+ {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum ->
+ maybe_confirm(Self, Self, Confirms, PubNums)
+ end;
+maybe_confirm(_Self, _Id, Confirms, _PubNums) ->
+ Confirms.
+
+%% On shutdown, release every waiting caller rather than leaving them
+%% blocked; returns a fresh empty queue.
+purge_confirms(Confirms) ->
+ [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
+ queue:new().
+
+
+%% ---------------------------------------------------------------------------
+%% Msg transformation
+%% ---------------------------------------------------------------------------
+
+%% Pubs travel as {PubNum, Msg}; acks as bare PubNums.
+acks_from_queue(Q) -> [PubNum || {PubNum, _Msg} <- queue:to_list(Q)].
+
+pubs_from_queue(Q) -> queue:to_list(Q).
+
+queue_from_pubs(Pubs) -> queue:from_list(Pubs).
+
+%% Drop the first length(Acks) pending pubs - acks always cover a
+%% prefix of the pending queue.
+apply_acks( [], Pubs) -> Pubs;
+apply_acks(List, Pubs) -> {_, Pubs1} = queue:split(length(List), Pubs),
+ Pubs1.
+
+join_pubs(Q, []) -> Q;
+join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)).
+
+%% New last-ack/last-pub trackers; both assert monotonic progress.
+last_ack( [], LA) -> LA;
+last_ack(List, LA) -> LA1 = lists:last(List),
+ true = LA1 > LA, %% ASSERTION
+ LA1.
+
+last_pub( [], LP) -> LP;
+last_pub(List, LP) -> {PubNum, _Msg} = lists:last(List),
+ true = PubNum > LP, %% ASSERTION
+ PubNum.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(gm_soak_test).
+
+-export([test/0]).
+-export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
+
+-behaviour(gm).
+
+-include("gm_specs.hrl").
+
+%% ---------------------------------------------------------------------------
+%% Soak test
+%% ---------------------------------------------------------------------------
+
+%% Per-process state lives in the process dictionary (acceptable in a
+%% test; callbacks here run in the gm process itself).
+get_state() ->
+ get(state).
+
+with_state(Fun) ->
+ put(state, Fun(get_state())).
+
+%% Count received messages; every 100000 print the observed rate and
+%% reset. NOTE(review): now()/random are deprecated in modern OTP;
+%% kept here to match the rest of this (legacy) codebase.
+inc() ->
+ case 1 + get(count) of
+ 100000 -> Now = now(),
+ %% put/2 returns the previous value - the old timestamp.
+ Start = put(ts, Now),
+ Diff = timer:now_diff(Now, Start),
+ Rate = 100000 / (Diff / 1000000),
+ io:format("~p seeing ~p msgs/sec~n", [self(), Rate]),
+ put(count, 0);
+ N -> put(count, N)
+ end.
+
+%% gm callback: initialise per-member expected-sequence state.
+joined([], Members) ->
+ io:format("Joined ~p (~p members)~n", [self(), length(Members)]),
+ put(state, dict:from_list([{Member, empty} || Member <- Members])),
+ put(count, 0),
+ put(ts, now()),
+ ok.
+
+%% gm callback: track births (must be unknown) and deaths (must be
+%% known); dead members are kept, marked 'died', to catch posthumous
+%% deliveries.
+members_changed([], Births, Deaths) ->
+ with_state(
+ fun (State) ->
+ State1 =
+ lists:foldl(
+ fun (Born, StateN) ->
+ false = dict:is_key(Born, StateN),
+ dict:store(Born, empty, StateN)
+ end, State, Births),
+ lists:foldl(
+ fun (Died, StateN) ->
+ true = dict:is_key(Died, StateN),
+ dict:store(Died, died, StateN)
+ end, State1, Deaths)
+ end),
+ ok.
+
+%% gm callback: verify strict per-sender ordering. 'empty' accepts any
+%% first number; thereafter each Num must equal the expected counter.
+handle_msg([], From, {test_msg, Num}) ->
+ inc(),
+ with_state(
+ fun (State) ->
+ ok = case dict:find(From, State) of
+ {ok, died} ->
+ exit({{from, From},
+ {received_posthumous_delivery, Num}});
+ {ok, empty} -> ok;
+ {ok, Num} -> ok;
+ {ok, Num1} when Num < Num1 ->
+ exit({{from, From},
+ {duplicate_delivery_of, Num},
+ {expecting, Num1}});
+ {ok, Num1} ->
+ exit({{from, From},
+ {received_early, Num},
+ {expecting, Num1}});
+ error ->
+ exit({{from, From},
+ {received_premature_delivery, Num}})
+ end,
+ dict:store(From, Num + 1, State)
+ end),
+ ok.
+
+terminate([], Reason) ->
+ io:format("Left ~p (~p)~n", [self(), Reason]),
+ ok.
+
+%% Spawn a member that joins the group after a random delay, sends a
+%% random number of messages, leaves, then spawns 0-3 replacements -
+%% soaking join/leave churn indefinitely.
+spawn_member() ->
+ spawn_link(
+ fun () ->
+ {MegaSecs, Secs, MicroSecs} = now(),
+ random:seed(MegaSecs, Secs, MicroSecs),
+ %% start up delay of no more than 10 seconds
+ timer:sleep(random:uniform(10000)),
+ {ok, Pid} = gm:start_link(
+ ?MODULE, ?MODULE, [],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ Start = random:uniform(10000),
+ send_loop(Pid, Start, Start + random:uniform(10000)),
+ gm:leave(Pid),
+ spawn_more()
+ end).
+
+%% Replace ourselves with between 0 and 3 new members.
+spawn_more() ->
+ [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))].
+
+%% Send numbered test messages, ~1/3 of them via confirmed_broadcast.
+send_loop(_Pid, Target, Target) ->
+ ok;
+send_loop(Pid, Count, Target) when Target > Count ->
+ case random:uniform(3) of
+ 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count});
+ _ -> gm:broadcast(Pid, {test_msg, Count})
+ end,
+ timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms
+ send_loop(Pid, Count + 1, Target).
+
+%% Entry point: seed the soak with two members.
+test() ->
+ ok = gm:create_tables(),
+ spawn_member(),
+ spawn_member().
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(gm_speed_test).
+
+-export([test/3]).
+-export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
+-export([wile_e_coyote/2]).
+
+-behaviour(gm).
+
+-include("gm_specs.hrl").
+
+%% callbacks
+
+%% gm callbacks: forward lifecycle/message events to the owning test
+%% process as bare atoms.
+joined(Owner, _Members) ->
+ Owner ! joined,
+ ok.
+
+members_changed(_Owner, _Births, _Deaths) ->
+ ok.
+
+handle_msg(Owner, _From, ping) ->
+ Owner ! ping,
+ ok.
+
+terminate(Owner, _Reason) ->
+ Owner ! terminated,
+ ok.
+
+%% other
+
+%% Throughput probe: join the group, blast pings for Time ms in bursts
+%% of WriteUnit, then report send/receive rates.
+wile_e_coyote(Time, WriteUnit) ->
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ receive joined -> ok end,
+ timer:sleep(1000), %% wait for all to join
+ timer:send_after(Time, stop),
+ Start = now(),
+ {Sent, Received} = loop(Pid, WriteUnit, 0, 0),
+ End = now(),
+ ok = gm:leave(Pid),
+ receive terminated -> ok end,
+ Elapsed = timer:now_diff(End, Start) / 1000000,
+ io:format("Sending rate: ~p msgs/sec~nReceiving rate: ~p msgs/sec~n~n",
+ [Sent/Elapsed, Received/Elapsed]),
+ ok.
+
+%% Alternate draining incoming pings with sending a burst, until the
+%% 'stop' timer message arrives.
+loop(Pid, WriteUnit, Sent, Received) ->
+ case read(Received) of
+ {stop, Received1} -> {Sent, Received1};
+ {ok, Received1} -> ok = write(Pid, WriteUnit),
+ loop(Pid, WriteUnit, Sent + WriteUnit, Received1)
+ end.
+
+%% Count pings until the mailbox is briefly quiet (5 ms) or stop seen.
+read(Count) ->
+ receive
+ ping -> read(Count + 1);
+ stop -> {stop, Count}
+ after 5 ->
+ {ok, Count}
+ end.
+
+%% Broadcast N pings.
+write(_Pid, 0) -> ok;
+write(Pid, N) -> ok = gm:broadcast(Pid, ping),
+ write(Pid, N - 1).
+
+%% Entry point: run one coyote per node.
+test(Time, WriteUnit, Nodes) ->
+ ok = gm:create_tables(),
+ [spawn(Node, ?MODULE, wile_e_coyote, [Time, WriteUnit]) || Node <- Nodes].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(gm_tests).
+
+-export([test_join_leave/0,
+ test_broadcast/0,
+ test_confirmed_broadcast/0,
+ test_member_death/0,
+ test_receive_in_order/0,
+ all_tests/0]).
+-export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
+
+-behaviour(gm).
+
+-include("gm_specs.hrl").
+
+-define(RECEIVE_OR_THROW(Body, Bool, Error),
+ receive Body ->
+ true = Bool,
+ passed
+ after 1000 ->
+ throw(Error)
+ end).
+
+%% gm callbacks: relay every event to the test process as a tagged
+%% tuple including the gm member pid (self()) for correlation.
+joined(Pid, Members) ->
+ Pid ! {joined, self(), Members},
+ ok.
+
+members_changed(Pid, Births, Deaths) ->
+ Pid ! {members_changed, self(), Births, Deaths},
+ ok.
+
+handle_msg(Pid, From, Msg) ->
+ Pid ! {msg, self(), From, Msg},
+ ok.
+
+terminate(Pid, Reason) ->
+ Pid ! {termination, self(), Reason},
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Run every functional test; each must return 'passed'.
+all_tests() ->
+ passed = test_join_leave(),
+ passed = test_broadcast(),
+ passed = test_confirmed_broadcast(),
+ passed = test_member_death(),
+ passed = test_receive_in_order(),
+ passed.
+
+%% Join/leave lifecycle alone - the fixture does all the checking.
+test_join_leave() ->
+ with_two_members(fun (_Pid, _Pid2) -> passed end).
+
+test_broadcast() ->
+ test_broadcast(fun gm:broadcast/2).
+
+test_confirmed_broadcast() ->
+ test_broadcast(fun gm:confirmed_broadcast/2).
+
+%% Kill a third member and check both survivors observe its birth and
+%% (after forcing traffic) its death.
+test_member_death() ->
+ with_two_members(
+ fun (Pid, Pid2) ->
+ {ok, Pid3} = gm:start_link(
+ ?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
+ timeout_joining_gm_group_3),
+ passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
+ passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
+
+ unlink(Pid3),
+ exit(Pid3, kill),
+
+ %% Have to do some broadcasts to ensure that all members
+ %% find out about the death.
+ passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))(
+ Pid, Pid2),
+
+ passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
+ passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
+
+ passed
+ end).
+
+%% Interleave broadcasts from both members and verify each receiver
+%% sees each sender's messages strictly in order.
+test_receive_in_order() ->
+ with_two_members(
+ fun (Pid, Pid2) ->
+ Numbers = lists:seq(1,1000),
+ [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
+ || N <- Numbers],
+ passed = receive_numbers(
+ Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
+ passed
+ end).
+
+test_broadcast(Fun) ->
+ with_two_members(test_broadcast_fun(Fun)).
+
+%% Broadcast from the first member and assert both members receive it.
+test_broadcast_fun(Fun) ->
+ fun (Pid, Pid2) ->
+ ok = Fun(Pid, magic_message),
+ passed = receive_or_throw({msg, Pid, Pid, magic_message},
+ timeout_waiting_for_msg),
+ passed = receive_or_throw({msg, Pid2, Pid, magic_message},
+ timeout_waiting_for_msg)
+ end.
+
+%% Fixture: start two members, verify joins/births, run Fun, then tear
+%% down verifying deaths/terminations and an empty mailbox.
+with_two_members(Fun) ->
+ ok = gm:create_tables(),
+
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+
+ {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+ passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+ passed = Fun(Pid, Pid2),
+
+ ok = gm:leave(Pid),
+ passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
+ passed =
+ receive_termination(Pid, normal, timeout_waiting_for_termination_1),
+
+ ok = gm:leave(Pid2),
+ passed =
+ receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
+
+ %% Any leftover message means an unexpected event occurred.
+ receive X -> throw({unexpected_message, X})
+ after 0 -> passed
+ end.
+
+%% Thin wrappers around ?RECEIVE_OR_THROW: each waits up to 1s for a
+%% specific event shape and asserts its payload.
+receive_or_throw(Pattern, Error) ->
+ ?RECEIVE_OR_THROW(Pattern, true, Error).
+
+receive_birth(From, Born, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([Born] == Birth) andalso ([] == Death),
+ Error).
+
+receive_death(From, Died, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([] == Birth) andalso ([Died] == Death),
+ Error).
+
+%% Membership lists may arrive in any order - compare as sorted sets.
+receive_joined(From, Members, Error) ->
+ ?RECEIVE_OR_THROW({joined, From, Members1},
+ lists:usort(Members) == lists:usort(Members1),
+ Error).
+
+receive_termination(From, Reason, Error) ->
+ ?RECEIVE_OR_THROW({termination, From, Reason1},
+ Reason == Reason1,
+ Error).
+
+%% Drain the expected numbers for one (receiver, sender) pair in order.
+receive_numbers(_Pid, _Sender, _Error, []) ->
+ passed;
+receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
+ ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
+ M == N,
+ Error),
+ receive_numbers(Pid, Sender, Error, Numbers).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(lqueue).
+
+-export([new/0, is_empty/1, len/1, in/2, in_r/2, out/1, out_r/1, join/2,
+ foldl/3, foldr/3, from_list/1, to_list/1, peek/1, peek_r/1]).
+
+-define(QUEUE, queue).
+
+-ifdef(use_specs).
+
+-export_type([?MODULE/0]).
+
+-opaque(?MODULE() :: {non_neg_integer(), ?QUEUE()}).
+-type(value() :: any()).
+-type(result() :: 'empty' | {'value', value()}).
+
+-spec(new/0 :: () -> ?MODULE()).
+-spec(is_empty/1 :: (?MODULE()) -> boolean()).
+-spec(len/1 :: (?MODULE()) -> non_neg_integer()).
+-spec(in/2 :: (value(), ?MODULE()) -> ?MODULE()).
+-spec(in_r/2 :: (value(), ?MODULE()) -> ?MODULE()).
+-spec(out/1 :: (?MODULE()) -> {result(), ?MODULE()}).
+-spec(out_r/1 :: (?MODULE()) -> {result(), ?MODULE()}).
+-spec(join/2 :: (?MODULE(), ?MODULE()) -> ?MODULE()).
+-spec(foldl/3 :: (fun ((value(), B) -> B), B, ?MODULE()) -> B).
+-spec(foldr/3 :: (fun ((value(), B) -> B), B, ?MODULE()) -> B).
+-spec(from_list/1 :: ([value()]) -> ?MODULE()).
+-spec(to_list/1 :: (?MODULE()) -> [value()]).
+-spec(peek/1 :: (?MODULE()) -> result()).
+-spec(peek_r/1 :: (?MODULE()) -> result()).
+
+-endif.
+
+%% An lqueue is {Len, Queue}: a stdlib queue paired with its length so
+%% len/1 and is_empty/1 are O(1) instead of O(n).
+new() -> {0, ?QUEUE:new()}.
+
+is_empty({0, _Q}) -> true;
+is_empty(_) -> false.
+
+in(V, {L, Q}) -> {L+1, ?QUEUE:in(V, Q)}.
+
+in_r(V, {L, Q}) -> {L+1, ?QUEUE:in_r(V, Q)}.
+
+%% The length-0 clauses avoid touching the inner queue at all.
+out({0, _Q} = Q) -> {empty, Q};
+out({L, Q}) -> {Result, Q1} = ?QUEUE:out(Q),
+ {Result, {L-1, Q1}}.
+
+out_r({0, _Q} = Q) -> {empty, Q};
+out_r({L, Q}) -> {Result, Q1} = ?QUEUE:out_r(Q),
+ {Result, {L-1, Q1}}.
+
+join({L1, Q1}, {L2, Q2}) -> {L1 + L2, ?QUEUE:join(Q1, Q2)}.
+
+to_list({_L, Q}) -> ?QUEUE:to_list(Q).
+
+from_list(L) -> {length(L), ?QUEUE:from_list(L)}.
+
+%% Folds are defined via out/out_r so they respect queue order
+%% (foldl front-to-back, foldr back-to-front).
+foldl(Fun, Init, Q) ->
+ case out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> foldl(Fun, Fun(V, Init), Q1)
+ end.
+
+foldr(Fun, Init, Q) ->
+ case out_r(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> foldr(Fun, Fun(V, Init), Q1)
+ end.
+
+len({L, _Q}) -> L.
+
+%% peek/peek_r return 'empty' or {value, V} without consuming.
+peek({ 0, _Q}) -> empty;
+peek({_L, Q}) -> ?QUEUE:peek(Q).
+
+peek_r({ 0, _Q}) -> empty;
+peek_r({_L, Q}) -> ?QUEUE:peek_r(Q).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(mirrored_supervisor).
+
+%% Mirrored Supervisor
+%% ===================
+%%
+%% This module implements a new type of supervisor. It acts like a
+%% normal supervisor, but at creation time you also provide the name
+%% of a process group to join. All the supervisors within the
+%% process group act like a single large distributed supervisor:
+%%
+%% * A process with a given child_id will only exist on one
+%% supervisor within the group.
+%%
+%% * If one supervisor fails, children may migrate to surviving
+%% supervisors within the group.
+%%
+%% In almost all cases you will want to use the module name for the
+%% process group. Using multiple process groups with the same module
+%% name is supported. Having multiple module names for the same
+%% process group will lead to undefined behaviour.
+%%
+%% Motivation
+%% ----------
+%%
+%% Sometimes you have processes which:
+%%
+%% * Only need to exist once per cluster.
+%%
+%% * Do not contain much state (or can reconstruct their state easily).
+%%
+%% * Need to be restarted elsewhere should they be running on a node
+%% which fails.
+%%
+%% By creating a mirrored supervisor group with one supervisor on
+%% each node, that's what you get.
+%%
+%%
+%% API use
+%% -------
+%%
+%% This is basically the same as for supervisor, except that:
+%%
+%% 1) start_link(Module, Args) becomes
+%% start_link(Group, TxFun, Module, Args).
+%%
+%% 2) start_link({local, Name}, Module, Args) becomes
+%% start_link({local, Name}, Group, TxFun, Module, Args).
+%%
+%% 3) start_link({global, Name}, Module, Args) is not available.
+%%
+%% 4) The restart strategy simple_one_for_one is not available.
+%%
+%% 5) Mnesia is used to hold global state. At some point your
+%% application should invoke create_tables() (or table_definitions()
+%% if it wants to manage table creation itself).
+%%
+%% The TxFun parameter to start_link/{4,5} is a function which the
+%% mirrored supervisor can use to execute Mnesia transactions. In the
+%% RabbitMQ server this goes via a worker pool; in other cases a
+%% function like:
+%%
+%% tx_fun(Fun) ->
+%% case mnesia:sync_transaction(Fun) of
+%% {atomic, Result} -> Result;
+%% {aborted, Reason} -> throw({error, Reason})
+%% end.
+%%
+%% could be used.
+%%
+%% Internals
+%% ---------
+%%
+%% Each mirrored_supervisor consists of three processes - the overall
+%% supervisor, the delegate supervisor and the mirroring server. The
+%% overall supervisor supervises the other two processes. Its pid is
+%% the one returned from start_link; the pids of the other two
+%% processes are effectively hidden in the API.
+%%
+%% The delegate supervisor is in charge of supervising all the child
+%% processes that are added to the supervisor as usual.
+%%
+%% The mirroring server intercepts calls to the supervisor API
+%% (directed at the overall supervisor), does any special handling,
+%% and forwards everything to the delegate supervisor.
+%%
+%% This module implements all three, hence init/1 is somewhat overloaded.
+%%
+%% The mirroring server creates and joins a process group on
+%% startup. It monitors all the existing members of this group, and
+%% broadcasts a "hello" message to them so that they can monitor it in
+%% turn. When it receives a 'DOWN' message, it checks to see if it's
+%% the "first" server in the group and restarts all the child
+%% processes from the dead supervisor if so.
+%%
+%% In the future we might load balance this.
+%%
+%% Startup is slightly fiddly. The mirroring server needs to know the
+%% Pid of the overall supervisor, but we don't have that until it has
+%% started. Therefore we set this after the fact. We also start any
+%% children we found in Module:init() at this point, since starting
+%% children requires knowing the overall supervisor pid.
+
+-define(SUPERVISOR, supervisor2).
+-define(GEN_SERVER, gen_server2).
+-define(PG2, pg2_fixed).
+
+-define(TABLE, mirrored_sup_childspec).
+-define(TABLE_DEF,
+ {?TABLE,
+ [{record_name, mirrored_sup_childspec},
+ {type, ordered_set},
+ {attributes, record_info(fields, mirrored_sup_childspec)}]}).
+-define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}).
+
+-export([start_link/4, start_link/5,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1, check_childspecs/1]).
+
+-behaviour(?GEN_SERVER).
+-behaviour(?SUPERVISOR).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-export([start_internal/3]).
+-export([create_tables/0, table_definitions/0]).
+
+-record(mirrored_sup_childspec, {key, mirroring_pid, childspec}).
+
+-record(state, {overall,
+ delegate,
+ group,
+ tx_fun,
+ initial_childspecs}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+%%--------------------------------------------------------------------------
+%% Callback behaviour
+%%--------------------------------------------------------------------------
+
+-callback init(Args :: term()) ->
+ {ok, {{RestartStrategy :: supervisor2:strategy(),
+ MaxR :: non_neg_integer(),
+ MaxT :: non_neg_integer()},
+ [ChildSpec :: supervisor2:child_spec()]}}
+ | ignore.
+
+%%--------------------------------------------------------------------------
+%% Specs
+%%--------------------------------------------------------------------------
+
+-type startlink_err() :: {'already_started', pid()} | 'shutdown' | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+-type group_name() :: any().
+
+-type(tx_fun() :: fun((fun(() -> A)) -> A)).
+
+-spec start_link(GroupName, TxFun, Module, Args) -> startlink_ret() when
+ GroupName :: group_name(),
+ TxFun :: tx_fun(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_link(SupName, GroupName, TxFun, Module, Args) ->
+ startlink_ret() when
+ SupName :: supervisor2:sup_name(),
+ GroupName :: group_name(),
+ TxFun :: tx_fun(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_internal(Group, TxFun, ChildSpecs) -> Result when
+ Group :: group_name(),
+ TxFun :: tx_fun(),
+ ChildSpecs :: [supervisor2:child_spec()],
+ Result :: {'ok', pid()} | {'error', term()}.
+
+-spec create_tables() -> Result when
+ Result :: 'ok'.
+
+-else.
+
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) -> [{init,1}];
+behaviour_info(_Other) -> undefined.
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start an anonymous mirrored supervisor. Group names the process
+%% group shared by all peers; TxFun runs Mnesia transactions on our
+%% behalf; Mod:init(Args) supplies the restart strategy and initial
+%% child specs, exactly as for supervisor:start_link/2.
+start_link(Group, TxFun, Mod, Args) ->
+ start_link0([], Group, TxFun, init(Mod, Args)).
+
+%% As start_link/4, but additionally registers the overall
+%% supervisor locally under SupName.
+start_link({local, SupName}, Group, TxFun, Mod, Args) ->
+ start_link0([{local, SupName}], Group, TxFun, init(Mod, Args));
+
+%% {global, Name} registration is unsupported (see module header,
+%% API point 3), so fail fast.
+start_link({global, _SupName}, _Group, _TxFun, _Mod, _Args) ->
+ erlang:error(badarg).
+
+%% Start the overall supervisor (Prefix carries the optional
+%% {local, Name} registration) and then perform the second startup
+%% phase: tell the mirroring server the overall pid via the
+%% {init, Pid} call — the "set after the fact" step described in the
+%% module header. The catch converts a crash of the mirroring server
+%% during that call into an error return rather than killing us.
+start_link0(Prefix, Group, TxFun, Init) ->
+ case apply(?SUPERVISOR, start_link,
+ Prefix ++ [?MODULE, {overall, Group, TxFun, Init}]) of
+ {ok, Pid} -> case catch call(Pid, {init, Pid}) of
+ ok -> {ok, Pid};
+ E -> E
+ end;
+ Other -> Other
+ end.
+
+%% Run the user module's init/1 callback, rejecting the
+%% simple_one_for_one restart strategy up front: mirrored
+%% supervisors do not support it (see module header, API point 4).
+init(Mod, Args) ->
+    case Mod:init(Args) of
+        {ok, {{simple_one_for_one, _, _}, _ChildSpecs}} ->
+            erlang:error(badarg);
+        Init ->
+            Init
+    end.
+
+%% Public supervisor-like API. start_child is always handled by the
+%% local mirroring server; delete/restart/terminate are routed (via
+%% find_call/3) to whichever peer currently owns the child; the
+%% which_children/count_children queries aggregate over every member
+%% of the group.
+start_child(Sup, ChildSpec) -> call(Sup, {start_child, ChildSpec}).
+delete_child(Sup, Id) -> find_call(Sup, Id, {delete_child, Id}).
+restart_child(Sup, Id) -> find_call(Sup, Id, {msg, restart_child, [Id]}).
+terminate_child(Sup, Id) -> find_call(Sup, Id, {msg, terminate_child, [Id]}).
+which_children(Sup) -> fold(which_children, Sup, fun lists:append/2).
+count_children(Sup) -> fold(count_children, Sup, fun add_proplists/2).
+check_childspecs(Specs) -> ?SUPERVISOR:check_childspecs(Specs).
+
+%% Synchronous call to the mirroring server behind Sup (Sup is the
+%% overall supervisor's pid or registered name).
+call(Sup, Msg) -> ?GEN_SERVER:call(mirroring(Sup), Msg, infinity).
+%% Best-effort cast: a peer that has already died is silently
+%% ignored (see with_exit_handler/2).
+cast(Sup, Msg) -> with_exit_handler(
+ fun() -> ok end,
+ fun() -> ?GEN_SERVER:cast(mirroring(Sup), Msg) end).
+
+%% Route Msg to the peer that currently owns child Id according to
+%% the Mnesia table, returning {error, not_found} if nobody does.
+find_call(Sup, Id, Msg) ->
+ Group = call(Sup, group),
+ MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1',
+ key = {Group, Id},
+ _ = '_'},
+ %% If we did this inside a tx we could still have failover
+ %% immediately after the tx - we can't be 100% here. So we may as
+ %% well dirty_select.
+ case mnesia:dirty_select(?TABLE, [{MatchHead, [], ['$1']}]) of
+ [Mirror] -> call(Mirror, Msg);
+ [] -> {error, not_found}
+ end.
+
+%% Aggregate the result of calling FunAtom (which_children or
+%% count_children) on the delegate supervisor of every member of
+%% Sup's process group, combining the per-member results with AggFun.
+fold(FunAtom, Sup, AggFun) ->
+    Group = call(Sup, group),
+    Results = [apply(?SUPERVISOR, FunAtom, [delegate(Member)]) ||
+                  Member <- ?PG2:get_members(Group)],
+    lists:foldl(AggFun, [], Results).
+
+%% Fetch the pid of child Id under Sup. Asserts exactly one match;
+%% note this can return 'undefined' for a child that exists but is
+%% currently terminated (supervisor reports its pid as undefined) —
+%% check_stop/3 relies on that.
+child(Sup, Id) ->
+ [Pid] = [Pid || {Id1, Pid, _, _} <- ?SUPERVISOR:which_children(Sup),
+ Id1 =:= Id],
+ Pid.
+
+%% The two fixed children of the overall supervisor (see
+%% init({overall, ...})).
+delegate(Sup) -> child(Sup, delegate).
+mirroring(Sup) -> child(Sup, mirroring).
+
+%%----------------------------------------------------------------------------
+
+%% Started by the overall supervisor as its 'mirroring' child (see
+%% init({overall, ...})); runs the mirroring gen_server.
+start_internal(Group, TxFun, ChildSpecs) ->
+ ?GEN_SERVER:start_link(?MODULE, {mirroring, Group, TxFun, ChildSpecs},
+ [{timeout, infinity}]).
+
+%%----------------------------------------------------------------------------
+
+%% init/1 serves all three processes (see "Internals" in the module
+%% header): the overall supervisor, the delegate supervisor and the
+%% mirroring gen_server, distinguished by the tag in the argument.
+init({overall, _Group, _TxFun, ignore}) -> ignore;
+init({overall, Group, TxFun, {ok, {Restart, ChildSpecs}}}) ->
+ %% Important: Delegate MUST start before Mirroring so that when we
+ %% shut down from above it shuts down last, so Mirroring does not
+ %% see it die.
+ %%
+ %% See comment in handle_info('DOWN', ...) below
+ {ok, {{one_for_all, 0, 1},
+ [{delegate, {?SUPERVISOR, start_link, [?MODULE, {delegate, Restart}]},
+ temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
+ {mirroring, {?MODULE, start_internal, [Group, TxFun, ChildSpecs]},
+ permanent, 16#ffffffff, worker, [?MODULE]}]}};
+
+
+%% Delegate: the user-specified restart strategy, but no children
+%% yet — they are started later through the mirroring server.
+init({delegate, Restart}) ->
+ {ok, {Restart, []}};
+
+%% Mirroring gen_server: overall/delegate pids are filled in during
+%% the {init, Overall} call (see handle_call below).
+init({mirroring, Group, TxFun, ChildSpecs}) ->
+ {ok, #state{group = Group,
+ tx_fun = TxFun,
+ initial_childspecs = ChildSpecs}}.
+
+%% {init, Overall}: second phase of startup, invoked from
+%% start_link0/4 once the overall supervisor's pid is known. Join
+%% the process group, monitor existing peers (and ask them to
+%% monitor us via ensure_monitoring), monitor the delegate, then
+%% start the childspecs returned from Module:init/1.
+handle_call({init, Overall}, _From,
+ State = #state{overall = undefined,
+ delegate = undefined,
+ group = Group,
+ tx_fun = TxFun,
+ initial_childspecs = ChildSpecs}) ->
+ process_flag(trap_exit, true),
+ ?PG2:create(Group),
+ ok = ?PG2:join(Group, Overall),
+ Rest = ?PG2:get_members(Group) -- [Overall],
+ %% If we are the first member, any childspec rows left in Mnesia
+ %% are stale leftovers from a previous incarnation of the group.
+ case Rest of
+ [] -> TxFun(fun() -> delete_all(Group) end);
+ _ -> ok
+ end,
+ [begin
+ ?GEN_SERVER:cast(mirroring(Pid), {ensure_monitoring, Overall}),
+ erlang:monitor(process, Pid)
+ end || Pid <- Rest],
+ Delegate = delegate(Overall),
+ erlang:monitor(process, Delegate),
+ State1 = State#state{overall = Overall, delegate = Delegate},
+ case errors([maybe_start(Group, TxFun, Overall, Delegate, S)
+ || S <- ChildSpecs]) of
+ [] -> {reply, ok, State1};
+ Errors -> {stop, {shutdown, Errors}, State1}
+ end;
+
+%% start_child: translate maybe_start/5's "already in Mnesia"
+%% results into standard supervisor return values.
+handle_call({start_child, ChildSpec}, _From,
+ State = #state{overall = Overall,
+ delegate = Delegate,
+ group = Group,
+ tx_fun = TxFun}) ->
+ {reply, case maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) of
+ already_in_mnesia -> {error, already_present};
+ {already_in_mnesia, Pid} -> {error, {already_started, Pid}};
+ Else -> Else
+ end, State};
+
+%% delete_child must also remove the childspec from Mnesia, hence a
+%% dedicated path (stop/4) rather than a plain {msg, ...} forward.
+handle_call({delete_child, Id}, _From, State = #state{delegate = Delegate,
+ group = Group,
+ tx_fun = TxFun}) ->
+ {reply, stop(Group, TxFun, Delegate, Id), State};
+
+%% Any other supervisor API call is forwarded to the delegate as-is.
+handle_call({msg, F, A}, _From, State = #state{delegate = Delegate}) ->
+ {reply, apply(?SUPERVISOR, F, [Delegate | A]), State};
+
+handle_call(group, _From, State = #state{group = Group}) ->
+ {reply, Group, State};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+%% Sent by a newly joined peer so we monitor its overall supervisor
+%% (the mirror image of the monitors it sets up on us).
+handle_cast({ensure_monitoring, Pid}, State) ->
+ erlang:monitor(process, Pid),
+ {noreply, State};
+
+%% Group-wide shutdown: re-broadcast to the remaining peers before
+%% stopping ourselves (cast/2 ignores peers that are already gone).
+handle_cast({die, Reason}, State = #state{group = Group}) ->
+ tell_all_peers_to_die(Group, Reason),
+ {stop, Reason, State};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+%% Our own delegate died.
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{delegate = Pid, group = Group}) ->
+ %% Since the delegate is temporary, its death won't cause us to
+ %% die. Since the overall supervisor kills processes in reverse
+ %% order when shutting down "from above" and we started after the
+ %% delegate, if we see the delegate die then that means it died
+ %% "from below" i.e. due to the behaviour of its children, not
+ %% because the whole app was being torn down.
+ %%
+ %% Therefore if we get here we know we need to cause the entire
+ %% mirrored sup to shut down, not just fail over.
+ tell_all_peers_to_die(Group, Reason),
+ {stop, Reason, State};
+
+%% A peer's overall supervisor died: failover. Only the "first"
+%% surviving member (smallest pid after sorting) claims the dead
+%% peer's childspecs — update_all/2 re-points them at us inside a
+%% transaction — and restarts them on our delegate; everyone else
+%% does nothing.
+handle_info({'DOWN', _Ref, process, Pid, _Reason},
+ State = #state{delegate = Delegate,
+ group = Group,
+ tx_fun = TxFun,
+ overall = O}) ->
+ %% TODO load balance this
+ %% No guarantee pg2 will have received the DOWN before us.
+ R = case lists:sort(?PG2:get_members(Group)) -- [Pid] of
+ [O | _] -> ChildSpecs =
+ TxFun(fun() -> update_all(O, Pid) end),
+ [start(Delegate, ChildSpec) || ChildSpec <- ChildSpecs];
+ _ -> []
+ end,
+ case errors(R) of
+ [] -> {noreply, State};
+ Errors -> {stop, {shutdown, Errors}, State}
+ end;
+
+handle_info(Info, State) ->
+ {stop, {unexpected_info, Info}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Ask every other mirroring server in Group to stop with Reason;
+%% we exclude ourselves since we are already on the way down.
+tell_all_peers_to_die(Group, Reason) ->
+    Peers = ?PG2:get_members(Group) -- [self()],
+    [cast(Peer, {die, Reason}) || Peer <- Peers].
+
+%% Try to become the owner of ChildSpec within Group. check_start/4
+%% runs inside a Mnesia transaction and yields:
+%% start - we claimed ownership; actually start the child here
+%% undefined - a live peer owns it but the child is not running
+%% Pid - a live peer owns it and the child is running
+%% An aborted transaction surfaces as a thrown {error, E} (see the
+%% tx_fun example in the module header) which we return as-is.
+maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) ->
+ try TxFun(fun() -> check_start(Group, Overall, Delegate, ChildSpec) end) of
+ start -> start(Delegate, ChildSpec);
+ undefined -> already_in_mnesia;
+ Pid -> {already_in_mnesia, Pid}
+ catch
+ %% If we are torn down while in the transaction...
+ {error, E} -> {error, E}
+ end.
+
+%% Runs inside a Mnesia transaction. Decide who owns {Group, Id}:
+%% - nobody: record ourselves as owner, tell caller to start it;
+%% - us: return the current child pid (or undefined) from our own
+%% delegate;
+%% - a peer: probe that peer's overall supervisor — if it is dead we
+%% steal ownership, otherwise return the child pid known to the
+%% peer's delegate.
+check_start(Group, Overall, Delegate, ChildSpec) ->
+ case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
+ [] -> write(Group, Overall, ChildSpec),
+ start;
+ [S] -> #mirrored_sup_childspec{key = {Group, Id},
+ mirroring_pid = Pid} = S,
+ case Overall of
+ Pid -> child(Delegate, Id);
+ _ -> case supervisor(Pid) of
+ dead -> write(Group, Overall, ChildSpec),
+ start;
+ Delegate0 -> child(Delegate0, Id)
+ end
+ end
+ end.
+
+%% Probe a peer's overall supervisor: returns its delegate pid, or
+%% 'dead' if the peer is unreachable or has gone away.
+supervisor(Pid) -> with_exit_handler(fun() -> dead end,
+ fun() -> delegate(Pid) end).
+
+%% Record (or re-point) ownership of ChildSpec by Overall. Must run
+%% inside a transaction. Returns the childspec so callers can
+%% collect the specs they now own (see update_all/2).
+write(Group, Overall, ChildSpec) ->
+ S = #mirrored_sup_childspec{key = {Group, id(ChildSpec)},
+ mirroring_pid = Overall,
+ childspec = ChildSpec},
+ ok = mnesia:write(?TABLE, S, write),
+ ChildSpec.
+
+%% Remove the ownership record. Must run inside a transaction.
+delete(Group, Id) ->
+ ok = mnesia:delete({?TABLE, {Group, Id}}).
+
+%% Actually start a child on the given delegate supervisor.
+start(Delegate, ChildSpec) ->
+ apply(?SUPERVISOR, start_child, [Delegate, ChildSpec]).
+
+%% Delete a child, mirroring supervisor:delete_child/2 semantics:
+%% only allowed once the child is terminated. The Mnesia ownership
+%% record is removed in the same transaction that verifies the
+%% child is stopped.
+stop(Group, TxFun, Delegate, Id) ->
+ try TxFun(fun() -> check_stop(Group, Delegate, Id) end) of
+ deleted -> apply(?SUPERVISOR, delete_child, [Delegate, Id]);
+ running -> {error, running}
+ catch
+ {error, E} -> {error, E}
+ end.
+
+%% Runs inside a Mnesia transaction. child/2 returns 'undefined' for
+%% a child that exists but is not currently running.
+check_stop(Group, Delegate, Id) ->
+ case child(Delegate, Id) of
+ undefined -> delete(Group, Id),
+ deleted;
+ _ -> running
+ end.
+
+%% Extract the Id from a (pre-map-style) 6-tuple childspec.
+id({Id, _, _, _, _, _}) -> Id.
+
+%% Re-point every childspec owned by the dead OldOverall at Overall
+%% (us). Runs inside a Mnesia transaction; returns the childspecs
+%% we took over so the caller can start them on our delegate.
+update_all(Overall, OldOverall) ->
+ MatchHead = #mirrored_sup_childspec{mirroring_pid = OldOverall,
+ key = '$1',
+ childspec = '$2',
+ _ = '_'},
+ [write(Group, Overall, C) ||
+ [{Group, _Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])].
+
+%% Drop every childspec record for Group; used by the first member
+%% of a fresh group to clear leftovers from a previous incarnation.
+%% Runs inside a Mnesia transaction.
+delete_all(Group) ->
+ MatchHead = #mirrored_sup_childspec{key = {Group, '_'},
+ childspec = '$1',
+ _ = '_'},
+ [delete(Group, id(C)) ||
+ C <- mnesia:select(?TABLE, [{MatchHead, [], ['$1']}])].
+
+%% Collect the reasons from any {error, E} results.
+errors(Results) -> [E || {error, E} <- Results].
+
+%%----------------------------------------------------------------------------
+
+%% Create the Mnesia table(s) this module needs. Safe to call more
+%% than once: a table that already exists counts as success.
+%% Returns ok, or the first failing mnesia result otherwise.
+create_tables() -> create_tables([?TABLE_DEF]).
+
+create_tables([]) ->
+    ok;
+create_tables([{Table, Attributes} | Ts]) ->
+    case mnesia:create_table(Table, Attributes) of
+        {atomic, ok}                       -> create_tables(Ts);
+        %% Match the table we just tried to create (the old code
+        %% matched ?TABLE here, which only worked because the list
+        %% happens to contain a single table).
+        {aborted, {already_exists, Table}} -> create_tables(Ts);
+        Err                                -> Err
+    end.
+
+%% Expose the table definition so an application can manage table
+%% creation itself. ?TABLE_MATCH is prepended for the benefit of
+%% table-integrity checking machinery (NOTE(review): presumably
+%% consumed by rabbit_table or similar — confirm against callers).
+table_definitions() ->
+ {Name, Attributes} = ?TABLE_DEF,
+ [{Name, [?TABLE_MATCH | Attributes]}].
+
+%%----------------------------------------------------------------------------
+
+%% Run Thunk(), mapping exits that merely mean "the process we were
+%% talking to is gone" (noproc/nodedown/normal/shutdown, either
+%% plain or nested inside a gen_server call exit) to Handler().
+%% Any other exception propagates unchanged.
+with_exit_handler(Handler, Thunk) ->
+ try
+ Thunk()
+ catch
+ exit:{R, _} when R =:= noproc; R =:= nodedown;
+ R =:= normal; R =:= shutdown ->
+ Handler();
+ exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
+ Handler()
+ end.
+
+%% Merge two proplists of counters (as returned by count_children),
+%% summing the values of entries that share a key.
+add_proplists(P1, P2) ->
+    add_proplists(lists:keysort(1, P1), lists:keysort(1, P2), []).
+add_proplists([], Remaining, Acc) -> Remaining ++ Acc;
+add_proplists(Remaining, [], Acc) -> Remaining ++ Acc;
+add_proplists([{K, V1} | T1], [{K, V2} | T2], Acc) ->
+    add_proplists(T1, T2, [{K, V1 + V2} | Acc]);
+add_proplists([{K1, _} = Pair | T1], [{K2, _} | _] = L2, Acc) when K1 < K2 ->
+    add_proplists(T1, L2, [Pair | Acc]);
+add_proplists(L1, [Pair | T2], Acc) ->
+    add_proplists(L1, T2, [Pair | Acc]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(mirrored_supervisor_tests).
+
+-compile([export_all]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-behaviour(gen_server).
+-behaviour(mirrored_supervisor).
+
+-define(MS, mirrored_supervisor).
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Entry point: run every functional test in order. Each test either
+%% returns 'passed' or crashes, so reaching the final 'passed' means
+%% the whole suite succeeded.
+all_tests() ->
+ passed = test_migrate(),
+ passed = test_migrate_twice(),
+ passed = test_already_there(),
+ passed = test_delete_restart(),
+ passed = test_which_children(),
+ passed = test_large_group(),
+ passed = test_childspecs_at_init(),
+ passed = test_anonymous_supervisors(),
+ passed = test_no_migration_on_shutdown(),
+ passed = test_start_idempotence(),
+ passed = test_unsupported(),
+ passed = test_ignore(),
+ passed = test_startup_failure(),
+ passed.
+
+%% Simplest test
+test_migrate() ->
+ with_sups(fun([A, _]) ->
+ ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b]).
+
+%% Is migration transitive?
+test_migrate_twice() ->
+ with_sups(fun([A, B]) ->
+ ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ {ok, C} = start_sup(c),
+ Pid2 = pid_of(worker),
+ kill_registered(B, Pid2),
+ Pid3 = pid_of(worker),
+ false = (Pid1 =:= Pid3),
+ kill(C)
+ end, [a, b]).
+
+%% Can't start the same child twice
+test_already_there() ->
+ with_sups(fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, S),
+ {error, {already_started, Pid}} = ?MS:start_child(b, S)
+ end, [a, b]).
+
+%% Deleting and restarting should work as per a normal supervisor
+test_delete_restart() ->
+ with_sups(fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid1} = ?MS:start_child(a, S),
+ {error, running} = ?MS:delete_child(a, worker),
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid2} = ?MS:start_child(b, S),
+ false = (Pid1 =:= Pid2),
+ ok = ?MS:terminate_child(b, worker),
+ {ok, Pid3} = ?MS:restart_child(b, worker),
+ Pid3 = pid_of(worker),
+ false = (Pid2 =:= Pid3),
+ %% Not the same supervisor as the worker is on
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid4} = ?MS:start_child(a, S),
+ false = (Pid3 =:= Pid4)
+ end, [a, b]).
+
+test_which_children() ->
+ with_sups(
+ fun([A, B] = Both) ->
+ ?MS:start_child(A, childspec(worker)),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ok = ?MS:terminate_child(a, worker),
+ assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
+ {ok, _} = ?MS:restart_child(a, worker),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ?MS:start_child(B, childspec(worker2)),
+ assert_wc(Both, fun (C) -> 2 = length(C) end)
+ end, [a, b]).
+
+assert_wc(Sups, Fun) ->
+ [Fun(?MS:which_children(Sup)) || Sup <- Sups].
+
+wc_pid(Child) ->
+ {worker, Pid, worker, [mirrored_supervisor_tests]} = Child,
+ Pid.
+
+%% Not all the members of the group should actually do the failover
+test_large_group() ->
+ with_sups(fun([A, _, _, _]) ->
+ ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b, c, d]).
+
+%% Do childspecs work when returned from init?
+test_childspecs_at_init() ->
+ S = childspec(worker),
+ with_sups(fun([A, _]) ->
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [{a, [S]}, {b, [S]}]).
+
+test_anonymous_supervisors() ->
+ with_sups(fun([A, _B]) ->
+ ?MS:start_child(A, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [anon, anon]).
+
+%% When a mirrored_supervisor terminates, we should not migrate, but
+%% the whole supervisor group should shut down. To test this we set up
+%% a situation where the gen_server will only fail if it's running
+%% under the supervisor called 'evil'. It should not migrate to
+%% 'good' and survive, rather the whole group should go away.
+test_no_migration_on_shutdown() ->
+ with_sups(fun([Evil, _]) ->
+ ?MS:start_child(Evil, childspec(worker)),
+ try
+ call(worker, ping, 1000, 100),
+ exit(worker_should_not_have_migrated)
+ catch exit:{timeout_waiting_for_server, _, _} ->
+ ok
+ end
+ end, [evil, good]).
+
+test_start_idempotence() ->
+ with_sups(fun([_]) ->
+ CS = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, CS),
+ {error, {already_started, Pid}} = ?MS:start_child(a, CS),
+ ?MS:terminate_child(a, worker),
+ {error, already_present} = ?MS:start_child(a, CS)
+ end, [a]).
+
+test_unsupported() ->
+ try
+ ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {sup, one_for_one, []}),
+ exit(no_global)
+ catch error:badarg ->
+ ok
+ end,
+ try
+ ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {sup, simple_one_for_one, []}),
+ exit(no_sofo)
+ catch error:badarg ->
+ ok
+ end,
+ passed.
+
+%% Just test we don't blow up
+test_ignore() ->
+ ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {sup, fake_strategy_for_ignore, []}),
+ passed.
+
+test_startup_failure() ->
+ [test_startup_failure(F) || F <- [want_error, want_exit]],
+ passed.
+
+test_startup_failure(Fail) ->
+ process_flag(trap_exit, true),
+ ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
+ {sup, one_for_one, [childspec(Fail)]}),
+ receive
+ {'EXIT', _, shutdown} ->
+ ok
+ after 1000 ->
+ exit({did_not_exit, Fail})
+ end,
+ process_flag(trap_exit, false).
+
+%% ---------------------------------------------------------------------------
+
+%% Run Fun with a freshly started supervisor per element of Sups,
+%% then tear everything down. inc_group/0 gives each test run its
+%% own process group so tests cannot interfere with one another.
+with_sups(Fun, Sups) ->
+ inc_group(),
+ Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
+ Fun(Pids),
+ [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
+ %% NOTE(review): sleep-based settling is inherently racy; the
+ %% monitors in kill/2 only cover the supervisors themselves.
+ timer:sleep(500),
+ passed.
+
+%% Start one mirrored supervisor. Spec is either a name (atom, or
+%% 'anon' for an unregistered one) or {Name, ChildSpecs}.
+start_sup(Spec) ->
+ start_sup(Spec, group).
+
+start_sup({Name, ChildSpecs}, Group) ->
+ {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
+ %% We are not a supervisor, when we kill the supervisor we do not
+ %% want to die!
+ unlink(Pid),
+ {ok, Pid};
+
+start_sup(Name, Group) ->
+ start_sup({Name, []}, Group).
+
+%% 'anon' exercises the unregistered start_link/4 variant; anything
+%% else registers locally under Name.
+start_sup0(anon, Group, ChildSpecs) ->
+ ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
+ {sup, one_for_one, ChildSpecs});
+
+start_sup0(Name, Group, ChildSpecs) ->
+ ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
+ {sup, one_for_one, ChildSpecs}).
+
+%% Transient worker childspec whose Id doubles as the gen_server's
+%% registered name (see start_gs/1).
+childspec(Id) ->
+ {Id, {?MODULE, start_gs, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
+
+%% want_error / want_exit simulate startup failures for
+%% test_startup_failure/1; any other Id starts the dumb gen_server
+%% defined at the bottom of this file, registered under Id.
+start_gs(want_error) ->
+ {error, foo};
+
+start_gs(want_exit) ->
+ exit(foo);
+
+start_gs(Id) ->
+ gen_server:start_link({local, Id}, ?MODULE, server, []).
+
+%% Resolve a worker's current pid by pinging it through its
+%% registered name (call/2 retries while the worker is migrating).
+pid_of(Id) ->
+ {received, Pid, ping} = call(Id, ping),
+ Pid.
+
+%% The TxFun passed to mirrored_supervisor:start_link/4,5 — matches
+%% the example in that module's header comment.
+tx_fun(Fun) ->
+ case mnesia:sync_transaction(Fun) of
+ {atomic, Result} -> Result;
+ {aborted, Reason} -> throw({error, Reason})
+ end.
+
+%% Advance the per-process test counter used to derive a unique
+%% group name for each test run (see get_group/1).
+inc_group() ->
+    Previous = case get(counter) of
+                   undefined -> 0;
+                   N -> N
+               end,
+    put(counter, Previous + 1).
+
+%% Pair the caller-supplied name with the current counter so each
+%% test run gets its own process group.
+get_group(Group) ->
+    {Group, get(counter)}.
+
+call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
+
+%% Retry a gen_server call until it succeeds or MaxDelay is used up,
+%% sleeping Decr between attempts — covers the window in which a
+%% worker is migrating between supervisors.
+call(Id, Msg, 0, _Decr) ->
+ %% NOTE(review): erlang:get_stacktrace/0 is deprecated from OTP 21
+ %% and removed in OTP 24; fine for the OTP releases this code
+ %% targets, but newer OTP would need a stacktrace captured in a
+ %% try/catch instead.
+ exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()});
+
+call(Id, Msg, MaxDelay, Decr) ->
+ try
+ gen_server:call(Id, Msg, infinity)
+ catch exit:_ -> timer:sleep(Decr),
+ call(Id, Msg, MaxDelay - Decr, Decr)
+ end.
+
+%% Kill Pid with reason 'bang' and block (via monitors) until it and
+%% any additional Waits pids have been reported 'DOWN'.
+kill(Pid) -> kill(Pid, []).
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
+kill(Pid, Waits) ->
+ erlang:monitor(process, Pid),
+ [erlang:monitor(process, P) || P <- Waits],
+ exit(Pid, bang),
+ kill_wait(Pid),
+ [kill_wait(P) || P <- Waits].
+
+%% Kill supervisor Pid, wait for it and its registered Child to go
+%% down, and verify the name no longer resolves to the old pid
+%% (i.e. the child has gone or been restarted elsewhere).
+kill_registered(Pid, Child) ->
+ {registered_name, Name} = erlang:process_info(Child, registered_name),
+ kill(Pid, Child),
+ false = (Child =:= whereis(Name)),
+ ok.
+
+%% Block until the 'DOWN' for Pid arrives (monitor set up in kill/2).
+kill_wait(Pid) ->
+ receive
+ {'DOWN', _Ref, process, Pid, _Reason} ->
+ ok
+ end.
+
+%% ---------------------------------------------------------------------------
+%% Dumb gen_server we can supervise
+%% ---------------------------------------------------------------------------
+
+%% mirrored_supervisor callback: the fake strategy makes init return
+%% ignore, exercising test_ignore/0.
+init({sup, fake_strategy_for_ignore, _ChildSpecs}) ->
+ ignore;
+
+init({sup, Strategy, ChildSpecs}) ->
+ {ok, {{Strategy, 0, 1}, ChildSpecs}};
+
+%% gen_server callback for the dumb worker below.
+init(server) ->
+ {ok, state}.
+
+handle_call(Msg, _From, State) ->
+ die_if_my_supervisor_is_evil(),
+ {reply, {received, self(), Msg}, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Crash the worker whenever a supervisor registered as 'evil' is
+%% alive and we appear among the group's children (only
+%% test_no_migration_on_shutdown/0 registers one; note that
+%% ?MS:which_children/1 aggregates over the whole group, not just
+%% 'evil' itself). If 'evil' is not running, carry on.
+die_if_my_supervisor_is_evil() ->
+ try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
+ false -> ok;
+ _ -> exit(doooom)
+ catch
+ exit:{noproc, _} -> ok
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(mnesia_sync).
+
+%% mnesia:sync_transaction/3 fails to guarantee that the log is flushed to disk
+%% at commit. This module is an attempt to minimise the risk of data loss by
+%% performing a coalesced log fsync. Unfortunately this is performed regardless
+%% of whether or not the log was appended to.
+
+-behaviour(gen_server).
+
+-export([sync/0]).
+
+-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, {waiting, disc_node}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(sync/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% Block until the mnesia log has been fsynced (a no-op on ram-only
+%% nodes). Concurrent callers are coalesced into a single fsync —
+%% see handle_call(sync, ...) below.
+sync() ->
+ gen_server:call(?SERVER, sync, infinity).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ %% use_dir tells us whether this node keeps mnesia data on disc;
+ %% if not, there is never anything to fsync.
+ {ok, #state{disc_node = mnesia:system_info(use_dir), waiting = []}}.
+
+%% Ram-only node: nothing on disc to sync, reply immediately.
+handle_call(sync, _From, #state{disc_node = false} = State) ->
+ {reply, ok, State};
+%% Disc node: park the caller and return a 0 timeout so that any
+%% further sync requests already queued in our mailbox are collected
+%% first — the coalesced fsync happens in handle_info(timeout, ...).
+handle_call(sync, From, #state{waiting = Waiting} = State) ->
+ {noreply, State#state{waiting = [From | Waiting]}, 0};
+handle_call(Request, _From, State) ->
+ {stop, {unhandled_call, Request}, State}.
+
+handle_cast(Request, State) ->
+ {stop, {unhandled_cast, Request}, State}.
+
+%% Mailbox drained: fsync once and release every parked caller.
+%% 'latest_log' is the disk_log holding mnesia's transaction log
+%% (NOTE(review): an internal mnesia name — confirm it remains valid
+%% on the targeted OTP releases).
+handle_info(timeout, #state{waiting = Waiting} = State) ->
+ ok = disk_log:sync(latest_log),
+ [gen_server:reply(From, ok) || From <- Waiting],
+ {noreply, State#state{waiting = []}};
+handle_info(Message, State) ->
+ {stop, {unhandled_info, Message}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% This file is a copy of `mochijson2.erl' from mochiweb, revision
+%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
+%% `LICENSE-MIT-Mochi'.
+
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
+%% with binaries as strings, arrays as lists (without an {array, _})
+%% wrapper and it only knows how to decode UTF-8 (and ASCII).
+%%
+%% JSON terms are decoded as follows (javascript -> erlang):
+%% <ul>
+%% <li>{"key": "value"} ->
+%% {struct, [{<<"key">>, <<"value">>}]}</li>
+%% <li>["array", 123, 12.34, true, false, null] ->
+%% [<<"array">>, 123, 12.34, true, false, null]
+%% </li>
+%% </ul>
+%% <ul>
+%% <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
+%% <li>Objects decode to {struct, PropList}</li>
+%% <li>Numbers decode to integer or float</li>
+%% <li>true, false, null decode to their respective terms.</li>
+%% </ul>
+%% The encoder will accept the same format that the decoder will produce,
+%% but will also allow additional cases for leniency:
+%% <ul>
+%% <li>atoms other than true, false, null will be considered UTF-8
+%% strings (even as a proplist key)
+%% </li>
+%% <li>{json, IoList} will insert IoList directly into the output
+%% with no validation
+%% </li>
+%% <li>{array, Array} will be encoded as Array
+%% (legacy mochijson style)
+%% </li>
+%% <li>A non-empty raw proplist will be encoded as an object as long
+%% as the first pair does not have an atom key of json, struct,
+%% or array
+%% </li>
+%% </ul>
+
+-module(mochijson2).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1, decode/2]).
+
+%% This is a macro to placate syntax highlighters..
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
+ column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
+ column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
+ column=1,
+ line=1+S#decoder.line}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% @type json_string() = atom | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = [json_term()]
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_eep18_object() = {[{json_string(), json_term()}]}
+%% @type json_iolist() = {json, iolist()}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object() | json_eep18_object() | json_iolist()
+
+-record(encoder, {handler=null,
+ utf8=false}).
+
+-record(decoder, {object_hook=null,
+ offset=0,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+%% @type encoder_option() = handler_option() | utf8_option()
+%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term()
+%% @doc Decode the given iolist to Erlang terms using the given object format
+%% for decoding, where proplist returns JSON objects as [{binary(), json_term()}]
+%% proplists, eep18 returns JSON objects as {[binary(), json_term()]}, and struct
+%% returns them as-is.
+decode(S, Options) ->
+ json_decode(S, parse_decoder_options(Options, #decoder{})).
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
+
+%% Internal API
+
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler});
+parse_encoder_options([{utf8, Switch} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{utf8=Switch}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook});
+parse_decoder_options([{format, Format} | Rest], State)
+ when Format =:= struct orelse Format =:= eep18 orelse Format =:= proplist ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Format}).
+
+%% Encode a json_term() to iodata. Clause order matters: the bare-proplist
+%% clause guards against the struct/array/json tags so that those tagged
+%% tuples, when they appear as list heads, are not mistaken for key/value
+%% pairs; unencodable terms fall through to the user handler (if any).
+json_encode(true, _State) ->
+ <<"true">>;
+json_encode(false, _State) ->
+ <<"false">>;
+json_encode(null, _State) ->
+ <<"null">>;
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum:digits(F);
+json_encode(S, State) when is_binary(S); is_atom(S) ->
+ json_encode_string(S, State);
+json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
+ K =/= array andalso
+ K =/= json) ->
+ json_encode_proplist(Props, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+%% eep18 object form: {Proplist}
+json_encode({Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+%% eep18 empty object
+json_encode({}, State) ->
+ json_encode_proplist([], State);
+json_encode(Array, State) when is_list(Array) ->
+ json_encode_array(Array, State);
+json_encode({array, Array}, State) when is_list(Array) ->
+ json_encode_array(Array, State);
+%% {json, IoList} is emitted verbatim, unvalidated
+json_encode({json, IoList}, _State) ->
+ IoList;
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+%% Encode a list as a JSON array. Builds the iodata in reverse with a
+%% leading comma per element, then strips the first comma before closing.
+json_encode_array([], _State) ->
+ <<"[]">>;
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+%% Encode a [{Key, Value}] proplist as a JSON object, using the same
+%% reverse-accumulate-then-strip-comma technique as json_encode_array/2.
+json_encode_proplist([], _State) ->
+ <<"{}">>;
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = json_encode_string(K, State),
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+%% Encode an atom, binary, integer or char-list as a JSON string.
+%% Safe strings are wrapped in quotes as-is; anything else goes through
+%% the per-character escaping path on its decoded code-point list.
+json_encode_string(A, State) when is_atom(A) ->
+ L = atom_to_list(A),
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
+ end;
+json_encode_string(B, State) when is_binary(B) ->
+ case json_bin_is_safe(B) of
+ true ->
+ [?Q, B, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
+ end;
+%% integer keys (e.g. in proplists) are stringified
+json_encode_string(I, _State) when is_integer(I) ->
+ [?Q, integer_to_list(I), ?Q];
+json_encode_string(L, State) when is_list(L) ->
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(L, State, [?Q])
+ end.
+
+%% True if a code-point list can be emitted inside quotes without any
+%% escaping: printable ASCII only, no quote/backslash/control characters.
+%% The final catch-all rejects code points above 16#10FFFF.
+json_string_is_safe([]) ->
+ true;
+json_string_is_safe([C | Rest]) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+ false;
+ C when C < 16#7f ->
+ json_string_is_safe(Rest);
+ _ ->
+ false
+ end.
+
+%% Binary variant of json_string_is_safe/1. No catch-all clause is needed
+%% here: the guards cover every possible byte value 0..255.
+json_bin_is_safe(<<>>) ->
+ true;
+json_bin_is_safe(<<C, Rest/binary>>) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f ->
+ false;
+ C when C < 16#7f ->
+ json_bin_is_safe(Rest)
+ end.
+
+%% Escape a list of Unicode code points into a quoted JSON string,
+%% accumulating in reverse. Non-ASCII output depends on #encoder.utf8:
+%% raw UTF-8 bytes when true, \uXXXX escapes (via unihex/1) when false.
+json_encode_string_unicode([], _State, Acc) ->
+ lists:reverse([$\" | Acc]);
+json_encode_string_unicode([C | Cs], State, Acc) ->
+ Acc1 = case C of
+ ?Q ->
+ [?Q, $\\ | Acc];
+ %% Escaping solidus is only useful when trying to protect
+ %% against "</script>" injection attacks which are only
+ %% possible when JSON is inserted into a HTML document
+ %% in-line. mochijson2 does not protect you from this, so
+ %% if you do insert directly into HTML then you need to
+ %% uncomment the following case or escape the output of encode.
+ %%
+ %% $/ ->
+ %% [$/, $\\ | Acc];
+ %%
+ $\\ ->
+ [$\\, $\\ | Acc];
+ $\b ->
+ [$b, $\\ | Acc];
+ $\f ->
+ [$f, $\\ | Acc];
+ $\n ->
+ [$n, $\\ | Acc];
+ $\r ->
+ [$r, $\\ | Acc];
+ $\t ->
+ [$t, $\\ | Acc];
+ C when C >= 0, C < $\s ->
+ [unihex(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
+ [xmerl_ucs:to_utf8(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
+ [unihex(C) | Acc];
+ C when C < 16#7f ->
+ [C | Acc];
+ _ ->
+ exit({json_encode, {bad_char, C}})
+ end,
+ json_encode_string_unicode(Cs, State, Acc1).
+
+%% Map 0..15 to its lower-case hex digit character.
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+%% \uXXXX escape for a code point. Code points in the BMP get one escape;
+%% those above 16#FFFF are split into a UTF-16 surrogate pair and emitted
+%% as two consecutive escapes.
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+%% Decoder entry point: normalise iolists to a binary, decode one value,
+%% then require that only whitespace remains (the trailing tokenize must
+%% return eof or the {eof, _} match fails with badmatch).
+json_decode(L, S) when is_list(L) ->
+ json_decode(iolist_to_binary(L), S);
+json_decode(B, S) ->
+ {Res, S1} = decode1(B, S),
+ {eof, _} = tokenize(B, S1#decoder{state=trim}),
+ Res.
+
+%% Decode a single JSON value starting at the current offset: a constant
+%% token, or a nested array/object.
+decode1(B, S=#decoder{state=null}) ->
+ case tokenize(B, S#decoder{state=any}) of
+ {{const, C}, S1} ->
+ {C, S1};
+ {start_array, S1} ->
+ decode_array(B, S1);
+ {start_object, S1} ->
+ decode_object(B, S1)
+ end.
+
+%% Apply the configured object format/hook to a freshly decoded object.
+%% The object_hook field holds either a format atom (null/struct/eep18/
+%% proplist, see parse_decoder_options/2) or a user-supplied fun.
+make_object(V, #decoder{object_hook=N}) when N =:= null orelse N =:= struct ->
+ V;
+make_object({struct, P}, #decoder{object_hook=eep18}) ->
+ {P};
+make_object({struct, P}, #decoder{object_hook=proplist}) ->
+ P;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+%% Decode a JSON object; the opening brace has already been consumed.
+decode_object(B, S) ->
+ decode_object(B, S#decoder{state=key}, []).
+
+%% State machine over key/comma states, accumulating {Key, Value} pairs
+%% in reverse. NOTE: because the key state also accepts end_object, a
+%% trailing comma before the closing brace is (leniently) accepted.
+decode_object(B, S=#decoder{state=key}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {{const, K}, S1} ->
+ {colon, S2} = tokenize(B, S1),
+ {V, S3} = decode1(B, S2#decoder{state=null}),
+ decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_object(B, S1#decoder{state=key}, Acc)
+ end.
+
+%% Decode a JSON array; the opening bracket has already been consumed.
+decode_array(B, S) ->
+ decode_array(B, S#decoder{state=any}, []).
+
+%% State machine over any/comma states, accumulating elements in reverse.
+%% As with objects, a trailing comma before ']' is accepted.
+decode_array(B, S=#decoder{state=any}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {start_array, S1} ->
+ {Array, S2} = decode_array(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, S1} ->
+ {Array, S2} = decode_object(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, S1} ->
+ decode_array(B, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_array(B, S1#decoder{state=any}, Acc)
+ end.
+
+%% Tokenize a string whose opening quote has been consumed. Fast path:
+%% scan for the closing quote and return a sub-binary with zero copying.
+%% On the first backslash, fall back to the escape-aware accumulator
+%% version, seeded with the (reversed) bytes scanned so far.
+tokenize_string(B, S=#decoder{offset=O}) ->
+ case tokenize_string_fast(B, O) of
+ {escape, O1} ->
+ Length = O1 - O,
+ S1 = ?ADV_COL(S, Length),
+ <<_:O/binary, Head:Length/binary, _/binary>> = B,
+ tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
+ O1 ->
+ Length = O1 - O,
+ <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
+ {{const, String}, ?ADV_COL(S, Length + 1)}
+ end.
+
+%% Scan forward from offset O validating UTF-8 sequence lengths as we go;
+%% return the offset of the closing quote, or {escape, O} at the first
+%% backslash. Malformed UTF-8 throws invalid_utf8.
+tokenize_string_fast(B, O) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ O;
+ <<_:O/binary, $\\, _/binary>> ->
+ {escape, O};
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string_fast(B, 1 + O);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string_fast(B, 2 + O);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string_fast(B, 3 + O);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string_fast(B, 4 + O);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+%% Slow path: decode escapes while accumulating output bytes in reverse.
+%% \uXXXX escapes in the high-surrogate range must be immediately followed
+%% by another \uXXXX low surrogate; a lone surrogate makes the binary
+%% match on Rest fail (badmatch). Raw bytes are validated as UTF-8 with
+%% the same length/continuation guards as tokenize_string_fast/2.
+tokenize_string(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
+ <<_:O/binary, "\\\"", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
+ <<_:O/binary, "\\\\", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
+ <<_:O/binary, "\\/", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
+ <<_:O/binary, "\\b", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
+ <<_:O/binary, "\\f", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
+ <<_:O/binary, "\\n", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
+ <<_:O/binary, "\\r", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
+ <<_:O/binary, "\\t", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
+ <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
+ C = erlang:list_to_integer([C3, C2, C1, C0], 16),
+ if C > 16#D7FF, C < 16#DC00 ->
+ %% coalesce UTF-16 surrogate pair
+ <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
+ D = erlang:list_to_integer([D3,D2,D1,D0], 16),
+ [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
+ D:16/big-unsigned-integer>>),
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+ tokenize_string(B, ?ADV_COL(S, 12), Acc1);
+ true ->
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
+ tokenize_string(B, ?ADV_COL(S, 6), Acc1)
+ end;
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+%% Tokenize a JSON number. The 4-ary state machine below tags the result
+%% {int, Chars} or {float, Chars}; conversion happens here at the end.
+tokenize_number(B, S) ->
+ case tokenize_number(B, sign, S, []) of
+ {{int, Int}, S1} ->
+ {{const, list_to_integer(Int)}, S1};
+ {{float, Float}, S1} ->
+ {{const, list_to_float(Float)}, S1}
+ end.
+
+%% States: sign -> int -> int1 -> frac -> frac1 -> esign -> eint -> eint1,
+%% mirroring the JSON number grammar (optional minus; "0" or nonzero digit
+%% run; optional fraction; optional exponent). Digits accumulate in
+%% reverse. Inexhaustive case clauses make malformed numbers crash.
+tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
+ case B of
+ <<_:O/binary, $-, _/binary>> ->
+ tokenize_number(B, int, ?INC_COL(S), [$-]);
+ _ ->
+ tokenize_number(B, int, S, [])
+ end;
+tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $0, _/binary>> ->
+ tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
+ <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, frac, S, Acc)
+ end;
+tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
+ tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ %% "1e5" has no fraction part: splice in ".0" so that
+ %% list_to_float/1 accepts the final result
+ tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+ _ ->
+ {{int, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
+ tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, eint, S, Acc)
+ end;
+tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end.
+
+%% Produce the next token at the current offset, skipping whitespace.
+%% At end of input, eof is only legal in the trim state (after a complete
+%% value); otherwise the `trim = S#decoder.state' match crashes.
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize(B, ?INC_CHAR(S, C));
+ <<_:O/binary, "{", _/binary>> ->
+ {start_object, ?INC_COL(S)};
+ <<_:O/binary, "}", _/binary>> ->
+ {end_object, ?INC_COL(S)};
+ <<_:O/binary, "[", _/binary>> ->
+ {start_array, ?INC_COL(S)};
+ <<_:O/binary, "]", _/binary>> ->
+ {end_array, ?INC_COL(S)};
+ <<_:O/binary, ",", _/binary>> ->
+ {comma, ?INC_COL(S)};
+ <<_:O/binary, ":", _/binary>> ->
+ {colon, ?INC_COL(S)};
+ <<_:O/binary, "null", _/binary>> ->
+ {{const, null}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "true", _/binary>> ->
+ {{const, true}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "false", _/binary>> ->
+ {{const, false}, ?ADV_COL(S, 5)};
+ <<_:O/binary, "\"", _/binary>> ->
+ tokenize_string(B, ?INC_COL(S));
+ <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
+ orelse C =:= $- ->
+ tokenize_number(B, S);
+ <<_:O/binary>> ->
+ trim = S#decoder.state,
+ {eof, S}
+ end.
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+%% An empty object in the default struct representation.
+obj_new() ->
+ {struct, []}.
+
+%% True if Props is a proplist with binary keys; a non-binary key makes
+%% the inner fun crash (function_clause), failing the test loudly.
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_binary(K) -> true end,
+ lists:all(F, Props).
+
+%% Build a struct object from Key/Value pairs, asserting well-formedness.
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ ?assert(is_obj(Obj)),
+ Obj.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+%% Structural equivalence; uses == (not =:=) for numbers so that
+%% integer/float representations of the same value compare equal.
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv(L1, L2) when is_list(L1), is_list(L2) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
+equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+%% Basic decode checks, including a \u surrogate-pair string.
+decode_test() ->
+ [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
+ <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
+
+%% Run every encode/decode round-trip vector.
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+%% Round-trip test vectors: {ErlangTerm, JsonText} pairs checked both
+%% ways (decode(J) ~ E and decode(encode(E)) ~ E) by test_one/2.
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, %% text representation may truncate, trail zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {<<"foo">>, "\"foo\""},
+ {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
+ {<<"">>, "\"\""},
+ {<<"\n\n\n">>, "\"\\n\\n\\n\""},
+ {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {[], "[]"},
+ {[[]], "[[]]"},
+ {[1, <<"foo">>], "[1,\"foo\"]"},
+
+ %% json array in a json object
+ {obj_from_list([{<<"foo">>, [123]}]),
+ "{\"foo\":[123]}"},
+
+ %% json object in a json object
+ {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ %% fold evaluation order
+ {obj_from_list([{<<"foo">>, []},
+ {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
+ {<<"alice">>, <<"bob">>}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ %% json object in a json array
+ {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+%% test utf8 encoding: default encoder escapes non-ASCII as \uXXXX,
+%% while an encoder built with {utf8, true} passes raw UTF-8 through.
+encoder_utf8_test() ->
+ %% safe conversion case (default)
+ [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
+ encode(<<1,"\321\202\320\265\321\201\321\202">>),
+
+ %% raw utf8 output (optional)
+ Enc = mochijson2:encoder([{utf8, true}]),
+ [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
+ Enc(<<1,"\321\202\320\265\321\201\321\202">>).
+
+%% Valid multi-byte UTF-8 decodes to the equivalent binary; malformed
+%% UTF-8 throws invalid_utf8 on decode and exits on encode.
+input_validation_test() ->
+ Good = [
+ {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
+ {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
+ {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
+ ],
+ lists:foreach(fun({CodePoint, UTF8}) ->
+ Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
+ Expect = decode(UTF8)
+ end, Good),
+
+ Bad = [
+ %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
+ <<?Q, 16#80, ?Q>>,
+ %% missing continuations, last byte in each should be 80-BF
+ <<?Q, 16#C2, 16#7F, ?Q>>,
+ <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
+ <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
+ %% we don't support code points > 10FFFF per RFC 3629
+ <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
+ %% escape characters trigger a different code path
+ <<?Q, $\\, $\n, 16#80, ?Q>>
+ ],
+ lists:foreach(
+ fun(X) ->
+ ok = try decode(X) catch invalid_utf8 -> ok end,
+ %% could be {ucs,{bad_utf8_character_code}} or
+ %% {json_encode,{bad_char,_}}
+ {'EXIT', _} = (catch encode(X))
+ end, Bad).
+
+%% {json, IoList} terms are spliced into the output verbatim.
+inline_json_test() ->
+ ?assertEqual(<<"\"iodata iodata\"">>,
+ iolist_to_binary(
+ encode({json, [<<"\"iodata">>, " iodata\""]}))),
+ ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
+ decode(
+ encode({struct,
+ [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
+ ok.
+
+%% Code points outside the BMP encode as a \u surrogate pair and
+%% round-trip back to the original UTF-8 binary.
+big_unicode_test() ->
+ UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(UTF8Seq))),
+ ?assertEqual(
+ UTF8Seq,
+ decode(iolist_to_binary(encode(UTF8Seq)))),
+ ok.
+
+%% decoder/1 closures honour the object_hook option.
+custom_decoder_test() ->
+ ?assertEqual(
+ {struct, [{<<"key">>, <<"value">>}]},
+ (decoder([]))("{\"key\": \"value\"}")),
+ F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
+ ?assertEqual(
+ win,
+ (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
+ ok.
+
+%% true/false/null round-trip as atoms; any other atom encodes as a
+%% JSON string (escaped if necessary).
+atom_test() ->
+ %% JSON native atoms
+ [begin
+ ?assertEqual(A, decode(atom_to_list(A))),
+ ?assertEqual(iolist_to_binary(atom_to_list(A)),
+ iolist_to_binary(encode(A)))
+ end || A <- [true, false, null]],
+ %% Atom to string
+ ?assertEqual(
+ <<"\"foo\"">>,
+ iolist_to_binary(encode(foo))),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
+ ok.
+
+key_encode_test() ->
+ %% Some forms are accepted as keys that would not be strings in other
+ %% cases
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{foo, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{"foo", 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode([{foo, 1}]))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode([{<<"foo">>, 1}]))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode([{"foo", 1}]))),
+ ?assertEqual(
+ <<"{\"\\ud834\\udd20\":1}">>,
+ iolist_to_binary(
+ encode({struct, [{[16#0001d120], 1}]}))),
+ ?assertEqual(
+ <<"{\"1\":1}">>,
+ iolist_to_binary(encode({struct, [{1, 1}]}))),
+ ok.
+
+%% Characters requiring escapes are flagged unsafe by both predicates
+%% and still round-trip through encode/decode.
+unsafe_chars_test() ->
+ Chars = "\"\\\b\f\n\r\t",
+ [begin
+ ?assertEqual(false, json_string_is_safe([C])),
+ ?assertEqual(false, json_bin_is_safe(<<C>>)),
+ ?assertEqual(<<C>>, decode(encode(<<C>>)))
+ end || C <- Chars],
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#0001d120])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
+ ?assertEqual(
+ [16#0001d120],
+ xmerl_ucs:from_utf8(
+ binary_to_list(
+ decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#110000])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
+ %% solidus can be escaped but isn't unsafe by default
+ ?assertEqual(
+ <<"/">>,
+ decode(<<"\"\\/\"">>)),
+ ok.
+
+int_test() ->
+ ?assertEqual(0, decode("0")),
+ ?assertEqual(1, decode("1")),
+ ?assertEqual(11, decode("11")),
+ ok.
+
+%% Bignums (beyond 64-bit range) encode via integer_to_list/1.
+large_int_test() ->
+ ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
+ ?assertEqual(<<"2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(2147483649214748364921474836492147483649))),
+ ok.
+
+float_test() ->
+ ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
+ ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
+ ok.
+
+%% Without a handler, unencodable terms exit; with one, the handler's
+%% replacement term is encoded instead.
+handler_test() ->
+ ?assertEqual(
+ {'EXIT',{json_encode,{bad_term,{x,y}}}},
+ catch encode({x,y})),
+ F = fun ({x,y}) -> [] end,
+ ?assertEqual(
+ <<"[]">>,
+ iolist_to_binary((encoder([{handler, F}]))({x, y}))),
+ ok.
+
+%% All three empty-object spellings encode to "{}".
+encode_empty_test_() ->
+ [{A, ?_assertEqual(<<"{}">>, iolist_to_binary(encode(B)))}
+ || {A, B} <- [{"eep18 {}", {}},
+ {"eep18 {[]}", {[]}},
+ {"{struct, []}", {struct, []}}]].
+
+%% Every decode format re-encodes to the same JSON text.
+encode_test_() ->
+ P = [{<<"k">>, <<"v">>}],
+ JSON = iolist_to_binary(encode({struct, P})),
+ [{atom_to_list(F),
+ ?_assertEqual(JSON, iolist_to_binary(encode(decode(JSON, [{format, F}]))))}
+ || F <- [struct, eep18, proplist]].
+
+%% Each format option yields its documented object representation.
+format_test_() ->
+ P = [{<<"k">>, <<"v">>}],
+ JSON = iolist_to_binary(encode({struct, P})),
+ [{atom_to_list(F),
+ ?_assertEqual(A, decode(JSON, [{format, F}]))}
+ || {F, A} <- [{struct, {struct, P}},
+ {eep18, {P}},
+ {proplist, P}]].
+
+-endif.
--- /dev/null
+%% This file is a copy of `mochijson2.erl' from mochiweb, revision
+%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
+%% `LICENSE-MIT-Mochi'.
+
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Useful numeric algorithms for floats that cover some deficiencies
+%% in the math module. More interesting is digits/1, which implements
+%% the algorithm from:
+%% http://www.cs.indiana.edu/~burger/fp/index.html
+%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+%% Design and Implementation.
+
+-module(mochinum).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+
+%% IEEE 754 Float exponent bias
+-define(FLOAT_BIAS, 1022).
+-define(MIN_EXP, -1074).
+-define(BIG_POW, 4503599627370496).
+
+%% External API
+
+%% @spec digits(number()) -> string()
+%% @doc Returns a string that accurately represents the given integer or float
+%% using a conservative amount of digits. Great for generating
+%% human-readable output, or compact ASCII serializations for floats.
+%% Integers print directly; 0.0 is special-cased; other floats go through
+%% the shortest-round-trip digit generation (digits1/3) and decimal-point
+%% placement (insert_decimal/2), with the sign re-attached at the end.
+digits(N) when is_integer(N) ->
+ integer_to_list(N);
+digits(0.0) ->
+ "0.0";
+digits(Float) ->
+ {Frac1, Exp1} = frexp_int(Float),
+ [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
+ {Place, Digits} = transform_digits(Place0, Digits0),
+ R = insert_decimal(Place, Digits),
+ case Float < 0 of
+ true ->
+ [$- | R];
+ _ ->
+ R
+ end.
+
+%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
+%% @doc Return the fractional and exponent part of an IEEE 754 double,
+%% equivalent to the libc function of the same name.
+%% F = Frac * pow(2, Exp).
+frexp(F) ->
+ frexp1(unpack(F)).
+
+%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+%% @doc Moderately efficient way to exponentiate integers.
+%% int_pow(10, 2) = 100. Negative N is not supported (no matching clause).
+int_pow(_X, 0) ->
+ 1;
+int_pow(X, N) when N > 0 ->
+ int_pow(X, N, 1).
+
+%% @spec int_ceil(F::float()) -> integer()
+%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+%% F when F == trunc(F);
+%% trunc(F) when F < 0;
+%% trunc(F) + 1 when F > 0.
+int_ceil(X) ->
+ T = trunc(X),
+ case (X - T) of
+ Pos when Pos > 0 -> T + 1;
+ _ -> T
+ end.
+
+
+%% Internal API
+
+%% Exponentiation by squaring with accumulator R.
+int_pow(X, N, R) when N < 2 ->
+ R * X;
+int_pow(X, N, R) ->
+ int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+
+%% Place the decimal point within digit string S given the power-of-ten
+%% Place. Small magnitudes get leading/trailing zeros; anything needing
+%% more than a few padding zeros switches to exponent notation.
+insert_decimal(0, S) ->
+ "0." ++ S;
+insert_decimal(Place, S) when Place > 0 ->
+ L = length(S),
+ case Place - L of
+ 0 ->
+ S ++ ".0";
+ N when N < 0 ->
+ {S0, S1} = lists:split(L + N, S),
+ S0 ++ "." ++ S1;
+ N when N < 6 ->
+ %% More places than digits
+ S ++ lists:duplicate(N, $0) ++ ".0";
+ _ ->
+ insert_decimal_exp(Place, S)
+ end;
+insert_decimal(Place, S) when Place > -6 ->
+ "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+insert_decimal(Place, S) ->
+ insert_decimal_exp(Place, S).
+
+%% Exponent notation: first digit, ".", remaining digits (or "0"),
+%% then e+/e- and the decimal exponent.
+insert_decimal_exp(Place, S) ->
+ [C | S0] = S,
+ S1 = case S0 of
+ [] ->
+ "0";
+ _ ->
+ S0
+ end,
+ Exp = case Place < 0 of
+ true ->
+ "e-";
+ false ->
+ "e+"
+ end,
+ [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+
+
+%% Set up the Burger-Dybvig free-format algorithm (see the module header
+%% reference): express the float as the exact rational R/S with rounding
+%% boundaries MPlus/MMinus, distinguishing normal mantissas from the
+%% power-of-two boundary case (Frac =:= ?BIG_POW) where the gap below
+%% the float is half the gap above. Round (even mantissa) decides
+%% whether boundaries are inclusive.
+digits1(Float, Exp, Frac) ->
+ Round = ((Frac band 1) =:= 0),
+ case Exp >= 0 of
+ true ->
+ BExp = 1 bsl Exp,
+ case (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * BExp * 2), 2, BExp, BExp,
+ Round, Round, Float);
+ false ->
+ scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+ Round, Round, Float)
+ end;
+ false ->
+ case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+ Round, Round, Float);
+ false ->
+ scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+ Round, Round, Float)
+ end
+ end.
+
+%% Estimate the decimal exponent with log10 (nudged down by 1.0e-10 to
+%% avoid overestimating on exact powers of ten) and scale R/S/M so the
+%% first generated digit lands in [0,10); fixup/7 corrects an
+%% off-by-one estimate.
+scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+ Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+ %% Note that the scheme implementation uses a 326 element look-up table
+ %% for int_pow(10, N) where we do not.
+ case Est >= 0 of
+ true ->
+ fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+ LowOk, HighOk);
+ false ->
+ Scale = int_pow(10, -Est),
+ fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+ LowOk, HighOk)
+ end.
+
+%% If the estimate was one too low, bump the exponent; otherwise shift
+%% into digit-generation position. Returns [DecimalExponent | Digits].
+fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+ TooLow = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TooLow of
+ true ->
+ [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+ false ->
+ [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+ end.
+
+%% Emit decimal digits with exact integer arithmetic until the remaining
+%% interval (tracked by MMinus/MPlus against remainder R) uniquely
+%% identifies the float; TC1/TC2 are the low/high termination tests and
+%% the final comparison rounds the last digit to the nearer neighbour.
+generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+ D = R0 div S,
+ R = R0 rem S,
+ TC1 = case LowOk of
+ true ->
+ R =< MMinus;
+ false ->
+ R < MMinus
+ end,
+ TC2 = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TC1 of
+ false ->
+ case TC2 of
+ false ->
+ [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+ LowOk, HighOk)];
+ true ->
+ [D + 1]
+ end;
+ true ->
+ case TC2 of
+ false ->
+ [D];
+ true ->
+ case R * 2 < S of
+ true ->
+ [D];
+ false ->
+ [D + 1]
+ end
+ end
+ end.
+
+%% Split an IEEE 754 double into its raw sign, exponent and fraction bits.
+unpack(Float) ->
+ <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+ {Sign, Exp, Frac}.
+
+%% frexp/1 kernel: zero, denormal (Exp =:= 0, renormalised via the bit
+%% length of Frac) and normal cases.
+frexp1({_Sign, 0, 0}) ->
+ {0.0, 0};
+frexp1({Sign, 0, Frac}) ->
+ Exp = log2floor(Frac),
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+ {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+frexp1({Sign, Exp, Frac}) ->
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+ {Frac1, Exp - ?FLOAT_BIAS}.
+
+%% Number of bits needed to represent Int (floor(log2(Int)) + 1 for
+%% Int > 0; returns N=0 for 0).
+log2floor(Int) ->
+ log2floor(Int, 0).
+
+log2floor(0, N) ->
+ N;
+log2floor(Int, N) ->
+ log2floor(Int bsr 1, 1 + N).
+
+%% Drop leading zero digits and convert the rest to characters.
+transform_digits(Place, [0 | Rest]) ->
+ transform_digits(Place, Rest);
+transform_digits(Place, Digits) ->
+ {Place, [$0 + D || D <- Digits]}.
+
+%% Like frexp/1 but returns the mantissa as an integer (with the implicit
+%% leading bit added for normals) and a base-2 exponent for it.
+frexp_int(F) ->
+ case unpack(F) of
+ {_Sign, 0, Frac} ->
+ {Frac, ?MIN_EXP};
+ {_Sign, Exp, Frac} ->
+ {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% Ceiling behaviour for positive, zero and negative inputs.
+int_ceil_test() ->
+ ?assertEqual(1, int_ceil(0.0001)),
+ ?assertEqual(0, int_ceil(0.0)),
+ ?assertEqual(1, int_ceil(0.99)),
+ ?assertEqual(1, int_ceil(1.0)),
+ ?assertEqual(-1, int_ceil(-1.5)),
+ ?assertEqual(-2, int_ceil(-2.0)),
+ ok.
+
+%% Integer exponentiation including the N = 0 base case.
+int_pow_test() ->
+ ?assertEqual(1, int_pow(1, 1)),
+ ?assertEqual(1, int_pow(1, 0)),
+ ?assertEqual(1, int_pow(10, 0)),
+ ?assertEqual(10, int_pow(10, 1)),
+ ?assertEqual(100, int_pow(10, 2)),
+ ?assertEqual(1000, int_pow(10, 3)),
+ ok.
+
+%% digits/1 output for integers, simple floats, notation switch points,
+%% and the extreme denormal/normal doubles (all must round-trip through
+%% list_to_float/1).
+digits_test() ->
+ ?assertEqual("0",
+ digits(0)),
+ ?assertEqual("0.0",
+ digits(0.0)),
+ ?assertEqual("1.0",
+ digits(1.0)),
+ ?assertEqual("-1.0",
+ digits(-1.0)),
+ ?assertEqual("0.1",
+ digits(0.1)),
+ ?assertEqual("0.01",
+ digits(0.01)),
+ ?assertEqual("0.001",
+ digits(0.001)),
+ ?assertEqual("1.0e+6",
+ digits(1000000.0)),
+ ?assertEqual("0.5",
+ digits(0.5)),
+ ?assertEqual("4503599627370496.0",
+ digits(4503599627370496.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324 =:= 5.0e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual("5.0e-324",
+ digits(SmallDenorm)),
+ ?assertEqual(SmallDenorm,
+ list_to_float(digits(SmallDenorm))),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual("2.225073858507201e-308",
+ digits(BigDenorm)),
+ ?assertEqual(BigDenorm,
+ list_to_float(digits(BigDenorm))),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual("2.2250738585072014e-308",
+ digits(SmallNorm)),
+ ?assertEqual(SmallNorm,
+ list_to_float(digits(SmallNorm))),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual("1.7976931348623157e+308",
+ digits(LargeNorm)),
+ ?assertEqual(LargeNorm,
+ list_to_float(digits(LargeNorm))),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual("5.0e-324",
+ digits(math:pow(2, -1074))),
+ ok.
+
+%% frexp/1 against known mantissa/exponent pairs, including the extreme
+%% denormal and normal doubles.
+frexp_test() ->
+ %% zero
+ ?assertEqual({0.0, 0}, frexp(0.0)),
+ %% one
+ ?assertEqual({0.5, 1}, frexp(1.0)),
+ %% negative one
+ ?assertEqual({-0.5, 1}, frexp(-1.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999978, -1022},
+ frexp(BigDenorm)),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999989, 1024},
+ frexp(LargeNorm)),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual(
+ {0.5, -1073},
+ frexp(math:pow(2, -1074))),
+ ok.
+
+-endif.
--- /dev/null
+%% This is the version of pg2 from R14B02, which contains the fix
+%% described at
+%% http://erlang.2086793.n4.nabble.com/pg2-still-busted-in-R13B04-td2230601.html.
+%% The changes are a search-and-replace to rename the module and avoid
+%% clashes with other versions of pg2, and also a simple rewrite of
+%% "andalso" and "orelse" expressions to case statements where the second
+%% operand is not a boolean since R12B does not allow this.
+
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(pg2_fixed).
+
+-export([create/1, delete/1, join/2, leave/2]).
+-export([get_members/1, get_local_members/1]).
+-export([get_closest_pid/1, which_groups/0]).
+-export([start/0,start_link/0,init/1,handle_call/3,handle_cast/2,handle_info/2,
+ terminate/2]).
+
+%%% As of R13B03 monitors are used instead of links.
+
+%%%
+%%% Exported functions
+%%%
+
+-spec start_link() -> {'ok', pid()} | {'error', term()}.
+
+%% Start the pg2_fixed server registered locally under the module name.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+-spec start() -> {'ok', pid()} | {'error', term()}.
+
+%% Start the server on demand if it is not already running.
+start() ->
+ ensure_started().
+
+-spec create(term()) -> 'ok'.
+
+%% Create a group cluster-wide. The existence check against the local
+%% ETS table is an optimisation; creation itself is serialised under a
+%% global lock and broadcast to all nodes via multi_call.
+create(Name) ->
+ ensure_started(),
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ false ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE, {create, Name})
+ end),
+ ok;
+ true ->
+ ok
+ end.
+
-type name() :: term().

%% Delete group Name on all nodes, removing all its members first.
-spec delete(name()) -> 'ok'.

delete(Name) ->
    ensure_started(),
    %% Same per-group global lock as create/join/leave, so concurrent
    %% modifications of one group are serialised cluster-wide.
    global:trans({{?MODULE, Name}, self()},
                 fun() ->
                         gen_server:multi_call(?MODULE, {delete, Name})
                 end),
    ok.

%% Add one membership of Pid to group Name on all nodes. A pid may
%% join several times and must then leave the same number of times.
-spec join(name(), pid()) -> 'ok' | {'error', {'no_such_group', term()}}.

join(Name, Pid) when is_pid(Pid) ->
    ensure_started(),
    case ets:member(pg2_fixed_table, {group, Name}) of
        false ->
            {error, {no_such_group, Name}};
        true ->
            global:trans({{?MODULE, Name}, self()},
                         fun() ->
                                 gen_server:multi_call(?MODULE,
                                                       {join, Name, Pid})
                         end),
            ok
    end.

%% Remove one membership of Pid from group Name on all nodes.
-spec leave(name(), pid()) -> 'ok' | {'error', {'no_such_group', name()}}.

leave(Name, Pid) when is_pid(Pid) ->
    ensure_started(),
    case ets:member(pg2_fixed_table, {group, Name}) of
        false ->
            {error, {no_such_group, Name}};
        true ->
            global:trans({{?MODULE, Name}, self()},
                         fun() ->
                                 gen_server:multi_call(?MODULE,
                                                       {leave, Name, Pid})
                         end),
            ok
    end.
+
-type get_members_ret() :: [pid()] | {'error', {'no_such_group', name()}}.

%% All members of group Name known on this node; a pid that joined N
%% times appears N times in the result.
-spec get_members(name()) -> get_members_ret().

get_members(Name) ->
    ensure_started(),
    case ets:member(pg2_fixed_table, {group, Name}) of
        true ->
            group_members(Name);
        false ->
            {error, {no_such_group, Name}}
    end.

%% Members of group Name that run on the local node.
-spec get_local_members(name()) -> get_members_ret().

get_local_members(Name) ->
    ensure_started(),
    case ets:member(pg2_fixed_table, {group, Name}) of
        true ->
            local_group_members(Name);
        false ->
            {error, {no_such_group, Name}}
    end.

%% Names of all known groups.
-spec which_groups() -> [name()].

which_groups() ->
    ensure_started(),
    all_groups().
+
-type gcp_error_reason() :: {'no_process', term()} | {'no_such_group', term()}.

%% Pick one member of group Name, preferring a local member. When
%% several candidates exist, one is chosen pseudo-randomly using the
%% microsecond component of the current time.
-spec get_closest_pid(term()) -> pid() | {'error', gcp_error_reason()}.

get_closest_pid(Name) ->
    case get_local_members(Name) of
        [Pid] ->
            Pid;
        [] ->
            %% NOTE(review): erlang:now/0 is deprecated in modern
            %% Erlang/OTP (use rand/erlang:monotonic_time); kept here
            %% for compatibility with the old releases this module
            %% targets.
            {_,_,X} = erlang:now(),
            case get_members(Name) of
                [] -> {error, {no_process, Name}};
                Members ->
                    lists:nth((X rem length(Members))+1, Members)
            end;
        Members when is_list(Members) ->
            {_,_,X} = erlang:now(),
            lists:nth((X rem length(Members))+1, Members);
        Else ->
            %% Propagates {error, {no_such_group, Name}} from
            %% get_local_members/1.
            Else
    end.
+
+%%%
+%%% Callback functions from gen_server
+%%%
+
%% All state lives in the named ETS table; the gen_server state
%% record is empty.
-record(state, {}).

-spec init([]) -> {'ok', #state{}}.

init([]) ->
    Ns = nodes(),
    net_kernel:monitor_nodes(true),
    %% Tell each currently known node's server that we exist, and
    %% synthesise a local {nodeup, N} so that handle_info/2 pushes our
    %% membership data to N.
    lists:foreach(fun(N) ->
                          {?MODULE, N} ! {new_pg2_fixed, node()},
                          self() ! {nodeup, N}
                  end, Ns),
    pg2_fixed_table = ets:new(pg2_fixed_table, [ordered_set, protected, named_table]),
    {ok, #state{}}.
+
-type call() :: {'create', name()}
              | {'delete', name()}
              | {'join', name(), pid()}
              | {'leave', name(), pid()}.

-spec handle_call(call(), _, #state{}) ->
        {'reply', 'ok', #state{}}.

handle_call({create, Name}, _From, S) ->
    assure_group(Name),
    {reply, ok, S};
handle_call({join, Name, Pid}, _From, S) ->
    %% Join/leave only apply when this node knows the group; a missing
    %% group is silently ignored (the caller checked on its own node).
    case ets:member(pg2_fixed_table, {group, Name}) of
        true -> join_group(Name, Pid);
        _ -> ok
    end,
    {reply, ok, S};
handle_call({leave, Name, Pid}, _From, S) ->
    case ets:member(pg2_fixed_table, {group, Name}) of
        true -> leave_group(Name, Pid);
        _ -> ok
    end,
    {reply, ok, S};
handle_call({delete, Name}, _From, S) ->
    delete_group(Name),
    {reply, ok, S};
handle_call(Request, From, S) ->
    %% Unexpected calls are logged but not answered, so the caller
    %% blocks until its gen_server:call timeout fires.
    error_logger:warning_msg("The pg2_fixed server received an unexpected message:\n"
                             "handle_call(~p, ~p, _)\n",
                             [Request, From]),
    {noreply, S}.
+
-type all_members() :: [[name(),...]].
-type cast() :: {'exchange', node(), all_members()}
              | {'del_member', name(), pid()}.

-spec handle_cast(cast(), #state{}) -> {'noreply', #state{}}.

%% A remote server sent its full [Name, Members] listing; fold it
%% into our table.
handle_cast({exchange, _Node, List}, S) ->
    store(List),
    {noreply, S};
handle_cast(_, S) ->
    %% Ignore {del_member, Name, Pid}.
    {noreply, S}.

-spec handle_info(tuple(), #state{}) -> {'noreply', #state{}}.

%% A monitored member (or its proxy monitor process) died.
handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S) ->
    member_died(MonitorRef),
    {noreply, S};
%% A node came up (or was faked in init/1): send it everything we know.
handle_info({nodeup, Node}, S) ->
    gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
    {noreply, S};
%% A pg2_fixed server started on Node: same exchange as nodeup.
handle_info({new_pg2_fixed, Node}, S) ->
    gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
    {noreply, S};
handle_info(_, S) ->
    {noreply, S}.
+
-spec terminate(term(), #state{}) -> 'ok'.

%% Drop the bookkeeping table; all group knowledge on this node dies
%% with the server.
terminate(_Reason, _S) ->
    true = ets:delete(pg2_fixed_table),
    ok.
+
+%%%
+%%% Local functions
+%%%
+
+%%% One ETS table, pg2_fixed_table, is used for bookkeeping. The type of the
+%%% table is ordered_set, and the fast matching of partially
+%%% instantiated keys is used extensively.
+%%%
+%%% {{group, Name}}
+%%% Process group Name.
+%%% {{ref, Pid}, RPid, MonitorRef, Counter}
+%%% {{ref, MonitorRef}, Pid}
+%%% Each process has one monitor. Sometimes a process is spawned to
+%%% monitor the pid (RPid). Counter is incremented when the Pid joins
+%%% some group.
+%%% {{member, Name, Pid}, GroupCounter}
+%%% {{local_member, Name, Pid}}
+%%% Pid is a member of group Name, GroupCounter is incremented when the
+%%% Pid joins the group Name.
+%%% {{pid, Pid, Name}}
+%%% Pid is a member of group Name.
+
%% Merge a remote node's [Name, Members] listing into our table: make
%% sure each group exists locally, then add any memberships we are
%% missing (duplicate joins are preserved via the list difference).
store(List) ->
    _ = [case assure_group(Name) of
             true ->
                 [join_group(Name, P) || P <- Members -- group_members(Name)];
             _ ->
                 ok
         end || [Name, Members] <- List],
    ok.

%% Ensure the {group, Name} key exists; returns true either way.
assure_group(Name) ->
    Key = {group, Name},
    ets:member(pg2_fixed_table, Key) orelse true =:= ets:insert(pg2_fixed_table, {Key}).

%% Remove every member of Name, then the group entry itself.
delete_group(Name) ->
    _ = [leave_group(Name, Pid) || Pid <- group_members(Name)],
    true = ets:delete(pg2_fixed_table, {group, Name}),
    ok.

%% Handle the 'DOWN' for a member: remove every occurrence of the pid
%% from every group it had joined.
member_died(Ref) ->
    [{{ref, Ref}, Pid}] = ets:lookup(pg2_fixed_table, {ref, Ref}),
    Names = member_groups(Pid),
    _ = [leave_group(Name, P) ||
            Name <- Names,
            P <- member_in_group(Pid, Name)],
    %% Kept for backward compatibility with links. Can be removed, eventually.
    _ = [gen_server:abcast(nodes(), ?MODULE, {del_member, Name, Pid}) ||
            Name <- Names],
    ok.
+
%% Record one more membership of Pid in Name. The first time a pid is
%% seen at all a monitor is set up (possibly via a proxy process for
%% pids on unknown nodes, see do_monitor/1); the {ref, Pid} counter
%% (field 4) tracks how many memberships keep that monitor alive.
%% update_counter throws badarg when the key is absent, which is used
%% here as the "first time" test.
join_group(Name, Pid) ->
    Ref_Pid = {ref, Pid},
    try _ = ets:update_counter(pg2_fixed_table, Ref_Pid, {4, +1})
    catch _:_ ->
            {RPid, Ref} = do_monitor(Pid),
            true = ets:insert(pg2_fixed_table, {Ref_Pid, RPid, Ref, 1}),
            true = ets:insert(pg2_fixed_table, {{ref, Ref}, Pid})
    end,
    Member_Name_Pid = {member, Name, Pid},
    %% Bump the per-group membership counter; on first join, create
    %% the member, local_member and pid index entries.
    try _ = ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, +1, 1, 1})
    catch _:_ ->
            true = ets:insert(pg2_fixed_table, {Member_Name_Pid, 1}),
            _ = [ets:insert(pg2_fixed_table, {{local_member, Name, Pid}}) ||
                    node(Pid) =:= node()],
            true = ets:insert(pg2_fixed_table, {{pid, Pid, Name}})
    end.

%% Undo one join_group/2: decrement the counters (floored at 0) and,
%% once they hit zero, clean up the index entries and the monitor.
%% Leaving a group one is not a member of is a silent no-op: the
%% update_counter on the missing key throws and is caught.
leave_group(Name, Pid) ->
    Member_Name_Pid = {member, Name, Pid},
    try ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, -1, 0, 0}) of
        N ->
            if
                N =:= 0 ->
                    true = ets:delete(pg2_fixed_table, {pid, Pid, Name}),
                    _ = [ets:delete(pg2_fixed_table, {local_member, Name, Pid}) ||
                            node(Pid) =:= node()],
                    true = ets:delete(pg2_fixed_table, Member_Name_Pid);
                true ->
                    ok
            end,
            Ref_Pid = {ref, Pid},
            case ets:update_counter(pg2_fixed_table, Ref_Pid, {4, -1}) of
                0 ->
                    %% Last membership gone: drop the monitor and any
                    %% proxy monitoring process.
                    [{Ref_Pid,RPid,Ref,0}] = ets:lookup(pg2_fixed_table, Ref_Pid),
                    true = ets:delete(pg2_fixed_table, {ref, Ref}),
                    true = ets:delete(pg2_fixed_table, Ref_Pid),
                    true = erlang:demonitor(Ref, [flush]),
                    kill_monitor_proc(RPid, Pid);
                _ ->
                    ok
            end
    catch _:_ ->
            ok
    end.
+
%% [[Name, Members]] for every group: the payload of an 'exchange'.
all_members() ->
    [[G, group_members(G)] || G <- all_groups()].

%% Members of Name; the membership counter N expands to N copies of
%% the pid.
group_members(Name) ->
    [P ||
        [P, N] <- ets:match(pg2_fixed_table, {{member, Name, '$1'},'$2'}),
        _ <- lists:seq(1, N)].

%% Local-node members of Name, expanded the same way.
local_group_members(Name) ->
    [P ||
        [Pid] <- ets:match(pg2_fixed_table, {{local_member, Name, '$1'}}),
        P <- member_in_group(Pid, Name)].

%% N copies of Pid when Pid is in Name N times, otherwise [].
member_in_group(Pid, Name) ->
    case ets:lookup(pg2_fixed_table, {member, Name, Pid}) of
        [] -> [];
        [{{member, Name, Pid}, N}] ->
            lists:duplicate(N, Pid)
    end.

%% All groups Pid belongs to (from the {pid, Pid, Name} index).
member_groups(Pid) ->
    [Name || [Name] <- ets:match(pg2_fixed_table, {{pid, Pid, '$1'}})].

all_groups() ->
    [N || [N] <- ets:match(pg2_fixed_table, {{group,'$1'}})].
+
%% Start the server under kernel_safe_sup on first use, so callers do
%% not need to have registered it in a supervision tree themselves.
ensure_started() ->
    case whereis(?MODULE) of
        undefined ->
            C = {pg2_fixed, {?MODULE, start_link, []}, permanent,
                 1000, worker, [?MODULE]},
            supervisor:start_child(kernel_safe_sup, C);
        Pg2_FixedPid ->
            {ok, Pg2_FixedPid}
    end.
+
+
%% A proxy monitoring process (see do_monitor/1) only exists when it
%% differs from the monitored pid itself; if the two coincide there
%% is nothing extra to stop, otherwise the proxy is killed outright.
kill_monitor_proc(Pid, Pid) ->
    ok;
kill_monitor_proc(RPid, _Pid) ->
    exit(RPid, kill).
+
%% When/if erlang:monitor() returns before trying to connect to the
%% other node this function can be removed.
%%
%% Monitor Pid without blocking on a node auto-connect: if Pid's node
%% is local or already connected we monitor directly; otherwise a
%% local proxy process owns the (possibly slow) monitor and exits
%% when the target does, and we monitor the proxy instead. Returns
%% {ProcessHoldingTheMonitor, MonitorRef}.
do_monitor(Pid) ->
    case (node(Pid) =:= node()) orelse lists:member(node(Pid), nodes()) of
        true ->
            %% Assume the node is still up
            {Pid, erlang:monitor(process, Pid)};
        false ->
            F = fun() ->
                        Ref = erlang:monitor(process, Pid),
                        receive
                            {'DOWN', Ref, process, Pid, _Info} ->
                                exit(normal)
                        end
                end,
            erlang:spawn_monitor(F)
    end.
--- /dev/null
+%% This file is a copy of pg2.erl from the R13B-3 Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) Process groups are node-local only.
+%%
+%% 2) Groups are created/deleted implicitly.
+%%
+%% 3) 'join' and 'leave' are asynchronous.
+%%
+%% 4) the type specs of the exported non-callback functions have been
+%% extracted into a separate, guarded section, and rewritten in
+%% old-style spec syntax, for better compatibility with older
+%% versions of Erlang/OTP. The remaining type specs have been
+%% removed.
+
+%% All modifications are (C) 2010-2013 GoPivotal, Inc.
+
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(pg_local).
+
+-export([join/2, leave/2, get_members/1]).
+-export([sync/0]). %% intended for testing only; not part of official API
+-export([start/0, start_link/0, init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(name() :: term()).
+
+-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(start/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(join/2 :: (name(), pid()) -> 'ok').
+-spec(leave/2 :: (name(), pid()) -> 'ok').
+-spec(get_members/1 :: (name()) -> [pid()]).
+
+-spec(sync/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%%% As of R13B03 monitors are used instead of links.
+
+%%%
+%%% Exported functions
+%%%
+
%% Start the pg_local server registered locally, linked to the caller.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% Start (or find) the server via kernel_safe_sup.
start() ->
    ensure_started().

%% Asynchronously add one membership of Pid to Name; groups are
%% created implicitly on first join.
join(Name, Pid) when is_pid(Pid) ->
    ensure_started(),
    gen_server:cast(?MODULE, {join, Name, Pid}).

%% Asynchronously remove one membership of Pid from Name.
leave(Name, Pid) when is_pid(Pid) ->
    ensure_started(),
    gen_server:cast(?MODULE, {leave, Name, Pid}).

%% Read members directly from the (protected) ETS table, bypassing
%% the server; a pid joined N times appears N times.
get_members(Name) ->
    ensure_started(),
    group_members(Name).

%% Testing aid: join/leave are asynchronous, so this synchronous call
%% is used to wait until all previously issued casts are processed.
sync() ->
    ensure_started(),
    gen_server:call(?MODULE, sync, infinity).
+
+%%%
+%%% Callback functions from gen_server
+%%%
+
%% All state lives in the named ETS table; the gen_server state
%% record is empty.
-record(state, {}).

init([]) ->
    pg_local_table = ets:new(pg_local_table, [ordered_set, protected, named_table]),
    {ok, #state{}}.
+
%% 'sync' exists only to flush the queue: by the time this call is
%% served, every earlier join/leave cast has been handled.
handle_call(sync, _From, S) ->
    {reply, ok, S};

handle_call(Request, From, S) ->
    %% Unexpected calls are logged but not answered, so the caller
    %% blocks until its gen_server:call timeout fires.
    error_logger:warning_msg("The pg_local server received an unexpected message:\n"
                             "handle_call(~p, ~p, _)\n",
                             [Request, From]),
    {noreply, S}.

handle_cast({join, Name, Pid}, S) ->
    join_group(Name, Pid),
    {noreply, S};
handle_cast({leave, Name, Pid}, S) ->
    leave_group(Name, Pid),
    {noreply, S};
handle_cast(_, S) ->
    {noreply, S}.

%% A monitored member died: purge all its memberships.
handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S) ->
    member_died(MonitorRef),
    {noreply, S};
handle_info(_, S) ->
    {noreply, S}.
+
%% Dropping the table forgets all memberships on this node.
terminate(_Reason, _S) ->
    true = ets:delete(pg_local_table),
    ok.
+
+%%%
+%%% Local functions
+%%%
+
+%%% One ETS table, pg_local_table, is used for bookkeeping. The type of the
+%%% table is ordered_set, and the fast matching of partially
+%%% instantiated keys is used extensively.
+%%%
+%%% {{ref, Pid}, MonitorRef, Counter}
+%%% {{ref, MonitorRef}, Pid}
+%%% Each process has one monitor. Counter is incremented when the
+%%% Pid joins some group.
+%%% {{member, Name, Pid}, _}
+%%% Pid is a member of group Name, GroupCounter is incremented when the
+%%% Pid joins the group Name.
+%%% {{pid, Pid, Name}}
+%%% Pid is a member of group Name.
+
%% Handle the 'DOWN' for a member: remove every membership of the pid
%% behind MonitorRef.
member_died(Ref) ->
    [{{ref, Ref}, Pid}] = ets:lookup(pg_local_table, {ref, Ref}),
    Names = member_groups(Pid),
    _ = [leave_group(Name, P) ||
            Name <- Names,
            P <- member_in_group(Pid, Name)],
    ok.

%% Record one more membership of Pid in Name. First contact with a
%% pid sets up its monitor ({ref, Pid} counter, field 3); first join
%% to a group creates the member and pid index entries.
%% update_counter throws badarg on a missing key, which serves as the
%% "first time" test.
join_group(Name, Pid) ->
    Ref_Pid = {ref, Pid},
    try _ = ets:update_counter(pg_local_table, Ref_Pid, {3, +1})
    catch _:_ ->
            Ref = erlang:monitor(process, Pid),
            true = ets:insert(pg_local_table, {Ref_Pid, Ref, 1}),
            true = ets:insert(pg_local_table, {{ref, Ref}, Pid})
    end,
    Member_Name_Pid = {member, Name, Pid},
    try _ = ets:update_counter(pg_local_table, Member_Name_Pid, {2, +1})
    catch _:_ ->
            true = ets:insert(pg_local_table, {Member_Name_Pid, 1}),
            true = ets:insert(pg_local_table, {{pid, Pid, Name}})
    end.
+
%% Undo one join_group/2: decrement the counters and, once they hit
%% zero, clean up the index entries and the monitor. Leaving a group
%% one is not a member of is a silent no-op: the update_counter on
%% the missing key throws and is caught.
leave_group(Name, Pid) ->
    Member_Name_Pid = {member, Name, Pid},
    try ets:update_counter(pg_local_table, Member_Name_Pid, {2, -1}) of
        N ->
            if
                N =:= 0 ->
                    true = ets:delete(pg_local_table, {pid, Pid, Name}),
                    true = ets:delete(pg_local_table, Member_Name_Pid);
                true ->
                    ok
            end,
            Ref_Pid = {ref, Pid},
            case ets:update_counter(pg_local_table, Ref_Pid, {3, -1}) of
                0 ->
                    %% Last membership gone: drop the monitor.
                    [{Ref_Pid,Ref,0}] = ets:lookup(pg_local_table, Ref_Pid),
                    true = ets:delete(pg_local_table, {ref, Ref}),
                    true = ets:delete(pg_local_table, Ref_Pid),
                    true = erlang:demonitor(Ref, [flush]),
                    ok;
                _ ->
                    ok
            end
    catch _:_ ->
            ok
    end.
+
%% Members of Name; the membership counter N expands to N copies of
%% the pid.
group_members(Name) ->
    [P ||
        [P, N] <- ets:match(pg_local_table, {{member, Name, '$1'},'$2'}),
        _ <- lists:seq(1, N)].

%% N copies of Pid when Pid is in Name N times, otherwise [].
%% Previously this crashed with a badmatch when the member entry was
%% absent; it now returns [], consistent with the sibling
%% pg2_fixed:member_in_group/2.
member_in_group(Pid, Name) ->
    case ets:lookup(pg_local_table, {member, Name, Pid}) of
        []                         -> [];
        [{{member, Name, Pid}, N}] -> lists:duplicate(N, Pid)
    end.

%% All groups Pid belongs to (from the {pid, Pid, Name} index).
member_groups(Pid) ->
    [Name || [Name] <- ets:match(pg_local_table, {{pid, Pid, '$1'}})].
+
%% Install the server under kernel_safe_sup on first use. The
%% shutdown value 16#ffffffff gives terminate/2 effectively unbounded
%% time to run.
ensure_started() ->
    case whereis(?MODULE) of
        undefined ->
            C = {pg_local, {?MODULE, start_link, []}, permanent,
                 16#ffffffff, worker, [?MODULE]},
            supervisor:start_child(kernel_safe_sup, C);
        PgLocalPid ->
            {ok, PgLocalPid}
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(pmon).
+
+-export([new/0, new/1, monitor/2, monitor_all/2, demonitor/2,
+ is_monitored/2, erase/2, monitored/1, is_empty/1]).
+
+-compile({no_auto_import, [monitor/2]}).
+
+-record(state, {dict, module}).
+
+-ifdef(use_specs).
+
+%%----------------------------------------------------------------------------
+
+-export_type([?MODULE/0]).
+
+-opaque(?MODULE() :: #state{dict :: dict(),
+ module :: atom()}).
+
+-type(item() :: pid() | {atom(), node()}).
+
+-spec(new/0 :: () -> ?MODULE()).
+-spec(new/1 :: ('erlang' | 'delegate') -> ?MODULE()).
+-spec(monitor/2 :: (item(), ?MODULE()) -> ?MODULE()).
+-spec(monitor_all/2 :: ([item()], ?MODULE()) -> ?MODULE()).
+-spec(demonitor/2 :: (item(), ?MODULE()) -> ?MODULE()).
+-spec(is_monitored/2 :: (item(), ?MODULE()) -> boolean()).
+-spec(erase/2 :: (item(), ?MODULE()) -> ?MODULE()).
+-spec(monitored/1 :: (?MODULE()) -> [item()]).
+-spec(is_empty/1 :: (?MODULE()) -> boolean()).
+
+-endif.
+
%% A pmon using plain erlang monitors.
new() -> new(erlang).

%% Module is 'erlang' or 'delegate'; it must provide monitor/2 and
%% demonitor/1 with erlang-compatible semantics.
new(Module) -> #state{dict = dict:new(),
                      module = Module}.
+
%% Monitor Item unless it already is monitored. If Item's node is
%% known to be down, skip the monitor and immediately deliver a
%% synthetic 'DOWN' (with fake_ref) to ourselves instead; the state
%% is then returned unchanged.
monitor(Item, S = #state{dict = M, module = Module}) ->
    case dict:is_key(Item, M) of
        true  -> S;
        false -> case node_alive_shortcut(Item) of
                     true  -> Ref = Module:monitor(process, Item),
                              S#state{dict = dict:store(Item, Ref, M)};
                     false -> self() ! {'DOWN', fake_ref, process, Item,
                                        nodedown},
                              S
                 end
    end.
+
%% Monitor every item in the list; the [] and single-item clauses
%% avoid the foldl machinery.
monitor_all([], S)     -> S;                %% optimisation
monitor_all([Item], S) -> monitor(Item, S); %% optimisation
monitor_all(Items, S)  -> lists:foldl(fun monitor/2, S, Items).
+
%% Stop monitoring Item and forget it. Demonitoring an item that is
%% not monitored leaves the state unchanged.
demonitor(Item, S = #state{dict = M, module = Module}) ->
    case dict:find(Item, M) of
        {ok, MRef} -> Module:demonitor(MRef),
                      S#state{dict = dict:erase(Item, M)};
        %% BUG FIX: this branch previously returned the bare dict M
        %% rather than the state record, corrupting the opaque pmon
        %% state (and violating the -spec) whenever an unmonitored
        %% item was demonitored.
        error      -> S
    end.
+
%% True if Item is currently monitored.
is_monitored(Item, #state{dict = M}) -> dict:is_key(Item, M).

%% Forget Item without demonitoring (e.g. after its 'DOWN' arrived).
erase(Item, S = #state{dict = M}) -> S#state{dict = dict:erase(Item, M)}.

%% All currently monitored items.
monitored(#state{dict = M}) -> dict:fetch_keys(M).

%% True when nothing is being monitored.
is_empty(#state{dict = M}) -> dict:size(M) == 0.
+
+%%----------------------------------------------------------------------------
+
+%% We check here to see if the node is alive in order to avoid trying
+%% to connect to it if it isn't - this can cause substantial
+%% slowdowns. We can't perform this shortcut if passed {Name, Node}
+%% since we would need to convert that into a pid for the fake 'DOWN'
+%% message, so we always return true here - but that's OK, it's just
+%% an optimisation.
%% For a pid we can cheaply check that its node is local or already
%% connected. For a {Name, Node} item we cannot synthesise the fake
%% 'DOWN' message (no pid to put in it), so we always answer true -
%% that merely skips an optimisation, never breaks correctness.
node_alive_shortcut({_Name, _Node}) ->
    true;
node_alive_shortcut(Pid) when is_pid(Pid) ->
    lists:member(node(Pid), [node() | nodes()]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Priority queues have essentially the same interface as ordinary
+%% queues, except that a) there is an in/3 that takes a priority, and
+%% b) we have only implemented the core API we need.
+%%
+%% Priorities should be integers - the higher the value the higher the
+%% priority - but we don't actually check that.
+%%
+%% in/2 inserts items with priority 0.
+%%
+%% We optimise the case where a priority queue is being used just like
+%% an ordinary queue. When that is the case we represent the priority
+%% queue as an ordinary queue. We could just call into the 'queue'
+%% module for that, but for efficiency we implement the relevant
+%% functions directly in here, thus saving on inter-module calls and
+%% eliminating a level of boxing.
+%%
+%% When the queue contains items with non-zero priorities, it is
+%% represented as a sorted kv list with the inverted Priority as the
+%% key and an ordinary queue as the value. Here again we use our own
%% ordinary queue implementation for efficiency, often making recursive
+%% calls into the same function knowing that ordinary queues represent
+%% a base case.
+
+
+-module(priority_queue).
+
+-export([new/0, is_queue/1, is_empty/1, len/1, to_list/1, from_list/1,
+ in/2, in/3, out/1, out_p/1, join/2, filter/2, fold/3, highest/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([q/0]).
+
+-type(q() :: pqueue()).
+-type(priority() :: integer() | 'infinity').
+-type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
+-type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
+
+-spec(new/0 :: () -> pqueue()).
+-spec(is_queue/1 :: (any()) -> boolean()).
+-spec(is_empty/1 :: (pqueue()) -> boolean()).
+-spec(len/1 :: (pqueue()) -> non_neg_integer()).
+-spec(to_list/1 :: (pqueue()) -> [{priority(), any()}]).
+-spec(from_list/1 :: ([{priority(), any()}]) -> pqueue()).
+-spec(in/2 :: (any(), pqueue()) -> pqueue()).
+-spec(in/3 :: (any(), priority(), pqueue()) -> pqueue()).
+-spec(out/1 :: (pqueue()) -> {empty | {value, any()}, pqueue()}).
+-spec(out_p/1 :: (pqueue()) -> {empty | {value, any(), priority()}, pqueue()}).
+-spec(join/2 :: (pqueue(), pqueue()) -> pqueue()).
+-spec(filter/2 :: (fun ((any()) -> boolean()), pqueue()) -> pqueue()).
+-spec(fold/3 ::
+ (fun ((any(), priority(), A) -> A), A, pqueue()) -> A).
+-spec(highest/1 :: (pqueue()) -> priority() | 'empty').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% The empty priority queue, represented as a plain (implicit
%% priority 0) queue: {queue, RearList, FrontList, Length}.
new() ->
    {queue, [], [], 0}.

%% Structural validity check for either representation.
is_queue({queue, Rear, Front, Len}) when is_list(Rear), is_list(Front),
                                         is_integer(Len) ->
    true;
is_queue({pqueue, Entries}) when is_list(Entries) ->
    %% Every entry must be {Priority, PlainQueue} with an integer or
    %% 'infinity' priority key.
    lists:all(fun ({infinity, Q}) -> is_queue(Q);
                  ({P, Q})        -> is_integer(P) andalso is_queue(Q)
              end, Entries);
is_queue(_) ->
    false.

%% Only the canonical empty plain queue counts as empty.
is_empty(Q) ->
    Q =:= {queue, [], [], 0}.
+
%% Number of items in the queue; O(#priorities) for a pqueue since
%% each plain sub-queue carries its own length.
len({queue, _Rear, _Front, Len}) ->
    Len;
len({pqueue, Entries}) ->
    lists:foldl(fun ({_P, Q}, Acc) -> Acc + len(Q) end, 0, Entries).
+
%% Flatten to [{Priority, Item}] in dequeue order. Plain queues carry
%% implicit priority 0; pqueue keys are un-negated on the way out.
to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
    lists:map(fun (V) -> {0, V} end, Out ++ lists:reverse(In, []));
to_list({pqueue, Queues}) ->
    lists:append([[{maybe_negate_priority(P), V} || {0, V} <- to_list(Q)]
                  || {P, Q} <- Queues]).

%% Build a priority queue from a [{Priority, Item}] list.
from_list(PrioritisedItems) ->
    lists:foldl(fun ({Priority, Item}, Acc) -> in(Item, Priority, Acc) end,
                new(), PrioritisedItems).
+
%% Insert with priority 0.
in(Item, Q) ->
    in(Item, 0, Q).

%% Insert with an explicit priority. Priorities are stored negated so
%% that ascending keysort order yields descending priority; the
%% 'infinity' key always sits at the head of the entry list.
in(X, 0, {queue, [_] = In, [], 1}) ->
    {queue, [X], In, 2};
in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
    {queue, [X|In], Out, Len + 1};
%% A non-zero priority forces the pqueue representation.
in(X, Priority, _Q = {queue, [], [], 0}) ->
    in(X, Priority, {pqueue, []});
in(X, Priority, Q = {queue, _, _, _}) ->
    in(X, Priority, {pqueue, [{0, Q}]});
in(X, Priority, {pqueue, Queues}) ->
    P = maybe_negate_priority(Priority),
    {pqueue, case lists:keysearch(P, 1, Queues) of
                 {value, {_, Q}} ->
                     lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
                 false when P == infinity ->
                     [{P, {queue, [X], [], 1}} | Queues];
                 false ->
                     %% Keep any infinity entry at the head; sort the
                     %% remaining entries by (negated) priority.
                     case Queues of
                         [{infinity, InfQueue} | Queues1] ->
                             [{infinity, InfQueue} |
                              lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
                         _ ->
                             lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
                     end
             end}.
+
%% Remove the oldest item of the highest priority. Returns
%% {empty, Q} or {{value, V}, NewQ}; a pqueue collapses back to a
%% plain queue when only the priority-0 sub-queue remains.
out({queue, [], [], 0} = Q) ->
    {empty, Q};
out({queue, [V], [], 1}) ->
    {{value, V}, {queue, [], [], 0}};
out({queue, [Y|In], [], Len}) ->
    %% Front list exhausted: rotate the rear list to the front,
    %% keeping the most recent insertion (Y) in the rear.
    [V|Out] = lists:reverse(In, []),
    {{value, V}, {queue, [Y], Out, Len - 1}};
out({queue, In, [V], Len}) when is_list(In) ->
    {{value,V}, r2f(In, Len - 1)};
out({queue, In,[V|Out], Len}) when is_list(In) ->
    {{value, V}, {queue, In, Out, Len - 1}};
out({pqueue, [{P, Q} | Queues]}) ->
    %% The head entry always holds the highest priority.
    {R, Q1} = out(Q),
    NewQ = case is_empty(Q1) of
               true -> case Queues of
                           [] -> {queue, [], [], 0};
                           [{0, OnlyQ}] -> OnlyQ;
                           [_|_] -> {pqueue, Queues}
                       end;
               false -> {pqueue, [{P, Q1} | Queues]}
           end,
    {R, NewQ}.
+
%% Like out/1 but also reports the priority of the removed item.
out_p({queue, _, _, _} = Q)       -> add_p(out(Q), 0);
out_p({pqueue, [{P, _} | _]} = Q) -> add_p(out(Q), maybe_negate_priority(P)).

%% Tag a successful out/1 result with its priority.
add_p({empty, Q}, _Priority)     -> {empty, Q};
add_p({{value, V}, Q}, Priority) -> {{value, V, Priority}, Q}.
+
%% Concatenate two priority queues: items of the first come out
%% before equal-priority items of the second.
join(Q, {queue, [], [], 0}) ->
    Q;
join({queue, [], [], 0}, Q) ->
    Q;
join({queue, InA, OutA, LenA}, {queue, InB, OutB, LenB}) ->
    %% Splice A in front of B within a single plain queue.
    {queue, InB, OutA ++ lists:reverse(InA, OutB), LenA + LenB};
join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
    %% Entries with key < 0 (i.e. priority > 0) or 'infinity' outrank
    %% the plain queue A, whose implicit priority is 0; A merges into
    %% (or is inserted at) the key-0 slot.
    {Higher, Rest} =
        lists:splitwith(fun ({K, _}) -> K < 0 orelse K == infinity end, BPQ),
    Tail = case Rest of
               []                    -> [{0, A}];
               [{0, ZeroQ} | Others] -> [{0, join(A, ZeroQ)} | Others];
               _                     -> [{0, A} | Rest]
           end,
    {pqueue, Higher ++ Tail};
join({pqueue, APQ}, B = {queue, _, _, _}) ->
    {Higher, Rest} =
        lists:splitwith(fun ({K, _}) -> K < 0 orelse K == infinity end, APQ),
    Tail = case Rest of
               []                    -> [{0, B}];
               [{0, ZeroQ} | Others] -> [{0, join(ZeroQ, B)} | Others];
               _                     -> [{0, B} | Rest]
           end,
    {pqueue, Higher ++ Tail};
join({pqueue, APQ}, {pqueue, BPQ}) ->
    {pqueue, merge(APQ, BPQ, [])}.

%% Merge two sorted pqueue entry lists, joining sub-queues that share
%% a priority key; 'infinity' keys sort first.
merge([], BPQ, Acc) ->
    lists:reverse(Acc, BPQ);
merge(APQ, [], Acc) ->
    lists:reverse(Acc, APQ);
merge([{P, A}|As], [{P, B}|Bs], Acc) ->
    merge(As, Bs, [ {P, join(A, B)} | Acc ]);
merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
    merge(As, Bs, [ {PA, A} | Acc ]);
merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
    merge(As, Bs, [ {PB, B} | Acc ]).
+
%% Keep only the items for which Pred returns true, preserving each
%% item's priority.
filter(Pred, Q) ->
    fold(fun (Item, Priority, Acc) ->
                 case Pred(Item) of
                     true  -> in(Item, Priority, Acc);
                     false -> Acc
                 end
         end, new(), Q).

%% Left fold in dequeue order; Fun receives (Item, Priority, Acc).
fold(Fun, Acc0, Q) ->
    case out_p(Q) of
        {empty, _}             -> Acc0;
        {{value, Item, P}, Q1} -> fold(Fun, Fun(Item, P, Acc0), Q1)
    end.
+
%% Highest priority present in the queue, or 'empty'. A pqueue's head
%% entry always carries the highest priority.
highest({pqueue, [{P, _} | _]}) -> maybe_negate_priority(P);
highest({queue, [], [], 0})     -> empty;
highest({queue, _, _, _})       -> 0.

%% Rebuild a plain queue from a rear list of known length (used after
%% out/1 drains the front list).
r2f([], 0)         -> {queue, [], [], 0};
r2f([_] = R, 1)    -> {queue, [], R, 1};
r2f([X, Y], 2)     -> {queue, [X], [Y], 2};
r2f([X, Y | R], L) -> {queue, [X, Y], lists:reverse(R, []), L}.

%% Priorities are stored negated so that ascending sort order means
%% descending priority; 'infinity' is its own "negation".
maybe_negate_priority(P) ->
    case P of
        infinity -> infinity;
        _        -> -P
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit).
+
+-behaviour(application).
+
+-export([start/0, boot/0, stop/0,
+ stop_and_halt/0, await_startup/0, status/0, is_running/0,
+ is_running/1, environment/0, rotate_logs/1, force_event_refresh/1,
+ start_fhc/0]).
+
+-export([start/2, stop/1]).
+
+-export([log_location/1]). %% for testing
+
+%%---------------------------------------------------------------------------
+%% Boot steps.
+-export([maybe_insert_default_data/0, boot_delegate/0, recover/0]).
+
+-rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}).
+
+-rabbit_boot_step({codec_correctness_check,
+ [{description, "codec correctness check"},
+ {mfa, {rabbit_binary_generator,
+ check_empty_frame_size,
+ []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({database,
+ [{mfa, {rabbit_mnesia, init, []}},
+ {requires, file_handle_cache},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({database_sync,
+ [{description, "database sync"},
+ {mfa, {rabbit_sup, start_child, [mnesia_sync]}},
+ {requires, database},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({file_handle_cache,
+ [{description, "file handle cache server"},
+ {mfa, {rabbit, start_fhc, []}},
+ {requires, pre_boot},
+ {enables, worker_pool}]}).
+
+-rabbit_boot_step({worker_pool,
+ [{description, "worker pool"},
+ {mfa, {rabbit_sup, start_supervisor_child,
+ [worker_pool_sup]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({external_infrastructure,
+ [{description, "external infrastructure ready"}]}).
+
+-rabbit_boot_step({rabbit_registry,
+ [{description, "plugin registry"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_registry]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({rabbit_log,
+ [{description, "logging server"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_log]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({rabbit_event,
+ [{description, "statistics event manager"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_event]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({kernel_ready,
+ [{description, "kernel ready"},
+ {requires, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_alarm,
+ [{description, "alarm handler"},
+ {mfa, {rabbit_alarm, start, []}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_memory_monitor,
+ [{description, "memory monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_memory_monitor]}},
+ {requires, rabbit_alarm},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({guid_generator,
+ [{description, "guid generator"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_guid]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({delegate_sup,
+ [{description, "cluster delegate"},
+ {mfa, {rabbit, boot_delegate, []}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_node_monitor,
+ [{description, "node monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_node_monitor]}},
+ {requires, rabbit_alarm},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({core_initialized,
+ [{description, "core initialized"},
+ {requires, kernel_ready}]}).
+
+-rabbit_boot_step({empty_db_check,
+ [{description, "empty DB check"},
+ {mfa, {?MODULE, maybe_insert_default_data, []}},
+ {requires, core_initialized},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({recovery,
+ [{description, "exchange, queue and binding recovery"},
+ {mfa, {rabbit, recover, []}},
+ {requires, core_initialized},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({mirror_queue_slave_sup,
+ [{description, "mirror queue slave sup"},
+ {mfa, {rabbit_sup, start_supervisor_child,
+ [rabbit_mirror_queue_slave_sup]}},
+ {requires, recovery},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({mirrored_queues,
+ [{description, "adding mirrors to queues"},
+ {mfa, {rabbit_mirror_queue_misc, on_node_up, []}},
+ {requires, mirror_queue_slave_sup},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({routing_ready,
+ [{description, "message delivery logic ready"},
+ {requires, core_initialized}]}).
+
+-rabbit_boot_step({log_relay,
+ [{description, "error log relay"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_error_logger_lifecycle,
+ supervised_lifecycle,
+ [rabbit_error_logger_lifecycle,
+ {rabbit_error_logger, start, []},
+ {rabbit_error_logger, stop, []}]]}},
+ {requires, routing_ready},
+ {enables, networking}]}).
+
+-rabbit_boot_step({direct_client,
+ [{description, "direct client"},
+ {mfa, {rabbit_direct, boot, []}},
+ {requires, log_relay}]}).
+
+-rabbit_boot_step({networking,
+ [{mfa, {rabbit_networking, boot, []}},
+ {requires, log_relay}]}).
+
+-rabbit_boot_step({notify_cluster,
+ [{description, "notify cluster nodes"},
+ {mfa, {rabbit_node_monitor, notify_node_up, []}},
+ {requires, networking}]}).
+
+-rabbit_boot_step({background_gc,
+ [{description, "background garbage collection"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [background_gc]}},
+ {enables, networking}]}).
+
+%%---------------------------------------------------------------------------
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-define(APPS, [os_mon, mnesia, rabbit]).
+
+%% HiPE compilation uses multiple cores anyway, but some bits are
+%% IO-bound so we can go faster if we parallelise a bit more. In
+%% practice 2 processes seems just as fast as any other number > 1,
+%% and keeps the progress bar realistic-ish.
+-define(HIPE_PROCESSES, 2).
+
+%%----------------------------------------------------------------------------
+
-ifdef(use_specs).

%% Dialyzer types and specs for the public API (old-style -spec
%% attributes, conditionally compiled via the 'use_specs' macro).

-type(file_suffix() :: binary()).
%% this really should be an abstract type
-type(log_location() :: 'tty' | 'undefined' | file:filename()).
-type(param() :: atom()).

-spec(start/0 :: () -> 'ok').
-spec(boot/0 :: () -> 'ok').
-spec(stop/0 :: () -> 'ok').
-spec(stop_and_halt/0 :: () -> no_return()).
-spec(await_startup/0 :: () -> 'ok').
-spec(status/0 ::
        () -> [{pid, integer()} |
               {running_applications, [{atom(), string(), string()}]} |
               {os, {atom(), atom()}} |
               {erlang_version, string()} |
               {memory, any()}]).
-spec(is_running/0 :: () -> boolean()).
-spec(is_running/1 :: (node()) -> boolean()).
-spec(environment/0 :: () -> [{param(), term()}]).
-spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())).
-spec(force_event_refresh/1 :: (reference()) -> 'ok').

-spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()).

%% application behaviour callbacks
-spec(start/2 :: ('normal',[]) ->
          {'error',
           {'erlang_version_too_old',
            {'found',[any()]},
            {'required',[any(),...]}}} |
          {'ok',pid()}).
-spec(stop/1 :: (_) -> 'ok').

%% boot-step functions
-spec(maybe_insert_default_data/0 :: () -> 'ok').
-spec(boot_delegate/0 :: () -> 'ok').
-spec(recover/0 :: () -> 'ok').

-endif.
+
+%%----------------------------------------------------------------------------
+
+%% HiPE compilation happens before we have log handlers - so we have
+%% to io:format/2, it's all we can do.
+
%% Decide whether to HiPE-compile and, if wanted and possible, do it.
%% Returns true when compilation ran or was not requested, false when it
%% was requested but HiPE is absent from this Erlang installation; the
%% result is fed to warn_if_hipe_compilation_failed/1.
maybe_hipe_compile() ->
    {ok, Want} = application:get_env(rabbit, hipe_compile),
    Can = code:which(hipe) =/= non_existing,
    case {Want, Can} of
        {true,  true}  -> hipe_compile(),
                          true;
        {true,  false} -> false;
        {false, _}     -> true
    end.
+
%% Log a warning when HiPE compilation was requested but could not be
%% performed (see maybe_hipe_compile/0); 'true' means there is nothing
%% to report.
warn_if_hipe_compilation_failed(false) ->
    error_logger:warning_msg(
      "Not HiPE compiling: HiPE not found in this Erlang installation.~n");
warn_if_hipe_compilation_failed(true) ->
    ok.
+
%% HiPE compilation happens before we have log handlers and can take a
%% long time, so make an exception to our no-stdout policy and display
%% progress via stdout.
%%
%% Modules are distributed round-robin over ?HIPE_PROCESSES worker
%% processes (see split/2); each worker prints one '#' per compiled
%% module so the bar drawn up-front fills in. A worker failure is
%% re-raised in the parent via its monitor 'DOWN' reason.
hipe_compile() ->
    {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules),
    %% Only attempt modules that are actually present in this install.
    HipeModules = [HM || HM <- HipeModulesAll, code:which(HM) =/= non_existing],
    Count = length(HipeModules),
    io:format("~nHiPE compiling: |~s|~n |",
              [string:copies("-", Count)]),
    %% os:timestamp/0 rather than the deprecated erlang:now/0: we only
    %% need wall-clock pairs for timer:now_diff/2, not the unique
    %% monotonically-increasing values erlang:now/0 has to synthesise.
    T1 = os:timestamp(),
    PidMRefs = [spawn_monitor(fun () -> [begin
                                             {ok, M} = hipe:c(M, [o3]),
                                             io:format("#")
                                         end || M <- Ms]
                              end) ||
                   Ms <- split(HipeModules, ?HIPE_PROCESSES)],
    [receive
         {'DOWN', MRef, process, _, normal} -> ok;
         {'DOWN', MRef, process, _, Reason} -> exit(Reason)
     end || {_Pid, MRef} <- PidMRefs],
    T2 = os:timestamp(),
    io:format("|~n~nCompiled ~B modules in ~Bs~n",
              [Count, timer:now_diff(T2, T1) div 1000000]).
+
%% Distribute the items of a list round-robin over N buckets. Each item
%% is consed onto the head of the bucket currently at the front, and the
%% updated bucket is rotated to the back, so bucket contents come out in
%% reverse arrival order (which is fine for our only caller,
%% hipe_compile/0).
split(Items, N) ->
    EmptyBuckets = lists:duplicate(N, []),
    lists:foldl(fun (Item, [Bucket | Buckets]) ->
                        Buckets ++ [[Item | Bucket]]
                end, EmptyBuckets, Items).
+
ensure_application_loaded() ->
    %% We end up looking at the rabbit app's env for HiPE and log
    %% handling, so it needs to be loaded. But during the tests, it
    %% may end up getting loaded twice, so guard against that.
    %% Any other load error falls through as a case_clause crash.
    case application:load(rabbit) of
        ok                                -> ok;
        {error, {already_loaded, rabbit}} -> ok
    end.
+
%% Restart the broker on an already-initialised node: unlike boot/0 this
%% skips HiPE compilation, mnesia upgrade and plugin setup, and starts
%% the previously active applications. Serialised via start_it/1.
start() ->
    start_it(fun() ->
                     %% We do not want to HiPE compile or upgrade
                     %% mnesia after just restarting the app
                     ok = ensure_application_loaded(),
                     ok = ensure_working_log_handlers(),
                     rabbit_node_monitor:prepare_cluster_status_files(),
                     rabbit_mnesia:check_cluster_consistency(),
                     ok = app_utils:start_applications(
                            app_startup_order(), fun handle_app_error/2),
                     ok = log_broker_started(rabbit_plugins:active())
             end).
+
%% Full cold boot: HiPE compilation, mnesia schema upgrade, cluster
%% consistency check, plugin discovery, then start all applications in
%% dependency order. Serialised via start_it/1.
boot() ->
    start_it(fun() ->
                     ok = ensure_application_loaded(),
                     Success = maybe_hipe_compile(),
                     ok = ensure_working_log_handlers(),
                     %% warn only after log handlers are in place
                     warn_if_hipe_compilation_failed(Success),
                     rabbit_node_monitor:prepare_cluster_status_files(),
                     ok = rabbit_upgrade:maybe_upgrade_mnesia(),
                     %% It's important that the consistency check happens after
                     %% the upgrade, since if we are a secondary node the
                     %% primary node will have forgotten us
                     rabbit_mnesia:check_cluster_consistency(),
                     Plugins = rabbit_plugins:setup(),
                     ToBeLoaded = Plugins ++ ?APPS,
                     ok = app_utils:load_applications(ToBeLoaded),
                     StartupApps = app_utils:app_dependency_order(ToBeLoaded,
                                                                  false),
                     ok = app_utils:start_applications(
                            StartupApps, fun handle_app_error/2),
                     ok = log_broker_started(Plugins)
             end).
+
%% Error callback for app_utils:start_applications/2: convert any
%% application start failure into throw({could_not_start, App, Reason}),
%% unwrapping the root cause from a {bad_return, {_, {'EXIT', ...}}}
%% wrapper when present.
handle_app_error(App, Failure) ->
    RootCause = case Failure of
                    {bad_return, {_MFA, {'EXIT', {Reason, _}}}} -> Reason;
                    Other                                       -> Other
                end,
    throw({could_not_start, App, RootCause}).
+
%% Serialise concurrent boot/start attempts. A linked marker process is
%% registered under 'rabbit_boot'; only the caller winning that
%% registration runs StartFun (and only if rabbit is not already
%% running). Losers just tear their marker down. Failures are routed
%% through boot_error/2, which does not return.
start_it(StartFun) ->
    Marker = spawn_link(fun() -> receive stop -> ok end end),
    %% register/2 badargs if the name is taken, hence the catch.
    case catch register(rabbit_boot, Marker) of
        true -> try
                    case is_running() of
                        true  -> ok;
                        false -> StartFun()
                    end
                catch
                    throw:{could_not_start, _App, _Reason}=Err ->
                        boot_error(Err, not_available);
                    _:Reason ->
                        boot_error(Reason, erlang:get_stacktrace())
                after
                    %% stopping the marker releases the registration
                    unlink(Marker),
                    Marker ! stop,
                    %% give the error loggers some time to catch up
                    timer:sleep(100)
                end;
        _    -> unlink(Marker),
                Marker ! stop
    end.
+
%% Stop the broker applications in shutdown order. If a boot is in
%% flight (rabbit_boot is registered), wait for it to finish first so we
%% do not stop apps out from under the boot sequence.
stop() ->
    case whereis(rabbit_boot) of
        undefined -> ok;
        _         -> await_startup()
    end,
    rabbit_log:info("Stopping RabbitMQ~n"),
    ok = app_utils:stop_applications(app_shutdown_order()).
+
%% Stop the broker and then halt the VM, halting even if stop/0 failed
%% (hence the 'after' clause).
stop_and_halt() ->
    try
        stop()
    after
        rabbit_misc:local_info_msg("Halting Erlang VM~n", []),
        init:stop()
    end,
    ok.

%% Block until all applications in the startup order are running.
await_startup() ->
    app_utils:wait_for_applications(app_startup_order()).
+
%% Build the proplist shown by `rabbitmqctl status'. Gathered in four
%% groups: always-available basics (S1), subsystems that may not be
%% running and whose exits are filtered out (S2), file-descriptor stats
%% guarded by an exit handler (S3), and VM statistics (S4).
status() ->
    S1 = [{pid,                  list_to_integer(os:getpid())},
          {running_applications, rabbit_misc:which_applications()},
          {os,                   os:type()},
          {erlang_version,       erlang:system_info(system_version)},
          {memory,               rabbit_vm:memory()},
          {alarms,               alarms()},
          {listeners,            listeners()}],
    S2 = rabbit_misc:filter_exit_map(
           fun ({Key, {M, F, A}}) -> {Key, erlang:apply(M, F, A)} end,
           [{vm_memory_high_watermark, {vm_memory_monitor,
                                        get_vm_memory_high_watermark, []}},
            {vm_memory_limit,          {vm_memory_monitor,
                                        get_memory_limit, []}},
            {disk_free_limit,          {rabbit_disk_monitor,
                                        get_disk_free_limit, []}},
            {disk_free,                {rabbit_disk_monitor,
                                        get_disk_free, []}}]),
    S3 = rabbit_misc:with_exit_handler(
           fun () -> [] end,
           fun () -> [{file_descriptors, file_handle_cache:info()}] end),
    S4 = [{processes, [{limit, erlang:system_info(process_limit)},
                       {used, erlang:system_info(process_count)}]},
          {run_queue, erlang:statistics(run_queue)},
          %% wall_clock since VM start, reported in seconds
          {uptime,    begin
                          {T,_} = erlang:statistics(wall_clock),
                          T div 1000
                      end}],
    S1 ++ S2 ++ S3 ++ S4.
+
%% Resource-limit alarms raised by THIS node, as a list of sources
%% (e.g. memory, disk). Returns [] if rabbit_alarm is not running.
alarms() ->
    Alarms = rabbit_misc:with_exit_handler(rabbit_misc:const([]),
                                           fun rabbit_alarm:get_alarms/0),
    N = node(),
    %% [{{resource_limit,memory,rabbit@mercurio},[]}]
    [Limit || {{resource_limit, Limit, Node}, _} <- Alarms, Node =:= N].
+
%% Active network listeners on this node as {Protocol, Port, IPString}.
%% Returns [] when the mnesia tables are unavailable (aborted exit).
listeners() ->
    Listeners = try
                    rabbit_networking:active_listeners()
                catch
                    exit:{aborted, _} -> []
                end,
    [{Protocol, Port, rabbit_misc:ntoa(IP)} ||
        #listener{node       = Node,
                  protocol   = Protocol,
                  ip_address = IP,
                  port       = Port} <- Listeners, Node =:= node()].
+
%% Is the rabbit application process running locally / on Node?
is_running() -> is_running(node()).

is_running(Node) -> rabbit_nodes:is_process_running(Node, rabbit).

%% The rabbit application environment, sorted by key, with the default
%% password redacted.
environment() ->
    lists:keysort(1, [P || P = {K, _} <- application:get_all_env(rabbit),
                           K =/= default_pass]).
+
%% Rotate both the main (kernel) and SASL logs, renaming the old files
%% with Suffix; the per-log results are merged by log_rotation_result/2.
rotate_logs(BinarySuffix) ->
    Suffix = binary_to_list(BinarySuffix),
    rabbit_misc:local_info_msg("Rotating logs with suffix '~s'~n", [Suffix]),
    log_rotation_result(rotate_logs(log_location(kernel),
                                    Suffix,
                                    rabbit_error_logger_file_h),
                        rotate_logs(log_location(sasl),
                                    Suffix,
                                    rabbit_sasl_report_file_h)).
+
+%%--------------------------------------------------------------------
+
%% application behaviour callback: start the top-level supervisor,
%% register the 'rabbit' name, then run every boot step in dependency
%% order. Refuses to start on an ERTS older than ?ERTS_MINIMUM.
start(normal, []) ->
    case erts_version_check() of
        ok ->
            {ok, Vsn} = application:get_key(rabbit, vsn),
            error_logger:info_msg("Starting RabbitMQ ~s on Erlang ~s~n~s~n~s~n",
                                  [Vsn, erlang:system_info(otp_release),
                                   ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]),
            {ok, SupPid} = rabbit_sup:start_link(),
            true = register(rabbit, self()),
            print_banner(),
            log_banner(),
            [ok = run_boot_step(Step) || Step <- boot_steps()],
            {ok, SupPid};
        Error ->
            Error
    end.
+
%% application behaviour callback, run after the supervision tree is
%% down: stop alarms, then either tell the cluster our queues are gone
%% (clustered) or clear ram-only tables (standalone).
stop(_State) ->
    ok = rabbit_alarm:stop(),
    ok = case rabbit_mnesia:is_clustered() of
             true  -> rabbit_amqqueue:on_node_down(node());
             false -> rabbit_table:clear_ram_only_tables()
         end,
    ok.
+
+%%---------------------------------------------------------------------------
+%% application life cycle
+
%% Dependency-sorted application list for startup (core ?APPS only).
app_startup_order() ->
    ok = app_utils:load_applications(?APPS),
    app_utils:app_dependency_order(?APPS, false).

%% Reverse dependency order for shutdown, including active plugins.
app_shutdown_order() ->
    Apps = ?APPS ++ rabbit_plugins:active(),
    app_utils:app_dependency_order(Apps, true).
+
+%%---------------------------------------------------------------------------
+%% boot step logic
+
%% Execute one boot step: apply every {mfa, {M,F,A}} it declares (steps
%% with none are pure synchronisation points). An {error, Reason} return
%% or an exception aborts the boot via boot_error/2.
run_boot_step({_StepName, Attributes}) ->
    case [MFA || {mfa, MFA} <- Attributes] of
        [] ->
            ok;
        MFAs ->
            [try
                 apply(M,F,A)
             of
                 %% the 'of' section is not protected by the catch below
                 ok              -> ok;
                 {error, Reason} -> boot_error(Reason, not_available)
             catch
                 _:Reason -> boot_error(Reason, erlang:get_stacktrace())
             end || {M,F,A} <- MFAs],
            ok
    end.
+
%% All -rabbit_boot_step attributes from all loaded modules, sorted into
%% a consistent execution order.
boot_steps() ->
    sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)).
+
%% Graph-construction callback for rabbit_misc:build_acyclic_graph/3:
%% one vertex per step, keyed by step name and labelled with the whole
%% {StepName, Atts} step so it can be recovered after sorting.
vertices(_Module, Steps) ->
    lists:map(fun ({StepName, Atts}) -> {StepName, {StepName, Atts}} end,
              Steps).
+
%% Graph-construction callback for rabbit_misc:build_acyclic_graph/3.
%% 'requires' produces an edge Step -> Dependency and 'enables' the
%% reverse, so a topological sort (reversed) yields a valid run order.
%% All other attribute keys contribute no edges.
edges(_Module, Steps) ->
    [Edge || {StepName, Atts} <- Steps,
             {Key, OtherStep} <- Atts,
             Edge <- case Key of
                         requires -> [{StepName, OtherStep}];
                         enables  -> [{OtherStep, StepName}];
                         _        -> []
                     end].
+
%% Order the boot steps so every step runs after the steps it requires
%% (and after those that enable it). Fails the boot with a descriptive
%% message on duplicate step names, edges referencing unknown steps,
%% cycles, or steps naming unexported {M,F,A} functions.
sort_boot_steps(UnsortedSteps) ->
    case rabbit_misc:build_acyclic_graph(fun vertices/2, fun edges/2,
                                         UnsortedSteps) of
        {ok, G} ->
            %% Use topological sort to find a consistent ordering (if
            %% there is one, otherwise fail).
            SortedSteps = lists:reverse(
                            [begin
                                 {StepName, Step} = digraph:vertex(G,
                                                                   StepName),
                                 Step
                             end || StepName <- digraph_utils:topsort(G)]),
            digraph:delete(G),
            %% Check that all mentioned {M,F,A} triples are exported.
            case [{StepName, {M,F,A}} ||
                     {StepName, Attributes} <- SortedSteps,
                     {mfa, {M,F,A}}         <- Attributes,
                     not erlang:function_exported(M, F, length(A))] of
                [] -> SortedSteps;
                MissingFunctions -> basic_boot_error(
                                      {missing_functions, MissingFunctions},
                                      "Boot step functions not exported: ~p~n",
                                      [MissingFunctions])
            end;
        {error, {vertex, duplicate, StepName}} ->
            basic_boot_error({duplicate_boot_step, StepName},
                             "Duplicate boot step name: ~w~n", [StepName]);
        {error, {edge, Reason, From, To}} ->
            basic_boot_error(
              {invalid_boot_step_dependency, From, To},
              "Could not add boot step dependency of ~w on ~w:~n~s",
              [To, From,
               case Reason of
                   {bad_vertex, V} ->
                       io_lib:format("Boot step not registered: ~w~n", [V]);
                   {bad_edge, [First | Rest]} ->
                       [io_lib:format("Cyclic dependency: ~w", [First]),
                        [io_lib:format(" depends on ~w", [Next]) ||
                            Next <- Rest],
                        io_lib:format(" depends on ~w~n", [First])]
               end])
    end.
+
-ifdef(use_specs).
-spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()).
-endif.
%% Report a boot failure and exit. The timeout-waiting-for-tables case
%% gets special treatment: include cluster diagnostics for the other
%% nodes (or explain that we cannot tell which nodes, after a forced
%% shutdown left the cluster node list empty).
boot_error(Term={error, {timeout_waiting_for_tables, _}}, _Stacktrace) ->
    AllNodes = rabbit_mnesia:cluster_nodes(all),
    {Err, Nodes} =
        case AllNodes -- [node()] of
            [] -> {"Timeout contacting cluster nodes. Since RabbitMQ was"
                   " shut down forcefully~nit cannot determine which nodes"
                   " are timing out.~n", []};
            Ns -> {rabbit_misc:format(
                     "Timeout contacting cluster nodes: ~p.~n", [Ns]),
                   Ns}
        end,
    basic_boot_error(Term,
                     Err ++ rabbit_nodes:diagnostics(Nodes) ++ "~n~n", []);
boot_error(Reason, Stacktrace) ->
    Fmt = "Error description:~n   ~p~n~n" ++
        "Log files (may contain more information):~n   ~s~n   ~s~n~n",
    Args = [Reason, log_location(kernel), log_location(sasl)],
    boot_error(Reason, Fmt, Args, Stacktrace).
+
-ifdef(use_specs).
-spec(boot_error/4 :: (term(), string(), [any()], not_available | [tuple()])
                      -> no_return()).
-endif.
%% As basic_boot_error/3 but appends the stack trace when one is
%% available ('not_available' means the failure carried no trace).
boot_error(Reason, Fmt, Args, not_available) ->
    basic_boot_error(Reason, Fmt, Args);
boot_error(Reason, Fmt, Args, Stacktrace) ->
    basic_boot_error(Reason, Fmt ++ "Stack trace:~n   ~p~n~n",
                     Args ++ [Stacktrace]).
+
%% Print the failure to stdout and the local log, give the loggers a
%% moment to flush, then exit the process. Never returns.
basic_boot_error(Reason, Format, Args) ->
    io:format("~n~nBOOT FAILED~n===========~n~n" ++ Format, Args),
    rabbit_misc:local_info_msg(Format, Args),
    timer:sleep(1000),
    exit({?MODULE, failure_during_boot, Reason}).
+
+%%---------------------------------------------------------------------------
+%% boot step functions
+
%% Boot step: start the delegate supervisor with the configured number
%% of delegate processes.
boot_delegate() ->
    {ok, Count} = application:get_env(rabbit, delegate_count),
    rabbit_sup:start_supervisor_child(delegate_sup, [Count]).

%% Boot step: recover policies, queues, exchanges and bindings from the
%% database, then start the recovered queues.
recover() ->
    rabbit_policy:recover(),
    Qs = rabbit_amqqueue:recover(),
    ok = rabbit_binding:recover(rabbit_exchange:recover(),
                                [QName || #amqqueue{name = QName} <- Qs]),
    rabbit_amqqueue:start(Qs).
+
%% Boot step: seed the default user/vhost only on a virgin database.
maybe_insert_default_data() ->
    case rabbit_table:is_empty() of
        true  -> insert_default_data();
        false -> ok
    end.
+
%% Create the configured default vhost, user, tags and permissions.
%% Note the get_env/1 calls read the env of the application we are
%% running inside (rabbit), not an explicit app name.
insert_default_data() ->
    {ok, DefaultUser} = application:get_env(default_user),
    {ok, DefaultPass} = application:get_env(default_pass),
    {ok, DefaultTags} = application:get_env(default_user_tags),
    {ok, DefaultVHost} = application:get_env(default_vhost),
    {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} =
        application:get_env(default_permissions),
    ok = rabbit_vhost:add(DefaultVHost),
    ok = rabbit_auth_backend_internal:add_user(DefaultUser, DefaultPass),
    ok = rabbit_auth_backend_internal:set_tags(DefaultUser, DefaultTags),
    ok = rabbit_auth_backend_internal:set_permissions(DefaultUser,
                                                      DefaultVHost,
                                                      DefaultConfigurePerm,
                                                      DefaultWritePerm,
                                                      DefaultReadPerm),
    ok.
+
+%%---------------------------------------------------------------------------
+%% logging
+
%% Make sure both the main and SASL logs have a usable error_logger
%% handler installed (file or tty, per configuration).
ensure_working_log_handlers() ->
    Handlers = gen_event:which_handlers(error_logger),
    ok = ensure_working_log_handler(error_logger_tty_h,
                                    rabbit_error_logger_file_h,
                                    error_logger_tty_h,
                                    log_location(kernel),
                                    Handlers),

    ok = ensure_working_log_handler(sasl_report_tty_h,
                                    rabbit_sasl_report_file_h,
                                    sasl_report_tty_h,
                                    log_location(sasl),
                                    Handlers),
    ok.
+
%% For one log: 'undefined' location means logging is off; 'tty'
%% requires the tty handler to already be installed; a file path
%% installs the file handler (via a no-op rotation that swaps OldHandler
%% for NewHandler) unless it is already present. Throws if the
%% requested sink cannot be used.
ensure_working_log_handler(OldHandler, NewHandler, TTYHandler,
                           LogLocation, Handlers) ->
    case LogLocation of
        undefined -> ok;
        tty       -> case lists:member(TTYHandler, Handlers) of
                         true  -> ok;
                         false ->
                             throw({error, {cannot_log_to_tty,
                                            TTYHandler, not_installed}})
                     end;
        _         -> case lists:member(NewHandler, Handlers) of
                         true  -> ok;
                         false -> case rotate_logs(LogLocation, "",
                                                   OldHandler, NewHandler) of
                                      ok -> ok;
                                      {error, Reason} ->
                                          throw({error, {cannot_log_to_file,
                                                         LogLocation, Reason}})
                                  end
                     end
    end.
+
%% Resolve the configured sink for the main ('kernel') or 'sasl' log:
%% a filename, 'tty', or 'undefined' (logging disabled / unconfigured).
%% Throws on an unrecognised configuration value.
log_location(kernel) -> log_location_from_env(error_logger);
log_location(sasl)   -> log_location_from_env(sasl_error_logger).

log_location_from_env(Param) ->
    case application:get_env(rabbit, Param) of
        {ok, {file, File}} -> File;
        {ok, false}        -> undefined;
        {ok, tty}          -> tty;
        {ok, silent}       -> undefined;
        {ok, Bad}          -> throw({error, {cannot_log_to_file, Bad}});
        _                  -> undefined
    end.
+
%% Rotate one log by swapping its error_logger handler out and back in;
%% the new handler renames the old file with Suffix on install. No-op
%% when the log goes nowhere (undefined) or to the terminal (tty).
rotate_logs(File, Suffix, Handler) ->
    rotate_logs(File, Suffix, Handler, Handler).

rotate_logs(undefined, _Suffix, _OldHandler, _NewHandler) -> ok;
rotate_logs(tty,       _Suffix, _OldHandler, _NewHandler) -> ok;
rotate_logs(File,       Suffix,  OldHandler,  NewHandler) ->
    gen_event:swap_handler(error_logger,
                           {OldHandler, swap},
                           {NewHandler, {File, Suffix}}).
+
%% Merge the outcomes of rotating the main and SASL logs into a single
%% ok / {error, _} result, tagging each failed log so the caller can
%% tell them apart.
log_rotation_result(ok, ok) ->
    ok;
log_rotation_result(ok, {error, SaslError}) ->
    {error, {cannot_rotate_sasl_logs, SaslError}};
log_rotation_result({error, MainError}, ok) ->
    {error, {cannot_rotate_main_logs, MainError}};
log_rotation_result({error, MainError}, {error, SaslError}) ->
    {error, {{cannot_rotate_main_logs, MainError},
             {cannot_rotate_sasl_logs, SaslError}}}.
+
%% Ask every stats-emitting subsystem (direct clients, connections,
%% channels, queues) to re-emit its creation events, tagged with Ref.
force_event_refresh(Ref) ->
    rabbit_direct:force_event_refresh(Ref),
    rabbit_networking:force_connection_event_refresh(Ref),
    rabbit_channel:force_event_refresh(Ref),
    rabbit_amqqueue:force_event_refresh(Ref).
+
+%%---------------------------------------------------------------------------
+%% misc
+
%% Announce a completed (re)start: detailed plugin list to the log, a
%% one-line summary to stdout. Run with local IO so output goes to this
%% node even when called remotely.
log_broker_started(Plugins) ->
    rabbit_misc:with_local_io(
      fun() ->
              PluginList = iolist_to_binary([rabbit_misc:format(" * ~s~n", [P])
                                             || P <- Plugins]),
              error_logger:info_msg(
                "Server startup complete; ~b plugins started.~n~s",
                [length(Plugins), PluginList]),
              io:format(" completed with ~p plugins.~n", [length(Plugins)])
      end).
+
%% ok if the running ERTS is at least ?ERTS_MINIMUM, otherwise a
%% descriptive error used to refuse startup (see start/2).
erts_version_check() ->
    FoundVer = erlang:system_info(version),
    case rabbit_misc:version_compare(?ERTS_MINIMUM, FoundVer, lte) of
        true  -> ok;
        false -> {error, {erlang_version_too_old,
                          {found, FoundVer}, {required, ?ERTS_MINIMUM}}}
    end.
+
%% Print the startup banner (product, version, copyright, log
%% locations) to stdout.
print_banner() ->
    {ok, Product} = application:get_key(id),
    {ok, Version} = application:get_key(vsn),
    io:format("~n ~s ~s. ~s"
              "~n ## ## ~s"
              "~n ## ##"
              "~n ########## Logs: ~s"
              "~n ###### ## ~s"
              "~n ##########"
              "~n Starting broker...",
              [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE,
               log_location(kernel), log_location(sasl)]).
+
%% Write a key/value settings banner to the log, with keys padded to a
%% common width. "config file(s)" gets special treatment: "(none)" when
%% empty, and one line per file (continuations under a blank key).
log_banner() ->
    Settings = [{"node",           node()},
                {"home dir",       home_dir()},
                {"config file(s)", config_files()},
                {"cookie hash",    rabbit_nodes:cookie_hash()},
                {"log",            log_location(kernel)},
                {"sasl log",       log_location(sasl)},
                {"database dir",   rabbit_mnesia:dir()}],
    DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]),
    Format = fun (K, V) ->
                     rabbit_misc:format(
                       "~-" ++ integer_to_list(DescrLen) ++ "s: ~s~n", [K, V])
             end,
    Banner = iolist_to_binary(
               [case S of
                    {"config file(s)" = K, []} ->
                        Format(K, "(none)");
                    {"config file(s)" = K, [V0 | Vs]} ->
                        [Format(K, V0) | [Format("", V) || V <- Vs]];
                    {K, V} ->
                        Format(K, V)
                end || S <- Settings]),
    error_logger:info_msg("~s", [Banner]).
+
%% The VM's -home init argument, or the raw init:get_argument/1 result
%% if it has an unexpected shape (deliberately surfaced for the banner).
home_dir() ->
    case init:get_argument(home) of
        {ok, [[Home]]} -> Home;
        Other          -> Other
    end.

%% Absolute paths of the config file(s) the VM was started with; falls
%% back to the configured-but-missing file (marked "(not found)") when
%% no -config argument is present.
config_files() ->
    Abs = fun (F) ->
                  filename:absname(filename:rootname(F, ".config") ++ ".config")
          end,
    case init:get_argument(config) of
        {ok, Files} -> [Abs(File) || [File] <- Files];
        error       -> case config_setting() of
                           none -> [];
                           File -> [Abs(File) ++ " (not found)"]
                       end
    end.
+
+%% This is a pain. We want to know where the config file is. But we
+%% can't specify it on the command line if it is missing or the VM
+%% will fail to start, so we need to find it by some mechanism other
+%% than init:get_arguments/0. We can look at the environment variable
+%% which is responsible for setting it... but that doesn't work for a
+%% Windows service since the variable can change and the service not
+%% be reinstalled, so in that case we add a magic application env.
%% Where the config file *should* be: the windows_service_config app env
%% takes precedence (see the comment block above), then the
%% RABBITMQ_CONFIG_FILE environment variable; 'none' if neither is set.
config_setting() ->
    case application:get_env(rabbit, windows_service_config) of
        {ok, File1} -> File1;
        undefined   -> case os:getenv("RABBITMQ_CONFIG_FILE") of
                           false -> none;
                           File2 -> File2
                       end
    end.
+
+%% We don't want this in fhc since it references rabbit stuff. And we can't put
+%% this in the bootstep directly.
%% We don't want this in fhc since it references rabbit stuff. And we can't put
%% this in the bootstep directly.
%% Starts the file_handle_cache with rabbit_alarm's set/clear callbacks.
start_fhc() ->
    rabbit_sup:start_restartable_child(
      file_handle_cache,
      [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_access_control).
+
+-include("rabbit.hrl").
+
+-export([check_user_pass_login/2, check_user_login/2, check_user_loopback/2,
+ check_vhost_access/2, check_resource_access/3]).
+
+%%----------------------------------------------------------------------------
+
-ifdef(use_specs).

%% Dialyzer types and specs for the access-control API, conditionally
%% compiled via the 'use_specs' macro.

-export_type([permission_atom/0]).

-type(permission_atom() :: 'configure' | 'read' | 'write').

-spec(check_user_pass_login/2 ::
        (rabbit_types:username(), rabbit_types:password())
        -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
-spec(check_user_login/2 ::
        (rabbit_types:username(), [{atom(), any()}])
        -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
-spec(check_user_loopback/2 :: (rabbit_types:username(),
                                rabbit_net:socket() | inet:ip_address())
                               -> 'ok' | 'not_allowed').
-spec(check_vhost_access/2 ::
        (rabbit_types:user(), rabbit_types:vhost())
        -> 'ok' | rabbit_types:channel_exit()).
-spec(check_resource_access/3 ::
        (rabbit_types:user(), rabbit_types:r(atom()), permission_atom())
        -> 'ok' | rabbit_types:channel_exit()).

-endif.
+
+%%----------------------------------------------------------------------------
+
%% Password login is just the general login with a 'password' property.
check_user_pass_login(Username, Password) ->
    check_user_login(Username, [{password, Password}]).
+
%% Try each configured auth backend in order until one accepts the user.
%% The fold's accumulator is either the latest {refused, Fmt, Args}
%% (keep trying) or {ok, User} (all later backends are skipped). A
%% success/failure event is emitted either way.
check_user_login(Username, AuthProps) ->
    {ok, Modules} = application:get_env(rabbit, auth_backends),
    R = lists:foldl(
          fun ({ModN, ModZ}, {refused, _, _}) ->
                  %% Different modules for authN vs authZ. So authenticate
                  %% with authN module, then if that succeeds do
                  %% passwordless (i.e pre-authenticated) login with authZ
                  %% module, and use the #user{} the latter gives us.
                  case try_login(ModN, Username, AuthProps) of
                      {ok, _} -> try_login(ModZ, Username, []);
                      Else    -> Else
                  end;
              (Mod, {refused, _, _}) ->
                  %% Same module for authN and authZ. Just take the result
                  %% it gives us
                  try_login(Mod, Username, AuthProps);
              (_, {ok, User}) ->
                  %% We've successfully authenticated. Skip to the end...
                  {ok, User}
          end, {refused, "No modules checked '~s'", [Username]}, Modules),
    rabbit_event:notify(case R of
                            {ok, _User} -> user_authentication_success;
                            _           -> user_authentication_failure
                        end, [{name, Username}]),
    R.
+
%% Invoke one backend's check_user_login/2, normalising {error, E} into
%% the {refused, Fmt, Args} shape used by check_user_login/2; any other
%% result ({ok, User} or {refused, ...}) passes through unchanged.
try_login(Module, Username, AuthProps) ->
    case Module:check_user_login(Username, AuthProps) of
        {error, E} -> {refused, "~s failed authenticating ~s: ~p~n",
                       [Module, Username, E]};
        Else       -> Else
    end.
+
%% Users listed in the 'loopback_users' app env may only connect over
%% loopback; everyone else may connect from anywhere.
check_user_loopback(Username, SockOrAddr) ->
    {ok, Users} = application:get_env(rabbit, loopback_users),
    case rabbit_net:is_loopback(SockOrAddr)
        orelse not lists:member(Username, Users) of
        true  -> ok;
        false -> not_allowed
    end.
+
%% ok if the vhost exists AND the user's auth backend grants access;
%% otherwise raises an access_refused protocol error via check_access/4.
check_vhost_access(User = #user{ username     = Username,
                                 auth_backend = Module }, VHostPath) ->
    check_access(
      fun() ->
              %% TODO this could be an andalso shortcut under >R13A
              case rabbit_vhost:exists(VHostPath) of
                  false -> false;
                  true  -> Module:check_vhost_access(User, VHostPath)
              end
      end,
      Module, "access to vhost '~s' refused for user '~s'",
      [VHostPath, Username]).
+
%% The empty exchange name is the AMQP default exchange; rewrite it to
%% "amq.default" so permissions can be expressed for it, then ask the
%% user's auth backend.
check_resource_access(User, R = #resource{kind = exchange, name = <<"">>},
                      Permission) ->
    check_resource_access(User, R#resource{name = <<"amq.default">>},
                          Permission);
check_resource_access(User = #user{username = Username, auth_backend = Module},
                      Resource, Permission) ->
    check_access(
      fun() -> Module:check_resource_access(User, Resource, Permission) end,
      Module, "access to ~s refused for user '~s'",
      [rabbit_misc:rs(Resource), Username]).
+
%% Run an access-check fun: true -> ok; false -> access_refused protocol
%% error; {error, E} from the backend is logged and then treated as a
%% refusal. Anything else the fun returns is used as the boolean (a
%% non-boolean would crash the 'case Allow' below, by design).
check_access(Fun, Module, ErrStr, ErrArgs) ->
    Allow = case Fun() of
                {error, E}  ->
                    rabbit_log:error(ErrStr ++ " by ~s: ~p~n",
                                     ErrArgs ++ [Module, E]),
                    false;
                Else ->
                    Else
            end,
    case Allow of
        true ->
            ok;
        false ->
            rabbit_misc:protocol_error(access_refused, ErrStr, ErrArgs)
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_alarm).
+
+-behaviour(gen_event).
+
+-export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
+ clear_alarm/1, get_alarms/0, on_node_up/1, on_node_down/1]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([remote_conserve_resources/3]). %% Internal use only
+
+-define(SERVER, ?MODULE).
+
+-record(alarms, {alertees, alarmed_nodes, alarms}).
+
+%%----------------------------------------------------------------------------
+
-ifdef(use_specs).

%% Dialyzer specs for the alarm API, conditionally compiled via the
%% 'use_specs' macro.

-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-spec(start/0 :: () -> 'ok').
-spec(stop/0 :: () -> 'ok').
-spec(register/2 :: (pid(), rabbit_types:mfargs()) -> [atom()]).
-spec(set_alarm/1 :: (any()) -> 'ok').
-spec(clear_alarm/1 :: (any()) -> 'ok').
-spec(on_node_up/1 :: (node()) -> 'ok').
-spec(on_node_down/1 :: (node()) -> 'ok').

-endif.
+
+%%----------------------------------------------------------------------------
+
%% Start the locally-registered alarm gen_event manager.
start_link() ->
    gen_event:start_link({local, ?SERVER}).

%% Boot-time setup: start the event manager under rabbit_sup, install
%% this module as a handler, then start the memory and disk monitors
%% with their configured thresholds. The memory monitor's alarm
%% callback runs a GC pass before raising the alarm.
start() ->
    ok = rabbit_sup:start_restartable_child(?MODULE),
    ok = gen_event:add_handler(?SERVER, ?MODULE, []),
    {ok, MemoryWatermark} = application:get_env(vm_memory_high_watermark),
    rabbit_sup:start_restartable_child(
      vm_memory_monitor, [MemoryWatermark,
                          fun (Alarm) ->
                                  background_gc:run(),
                                  set_alarm(Alarm)
                          end,
                          fun clear_alarm/1]),
    {ok, DiskLimit} = application:get_env(disk_free_limit),
    rabbit_sup:start_delayed_restartable_child(
      rabbit_disk_monitor, [DiskLimit]),
    ok.
+
%% Nothing to tear down here; the supervisor owns the children.
stop() -> ok.

%% Register an alertee: AlertMFA is applied (with [Pid, Source, Alert]
%% appended) whenever an alarm changes. Returns the currently alarmed
%% sources (see handle_call below).
register(Pid, AlertMFA) ->
    gen_event:call(?SERVER, ?MODULE, {register, Pid, AlertMFA}, infinity).

%% Asynchronous alarm mutation / node-membership notifications.
set_alarm(Alarm)   -> gen_event:notify(?SERVER, {set_alarm,   Alarm}).
clear_alarm(Alarm) -> gen_event:notify(?SERVER, {clear_alarm, Alarm}).

%% Synchronous snapshot of the current alarm list.
get_alarms() -> gen_event:call(?SERVER, ?MODULE, get_alarms, infinity).

on_node_up(Node)   -> gen_event:notify(?SERVER, {node_up,   Node}).
on_node_down(Node) -> gen_event:notify(?SERVER, {node_down, Node}).

%% Internal use only: the MFA registered on remote nodes (see the
%% node_up handler) so that a remote node's resource alarms are
%% mirrored, tagged with this node's name, onto that node's manager.
remote_conserve_resources(Pid, Source, true) ->
    gen_event:notify({?SERVER, node(Pid)},
                     {set_alarm, {{resource_limit, Source, node()}, []}});
remote_conserve_resources(Pid, Source, false) ->
    gen_event:notify({?SERVER, node(Pid)},
                     {clear_alarm, {resource_limit, Source, node()}}).
+
+
+%%----------------------------------------------------------------------------
+
%% gen_event state: alertees maps Pid -> AlertMFA; alarmed_nodes maps
%% Node -> [Source]; alarms is the raw list of set alarm terms.
init([]) ->
    {ok, #alarms{alertees      = dict:new(),
                 alarmed_nodes = dict:new(),
                 alarms        = []}}.

%% Register an alertee; the reply is the deduplicated union of alarm
%% sources across all alarmed nodes.
handle_call({register, Pid, AlertMFA}, State = #alarms{alarmed_nodes = AN}) ->
    {ok, lists:usort(lists:append([V || {_, V} <- dict:to_list(AN)])),
     internal_register(Pid, AlertMFA, State)};

handle_call(get_alarms, State = #alarms{alarms = Alarms}) ->
    {ok, Alarms, State};

handle_call(_Request, State) ->
    {ok, not_understood, State}.
+
%% Set an alarm unless an identical one is already present; the real
%% work (alerting) happens in handle_set_alarm/2.
handle_event({set_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
    case lists:member(Alarm, Alarms) of
        true  -> {ok, State};
        false -> UpdatedAlarms = lists:usort([Alarm|Alarms]),
                 handle_set_alarm(Alarm, State#alarms{alarms = UpdatedAlarms})
    end;

%% Clear by key: set stores {Key, Details} pairs, clear is given just
%% the key, hence keymember/keydelete on position 1.
handle_event({clear_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
    case lists:keymember(Alarm, 1, Alarms) of
        true  -> handle_clear_alarm(
                   Alarm, State#alarms{alarms = lists:keydelete(
                                                  Alarm, 1, Alarms)});
        false -> {ok, State}

    end;

handle_event({node_up, Node}, State) ->
    %% Must do this via notify and not call to avoid possible deadlock.
    ok = gen_event:notify(
           {?SERVER, Node},
           {register, self(), {?MODULE, remote_conserve_resources, []}}),
    {ok, State};

%% A downed node's alarms are dropped wholesale.
handle_event({node_down, Node}, State) ->
    {ok, maybe_alert(fun dict_unappend_all/3, Node, [], false, State)};

%% Async registration path used by remote nodes (no reply possible).
handle_event({register, Pid, AlertMFA}, State) ->
    {ok, internal_register(Pid, AlertMFA, State)};

handle_event(_Event, State) ->
    {ok, State}.
+
%% Drop alertees whose process dies (monitored in internal_register/3).
handle_info({'DOWN', _MRef, process, Pid, _Reason},
            State = #alarms{alertees = Alertees}) ->
    {ok, State#alarms{alertees = dict:erase(Pid, Alertees)}};

handle_info(_Info, State) ->
    {ok, State}.

terminate(_Arg, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
+%%----------------------------------------------------------------------------
+
%% Add Val to the sorted, duplicate-free list stored under Key,
%% creating the entry if absent.
dict_append(Key, Val, Dict) ->
    dict:update(Key,
                fun (Vals) -> lists:usort([Val | Vals]) end,
                [Val],
                Dict).
+
%% Drop the whole entry for Key (used when a node goes down and all its
%% alarm sources vanish at once).
dict_unappend_all(Key, _Val, Dict) ->
    dict:erase(Key, Dict).

%% Remove one Val from the list stored under Key, erasing the entry
%% entirely once the list becomes empty.
dict_unappend(Key, Val, Dict) ->
    Vals = case dict:find(Key, Dict) of
               {ok, Existing} -> Existing;
               error          -> []
           end,
    case lists:delete(Val, Vals) of
        []        -> dict:erase(Key, Dict);
        Remaining -> dict:store(Key, Remaining, Dict)
    end.
+
%% Apply UpdateFun to the alarmed-nodes dict and fan the Alert boolean
%% out to registered alertees: remote alertees are only told about
%% alarms originating on THIS node (so each node broadcasts only its own
%% state); local alertees always hear about the change.
maybe_alert(UpdateFun, Node, Source, Alert,
            State = #alarms{alarmed_nodes = AN,
                            alertees      = Alertees}) ->
    AN1 = UpdateFun(Node, Source, AN),
    case node() of
        Node -> ok = alert_remote(Alert, Alertees, Source);
        _    -> ok
    end,
    ok = alert_local(Alert, Alertees, Source),
    State#alarms{alarmed_nodes = AN1}.
+
%% Local alertees are those on this node; remote ones elsewhere - the
%% split is just a node() comparison on each registered pid.
alert_local(Alert, Alertees, Source) ->
    alert(Alertees, Source, Alert, fun erlang:'=:='/2).

alert_remote(Alert, Alertees, Source) ->
    alert(Alertees, Source, Alert, fun erlang:'=/='/2).

%% Apply each matching alertee's registered {M,F,A} with
%% [Pid, Source, Alert] appended to A.
alert(Alertees, Source, Alert, NodeComparator) ->
    Node = node(),
    dict:fold(fun (Pid, {M, F, A}, ok) ->
                      case NodeComparator(Node, node(Pid)) of
                          true  -> apply(M, F, A ++ [Pid, Source, Alert]);
                          false -> ok
                      end
              end, ok, Alertees).
+
%% Record a new alertee and monitor it (cleaned up in handle_info on
%% 'DOWN'). If this node is currently alarmed, immediately replay each
%% active source to the newcomer so it starts with the right state.
internal_register(Pid, {M, F, A} = AlertMFA,
                  State = #alarms{alertees = Alertees}) ->
    _MRef = erlang:monitor(process, Pid),
    case dict:find(node(), State#alarms.alarmed_nodes) of
        {ok, Sources} -> [apply(M, F, A ++ [Pid, R, true]) || R <- Sources];
        error         -> ok
    end,
    NewAlertees = dict:store(Pid, AlertMFA, Alertees),
    State#alarms{alertees = NewAlertees}.
+
%% A resource (memory/disk) alarm blocks publishers: log loudly, record
%% the source against the node and alert registrants via maybe_alert/5.
handle_set_alarm({{resource_limit, Source, Node}, []}, State) ->
    rabbit_log:warning(
      "~s resource limit alarm set on node ~p.~n~n"
      "**********************************************************~n"
      "*** Publishers will be blocked until this alarm clears ***~n"
      "**********************************************************~n",
      [Source, Node]),
    {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)};
%% The FD alarm only refuses new connections; no alertee fan-out.
handle_set_alarm({file_descriptor_limit, []}, State) ->
    rabbit_log:warning(
      "file descriptor limit alarm set.~n~n"
      "********************************************************************~n"
      "*** New connections will not be accepted until this alarm clears ***~n"
      "********************************************************************~n"),
    {ok, State};
%% Unknown alarms are recorded (by handle_event) but only logged here.
handle_set_alarm(Alarm, State) ->
    rabbit_log:warning("alarm '~p' set~n", [Alarm]),
    {ok, State}.
+
%% Mirror image of handle_set_alarm/2: unrecord the source and notify
%% alertees with Alert = false for resource alarms; just log otherwise.
handle_clear_alarm({resource_limit, Source, Node}, State) ->
    rabbit_log:warning("~s resource limit alarm cleared on node ~p~n",
                       [Source, Node]),
    {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)};
handle_clear_alarm(file_descriptor_limit, State) ->
    rabbit_log:warning("file descriptor limit alarm cleared~n"),
    {ok, State};
handle_clear_alarm(Alarm, State) ->
    rabbit_log:warning("alarm '~p' cleared~n", [Alarm]),
    {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqqueue).
+
+-export([recover/0, stop/0, start/1, declare/5, declare/6,
+ delete_immediately/1, delete/3, purge/1, forget_all_durable/1]).
+-export([pseudo_queue/2]).
+-export([lookup/1, not_found_or_absent/1, with/2, with/3, with_or_die/2,
+ assert_equivalence/5,
+ check_exclusive_access/2, with_exclusive_access_or_die/3,
+ stat/1, deliver/2, deliver_flow/2, requeue/3, ack/3, reject/4]).
+-export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]).
+-export([force_event_refresh/1, notify_policy_changed/1]).
+-export([consumers/1, consumers_all/1, consumer_info_keys/0]).
+-export([basic_get/4, basic_consume/10, basic_cancel/4, notify_decorators/1]).
+-export([notify_sent/2, notify_sent_queue_down/1, resume/2]).
+-export([notify_down_all/2, activate_limit_all/2, credit/5]).
+-export([on_node_down/1]).
+-export([update/2, store_queue/1, policy_changed/2]).
+-export([start_mirroring/1, stop_mirroring/1, sync_mirrors/1,
+ cancel_sync_mirrors/1]).
+
+%% internal
+-export([internal_declare/2, internal_delete/1, run_backing_queue/3,
+ set_ram_duration_target/2, set_maximum_since_use/2]).
+
+-include("rabbit.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+
+-define(INTEGER_ARG_TYPES, [byte, short, signedint, long]).
+
+-define(MORE_CONSUMER_CREDIT_AFTER, 50).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([name/0, qmsg/0]).
+
+-type(name() :: rabbit_types:r('queue')).
+-type(qpids() :: [pid()]).
+-type(qlen() :: rabbit_types:ok(non_neg_integer())).
+-type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A | no_return())).
+-type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}).
+-type(msg_id() :: non_neg_integer()).
+-type(ok_or_errors() ::
+ 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}).
+-type(queue_or_absent() :: rabbit_types:amqqueue() |
+ {'absent', rabbit_types:amqqueue()}).
+-type(not_found_or_absent() :: 'not_found' |
+ {'absent', rabbit_types:amqqueue()}).
+-spec(recover/0 :: () -> [rabbit_types:amqqueue()]).
+-spec(stop/0 :: () -> 'ok').
+-spec(start/1 :: ([rabbit_types:amqqueue()]) -> 'ok').
+-spec(declare/5 ::
+ (name(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
+ -> {'new' | 'existing' | 'absent' | 'owner_died',
+ rabbit_types:amqqueue()} | rabbit_types:channel_exit()).
+-spec(declare/6 ::
+ (name(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:maybe(pid()), node())
+ -> {'new' | 'existing' | 'absent' | 'owner_died',
+ rabbit_types:amqqueue()} | rabbit_types:channel_exit()).
+-spec(internal_declare/2 ::
+ (rabbit_types:amqqueue(), boolean())
+ -> queue_or_absent() | rabbit_misc:thunk(queue_or_absent())).
+-spec(update/2 ::
+ (name(),
+ fun((rabbit_types:amqqueue()) -> rabbit_types:amqqueue()))
+ -> 'not_found' | rabbit_types:amqqueue()).
+-spec(lookup/1 ::
+ (name()) -> rabbit_types:ok(rabbit_types:amqqueue()) |
+ rabbit_types:error('not_found');
+ ([name()]) -> [rabbit_types:amqqueue()]).
+-spec(not_found_or_absent/1 :: (name()) -> not_found_or_absent()).
+-spec(with/2 :: (name(), qfun(A)) ->
+ A | rabbit_types:error(not_found_or_absent())).
+-spec(with/3 :: (name(), qfun(A), fun((not_found_or_absent()) -> B)) -> A | B).
+-spec(with_or_die/2 ::
+ (name(), qfun(A)) -> A | rabbit_types:channel_exit()).
+-spec(assert_equivalence/5 ::
+ (rabbit_types:amqqueue(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
+ -> 'ok' | rabbit_types:channel_exit() |
+ rabbit_types:connection_exit()).
+-spec(check_exclusive_access/2 ::
+ (rabbit_types:amqqueue(), pid())
+ -> 'ok' | rabbit_types:channel_exit()).
+-spec(with_exclusive_access_or_die/3 ::
+ (name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()).
+-spec(list/0 :: () -> [rabbit_types:amqqueue()]).
+-spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()).
+-spec(info/2 ::
+ (rabbit_types:amqqueue(), rabbit_types:info_keys())
+ -> rabbit_types:infos()).
+-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(info_all/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
+ -> [rabbit_types:infos()]).
+-spec(force_event_refresh/1 :: (reference()) -> 'ok').
+-spec(notify_policy_changed/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(consumers/1 :: (rabbit_types:amqqueue())
+ -> [{pid(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table()}]).
+-spec(consumer_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(consumers_all/1 ::
+ (rabbit_types:vhost())
+ -> [{name(), pid(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table()}]).
+-spec(stat/1 ::
+ (rabbit_types:amqqueue())
+ -> {'ok', non_neg_integer(), non_neg_integer()}).
+-spec(delete_immediately/1 :: (qpids()) -> 'ok').
+-spec(delete/3 ::
+ (rabbit_types:amqqueue(), 'false', 'false')
+ -> qlen();
+ (rabbit_types:amqqueue(), 'true' , 'false')
+ -> qlen() | rabbit_types:error('in_use');
+ (rabbit_types:amqqueue(), 'false', 'true' )
+ -> qlen() | rabbit_types:error('not_empty');
+ (rabbit_types:amqqueue(), 'true' , 'true' )
+ -> qlen() |
+ rabbit_types:error('in_use') |
+ rabbit_types:error('not_empty')).
+-spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()).
+-spec(forget_all_durable/1 :: (node()) -> 'ok').
+-spec(deliver/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
+ qpids()).
+-spec(deliver_flow/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
+ qpids()).
+-spec(requeue/3 :: (pid(), [msg_id()], pid()) -> 'ok').
+-spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok').
+-spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok').
+-spec(notify_down_all/2 :: (qpids(), pid()) -> ok_or_errors()).
+-spec(activate_limit_all/2 :: (qpids(), pid()) -> ok_or_errors()).
+-spec(basic_get/4 :: (rabbit_types:amqqueue(), pid(), boolean(), pid()) ->
+ {'ok', non_neg_integer(), qmsg()} | 'empty').
+-spec(credit/5 :: (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(),
+ non_neg_integer(), boolean()) -> 'ok').
+-spec(basic_consume/10 ::
+ (rabbit_types:amqqueue(), boolean(), pid(), pid(), boolean(),
+ non_neg_integer(), rabbit_types:ctag(), boolean(),
+ rabbit_framing:amqp_table(), any())
+ -> rabbit_types:ok_or_error('exclusive_consume_unavailable')).
+-spec(basic_cancel/4 ::
+ (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok').
+-spec(notify_decorators/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(notify_sent/2 :: (pid(), pid()) -> 'ok').
+-spec(notify_sent_queue_down/1 :: (pid()) -> 'ok').
+-spec(resume/2 :: (pid(), pid()) -> 'ok').
+-spec(internal_delete/1 ::
+ (name()) -> rabbit_types:ok_or_error('not_found') |
+ rabbit_types:connection_exit() |
+ fun (() -> rabbit_types:ok_or_error('not_found') |
+ rabbit_types:connection_exit())).
+-spec(run_backing_queue/3 ::
+ (pid(), atom(),
+ (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok').
+-spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok').
+-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
+-spec(on_node_down/1 :: (node()) -> 'ok').
+-spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()).
+-spec(store_queue/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(policy_changed/2 ::
+ (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
+-spec(start_mirroring/1 :: (pid()) -> 'ok').
+-spec(stop_mirroring/1 :: (pid()) -> 'ok').
+-spec(sync_mirrors/1 :: (pid()) -> 'ok' | rabbit_types:error('not_mirrored')).
+-spec(cancel_sync_mirrors/1 :: (pid()) -> 'ok' | {'ok', 'not_syncing'}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(CONSUMER_INFO_KEYS,
+ [queue_name, channel_pid, consumer_tag, ack_required, prefetch_count,
+ arguments]).
+
%% Recover all durable queues on this node at broker startup: start the
%% backing queue module, start the queue supervisor, then restart each
%% durable queue process with its recovery terms. Returns the list of
%% recovered #amqqueue{} records.
recover() ->
    %% Clear out remnants of old incarnation, in case we restarted
    %% faster than other nodes handled DOWN messages from us.
    on_node_down(node()),
    DurableQueues = find_durable_queues(),
    {ok, BQ} = application:get_env(rabbit, backing_queue_module),

    %% We rely on BQ:start/1 returning the recovery terms in the same
    %% order as the supplied queue names, so that we can zip them together
    %% for further processing in recover_durable_queues.
    {ok, OrderedRecoveryTerms} =
        BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]),
    {ok, _} = supervisor:start_child(
                rabbit_sup,
                {rabbit_amqqueue_sup,
                 {rabbit_amqqueue_sup, start_link, []},
                 transient, infinity, supervisor, [rabbit_amqqueue_sup]}),
    recover_durable_queues(lists:zip(DurableQueues, OrderedRecoveryTerms)).

%% Shut down the queue supervisor tree, then stop the backing queue
%% module (reverse of recover/0's startup order).
stop() ->
    ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup),
    ok = supervisor:delete_child(rabbit_sup, rabbit_amqqueue_sup),
    {ok, BQ} = application:get_env(rabbit, backing_queue_module),
    ok = BQ:stop().
+
%% Release recovered queue processes so they complete initialisation.
%% At this point all recovered queues and their bindings are visible to
%% routing, so it is safe for them to finish initialising (which may
%% involve interacting with other queues).
start(Qs) ->
    lists:foreach(fun (#amqqueue{pid = QPid}) -> QPid ! {self(), go} end, Qs),
    ok.
+
%% Find durable queues recorded for this node that are not currently
%% running (i.e. present in rabbit_durable_queue but absent from the
%% live rabbit_queue table) — these are candidates for recovery.
find_durable_queues() ->
    Node = node(),
    mnesia:async_dirty(
      fun () ->
              qlc:e(qlc:q([Q || Q = #amqqueue{name = Name,
                                              pid = Pid}
                                    <- mnesia:table(rabbit_durable_queue),
                                node(Pid) == Node,
                                mnesia:read(rabbit_queue, Name, read) =:= []]))
      end).

%% Restart one queue process per {Q, RecoveryTerms} pair and initialise
%% them all in parallel via gen_server2:mcall/1. Failures are logged;
%% only successfully initialised queues are returned.
recover_durable_queues(QueuesAndRecoveryTerms) ->
    {Results, Failures} =
        gen_server2:mcall([{start_queue_process(node(), Q),
                            {init, {self(), Terms}}} ||
                              {Q, Terms} <- QueuesAndRecoveryTerms]),
    [rabbit_log:error("Queue ~p failed to initialise: ~p~n",
                      [Pid, Error]) || {Pid, Error} <- Failures],
    [Q || {_, {new, Q}} <- Results].
+
%% Declare a queue, suggesting the local node as its home; see declare/6.
declare(QueueName, Durable, AutoDelete, Args, Owner) ->
    declare(QueueName, Durable, AutoDelete, Args, Owner, node()).
+
+
%% The Node argument suggests where the queue (master if mirrored)
%% should be. Note that in some cases (e.g. with "nodes" policy in
%% effect) this might not be possible to satisfy.
declare(QueueName, Durable, AutoDelete, Args, Owner, Node) ->
    ok = check_declare_arguments(QueueName, Args),
    Q = rabbit_policy:set(#amqqueue{name            = QueueName,
                                    durable         = Durable,
                                    auto_delete     = AutoDelete,
                                    arguments       = Args,
                                    exclusive_owner = Owner,
                                    pid             = none,
                                    slave_pids      = [],
                                    sync_slave_pids = [],
                                    gm_pids         = []}),
    %% Bind the chosen node to a fresh variable: 'Node' is already
    %% bound, so re-matching it would badmatch whenever the policy
    %% picks a node other than the suggested one (which, per the
    %% comment above, can legitimately happen).
    Node1 = rabbit_mirror_queue_misc:initial_queue_node(Q, Node),
    gen_server2:call(start_queue_process(Node1, Q), {init, new}, infinity).
+
%% Record a queue in mnesia. With Recover = true the queue is simply
%% (re)stored. Otherwise we must handle races: the queue may already
%% exist (live or crashed), or exist only in the durable table
%% ({absent, Q}). Returns a queue/absent result or a thunk to run
%% after the transaction (see rabbit_misc:execute_mnesia_tx_with_tail/1).
internal_declare(Q, true) ->
    rabbit_misc:execute_mnesia_tx_with_tail(
      fun () -> ok = store_queue(Q), rabbit_misc:const(Q) end);
internal_declare(Q = #amqqueue{name = QueueName}, false) ->
    rabbit_misc:execute_mnesia_tx_with_tail(
      fun () ->
              case mnesia:wread({rabbit_queue, QueueName}) of
                  [] ->
                      %% Not live: either genuinely new, or stranded on
                      %% a stopped node (absent).
                      case not_found_or_absent(QueueName) of
                          not_found -> Q1 = rabbit_policy:set(Q),
                                       ok = store_queue(Q1),
                                       B = add_default_binding(Q1),
                                       fun () -> B(), Q1 end;
                          {absent, _Q} = R -> rabbit_misc:const(R)
                      end;
                  [ExistingQ = #amqqueue{pid = QPid}] ->
                      %% Live record found; if its process is dead,
                      %% clean it up (tail fun) and report the stale
                      %% record to the caller.
                      case rabbit_misc:is_process_alive(QPid) of
                          true  -> rabbit_misc:const(ExistingQ);
                          false -> TailFun = internal_delete(QueueName),
                                   fun () -> TailFun(), ExistingQ end
                      end
              end
      end).

%% Apply Fun to the stored queue record (inside a surrounding mnesia
%% transaction context) and write the result back, mirroring to the
%% durable table for durable queues. Returns the new record or
%% 'not_found'.
update(Name, Fun) ->
    case mnesia:wread({rabbit_queue, Name}) of
        [Q = #amqqueue{durable = Durable}] ->
            Q1 = Fun(Q),
            ok = mnesia:write(rabbit_queue, Q1, write),
            case Durable of
                true -> ok = mnesia:write(rabbit_durable_queue, Q1, write);
                _    -> ok
            end,
            Q1;
        [] ->
            not_found
    end.
+
%% Write Q to the live rabbit_queue table; durable queues are also
%% written to rabbit_durable_queue first, with the per-incarnation pid
%% lists blanked (presumably because they would be stale after a
%% restart — the live table keeps the real values).
store_queue(Q = #amqqueue{durable = Durable}) ->
    case Durable of
        true  -> ok = mnesia:write(rabbit_durable_queue,
                                   Q#amqqueue{slave_pids      = [],
                                              sync_slave_pids = [],
                                              gm_pids         = []}, write);
        false -> ok
    end,
    ok = mnesia:write(rabbit_queue, Q, write),
    ok.
+
%% React to a policy change on a queue: update mirroring, notify every
%% decorator active before or after the change, then poke the queue
%% process for a stats refresh.
policy_changed(Q1 = #amqqueue{decorators = Decorators1},
               Q2 = #amqqueue{decorators = Decorators2}) ->
    rabbit_mirror_queue_misc:update_mirrors(Q1, Q2),
    D1 = rabbit_queue_decorator:select(Decorators1),
    D2 = rabbit_queue_decorator:select(Decorators2),
    [ok = M:policy_changed(Q1, Q2) || M <- lists:usort(D1 ++ D2)],
    %% Make sure we emit a stats event even if nothing
    %% mirroring-related has changed - the policy may have changed anyway.
    notify_policy_changed(Q1).

%% Spawn a queue process for Q on Node under the queue supervisor.
start_queue_process(Node, Q) ->
    {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, [Q]),
    Pid.

%% Bind the queue to the default ("") exchange under its own name,
%% per the AMQP default-exchange rule.
add_default_binding(#amqqueue{name = QueueName}) ->
    ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>),
    RoutingKey = QueueName#resource.name,
    rabbit_binding:add(#binding{source      = ExchangeName,
                                destination = QueueName,
                                key         = RoutingKey,
                                args        = []}).
+
%% Look up queue(s). Given a single name: {ok, Q} | {error, not_found}.
%% Given a list of names: the list of found records (misses dropped).
lookup([])     -> [];                              %% optimisation
lookup([Name]) -> ets:lookup(rabbit_queue, Name);  %% optimisation
lookup(Names) when is_list(Names) ->
    %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
    %% expensive for reasons explained in rabbit_misc:dirty_read/1.
    lists:append([ets:lookup(rabbit_queue, Name) || Name <- Names]);
lookup(Name) ->
    rabbit_misc:dirty_read({rabbit_queue, Name}).

%% Distinguish a truly unknown queue from one stranded in the durable
%% table (node down). Must run inside an mnesia transaction.
not_found_or_absent(Name) ->
    %% NB: we assume that the caller has already performed a lookup on
    %% rabbit_queue and not found anything
    case mnesia:read({rabbit_durable_queue, Name}) of
        []  -> not_found;
        [Q] -> {absent, Q} %% Q exists on stopped node
    end.

%% Dirty-read variant of not_found_or_absent/1 for use outside a tx.
not_found_or_absent_dirty(Name) ->
    %% We should read from both tables inside a tx, to get a
    %% consistent view. But the chances of an inconsistency are small,
    %% and only affect the error kind.
    case rabbit_misc:dirty_read({rabbit_durable_queue, Name}) of
        {error, not_found} -> not_found;
        {ok, Q}            -> {absent, Q}
    end.
+
%% Apply F(Q) to the named queue, retrying while the queue process is
%% dying/restarting; call E(Reason) if the queue cannot be found.
with(Name, F, E) ->
    case lookup(Name) of
        {ok, Q = #amqqueue{pid = QPid}} ->
            %% We check is_process_alive(QPid) in case we receive a
            %% nodedown (for example) in F() that has nothing to do
            %% with the QPid. F() should be written s.t. that this
            %% cannot happen, so we bail if it does since that
            %% indicates a code bug and we don't want to get stuck in
            %% the retry loop.
            rabbit_misc:with_exit_handler(
              fun () -> false = rabbit_misc:is_process_alive(QPid),
                        timer:sleep(25),
                        with(Name, F, E)
              end, fun () -> F(Q) end);
        {error, not_found} ->
            E(not_found_or_absent_dirty(Name))
    end.

%% As with/3, but errors are returned as {error, Reason}.
with(Name, F) -> with(Name, F, fun (E) -> {error, E} end).

%% As with/3, but raises the appropriate channel protocol error when
%% the queue is missing or absent.
with_or_die(Name, F) ->
    with(Name, F, fun (not_found)   -> rabbit_misc:not_found(Name);
                      ({absent, Q}) -> rabbit_misc:absent(Q)
                  end).

%% Assert a redeclare matches the existing queue: durable/auto_delete
%% are matched in the head, arguments and exclusivity checked below;
%% any mismatch raises precondition_failed.
assert_equivalence(#amqqueue{durable     = Durable,
                             auto_delete = AutoDelete} = Q,
                   Durable, AutoDelete, RequiredArgs, Owner) ->
    assert_args_equivalence(Q, RequiredArgs),
    check_exclusive_access(Q, Owner, strict);
assert_equivalence(#amqqueue{name = QueueName},
                   _Durable, _AutoDelete, _RequiredArgs, _Owner) ->
    rabbit_misc:protocol_error(
      precondition_failed, "parameters for ~s not equivalent",
      [rabbit_misc:rs(QueueName)]).
+
%% Check exclusivity leniently: a non-owner may touch a queue that has
%% no exclusive owner.
check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax).

%% 'strict' additionally rejects non-owners even when there is no
%% exclusive owner set; the owner itself always passes.
check_exclusive_access(#amqqueue{exclusive_owner = Owner}, Owner, _MatchType) ->
    ok;
check_exclusive_access(#amqqueue{exclusive_owner = none}, _ReaderPid, lax) ->
    ok;
check_exclusive_access(#amqqueue{name = QueueName}, _ReaderPid, _MatchType) ->
    rabbit_misc:protocol_error(
      resource_locked,
      "cannot obtain exclusive access to locked ~s",
      [rabbit_misc:rs(QueueName)]).

%% with_or_die/2 plus a lax exclusivity check before running F.
with_exclusive_access_or_die(Name, ReaderPid, F) ->
    with_or_die(Name,
                fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end).

%% Raise precondition_failed unless the redeclared arguments match the
%% stored ones for every key we validate at declare time.
assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args},
                        RequiredArgs) ->
    rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName,
                                        [Key || {Key, _Fun} <- declare_args()]).

%% Validate x-arguments supplied at queue.declare time.
check_declare_arguments(QueueName, Args) ->
    check_arguments(QueueName, Args, declare_args()).

%% Validate x-arguments supplied at basic.consume time.
check_consume_arguments(QueueName, Args) ->
    check_arguments(QueueName, Args, consume_args()).
+
%% Run each {Key, ValidatorFun} over Args; missing keys are fine, an
%% invalid value raises a precondition_failed protocol error naming the
%% offending key and queue.
check_arguments(QueueName, Args, Validators) ->
    lists:foreach(
      fun ({Key, Fun}) ->
              case rabbit_misc:table_lookup(Args, Key) of
                  undefined -> ok;
                  TypeVal   -> case Fun(TypeVal, Args) of
                                   ok             -> ok;
                                   {error, Error} ->
                                       rabbit_misc:protocol_error(
                                         precondition_failed,
                                         "invalid arg '~s' for ~s: ~255p",
                                         [Key, rabbit_misc:rs(QueueName),
                                          Error])
                               end
              end
      end, Validators),
    ok.
+
%% Validators for x-arguments accepted at queue.declare time.
declare_args() ->
    [{<<"x-expires">>, fun check_expires_arg/2},
     {<<"x-message-ttl">>, fun check_message_ttl_arg/2},
     {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
     {<<"x-max-length">>, fun check_non_neg_int_arg/2}].

%% Validators for x-arguments accepted at basic.consume time.
consume_args() -> [{<<"x-priority">>, fun check_int_arg/2},
                   {<<"x-cancel-on-ha-failover">>, fun check_bool_arg/2}].
+
%% Accept any AMQP field type in ?INTEGER_ARG_TYPES; anything else is
%% an unacceptable type for an integer-valued argument.
check_int_arg({Type, _}, _Args) ->
    IsIntType = lists:member(Type, ?INTEGER_ARG_TYPES),
    if IsIntType -> ok;
       true      -> {error, {unacceptable_type, Type}}
    end.
+
%% Accept only the AMQP 'bool' field type for boolean-valued arguments.
check_bool_arg({Type, _}, _Args) ->
    case Type of
        bool -> ok;
        _    -> {error, {unacceptable_type, Type}}
    end.
+
%% As check_int_arg/2, additionally requiring the value to be >= 0.
check_non_neg_int_arg({_Type, Val} = TypeVal, Args) ->
    case check_int_arg(TypeVal, Args) of
        ok when Val < 0 -> {error, {value_negative, Val}};
        ok              -> ok;
        Error           -> Error
    end.
+
%% x-expires must be a positive integer within the expiry range
%% accepted by rabbit_misc:check_expiry/1 (zero is rejected explicitly).
check_expires_arg({Type, Val}, Args) ->
    case check_int_arg({Type, Val}, Args) of
        ok when Val == 0 -> {error, {value_zero, Val}};
        ok               -> rabbit_misc:check_expiry(Val);
        Error            -> Error
    end.

%% x-message-ttl must be an integer acceptable to
%% rabbit_misc:check_expiry/1 (zero is allowed here, unlike x-expires).
check_message_ttl_arg({Type, Val}, Args) ->
    case check_int_arg({Type, Val}, Args) of
        ok    -> rabbit_misc:check_expiry(Val);
        Error -> Error
    end.
+
%% x-dead-letter-routing-key must be a longstr, and only makes sense
%% when x-dead-letter-exchange is also present in Args.
check_dlxrk_arg({longstr, _}, Args) ->
    HasDlx = rabbit_misc:table_lookup(Args, <<"x-dead-letter-exchange">>)
                 =/= undefined,
    case HasDlx of
        true  -> ok;
        false -> {error, routing_key_but_no_dlx_defined}
    end;
check_dlxrk_arg({Type, _}, _Args) ->
    {error, {unacceptable_type, Type}}.
+
%% All queues in all vhosts.
list() -> mnesia:dirty_match_object(rabbit_queue, #amqqueue{_ = '_'}).

%% Not dirty_match_object since that would not be transactional when used in a
%% tx context
list(VHostPath) ->
    mnesia:async_dirty(
      fun () ->
              mnesia:match_object(
                rabbit_queue,
                #amqqueue{name = rabbit_misc:r(VHostPath, queue), _ = '_'},
                read)
      end).

%% Info keys supported by the queue process.
info_keys() -> rabbit_amqqueue_process:info_keys().

%% Map F over a vhost's queues, dropping queues that exit during F.
map(VHostPath, F) -> rabbit_misc:filter_exit_map(F, list(VHostPath)).

%% Full info proplist for one queue (call into its process).
info(#amqqueue{ pid = QPid }) -> delegate:call(QPid, info).

%% Info for the requested Items only; throws on error from the process.
info(#amqqueue{ pid = QPid }, Items) ->
    case delegate:call(QPid, {info, Items}) of
        {ok, Res}      -> Res;
        {error, Error} -> throw(Error)
    end.

%% Info for every queue in a vhost (all keys / selected keys).
info_all(VHostPath) -> map(VHostPath, fun (Q) -> info(Q) end).

info_all(VHostPath, Items) -> map(VHostPath, fun (Q) -> info(Q, Items) end).
+
%% Ask every queue process (all vhosts) to re-emit its events, tagged
%% with Ref so listeners can correlate the refresh.
force_event_refresh(Ref) ->
    lists:foreach(fun (#amqqueue{pid = QPid}) ->
                          gen_server2:cast(QPid, {force_event_refresh, Ref})
                  end, list()),
    ok.
+
%% Tell a queue process its policy changed (async).
notify_policy_changed(#amqqueue{pid = QPid}) ->
    gen_server2:cast(QPid, policy_changed).

%% Consumers of one queue, as reported by its process.
consumers(#amqqueue{ pid = QPid }) -> delegate:call(QPid, consumers).

consumer_info_keys() -> ?CONSUMER_INFO_KEYS.

%% Consumer info proplists for every queue in a vhost.
consumers_all(VHostPath) ->
    ConsumerInfoKeys = consumer_info_keys(),
    lists:append(
      map(VHostPath,
          fun (Q) ->
                  [lists:zip(
                     ConsumerInfoKeys,
                     [Q#amqqueue.name, ChPid, CTag, AckRequired, Prefetch, Args]) ||
                      {ChPid, CTag, AckRequired, Prefetch, Args} <- consumers(Q)]
          end)).

%% {ok, MessageCount, ConsumerCount} per the queue process.
stat(#amqqueue{pid = QPid}) -> delegate:call(QPid, stat).

%% Fire-and-forget deletion of queue processes by pid.
delete_immediately(QPids) ->
    [gen_server2:cast(QPid, delete_immediately) || QPid <- QPids],
    ok.

%% Synchronous delete honouring if-unused / if-empty conditions.
delete(#amqqueue{ pid = QPid }, IfUnused, IfEmpty) ->
    delegate:call(QPid, {delete, IfUnused, IfEmpty}).

purge(#amqqueue{ pid = QPid }) -> delegate:call(QPid, purge).

%% Deliver without / with credit-flow control (see deliver/3 below).
deliver(Qs, Delivery) -> deliver(Qs, Delivery, noflow).

deliver_flow(Qs, Delivery) -> deliver(Qs, Delivery, flow).

requeue(QPid, MsgIds, ChPid) -> delegate:call(QPid, {requeue, MsgIds, ChPid}).

ack(QPid, MsgIds, ChPid) -> delegate:cast(QPid, {ack, MsgIds, ChPid}).

%% NOTE(review): argument order here is (QPid, Requeue, MsgIds, ChPid),
%% which disagrees with the reject/4 spec above — verify against callers.
reject(QPid, Requeue, MsgIds, ChPid) ->
    delegate:cast(QPid, {reject, Requeue, MsgIds, ChPid}).
+
%% Tell all QPids that channel ChPid went down. Returns ok, or
%% {error, Bads} listing only the queues that failed abnormally
%% (normal exits are expected and filtered out).
notify_down_all(QPids, ChPid) ->
    {_, Bads} = delegate:call(QPids, {notify_down, ChPid}),
    IsAbnormal = fun ({_Pid, {exit, {R, _}, _}}) ->
                         rabbit_misc:is_abnormal_exit(R);
                     ({_Pid, _}) ->
                         false
                 end,
    case lists:filter(IsAbnormal, Bads) of
        []    -> ok;
        Bads1 -> {error, Bads1}
    end.
+
%% Activate the channel's limiter on all the given queues (async).
activate_limit_all(QPids, ChPid) ->
    delegate:cast(QPids, {activate_limit, ChPid}).

%% Grant consumer credit (AMQP 1.0-style flow) to a consumer tag.
credit(#amqqueue{pid = QPid}, ChPid, CTag, Credit, Drain) ->
    delegate:cast(QPid, {credit, ChPid, CTag, Credit, Drain}).

%% Synchronous single-message get; returns 'empty' or the message.
basic_get(#amqqueue{pid = QPid}, ChPid, NoAck, LimiterPid) ->
    delegate:call(QPid, {basic_get, ChPid, NoAck, LimiterPid}).

%% Register a consumer, after validating consume-time x-arguments.
basic_consume(#amqqueue{pid = QPid, name = QName}, NoAck, ChPid, LimiterPid,
              LimiterActive, ConsumerPrefetchCount, ConsumerTag,
              ExclusiveConsume, Args, OkMsg) ->
    ok = check_consume_arguments(QName, Args),
    delegate:call(QPid, {basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
                         ConsumerPrefetchCount, ConsumerTag, ExclusiveConsume,
                         Args, OkMsg}).

%% Cancel a consumer by tag.
basic_cancel(#amqqueue{pid = QPid}, ChPid, ConsumerTag, OkMsg) ->
    delegate:cast(QPid, {basic_cancel, ChPid, ConsumerTag, OkMsg}).

notify_decorators(#amqqueue{pid = QPid}) ->
    delegate:cast(QPid, notify_decorators).

%% Per-channel consumer-credit bookkeeping, kept in the channel's
%% process dictionary keyed by queue pid: a credit message is sent to
%% the queue only every ?MORE_CONSUMER_CREDIT_AFTER deliveries, to
%% batch the notifications. The first call also monitors the queue so
%% the entry can be erased when it dies (notify_sent_queue_down/1).
notify_sent(QPid, ChPid) ->
    Key = {consumer_credit_to, QPid},
    put(Key, case get(Key) of
                 1         -> gen_server2:cast(
                                QPid, {notify_sent, ChPid,
                                       ?MORE_CONSUMER_CREDIT_AFTER}),
                              ?MORE_CONSUMER_CREDIT_AFTER;
                 undefined -> erlang:monitor(process, QPid),
                              ?MORE_CONSUMER_CREDIT_AFTER - 1;
                 C         -> C - 1
             end),
    ok.

%% Drop the credit counter for a queue that went down.
notify_sent_queue_down(QPid) ->
    erase({consumer_credit_to, QPid}),
    ok.

resume(QPid, ChPid) -> delegate:cast(QPid, {resume, ChPid}).
+
%% Delete a queue's rows from both tables (must run inside an mnesia
%% tx) and return the binding deletions for post-tx processing.
internal_delete1(QueueName) ->
    ok = mnesia:delete({rabbit_queue, QueueName}),
    %% this 'guarded' delete prevents unnecessary writes to the mnesia
    %% disk log
    case mnesia:wread({rabbit_durable_queue, QueueName}) of
        []  -> ok;
        [_] -> ok = mnesia:delete({rabbit_durable_queue, QueueName})
    end,
    %% we want to execute some things, as decided by rabbit_exchange,
    %% after the transaction.
    rabbit_binding:remove_for_destination(QueueName).

%% Transactionally delete a queue; the returned tail thunk performs
%% binding post-processing and emits the queue_deleted event after the
%% transaction commits.
internal_delete(QueueName) ->
    rabbit_misc:execute_mnesia_tx_with_tail(
      fun () ->
              case {mnesia:wread({rabbit_queue, QueueName}),
                    mnesia:wread({rabbit_durable_queue, QueueName})} of
                  {[], []} ->
                      rabbit_misc:const({error, not_found});
                  _ ->
                      Deletions = internal_delete1(QueueName),
                      T = rabbit_binding:process_deletions(Deletions),
                      fun() ->
                              ok = T(),
                              ok = rabbit_event:notify(queue_deleted,
                                                       [{name, QueueName}])
                      end
              end
      end).
+
%% Remove all durable, non-mirrored queues homed on Node. Used when a
%% node is reset/removed while rabbit is not running locally.
forget_all_durable(Node) ->
    %% Note rabbit is not running so we avoid e.g. the worker pool. Also why
    %% we don't invoke the return from rabbit_binding:process_deletions/1.
    {atomic, ok} =
        mnesia:sync_transaction(
          fun () ->
                  Qs = mnesia:match_object(rabbit_durable_queue,
                                           #amqqueue{_ = '_'}, write),
                  %% Mirrored queues (with an ha-mode policy) survive
                  %% elsewhere, so only forget unmirrored ones.
                  [rabbit_binding:process_deletions(
                     internal_delete1(Name)) ||
                      #amqqueue{name = Name, pid = Pid} = Q <- Qs,
                      node(Pid) =:= Node,
                      rabbit_policy:get(<<"ha-mode">>, Q) =:= undefined],
                  ok
          end),
    ok.

%% Ask the queue process to run Fun against its backing queue state.
run_backing_queue(QPid, Mod, Fun) ->
    gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).

%% Memory-monitor callback: set the target RAM duration.
set_ram_duration_target(QPid, Duration) ->
    gen_server2:cast(QPid, {set_ram_duration_target, Duration}).

%% File-handle-cache callback: age out unused file handles.
set_maximum_since_use(QPid, Age) ->
    gen_server2:cast(QPid, {set_maximum_since_use, Age}).

start_mirroring(QPid) -> ok = delegate:cast(QPid, start_mirroring).
stop_mirroring(QPid)  -> ok = delegate:cast(QPid, stop_mirroring).

sync_mirrors(QPid)        -> delegate:call(QPid, sync_mirrors).
cancel_sync_mirrors(QPid) -> delegate:call(QPid, cancel_sync_mirrors).
+
%% Clean up after a node goes down: transactionally remove transient
%% state for unmirrored queues (slave_pids = []) homed on Node whose
%% process is dead, then (post-tx) process binding deletions and emit
%% queue_deleted events.
on_node_down(Node) ->
    rabbit_misc:execute_mnesia_tx_with_tail(
      fun () -> QsDels =
                    qlc:e(qlc:q([{QName, delete_queue(QName)} ||
                                    #amqqueue{name = QName, pid = Pid,
                                              slave_pids = []}
                                        <- mnesia:table(rabbit_queue),
                                    node(Pid) == Node andalso
                                        not rabbit_misc:is_process_alive(Pid)])),
                {Qs, Dels} = lists:unzip(QsDels),
                T = rabbit_binding:process_deletions(
                      lists:foldl(fun rabbit_binding:combine_deletions/2,
                                  rabbit_binding:new_deletions(), Dels)),
                fun () ->
                        T(),
                        lists:foreach(
                          fun(QName) ->
                                  ok = rabbit_event:notify(queue_deleted,
                                                           [{name, QName}])
                          end, Qs)
                end
      end).

%% Remove a queue's live-table row and its transient bindings,
%% returning the binding deletions (tx context required).
delete_queue(QueueName) ->
    ok = mnesia:delete({rabbit_queue, QueueName}),
    rabbit_binding:remove_transient_for_destination(QueueName).

%% Build an in-memory (never stored) queue record pointing at Pid;
%% used for routing to things that are not real queues.
pseudo_queue(QueueName, Pid) ->
    #amqqueue{name        = QueueName,
              durable     = false,
              auto_delete = false,
              arguments   = [],
              pid         = Pid,
              slave_pids  = []}.
+
%% Deliver a message to a set of queues (masters and their slaves),
%% optionally issuing credit-flow. Returns all pids addressed.
deliver([], _Delivery, _Flow) ->
    %% /dev/null optimisation
    [];

deliver(Qs, Delivery, Flow) ->
    {MPids, SPids} = qpids(Qs),
    QPids = MPids ++ SPids,
    case Flow of
        flow   -> [credit_flow:send(QPid) || QPid <- QPids];
        noflow -> ok
    end,

    %% We let slaves know that they were being addressed as slaves at
    %% the time - if they receive such a message from the channel
    %% after they have become master they should mark the message as
    %% 'delivered' since they do not know what the master may have
    %% done with it.
    MMsg = {deliver, Delivery, false, Flow},
    SMsg = {deliver, Delivery, true, Flow},
    delegate:cast(MPids, MMsg),
    delegate:cast(SPids, SMsg),
    QPids.

%% Split queues into {MasterPids, AllSlavePids}. NB: in the general
%% clause the foldl conses, so MasterPids comes out reversed relative
%% to Qs — callers must not rely on ordering.
qpids([]) -> {[], []}; %% optimisation
qpids([#amqqueue{pid = QPid, slave_pids = SPids}]) -> {[QPid], SPids}; %% opt
qpids(Qs) ->
    {MPids, SPids} = lists:foldl(fun (#amqqueue{pid = QPid, slave_pids = SPids},
                                      {MPidAcc, SPidAcc}) ->
                                         {[QPid | MPidAcc], [SPids | SPidAcc]}
                                 end, {[], []}, Qs),
    {MPids, lists:append(SPids)}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_process).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-behaviour(gen_server2).
+
+-define(SYNC_INTERVAL, 200). %% milliseconds
+-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
+-define(CONSUMER_BIAS_RATIO, 1.1). %% i.e. consume 10% faster
+
+-export([start_link/1, info_keys/0]).
+
+-export([init_with_backing_queue_state/7]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+
+%% Queue's state
%% Queue process state. Fields whose semantics are not evident from
%% this chunk carry hedged notes — confirm against the full module.
-record(q, {q,                   %% the #amqqueue{} record this process serves
            exclusive_consumer,  %% 'none' after init (see init_state/1)
            has_had_consumers,   %% boolean, 'false' after init
            backing_queue,       %% backing queue callback module (atom)
            backing_queue_state, %% opaque state of backing_queue
            consumers,           %% rabbit_queue_consumers state
            expires,             %% x-expires setting -- TODO confirm units
            sync_timer_ref,      %% timer ref; interval ?SYNC_INTERVAL -- confirm
            rate_timer_ref,      %% rate/ram-duration timer ref -- confirm
            expiry_timer_ref,    %% queue-expiry timer ref -- confirm
            stats_timer,         %% rabbit_event stats timer (init_stats_timer)
            msg_id_to_channel,   %% gb_tree after init (see init_state/1)
            ttl,                 %% x-message-ttl setting -- confirm
            ttl_timer_ref,
            ttl_timer_expiry,
            senders,             %% pmon of sending channels (pmon:new(delegate))
            dlx,                 %% dead-letter exchange -- confirm
            dlx_routing_key,     %% dead-letter routing key -- confirm
            max_length,          %% x-max-length setting -- confirm
            args_policy_version, %% integer, 0 after init
            status               %% 'running' after init
           }).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 ::
+ (rabbit_types:amqqueue()) -> rabbit_types:ok_pid_or_error()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(init_with_backing_queue_state/7 ::
+ (rabbit_types:amqqueue(), atom(), tuple(), any(),
+ [rabbit_types:delivery()], pmon:pmon(), dict()) -> #q{}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(STATISTICS_KEYS,
+ [name,
+ policy,
+ exclusive_consumer_pid,
+ exclusive_consumer_tag,
+ messages_ready,
+ messages_unacknowledged,
+ messages,
+ consumers,
+ consumer_utilisation,
+ memory,
+ slave_pids,
+ synchronised_slave_pids,
+ backing_queue_status,
+ state
+ ]).
+
+-define(CREATION_EVENT_KEYS,
+ [name,
+ durable,
+ auto_delete,
+ arguments,
+ owner_pid
+ ]).
+
+-define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name]]).
+
+%%----------------------------------------------------------------------------
+
%% Start a queue process for the given #amqqueue{} record.
start_link(Q) -> gen_server2:start_link(?MODULE, Q, []).

info_keys() -> ?INFO_KEYS.

%%----------------------------------------------------------------------------

%% gen_server2 init: trap exits (so terminate/2 runs on shutdown) and
%% start hibernated with backoff; real initialisation happens via the
%% {init, ...} call from rabbit_amqqueue:declare / recovery.
init(Q) ->
    process_flag(trap_exit, true),
    ?store_proc_name(Q#amqqueue.name),
    {ok, init_state(Q#amqqueue{pid = self()}), hibernate,
     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.

%% Initialise with an already-constructed backing queue state (used on
%% master promotion/failover paths): monitor the exclusive owner if
%% any, adopt the supplied state, re-apply args/policy, then replay the
%% pending deliveries in order.
init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS,
                              RateTRef, Deliveries, Senders, MTC) ->
    case Owner of
        none -> ok;
        _    -> erlang:monitor(process, Owner)
    end,
    State = init_state(Q),
    State1 = State#q{backing_queue       = BQ,
                     backing_queue_state = BQS,
                     rate_timer_ref      = RateTRef,
                     senders             = Senders,
                     msg_id_to_channel   = MTC},
    State2 = process_args_policy(State1),
    State3 = lists:foldl(fun (Delivery, StateN) ->
                                 deliver_or_enqueue(Delivery, true, StateN)
                         end, State2, Deliveries),
    notify_decorators(startup, State3),
    State3.

%% Fresh #q{} state for Q, with the stats timer initialised.
init_state(Q) ->
    State = #q{q                   = Q,
               exclusive_consumer  = none,
               has_had_consumers   = false,
               consumers           = rabbit_queue_consumers:new(),
               senders             = pmon:new(delegate),
               msg_id_to_channel   = gb_trees:empty(),
               status              = running,
               args_policy_version = 0},
    rabbit_event:init_stats_timer(State, #q.stats_timer).
+
%% gen_server2 terminate: plain/other shutdowns just terminate the
%% backing queue; a missing owner or an abnormal reason deletes the
%% queue entirely (via terminate_delete/3).
terminate(shutdown = R, State = #q{backing_queue = BQ}) ->
    terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
terminate({shutdown, missing_owner} = Reason, State) ->
    %% if the owner was missing then there will be no queue, so don't emit stats
    terminate_shutdown(terminate_delete(false, Reason, State), State);
terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
    terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
terminate(Reason, State) ->
    terminate_shutdown(terminate_delete(true, Reason, State), State).

%% Build the shutdown fun that deletes the backing queue data,
%% optionally emits final stats, and removes the queue from mnesia.
terminate_delete(EmitStats, Reason,
                 State = #q{q = #amqqueue{name = QName},
                            backing_queue = BQ}) ->
    fun (BQS) ->
            BQS1 = BQ:delete_and_terminate(Reason, BQS),
            if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer,
                                                    fun() -> emit_stats(State) end);
               true      -> ok
            end,
            %% don't care if the internal delete doesn't return 'ok'.
            rabbit_amqqueue:internal_delete(QName),
            BQS1
    end.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
+%%----------------------------------------------------------------------------
+
%% Complete queue declaration inside the queue process (first {init,..}
%% call): record the queue in mnesia, and if ours matches the stored
%% record, reply to the declaring channel, register callbacks, set up
%% the backing queue and emit the creation event. If a different live
%% queue won the race, stop and report {existing, Q1}.
declare(Recover, From, State = #q{q = Q,
                                  backing_queue = undefined,
                                  backing_queue_state = undefined}) ->
    {Recovery, TermsOrNew} = recovery_status(Recover),
    case rabbit_amqqueue:internal_declare(Q, Recovery /= new) of
        #amqqueue{} = Q1 ->
            case matches(Recovery, Q, Q1) of
                true ->
                    %% Reply early so the channel is unblocked while we
                    %% finish (possibly slow) initialisation below.
                    gen_server2:reply(From, {new, Q}),
                    ok = file_handle_cache:register_callback(
                           rabbit_amqqueue, set_maximum_since_use, [self()]),
                    ok = rabbit_memory_monitor:register(
                           self(), {rabbit_amqqueue,
                                    set_ram_duration_target, [self()]}),
                    BQ = backing_queue_module(Q1),
                    BQS = bq_init(BQ, Q, TermsOrNew),
                    recovery_barrier(Recovery),
                    State1 = process_args_policy(
                               State#q{backing_queue = BQ,
                                       backing_queue_state = BQS}),
                    notify_decorators(startup, State),
                    rabbit_event:notify(queue_created,
                                        infos(?CREATION_EVENT_KEYS, State1)),
                    rabbit_event:if_enabled(State1, #q.stats_timer,
                                            fun() -> emit_stats(State1) end),
                    noreply(State1);
                false ->
                    {stop, normal, {existing, Q1}, State}
            end;
        Err ->
            {stop, normal, Err, State}
    end.
+
+recovery_status(new) -> {new, new};
+recovery_status({Recover, Terms}) -> {Recover, Terms}.
+
+matches(new, Q1, Q2) ->
+ %% i.e. not policy
+ Q1#amqqueue.name =:= Q2#amqqueue.name andalso
+ Q1#amqqueue.durable =:= Q2#amqqueue.durable andalso
+ Q1#amqqueue.auto_delete =:= Q2#amqqueue.auto_delete andalso
+ Q1#amqqueue.exclusive_owner =:= Q2#amqqueue.exclusive_owner andalso
+ Q1#amqqueue.arguments =:= Q2#amqqueue.arguments andalso
+ Q1#amqqueue.pid =:= Q2#amqqueue.pid andalso
+ Q1#amqqueue.slave_pids =:= Q2#amqqueue.slave_pids;
+matches(_, Q, Q) -> true;
+matches(_, _Q, _Q1) -> false.
+
+maybe_notify_decorators(false, State) -> State;
+maybe_notify_decorators(true, State) -> notify_decorators(State), State.
+
+notify_decorators(Event, State) -> decorator_callback(qname(State), Event, []).
+
+notify_decorators(State = #q{consumers = Consumers,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ P = rabbit_queue_consumers:max_active_priority(Consumers),
+ decorator_callback(qname(State), consumer_state_changed,
+ [P, BQ:is_empty(BQS)]).
+
+decorator_callback(QName, F, A) ->
+ %% Look up again in case policy and hence decorators have changed
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q = #amqqueue{decorators = Ds}} ->
+ [ok = apply(M, F, [Q|A]) || M <- rabbit_queue_decorator:select(Ds)];
+ {error, not_found} ->
+ ok
+ end.
+
+bq_init(BQ, Q, Recover) ->
+ Self = self(),
+ BQ:init(Q, Recover,
+ fun (Mod, Fun) ->
+ rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
+ end).
+
+recovery_barrier(new) ->
+ ok;
+recovery_barrier(BarrierPid) ->
+ MRef = erlang:monitor(process, BarrierPid),
+ receive
+ {BarrierPid, go} -> erlang:demonitor(MRef, [flush]);
+ {'DOWN', MRef, process, _, _} -> ok
+ end.
+
+process_args_policy(State = #q{q = Q,
+ args_policy_version = N}) ->
+ ArgsTable =
+ [{<<"expires">>, fun res_min/2, fun init_exp/2},
+ {<<"dead-letter-exchange">>, fun res_arg/2, fun init_dlx/2},
+ {<<"dead-letter-routing-key">>, fun res_arg/2, fun init_dlx_rkey/2},
+ {<<"message-ttl">>, fun res_min/2, fun init_ttl/2},
+ {<<"max-length">>, fun res_min/2, fun init_max_length/2}],
+ drop_expired_msgs(
+ lists:foldl(fun({Name, Resolve, Fun}, StateN) ->
+ Fun(args_policy_lookup(Name, Resolve, Q), StateN)
+ end, State#q{args_policy_version = N + 1}, ArgsTable)).
+
+args_policy_lookup(Name, Resolve, Q = #amqqueue{arguments = Args}) ->
+ AName = <<"x-", Name/binary>>,
+ case {rabbit_policy:get(Name, Q), rabbit_misc:table_lookup(Args, AName)} of
+ {undefined, undefined} -> undefined;
+ {undefined, {_Type, Val}} -> Val;
+ {Val, undefined} -> Val;
+ {PolVal, {_Type, ArgVal}} -> Resolve(PolVal, ArgVal)
+ end.
+
+res_arg(_PolVal, ArgVal) -> ArgVal.
+res_min(PolVal, ArgVal) -> erlang:min(PolVal, ArgVal).
+
+%% In both these we init with the undefined variant first to stop any
+%% existing timer, then start a new one which may fire after a
+%% different time.
+init_exp(undefined, State) -> stop_expiry_timer(State#q{expires = undefined});
+init_exp(Expires, State) -> State1 = init_exp(undefined, State),
+ ensure_expiry_timer(State1#q{expires = Expires}).
+
+init_ttl(undefined, State) -> stop_ttl_timer(State#q{ttl = undefined});
+init_ttl(TTL, State) -> (init_ttl(undefined, State))#q{ttl = TTL}.
+
+init_dlx(undefined, State) ->
+ State#q{dlx = undefined};
+init_dlx(DLX, State = #q{q = #amqqueue{name = QName}}) ->
+ State#q{dlx = rabbit_misc:r(QName, exchange, DLX)}.
+
+init_dlx_rkey(RoutingKey, State) -> State#q{dlx_routing_key = RoutingKey}.
+
+init_max_length(MaxLen, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{max_length = MaxLen}),
+ State1.
+
+terminate_shutdown(Fun, State) ->
+ State1 = #q{backing_queue_state = BQS, consumers = Consumers} =
+ lists:foldl(fun (F, S) -> F(S) end, State,
+ [fun stop_sync_timer/1,
+ fun stop_rate_timer/1,
+ fun stop_expiry_timer/1,
+ fun stop_ttl_timer/1]),
+ case BQS of
+ undefined -> State1;
+ _ -> ok = rabbit_memory_monitor:deregister(self()),
+ QName = qname(State),
+ notify_decorators(shutdown, State),
+ [emit_consumer_deleted(Ch, CTag, QName) ||
+ {Ch, CTag, _, _, _} <-
+ rabbit_queue_consumers:all(Consumers)],
+ State1#q{backing_queue_state = Fun(BQS)}
+ end.
+
+reply(Reply, NewState) ->
+ {NewState1, Timeout} = next_state(NewState),
+ {reply, Reply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
+
+noreply(NewState) ->
+ {NewState1, Timeout} = next_state(NewState),
+ {noreply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
+
+next_state(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ assert_invariant(State),
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ MTC1 = confirm_messages(MsgIds, MTC),
+ State1 = State#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1},
+ case BQ:needs_timeout(BQS1) of
+ false -> {stop_sync_timer(State1), hibernate };
+ idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
+ timed -> {ensure_sync_timer(State1), 0 }
+ end.
+
+backing_queue_module(Q) ->
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> {ok, BQM} = application:get_env(backing_queue_module),
+ BQM;
+ true -> rabbit_mirror_queue_master
+ end.
+
+ensure_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #q.sync_timer_ref,
+ ?SYNC_INTERVAL, sync_timeout).
+
+stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #q.sync_timer_ref).
+
+ensure_rate_timer(State) ->
+ rabbit_misc:ensure_timer(State, #q.rate_timer_ref,
+ ?RAM_DURATION_UPDATE_INTERVAL,
+ update_ram_duration).
+
+stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #q.rate_timer_ref).
+
+%% We wish to expire only when there are no consumers *and* the expiry
+%% hasn't been refreshed (by queue.declare or basic.get) for the
+%% configured period.
+ensure_expiry_timer(State = #q{expires = undefined}) ->
+ State;
+ensure_expiry_timer(State = #q{expires = Expires,
+ args_policy_version = Version}) ->
+ case is_unused(State) of
+ true -> NewState = stop_expiry_timer(State),
+ rabbit_misc:ensure_timer(NewState, #q.expiry_timer_ref,
+ Expires, {maybe_expire, Version});
+ false -> State
+ end.
+
+stop_expiry_timer(State) -> rabbit_misc:stop_timer(State, #q.expiry_timer_ref).
+
+ensure_ttl_timer(undefined, State) ->
+ State;
+ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = undefined,
+ args_policy_version = Version}) ->
+ After = (case Expiry - now_micros() of
+ V when V > 0 -> V + 999; %% always fire later
+ _ -> 0
+ end) div 1000,
+ TRef = erlang:send_after(After, self(), {drop_expired, Version}),
+ State#q{ttl_timer_ref = TRef, ttl_timer_expiry = Expiry};
+ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = TRef,
+ ttl_timer_expiry = TExpiry})
+ when Expiry + 1000 < TExpiry ->
+ case erlang:cancel_timer(TRef) of
+ false -> State;
+ _ -> ensure_ttl_timer(Expiry, State#q{ttl_timer_ref = undefined})
+ end;
+ensure_ttl_timer(_Expiry, State) ->
+ State.
+
+stop_ttl_timer(State) -> rabbit_misc:stop_timer(State, #q.ttl_timer_ref).
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #q.stats_timer, emit_stats).
+
+assert_invariant(State = #q{consumers = Consumers}) ->
+ true = (rabbit_queue_consumers:inactive(Consumers) orelse is_empty(State)).
+
+is_empty(#q{backing_queue = BQ, backing_queue_state = BQS}) -> BQ:is_empty(BQS).
+
+maybe_send_drained(WasEmpty, State) ->
+ case (not WasEmpty) andalso is_empty(State) of
+ true -> notify_decorators(State),
+ rabbit_queue_consumers:send_drained();
+ false -> ok
+ end,
+ State.
+
+confirm_messages([], MTC) ->
+ MTC;
+confirm_messages(MsgIds, MTC) ->
+ {CMs, MTC1} =
+ lists:foldl(
+ fun(MsgId, {CMs, MTC0}) ->
+ case gb_trees:lookup(MsgId, MTC0) of
+ {value, {SenderPid, MsgSeqNo}} ->
+ {rabbit_misc:gb_trees_cons(SenderPid,
+ MsgSeqNo, CMs),
+ gb_trees:delete(MsgId, MTC0)};
+ none ->
+ {CMs, MTC0}
+ end
+ end, {gb_trees:empty(), MTC}, MsgIds),
+ rabbit_misc:gb_trees_foreach(fun rabbit_misc:confirm_to_sender/2, CMs),
+ MTC1.
+
+send_or_record_confirm(#delivery{confirm = false}, State) ->
+ {never, State};
+send_or_record_confirm(#delivery{confirm = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message {
+ is_persistent = true,
+ id = MsgId}},
+ State = #q{q = #amqqueue{durable = true},
+ msg_id_to_channel = MTC}) ->
+ MTC1 = gb_trees:insert(MsgId, {SenderPid, MsgSeqNo}, MTC),
+ {eventually, State#q{msg_id_to_channel = MTC1}};
+send_or_record_confirm(#delivery{confirm = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}, State) ->
+ rabbit_misc:confirm_to_sender(SenderPid, [MsgSeqNo]),
+ {immediately, State}.
+
+send_mandatory(#delivery{mandatory = false}) ->
+ ok;
+send_mandatory(#delivery{mandatory = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}) ->
+ gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).
+
+discard(#delivery{confirm = Confirm,
+ sender = SenderPid,
+ message = #basic_message{id = MsgId}}, BQ, BQS, MTC) ->
+ MTC1 = case Confirm of
+ true -> confirm_messages([MsgId], MTC);
+ false -> MTC
+ end,
+ BQS1 = BQ:discard(MsgId, SenderPid, BQS),
+ {BQS1, MTC1}.
+
+run_message_queue(State) -> run_message_queue(false, State).
+
+run_message_queue(ActiveConsumersChanged, State) ->
+ case is_empty(State) of
+ true -> maybe_notify_decorators(ActiveConsumersChanged, State);
+ false -> case rabbit_queue_consumers:deliver(
+ fun(AckRequired) -> fetch(AckRequired, State) end,
+ qname(State), State#q.consumers) of
+ {delivered, ActiveConsumersChanged1, State1, Consumers} ->
+ run_message_queue(
+ ActiveConsumersChanged or ActiveConsumersChanged1,
+ State1#q{consumers = Consumers});
+ {undelivered, ActiveConsumersChanged1, Consumers} ->
+ maybe_notify_decorators(
+ ActiveConsumersChanged or ActiveConsumersChanged1,
+ State#q{consumers = Consumers})
+ end
+ end.
+
+attempt_delivery(Delivery = #delivery{sender = SenderPid, message = Message},
+ Props, Delivered, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ case rabbit_queue_consumers:deliver(
+ fun (true) -> true = BQ:is_empty(BQS),
+ {AckTag, BQS1} = BQ:publish_delivered(
+ Message, Props, SenderPid, BQS),
+ {{Message, Delivered, AckTag}, {BQS1, MTC}};
+ (false) -> {{Message, Delivered, undefined},
+ discard(Delivery, BQ, BQS, MTC)}
+ end, qname(State), State#q.consumers) of
+ {delivered, ActiveConsumersChanged, {BQS1, MTC1}, Consumers} ->
+ {delivered, maybe_notify_decorators(
+ ActiveConsumersChanged,
+ State#q{backing_queue_state = BQS1,
+ msg_id_to_channel = MTC1,
+ consumers = Consumers})};
+ {undelivered, ActiveConsumersChanged, Consumers} ->
+ {undelivered, maybe_notify_decorators(
+ ActiveConsumersChanged,
+ State#q{consumers = Consumers})}
+ end.
+
+deliver_or_enqueue(Delivery = #delivery{message = Message, sender = SenderPid},
+ Delivered, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ send_mandatory(Delivery), %% must do this before confirms
+ {Confirm, State1} = send_or_record_confirm(Delivery, State),
+ Props = message_properties(Message, Confirm, State1),
+ {IsDuplicate, BQS1} = BQ:is_duplicate(Message, BQS),
+ State2 = State1#q{backing_queue_state = BQS1},
+ case IsDuplicate orelse attempt_delivery(Delivery, Props, Delivered,
+ State2) of
+ true ->
+ State2;
+ {delivered, State3} ->
+ State3;
+ %% The next one is an optimisation
+ {undelivered, State3 = #q{ttl = 0, dlx = undefined,
+ backing_queue_state = BQS2,
+ msg_id_to_channel = MTC}} ->
+ {BQS3, MTC1} = discard(Delivery, BQ, BQS2, MTC),
+ State3#q{backing_queue_state = BQS3, msg_id_to_channel = MTC1};
+ {undelivered, State3 = #q{backing_queue_state = BQS2}} ->
+ BQS3 = BQ:publish(Message, Props, Delivered, SenderPid, BQS2),
+ {Dropped, State4 = #q{backing_queue_state = BQS4}} =
+ maybe_drop_head(State3#q{backing_queue_state = BQS3}),
+ QLen = BQ:len(BQS4),
+ %% optimisation: it would be perfectly safe to always
+ %% invoke drop_expired_msgs here, but that is expensive so
+ %% we only do that if a new message that might have an
+ %% expiry ends up at the head of the queue. If the head
+ %% remains unchanged, or if the newly published message
+ %% has no expiry and becomes the head of the queue then
+ %% the call is unnecessary.
+ case {Dropped > 0, QLen =:= 1, Props#message_properties.expiry} of
+ {false, false, _} -> State4;
+ {true, true, undefined} -> State4;
+ {_, _, _} -> drop_expired_msgs(State4)
+ end
+ end.
+
+maybe_drop_head(State = #q{max_length = undefined}) ->
+ {0, State};
+maybe_drop_head(State = #q{max_length = MaxLen,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ case BQ:len(BQS) - MaxLen of
+ Excess when Excess > 0 ->
+ {Excess,
+ with_dlx(
+ State#q.dlx,
+ fun (X) -> dead_letter_maxlen_msgs(X, Excess, State) end,
+ fun () ->
+ {_, BQS1} = lists:foldl(fun (_, {_, BQS0}) ->
+ BQ:drop(false, BQS0)
+ end, {ok, BQS},
+ lists:seq(1, Excess)),
+ State#q{backing_queue_state = BQS1}
+ end)};
+ _ -> {0, State}
+ end.
+
+requeue_and_run(AckTags, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ WasEmpty = BQ:is_empty(BQS),
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ {_Dropped, State1} = maybe_drop_head(State#q{backing_queue_state = BQS1}),
+ run_message_queue(maybe_send_drained(WasEmpty, drop_expired_msgs(State1))).
+
+fetch(AckRequired, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {Result, BQS1} = BQ:fetch(AckRequired, BQS),
+ State1 = drop_expired_msgs(State#q{backing_queue_state = BQS1}),
+ {Result, maybe_send_drained(Result =:= empty, State1)}.
+
+ack(AckTags, ChPid, State) ->
+ subtract_acks(ChPid, AckTags, State,
+ fun (State1 = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {_Guids, BQS1} = BQ:ack(AckTags, BQS),
+ State1#q{backing_queue_state = BQS1}
+ end).
+
+requeue(AckTags, ChPid, State) ->
+ subtract_acks(ChPid, AckTags, State,
+ fun (State1) -> requeue_and_run(AckTags, State1) end).
+
+possibly_unblock(Update, ChPid, State = #q{consumers = Consumers}) ->
+ case rabbit_queue_consumers:possibly_unblock(Update, ChPid, Consumers) of
+ unchanged -> State;
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, State1)
+ end.
+
+should_auto_delete(#q{q = #amqqueue{auto_delete = false}}) -> false;
+should_auto_delete(#q{has_had_consumers = false}) -> false;
+should_auto_delete(State) -> is_unused(State).
+
+handle_ch_down(DownPid, State = #q{consumers = Consumers,
+ exclusive_consumer = Holder,
+ senders = Senders}) ->
+ State1 = State#q{senders = case pmon:is_monitored(DownPid, Senders) of
+ false -> Senders;
+ true -> credit_flow:peer_down(DownPid),
+ pmon:demonitor(DownPid, Senders)
+ end},
+ case rabbit_queue_consumers:erase_ch(DownPid, Consumers) of
+ not_found ->
+ {ok, State1};
+ {ChAckTags, ChCTags, Consumers1} ->
+ QName = qname(State1),
+ [emit_consumer_deleted(DownPid, CTag, QName) || CTag <- ChCTags],
+ Holder1 = case Holder of
+ {DownPid, _} -> none;
+ Other -> Other
+ end,
+ State2 = State1#q{consumers = Consumers1,
+ exclusive_consumer = Holder1},
+ notify_decorators(State2),
+ case should_auto_delete(State2) of
+ true -> {stop, State2};
+ false -> {ok, requeue_and_run(ChAckTags,
+ ensure_expiry_timer(State2))}
+ end
+ end.
+
+check_exclusive_access({_ChPid, _ConsumerTag}, _ExclusiveConsume, _State) ->
+ in_use;
+check_exclusive_access(none, false, _State) ->
+ ok;
+check_exclusive_access(none, true, State) ->
+ case is_unused(State) of
+ true -> ok;
+ false -> in_use
+ end.
+
+is_unused(_State) -> rabbit_queue_consumers:count() == 0.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
+qname(#q{q = #amqqueue{name = QName}}) -> QName.
+
+backing_queue_timeout(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ State#q{backing_queue_state = BQ:timeout(BQS)}.
+
+subtract_acks(ChPid, AckTags, State = #q{consumers = Consumers}, Fun) ->
+ case rabbit_queue_consumers:subtract_acks(ChPid, AckTags, Consumers) of
+ not_found -> State;
+ unchanged -> Fun(State);
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, Fun(State1))
+ end.
+
+message_properties(Message, Confirm, #q{ttl = TTL}) ->
+ #message_properties{expiry = calculate_msg_expiry(Message, TTL),
+ needs_confirming = Confirm == eventually}.
+
+calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
+ #content{properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% We assert that the expiration must be valid - we check in the channel.
+ {ok, MsgTTL} = rabbit_basic:parse_expiration(Props),
+ case lists:min([TTL, MsgTTL]) of
+ undefined -> undefined;
+ T -> now_micros() + T * 1000
+ end.
+
+%% Logically this function should invoke maybe_send_drained/2.
+%% However, that is expensive. Since some frequent callers of
+%% drop_expired_msgs/1, in particular deliver_or_enqueue/3, cannot
+%% possibly cause the queue to become empty, we push the
+%% responsibility to the callers. So be cautious when adding new ones.
+drop_expired_msgs(State) ->
+ case is_empty(State) of
+ true -> State;
+ false -> drop_expired_msgs(now_micros(), State)
+ end.
+
+drop_expired_msgs(Now, State = #q{backing_queue_state = BQS,
+ backing_queue = BQ }) ->
+ ExpirePred = fun (#message_properties{expiry = Exp}) -> Now >= Exp end,
+ {Props, State1} =
+ with_dlx(
+ State#q.dlx,
+ fun (X) -> dead_letter_expired_msgs(ExpirePred, X, State) end,
+ fun () -> {Next, BQS1} = BQ:dropwhile(ExpirePred, BQS),
+ {Next, State#q{backing_queue_state = BQS1}} end),
+ ensure_ttl_timer(case Props of
+ undefined -> undefined;
+ #message_properties{expiry = Exp} -> Exp
+ end, State1).
+
+with_dlx(undefined, _With, Without) -> Without();
+with_dlx(DLX, With, Without) -> case rabbit_exchange:lookup(DLX) of
+ {ok, X} -> With(X);
+ {error, not_found} -> Without()
+ end.
+
+dead_letter_expired_msgs(ExpirePred, X, State = #q{backing_queue = BQ}) ->
+ dead_letter_msgs(fun (DLFun, Acc, BQS1) ->
+ BQ:fetchwhile(ExpirePred, DLFun, Acc, BQS1)
+ end, expired, X, State).
+
+dead_letter_rejected_msgs(AckTags, X, State = #q{backing_queue = BQ}) ->
+ {ok, State1} =
+ dead_letter_msgs(
+ fun (DLFun, Acc, BQS) ->
+ {Acc1, BQS1} = BQ:ackfold(DLFun, Acc, BQS, AckTags),
+ {ok, Acc1, BQS1}
+ end, rejected, X, State),
+ State1.
+
+dead_letter_maxlen_msgs(X, Excess, State = #q{backing_queue = BQ}) ->
+ {ok, State1} =
+ dead_letter_msgs(
+ fun (DLFun, Acc, BQS) ->
+ lists:foldl(fun (_, {ok, Acc0, BQS0}) ->
+ {{Msg, _, AckTag}, BQS1} =
+ BQ:fetch(true, BQS0),
+ {ok, DLFun(Msg, AckTag, Acc0), BQS1}
+ end, {ok, Acc, BQS}, lists:seq(1, Excess))
+ end, maxlen, X, State),
+ State1.
+
+dead_letter_msgs(Fun, Reason, X, State = #q{dlx_routing_key = RK,
+ backing_queue_state = BQS,
+ backing_queue = BQ}) ->
+ QName = qname(State),
+ {Res, Acks1, BQS1} =
+ Fun(fun (Msg, AckTag, Acks) ->
+ rabbit_dead_letter:publish(Msg, Reason, X, RK, QName),
+ [AckTag | Acks]
+ end, [], BQS),
+ {_Guids, BQS2} = BQ:ack(Acks1, BQS1),
+ {Res, State#q{backing_queue_state = BQS2}}.
+
+stop(State) -> stop(noreply, State).
+
+stop(noreply, State) -> {stop, normal, State};
+stop(Reply, State) -> {stop, normal, Reply, State}.
+
+now_micros() -> timer:now_diff(now(), {0,0,0}).
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(name, #q{q = #amqqueue{name = Name}}) -> Name;
+i(durable, #q{q = #amqqueue{durable = Durable}}) -> Durable;
+i(auto_delete, #q{q = #amqqueue{auto_delete = AutoDelete}}) -> AutoDelete;
+i(arguments, #q{q = #amqqueue{arguments = Arguments}}) -> Arguments;
+i(pid, _) ->
+ self();
+i(owner_pid, #q{q = #amqqueue{exclusive_owner = none}}) ->
+ '';
+i(owner_pid, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) ->
+ ExclusiveOwner;
+i(policy, #q{q = Q}) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(exclusive_consumer_pid, #q{exclusive_consumer = none}) ->
+ '';
+i(exclusive_consumer_pid, #q{exclusive_consumer = {ChPid, _ConsumerTag}}) ->
+ ChPid;
+i(exclusive_consumer_tag, #q{exclusive_consumer = none}) ->
+ '';
+i(exclusive_consumer_tag, #q{exclusive_consumer = {_ChPid, ConsumerTag}}) ->
+ ConsumerTag;
+i(messages_ready, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ BQ:len(BQS);
+i(messages_unacknowledged, _) ->
+ rabbit_queue_consumers:unacknowledged_message_count();
+i(messages, State) ->
+ lists:sum([i(Item, State) || Item <- [messages_ready,
+ messages_unacknowledged]]);
+i(consumers, _) ->
+ rabbit_queue_consumers:count();
+i(consumer_utilisation, #q{consumers = Consumers}) ->
+ case rabbit_queue_consumers:count() of
+ 0 -> '';
+ _ -> rabbit_queue_consumers:utilisation(Consumers)
+ end;
+i(memory, _) ->
+ {memory, M} = process_info(self(), memory),
+ M;
+i(slave_pids, #q{q = #amqqueue{name = Name}}) ->
+ {ok, Q = #amqqueue{slave_pids = SPids}} =
+ rabbit_amqqueue:lookup(Name),
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> SPids
+ end;
+i(synchronised_slave_pids, #q{q = #amqqueue{name = Name}}) ->
+ {ok, Q = #amqqueue{sync_slave_pids = SSPids}} =
+ rabbit_amqqueue:lookup(Name),
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> SSPids
+ end;
+i(state, #q{status = running}) -> credit_flow:state();
+i(state, #q{status = State}) -> State;
+i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ BQ:status(BQS);
+i(Item, _) ->
+ throw({bad_argument, Item}).
+
+emit_stats(State) ->
+ emit_stats(State, []).
+
+emit_stats(State, Extra) ->
+ ExtraKs = [K || {K, _} <- Extra],
+ Infos = [{K, V} || {K, V} <- infos(?STATISTICS_KEYS, State),
+ not lists:member(K, ExtraKs)],
+ rabbit_event:notify(queue_stats, Extra ++ Infos).
+
+emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName,
+ PrefetchCount, Args, Ref) ->
+ rabbit_event:notify(consumer_created,
+ [{consumer_tag, CTag},
+ {exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {channel, ChPid},
+ {queue, QName},
+ {prefetch_count, PrefetchCount},
+ {arguments, Args}],
+ Ref).
+
+emit_consumer_deleted(ChPid, ConsumerTag, QName) ->
+ rabbit_event:notify(consumer_deleted,
+ [{consumer_tag, ConsumerTag},
+ {channel, ChPid},
+ {queue, QName}]).
+
+%%----------------------------------------------------------------------------
+
+prioritise_call(Msg, _From, _Len, State) ->
+ case Msg of
+ info -> 9;
+ {info, _Items} -> 9;
+ consumers -> 9;
+ stat -> 7;
+ {basic_consume, _, _, _, _, _, _, _, _, _} -> consumer_bias(State);
+ {basic_cancel, _, _, _} -> consumer_bias(State);
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, State) ->
+ case Msg of
+ delete_immediately -> 8;
+ {set_ram_duration_target, _Duration} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {run_backing_queue, _Mod, _Fun} -> 6;
+ {ack, _AckTags, _ChPid} -> 3; %% [1]
+ {resume, _ChPid} -> 2;
+ {notify_sent, _ChPid, _Credit} -> consumer_bias(State);
+ _ -> 0
+ end.
+
+%% [1] It should be safe to always prioritise ack / resume since they
+%% will be rate limited by how fast consumers receive messages -
+%% i.e. by notify_sent. We prioritise ack and resume to discourage
+%% starvation caused by prioritising notify_sent. We don't vary their
+%% priority since acks should stay in order (some parts of the queue
+%% stack are optimised for that) and to make things easier to reason
+%% about. Finally, we prioritise ack over resume since it should
+%% always reduce memory use.
+
+consumer_bias(#q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ case BQ:msg_rates(BQS) of
+ {0.0, _} -> 0;
+ {Ingress, Egress} when Egress / Ingress < ?CONSUMER_BIAS_RATIO -> 1;
+ {_, _} -> 0
+ end.
+
+prioritise_info(Msg, _Len, #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
+ case Msg of
+ {'DOWN', _, process, DownPid, _} -> 8;
+ update_ram_duration -> 8;
+ {maybe_expire, _Version} -> 8;
+ {drop_expired, _Version} -> 8;
+ emit_stats -> 7;
+ sync_timeout -> 6;
+ _ -> 0
+ end.
+
+handle_call({init, Recover}, From,
+ State = #q{q = #amqqueue{exclusive_owner = none}}) ->
+ declare(Recover, From, State);
+
+%% You used to be able to declare an exclusive durable queue. Sadly we
+%% need to still tidy up after that case, there could be the remnants
+%% of one left over from an upgrade. So that's why we don't enforce
+%% Recover = new here.
+handle_call({init, Recover}, From,
+ State = #q{q = #amqqueue{exclusive_owner = Owner}}) ->
+ case rabbit_misc:is_process_alive(Owner) of
+ true -> erlang:monitor(process, Owner),
+ declare(Recover, From, State);
+ false -> #q{backing_queue = undefined,
+ backing_queue_state = undefined,
+ q = Q} = State,
+ gen_server2:reply(From, {owner_died, Q}),
+ BQ = backing_queue_module(Q),
+ {_, Terms} = recovery_status(Recover),
+ BQS = bq_init(BQ, Q, Terms),
+ %% Rely on terminate to delete the queue.
+ {stop, {shutdown, missing_owner},
+ State#q{backing_queue = BQ, backing_queue_state = BQS}}
+ end;
+
+handle_call(info, _From, State) ->
+ reply(infos(?INFO_KEYS, State), State);
+
+handle_call({info, Items}, _From, State) ->
+ try
+ reply({ok, infos(Items, State)}, State)
+ catch Error -> reply({error, Error}, State)
+ end;
+
+handle_call(consumers, _From, State = #q{consumers = Consumers}) ->
+ reply(rabbit_queue_consumers:all(Consumers), State);
+
+handle_call({notify_down, ChPid}, _From, State) ->
+ %% we want to do this synchronously, so that auto_deleted queues
+ %% are no longer visible by the time we send a response to the
+ %% client. The queue is ultimately deleted in terminate/2; if we
+ %% return stop with a reply, terminate/2 will be called by
+ %% gen_server2 *before* the reply is sent.
+ case handle_ch_down(ChPid, State) of
+ {ok, State1} -> reply(ok, State1);
+ {stop, State1} -> stop(ok, State1)
+ end;
+
+handle_call({basic_get, ChPid, NoAck, LimiterPid}, _From,
+ State = #q{q = #amqqueue{name = QName}}) ->
+ AckRequired = not NoAck,
+ State1 = ensure_expiry_timer(State),
+ case fetch(AckRequired, State1) of
+ {empty, State2} ->
+ reply(empty, State2);
+ {{Message, IsDelivered, AckTag},
+ #q{backing_queue = BQ, backing_queue_state = BQS} = State2} ->
+ case AckRequired of
+ true -> ok = rabbit_queue_consumers:record_ack(
+ ChPid, LimiterPid, AckTag);
+ false -> ok
+ end,
+ Msg = {QName, self(), AckTag, IsDelivered, Message},
+ reply({ok, BQ:len(BQS), Msg}, State2)
+ end;
+
+handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
+ PrefetchCount, ConsumerTag, ExclusiveConsume, Args, OkMsg},
+ _From, State = #q{consumers = Consumers,
+ exclusive_consumer = Holder}) ->
+ case check_exclusive_access(Holder, ExclusiveConsume, State) of
+ in_use -> reply({error, exclusive_consume_unavailable}, State);
+ ok -> Consumers1 = rabbit_queue_consumers:add(
+ ChPid, ConsumerTag, NoAck,
+ LimiterPid, LimiterActive,
+ PrefetchCount, Args, is_empty(State),
+ Consumers),
+ ExclusiveConsumer =
+ if ExclusiveConsume -> {ChPid, ConsumerTag};
+ true -> Holder
+ end,
+ State1 = State#q{consumers = Consumers1,
+ has_had_consumers = true,
+ exclusive_consumer = ExclusiveConsumer},
+ ok = maybe_send_reply(ChPid, OkMsg),
+ emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ not NoAck, qname(State1),
+ PrefetchCount, Args, none),
+ notify_decorators(State1),
+ reply(ok, run_message_queue(State1))
+ end;
+
+handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From,
+ State = #q{consumers = Consumers,
+ exclusive_consumer = Holder}) ->
+ ok = maybe_send_reply(ChPid, OkMsg),
+ case rabbit_queue_consumers:remove(ChPid, ConsumerTag, Consumers) of
+ not_found ->
+ reply(ok, State);
+ Consumers1 ->
+ Holder1 = case Holder of
+ {ChPid, ConsumerTag} -> none;
+ _ -> Holder
+ end,
+ State1 = State#q{consumers = Consumers1,
+ exclusive_consumer = Holder1},
+ emit_consumer_deleted(ChPid, ConsumerTag, qname(State1)),
+ notify_decorators(State1),
+ case should_auto_delete(State1) of
+ false -> reply(ok, ensure_expiry_timer(State1));
+ true -> stop(ok, State1)
+ end
+ end;
+
+handle_call(stat, _From, State) ->
+ State1 = #q{backing_queue = BQ, backing_queue_state = BQS} =
+ ensure_expiry_timer(State),
+ reply({ok, BQ:len(BQS), rabbit_queue_consumers:count()}, State1);
+
+handle_call({delete, IfUnused, IfEmpty}, _From,
+ State = #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ IsEmpty = BQ:is_empty(BQS),
+ IsUnused = is_unused(State),
+ if
+ IfEmpty and not(IsEmpty) -> reply({error, not_empty}, State);
+ IfUnused and not(IsUnused) -> reply({error, in_use}, State);
+ true -> stop({ok, BQ:len(BQS)}, State)
+ end;
+
+handle_call(purge, _From, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {Count, BQS1} = BQ:purge(BQS),
+ State1 = State#q{backing_queue_state = BQS1},
+ reply({ok, Count}, maybe_send_drained(Count =:= 0, State1));
+
+handle_call({requeue, AckTags, ChPid}, From, State) ->
+ gen_server2:reply(From, ok),
+ noreply(requeue(AckTags, ChPid, State));
+
+handle_call(sync_mirrors, _From,
+ State = #q{backing_queue = rabbit_mirror_queue_master,
+ backing_queue_state = BQS}) ->
+ S = fun(BQSN) -> State#q{backing_queue_state = BQSN} end,
+ HandleInfo = fun (Status) ->
+ receive {'$gen_call', From, {info, Items}} ->
+ Infos = infos(Items, State#q{status = Status}),
+ gen_server2:reply(From, {ok, Infos})
+ after 0 ->
+ ok
+ end
+ end,
+ EmitStats = fun (Status) ->
+ rabbit_event:if_enabled(
+ State, #q.stats_timer,
+ fun() -> emit_stats(State#q{status = Status}) end)
+ end,
+ case rabbit_mirror_queue_master:sync_mirrors(HandleInfo, EmitStats, BQS) of
+ {ok, BQS1} -> reply(ok, S(BQS1));
+ {stop, Reason, BQS1} -> {stop, Reason, S(BQS1)}
+ end;
+
+handle_call(sync_mirrors, _From, State) ->
+ reply({error, not_mirrored}, State);
+
+%% By definition if we get this message here we do not have to do anything.
+handle_call(cancel_sync_mirrors, _From, State) ->
+ reply({ok, not_syncing}, State).
+
+%% gen_server cast handler for the queue process. Every clause
+%% either continues via noreply/1 or terminates via stop/1.
+handle_cast({run_backing_queue, Mod, Fun},
+ State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ noreply(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)});
+
+%% A message arriving from a channel. Under credit flow we ack the
+%% sender's credit and monitor it so that its death can be handled.
+handle_cast({deliver, Delivery = #delivery{sender = Sender}, Delivered, Flow},
+ State = #q{senders = Senders}) ->
+ Senders1 = case Flow of
+ flow -> credit_flow:ack(Sender),
+ pmon:monitor(Sender, Senders);
+ noflow -> Senders
+ end,
+ State1 = State#q{senders = Senders1},
+ noreply(deliver_or_enqueue(Delivery, Delivered, State1));
+
+%% basic.ack from a channel.
+handle_cast({ack, AckTags, ChPid}, State) ->
+ noreply(ack(AckTags, ChPid, State));
+
+%% basic.reject / basic.nack with requeue=true.
+handle_cast({reject, true, AckTags, ChPid}, State) ->
+ noreply(requeue(AckTags, ChPid, State));
+
+%% basic.reject / basic.nack with requeue=false: dead-letter the
+%% messages when a DLX is configured, otherwise just ack them away.
+handle_cast({reject, false, AckTags, ChPid}, State) ->
+ noreply(with_dlx(
+ State#q.dlx,
+ fun (X) -> subtract_acks(ChPid, AckTags, State,
+ fun (State1) ->
+ dead_letter_rejected_msgs(
+ AckTags, X, State1)
+ end) end,
+ fun () -> ack(AckTags, ChPid, State) end));
+
+handle_cast(delete_immediately, State) ->
+ stop(State);
+
+%% The next three clauses possibly unblock consumers, each with a
+%% different unblocking predicate from rabbit_queue_consumers.
+handle_cast({resume, ChPid}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:resume_fun(),
+ ChPid, State));
+
+handle_cast({notify_sent, ChPid, Credit}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:notify_sent_fun(Credit),
+ ChPid, State));
+
+handle_cast({activate_limit, ChPid}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:activate_limit_fun(),
+ ChPid, State));
+
+%% Memory-pressure knobs, forwarded to the backing queue and the
+%% file handle cache respectively.
+handle_cast({set_ram_duration_target, Duration},
+ State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ noreply(State#q{backing_queue_state = BQS1});
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State);
+
+%% Wrap the current backing queue in the mirror-queue master module.
+handle_cast(start_mirroring, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% lookup again to get policy for init_with_existing_bq
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ true = BQ =/= rabbit_mirror_queue_master, %% assertion
+ BQ1 = rabbit_mirror_queue_master,
+ BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
+ noreply(State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1});
+
+%% Unwrap the mirror-queue master, restoring the inner backing queue.
+handle_cast(stop_mirroring, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQ = rabbit_mirror_queue_master, %% assertion
+ {BQ1, BQS1} = BQ:stop_mirroring(BQS),
+ noreply(State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1});
+
+%% basic.credit: always reply with the current queue length, and
+%% run the message queue if the grant unblocked the consumer.
+handle_cast({credit, ChPid, CTag, Credit, Drain},
+ State = #q{consumers = Consumers,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ Len = BQ:len(BQS),
+ rabbit_channel:send_credit_reply(ChPid, Len),
+ noreply(
+ case rabbit_queue_consumers:credit(Len == 0, Credit, Drain, ChPid, CTag,
+ Consumers) of
+ unchanged -> State;
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, State1)
+ end);
+
+%% Re-emit queue_created / consumer_created events tagged with Ref.
+%% When there is an exclusive consumer it must be the only one -
+%% hence the exact single-element match on AllConsumers.
+handle_cast({force_event_refresh, Ref},
+ State = #q{consumers = Consumers,
+ exclusive_consumer = Exclusive}) ->
+ rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State), Ref),
+ QName = qname(State),
+ AllConsumers = rabbit_queue_consumers:all(Consumers),
+ case Exclusive of
+ none -> [emit_consumer_created(
+ Ch, CTag, false, AckRequired, QName, Prefetch,
+ Args, Ref) ||
+ {Ch, CTag, AckRequired, Prefetch, Args}
+ <- AllConsumers];
+ {Ch, CTag} -> [{Ch, CTag, AckRequired, Prefetch, Args}] = AllConsumers,
+ emit_consumer_created(
+ Ch, CTag, true, AckRequired, QName, Prefetch, Args, Ref)
+ end,
+ noreply(State);
+
+handle_cast(notify_decorators, State) ->
+ notify_decorators(State),
+ noreply(State);
+
+handle_cast(policy_changed, State = #q{q = #amqqueue{name = Name}}) ->
+ %% We depend on the #q.q field being up to date at least WRT
+ %% policy (but not slave pids) in various places, so when it
+ %% changes we go and read it from Mnesia again.
+ %%
+ %% This also has the side effect of waking us up so we emit a
+ %% stats event - so event consumers see the changed policy.
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ noreply(process_args_policy(State#q{q = Q})).
+
+%% Non-call/cast messages. The {maybe_expire, Vsn} and
+%% {drop_expired, Vsn} timer messages carry the args_policy_version
+%% current when the timer was set; a stale stamp means the policy
+%% changed under the timer, so the message is ignored.
+handle_info({maybe_expire, Vsn}, State = #q{args_policy_version = Vsn}) ->
+ case is_unused(State) of
+ true -> stop(State);
+ false -> noreply(State#q{expiry_timer_ref = undefined})
+ end;
+
+handle_info({maybe_expire, _Vsn}, State) ->
+ noreply(State);
+
+%% Per-message TTL timer fired: drop expired messages, and let
+%% maybe_send_drained/2 act if the queue just became empty.
+handle_info({drop_expired, Vsn}, State = #q{args_policy_version = Vsn}) ->
+ WasEmpty = is_empty(State),
+ State1 = drop_expired_msgs(State#q{ttl_timer_ref = undefined}),
+ noreply(maybe_send_drained(WasEmpty, State1));
+
+handle_info({drop_expired, _Vsn}, State) ->
+ noreply(State);
+
+handle_info(emit_stats, State) ->
+ emit_stats(State),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(rabbit_event:reset_stats_timer(
+ State, #q.stats_timer)),
+ {noreply, State1, Timeout};
+
+handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
+ State = #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
+ %% Exclusively owned queues must disappear with their owner. In
+ %% the case of clean shutdown we delete the queue synchronously in
+ %% the reader - although not required by the spec this seems to
+ %% match what people expect (see bug 21824). However we need this
+ %% monitor-and-async-delete in case the connection goes away
+ %% unexpectedly.
+ stop(State);
+
+%% Any other 'DOWN' is a channel (or sender) we were monitoring.
+handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) ->
+ case handle_ch_down(DownPid, State) of
+ {ok, State1} -> noreply(State1);
+ {stop, State1} -> stop(State1)
+ end;
+
+%% Periodic exchange of ram-duration data with the memory monitor.
+handle_info(update_ram_duration, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(State#q{rate_timer_ref = undefined,
+ backing_queue_state = BQS2}),
+ {noreply, State1, Timeout};
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined}));
+
+handle_info(timeout, State) ->
+ noreply(backing_queue_timeout(State));
+
+%% We trap exits; propagate the linked process's exit reason.
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+%% credit_flow telling us a blocked sender may proceed again.
+handle_info({bump_credit, Msg}, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State#q{backing_queue_state = BQ:resume(BQS)});
+
+%% Anything else is unexpected: crash loudly rather than ignore it.
+handle_info(Info, State) ->
+ {stop, {unhandled_info, Info}, State}.
+
+%% Called just before the queue process hibernates. With no backing
+%% queue state yet there is nothing to flush; otherwise push a final
+%% ram-duration report, let the backing queue prepare itself, emit
+%% an idle stats event and stop the stats/rate timers.
+handle_pre_hibernate(State = #q{backing_queue_state = undefined}) ->
+ {hibernate, State};
+handle_pre_hibernate(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ BQS3 = BQ:handle_pre_hibernate(BQS2),
+ %% NOTE(review): now/0 is deprecated on modern OTP; switching
+ %% idle_since to os:timestamp/0 would need event consumers checked.
+ rabbit_event:if_enabled(
+ State, #q.stats_timer,
+ fun () -> emit_stats(State, [{idle_since, now()},
+ {consumer_utilisation, ''}]) end),
+ State1 = rabbit_event:stop_stats_timer(State#q{backing_queue_state = BQS3},
+ #q.stats_timer),
+ {hibernate, stop_rate_timer(State1)}.
+
%% Pretty-print the process mailbox for sys:get_status/1 output;
%% delegates to the shared helper in rabbit_misc.
format_message_queue(Opt, MQ) ->
    rabbit_misc:format_message_queue(Opt, MQ).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_child/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_child/2 ::
+ (node(), [any()]) -> rabbit_types:ok(pid() | undefined) |
+ rabbit_types:ok({pid(), any()}) |
+ rabbit_types:error(any())).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% Start the queue-process supervisor, locally registered as
%% ?SERVER (= rabbit_amqqueue_sup).
start_link() ->
    supervisor2:start_link(
      {local, ?SERVER}, ?MODULE, []).
+
%% Dynamically start one queue process under the supervisor instance
%% registered on Node; Args extends the simple_one_for_one child
%% spec's rabbit_amqqueue_process:start_link call.
start_child(Node, Args) ->
    supervisor2:start_child(
      {?SERVER, Node}, Args).
+
%% simple_one_for_one: all children share this single template spec
%% and are started dynamically via start_child/2. Children are
%% 'temporary' (never restarted) and get up to ?MAX_WAIT to shut
%% down.
init([]) ->
    ChildSpec = {rabbit_amqqueue,
                 {rabbit_amqqueue_process, start_link, []},
                 temporary, ?MAX_WAIT, worker, [rabbit_amqqueue_process]},
    {ok, {{simple_one_for_one, 10, 10}, [ChildSpec]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend).
+
+-ifdef(use_specs).
+
+%% A description proplist as with auth mechanisms,
+%% exchanges. Currently unused.
+-callback description() -> [proplists:property()].
+
+%% Check a user can log in, given a username and a proplist of
+%% authentication information (e.g. [{password, Password}]).
+%%
+%% Possible responses:
+%% {ok, User}
+%% Authentication succeeded, and here's the user record.
+%% {error, Error}
+%% Something went wrong. Log and die.
+%% {refused, Msg, Args}
+%% Client failed authentication. Log and die.
+-callback check_user_login(rabbit_types:username(), [term()]) ->
+ {'ok', rabbit_types:user()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
+
+%% Given #user and vhost, can a user log in to a vhost?
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_vhost_access(rabbit_types:user(), rabbit_types:vhost()) ->
+ boolean() | {'error', any()}.
+
+
+%% Given #user, resource and permission, can a user access a resource?
+%%
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_resource_access(rabbit_types:user(),
+ rabbit_types:r(atom()),
+ rabbit_access_control:permission_atom()) ->
+ boolean() | {'error', any()}.
+
+-else.
+
+-export([behaviour_info/1]).
+
%% Pre-R15 fallback: enumerate the callbacks this behaviour requires
%% when the -callback attribute form is unavailable.
behaviour_info(callbacks) ->
    [{description, 0},
     {check_user_login, 2},
     {check_vhost_access, 2},
     {check_resource_access, 3}];
behaviour_info(_Other) ->
    undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_dummy).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_backend).
+
+-export([description/0]).
+-export([user/0]).
+-export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
+
+-ifdef(use_specs).
+
+-spec(user/0 :: () -> rabbit_types:user()).
+
+-endif.
+
+%% A user to be used by the direct client when permission checks are
+%% not needed. This user can do anything AMQPish.
%% The pseudo-user employed by the direct client when permission
%% checks are to be skipped; every access check in this module
%% answers true for it.
user() ->
    #user{username     = <<"dummy">>,
          tags         = [],
          auth_backend = ?MODULE,
          impl         = none}.
+
+%% Implementation of rabbit_auth_backend
+
%% rabbit_auth_backend callback: human-readable description proplist.
description() ->
    [{name,        <<"Dummy">>},
     {description, <<"Database for the dummy user">>}].
+
%% The dummy user can never authenticate via normal login.
check_user_login(_Username, _AuthProps) ->
    {refused, "cannot log in conventionally as dummy user", []}.
+
%% Every vhost and every resource is accessible to the dummy user.
check_vhost_access(#user{}, _VHostPath) ->
    true.

check_resource_access(#user{}, #resource{}, _Permission) ->
    true.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_internal).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_backend).
+
+-export([description/0]).
+-export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
+
+-export([add_user/2, delete_user/1, lookup_user/1,
+ change_password/2, clear_password/1,
+ hash_password/1, change_password_hash/2,
+ set_tags/2, set_permissions/5, clear_permissions/2]).
+-export([user_info_keys/0, perms_info_keys/0,
+ user_perms_info_keys/0, vhost_perms_info_keys/0,
+ user_vhost_perms_info_keys/0,
+ list_users/0, list_permissions/0,
+ list_user_permissions/1, list_vhost_permissions/1,
+ list_user_vhost_permissions/2]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(regexp() :: binary()).
+
+-spec(add_user/2 :: (rabbit_types:username(), rabbit_types:password()) -> 'ok').
+-spec(delete_user/1 :: (rabbit_types:username()) -> 'ok').
+-spec(lookup_user/1 :: (rabbit_types:username())
+ -> rabbit_types:ok(rabbit_types:internal_user())
+ | rabbit_types:error('not_found')).
+-spec(change_password/2 :: (rabbit_types:username(), rabbit_types:password())
+ -> 'ok').
+-spec(clear_password/1 :: (rabbit_types:username()) -> 'ok').
+-spec(hash_password/1 :: (rabbit_types:password())
+ -> rabbit_types:password_hash()).
+-spec(change_password_hash/2 :: (rabbit_types:username(),
+ rabbit_types:password_hash()) -> 'ok').
+-spec(set_tags/2 :: (rabbit_types:username(), [atom()]) -> 'ok').
+-spec(set_permissions/5 ::(rabbit_types:username(), rabbit_types:vhost(),
+ regexp(), regexp(), regexp()) -> 'ok').
+-spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost())
+ -> 'ok').
+-spec(user_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(perms_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(user_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(user_vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(list_users/0 :: () -> [rabbit_types:infos()]).
+-spec(list_permissions/0 :: () -> [rabbit_types:infos()]).
+-spec(list_user_permissions/1 ::
+ (rabbit_types:username()) -> [rabbit_types:infos()]).
+-spec(list_vhost_permissions/1 ::
+ (rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(list_user_vhost_permissions/2 ::
+ (rabbit_types:username(), rabbit_types:vhost())
+ -> [rabbit_types:infos()]).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Implementation of rabbit_auth_backend
+
%% rabbit_auth_backend callback: describes this backend.
description() ->
    [{name,        <<"Internal">>},
     {description, <<"Internal user / password database">>}].
+
+%% rabbit_auth_backend callback. With no auth props the user merely
+%% has to exist; with a password it is checked against the stored
+%% hash, whose layout is a 4-byte salt followed by the salted MD5
+%% (see hash_password/1). A hash of any other shape (e.g. the empty
+%% binary written by clear_password/1) never matches.
+check_user_login(Username, []) ->
+ internal_check_user_login(Username, fun(_) -> true end);
+check_user_login(Username, [{password, Cleartext}]) ->
+ internal_check_user_login(
+ Username,
+ fun (#internal_user{password_hash = <<Salt:4/binary, Hash/binary>>}) ->
+ Hash =:= salted_md5(Salt, Cleartext);
+ (#internal_user{}) ->
+ false
+ end);
+check_user_login(Username, AuthProps) ->
+ exit({unknown_auth_props, Username, AuthProps}).
+
%% Shared core of check_user_login/2: look the user up and apply
%% LoginOk (a predicate over the #internal_user{} row). A missing
%% user and a failed predicate produce the same generic refusal, so
%% callers cannot probe which usernames exist.
internal_check_user_login(Username, LoginOk) ->
    Refusal = {refused, "user '~s' - invalid credentials", [Username]},
    case lookup_user(Username) of
        {ok, #internal_user{tags = Tags} = InternalUser} ->
            case LoginOk(InternalUser) of
                true -> {ok, #user{username     = Username,
                                   tags         = Tags,
                                   auth_backend = ?MODULE,
                                   impl         = InternalUser}};
                _    -> Refusal
            end;
        {error, not_found} ->
            Refusal
    end.
+
+%% rabbit_auth_backend callback: a user may enter a vhost iff a
+%% permission row exists for the (user, vhost) pair.
+%% NOTE(review): the [_R] clause assumes at most one row per key
+%% (set-type table) - a multi-row result would crash - confirm.
+check_vhost_access(#user{username = Username}, VHostPath) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] -> false;
+ [_R] -> true
+ end.
+
+%% rabbit_auth_backend callback: match the resource name against the
+%% stored regexp for the requested permission kind
+%% (configure | write | read). No permission row means no access.
+%% An empty stored regexp is rewritten to ^$ so that it matches
+%% only the empty name.
+check_resource_access(#user{username = Username},
+ #resource{virtual_host = VHostPath, name = Name},
+ Permission) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] ->
+ false;
+ [#user_permission{permission = P}] ->
+ PermRegexp = case element(permission_index(Permission), P) of
+ %% <<"^$">> breaks Emacs' erlang mode
+ <<"">> -> <<$^, $$>>;
+ RE -> RE
+ end,
+ case re:run(Name, PermRegexp, [{capture, none}]) of
+ match -> true;
+ nomatch -> false
+ end
+ end.
+
+%% Map a permission atom to its field index within #permission{},
+%% for use with element/2 in check_resource_access/3.
+permission_index(configure) -> #permission.configure;
+permission_index(write) -> #permission.write;
+permission_index(read) -> #permission.read.
+
+%%----------------------------------------------------------------------------
+%% Manipulation of the user database
+
+%% Create Username with the given cleartext Password (hashed before
+%% storage) and no tags. Aborts the transaction with
+%% {user_already_exists, Username} if the row exists.
+%% NOTE(review): assumes execute_mnesia_transaction throws on abort,
+%% so the user_created event fires only on success - confirm.
+add_user(Username, Password) ->
+ rabbit_log:info("Creating user '~s'~n", [Username]),
+ R = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ %% wread takes a write lock, so two concurrent creates
+ %% of the same user cannot both see an empty result
+ case mnesia:wread({rabbit_user, Username}) of
+ [] ->
+ ok = mnesia:write(
+ rabbit_user,
+ #internal_user{username = Username,
+ password_hash =
+ hash_password(Password),
+ tags = []},
+ write);
+ _ ->
+ mnesia:abort({user_already_exists, Username})
+ end
+ end),
+ rabbit_event:notify(user_created, [{name, Username}]),
+ R.
+
+%% Remove Username and every permission row belonging to it, in a
+%% single transaction (rabbit_misc:with_user/2 presumably asserts
+%% the user exists - confirm). Emits a user_deleted event.
+delete_user(Username) ->
+ rabbit_log:info("Deleting user '~s'~n", [Username]),
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user(
+ Username,
+ fun () ->
+ ok = mnesia:delete({rabbit_user, Username}),
+ %% drop this user's permissions on every vhost
+ [ok = mnesia:delete_object(
+ rabbit_user_permission, R, write) ||
+ R <- mnesia:match_object(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = '_'},
+ permission = '_'},
+ write)],
+ ok
+ end)),
+ rabbit_event:notify(user_deleted, [{name, Username}]),
+ R.
+
%% Fetch the #internal_user{} row for Username.
%% Returns {ok, InternalUser} | {error, not_found}.
lookup_user(Username) ->
    rabbit_misc:dirty_read({rabbit_user, Username}).
+
%% Replace Username's password with the salted hash of Password and
%% emit a user_password_changed event.
change_password(Username, Password) ->
    rabbit_log:info("Changing password for '~s'~n", [Username]),
    Result = change_password_hash(Username, hash_password(Password)),
    rabbit_event:notify(user_password_changed, [{name, Username}]),
    Result.
+
%% Blank out the stored hash so that no password can ever match
%% (check_user_login/2 requires at least a 4-byte salt), and emit a
%% user_password_cleared event.
clear_password(Username) ->
    rabbit_log:info("Clearing password for '~s'~n", [Username]),
    Result = change_password_hash(Username, <<"">>),
    rabbit_event:notify(user_password_cleared, [{name, Username}]),
    Result.
+
%% Produce a password hash in the stored format: a random 4-byte
%% salt followed by MD5(salt ++ cleartext) - see salted_md5/2 and
%% the matching parse in check_user_login/2.
%%
%% The salt now comes from crypto's CSPRNG. The previous
%% implementation seeded the non-cryptographic 'random' module from
%% now/0, which makes salts predictable from the wall clock.
hash_password(Cleartext) ->
    SaltBin = crypto:strong_rand_bytes(4),
    Hash = salted_md5(SaltBin, Cleartext),
    <<SaltBin/binary, Hash/binary>>.
+
%% Overwrite the stored password hash for Username via a user-update
%% transaction.
change_password_hash(Username, PasswordHash) ->
    SetHash = fun (User) ->
                      User#internal_user{password_hash = PasswordHash}
              end,
    update_user(Username, SetHash).
+
%% MD5 of Salt ++ Cleartext (both binaries).
%% NOTE(review): MD5 is weak by modern standards, but the stored
%% hash format depends on it; changing it would invalidate every
%% existing password hash.
salted_md5(Salt, Cleartext) ->
    erlang:md5(<<Salt/binary, Cleartext/binary>>).
+
%% Replace (not merge) the tag list of Username and emit a
%% user_tags_set event.
set_tags(Username, Tags) ->
    rabbit_log:info("Setting user tags for user '~s' to ~p~n",
                    [Username, Tags]),
    Result = update_user(Username,
                         fun (User) -> User#internal_user{tags = Tags} end),
    rabbit_event:notify(user_tags_set, [{name, Username}, {tags, Tags}]),
    Result.
+
+%% Grant Username the given configure/write/read regexps on
+%% VHostPath, overwriting any existing permission row. Each regexp
+%% is validated with re:compile/1 up front; an invalid one throws
+%% {error, {invalid_regexp, Regexp, Reason}} before anything is
+%% written. Emits a permission_created event.
+set_permissions(Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm) ->
+ rabbit_log:info("Setting permissions for "
+ "'~s' in '~s' to '~s', '~s', '~s'~n",
+ [Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm]),
+ %% validate all three regexps before touching the database
+ lists:map(
+ fun (RegexpBin) ->
+ Regexp = binary_to_list(RegexpBin),
+ case re:compile(Regexp) of
+ {ok, _} -> ok;
+ {error, Reason} -> throw({error, {invalid_regexp,
+ Regexp, Reason}})
+ end
+ end, [ConfigurePerm, WritePerm, ReadPerm]),
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user_and_vhost(
+ Username, VHostPath,
+ fun () -> ok = mnesia:write(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VHostPath},
+ permission = #permission{
+ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}},
+ write)
+ end)),
+ rabbit_event:notify(permission_created, [{user, Username},
+ {vhost, VHostPath},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm}]),
+ R.
+
+%% Delete the permission row for (Username, VHostPath) and emit a
+%% permission_deleted event.
+clear_permissions(Username, VHostPath) ->
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user_and_vhost(
+ Username, VHostPath,
+ fun () ->
+ ok = mnesia:delete({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}})
+ end)),
+ rabbit_event:notify(permission_deleted, [{user, Username},
+ {vhost, VHostPath}]),
+ R.
+
+
+%% Read-modify-write of a user row inside one transaction; Fun maps
+%% the existing #internal_user{} to its replacement.
+%% rabbit_misc:with_user/2 presumably aborts when the user is
+%% missing - the {ok, User} match below would crash otherwise.
+update_user(Username, Fun) ->
+ rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user(
+ Username,
+ fun () ->
+ {ok, User} = lookup_user(Username),
+ ok = mnesia:write(rabbit_user, Fun(User), write)
+ end)).
+
+%%----------------------------------------------------------------------------
+%% Listing
+
+-define(PERMS_INFO_KEYS, [configure, write, read]).
+-define(USER_INFO_KEYS, [user, tags]).
+
+%% Column sets returned by the list_* functions below; the
+%% user/vhost columns are omitted where the caller has already
+%% fixed them.
+user_info_keys() -> ?USER_INFO_KEYS.
+
+perms_info_keys() -> [user, vhost | ?PERMS_INFO_KEYS].
+vhost_perms_info_keys() -> [user | ?PERMS_INFO_KEYS].
+user_perms_info_keys() -> [vhost | ?PERMS_INFO_KEYS].
+user_vhost_perms_info_keys() -> ?PERMS_INFO_KEYS.
+
%% Render every user row as a [{user, Name}, {tags, Tags}] proplist,
%% read with a dirty match (no transaction).
list_users() ->
    Pattern = #internal_user{_ = '_'},
    [[{user, Username}, {tags, Tags}]
     || #internal_user{username = Username, tags = Tags}
            <- mnesia:dirty_match_object(rabbit_user, Pattern)].
+
+%% Every permission row in the system with the full column set
+%% ('_' wildcards both the user and the vhost).
+list_permissions() ->
+ list_permissions(perms_info_keys(), match_user_vhost('_', '_')).
+
+%% Shared body of the list_* functions: run QueryThunk (a fun
+%% returning #user_permission{} rows) in a transaction and render
+%% each row as a proplist restricted to Keys.
+list_permissions(Keys, QueryThunk) ->
+ [filter_props(Keys, [{user, Username},
+ {vhost, VHostPath},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm}]) ||
+ #user_permission{user_vhost = #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ permission = #permission{ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}} <-
+ %% TODO: use dirty ops instead
+ rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+
%% Keep only the properties whose key is listed in Keys, preserving
%% the order of Props.
filter_props(Keys, Props) ->
    [Prop || {Key, _} = Prop <- Props, lists:member(Key, Keys)].
+
+%% Permissions of one user across all vhosts.
+list_user_permissions(Username) ->
+ list_permissions(
+ user_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost(Username, '_'))).
+
+%% Permissions of all users on one vhost.
+list_vhost_permissions(VHostPath) ->
+ list_permissions(
+ vhost_perms_info_keys(),
+ rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath))).
+
+%% Permissions of one user on one vhost.
+list_user_vhost_permissions(Username, VHostPath) ->
+ list_permissions(
+ user_vhost_perms_info_keys(),
+ rabbit_misc:with_user_and_vhost(
+ Username, VHostPath, match_user_vhost(Username, VHostPath))).
+
+%% Build a thunk (run inside a transaction by its callers) that
+%% matches #user_permission{} rows; '_' wildcards either key part.
+match_user_vhost(Username, VHostPath) ->
+ fun () -> mnesia:match_object(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VHostPath},
+ permission = '_'},
+ read)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism).
+
+-ifdef(use_specs).
+
+%% A description.
+-callback description() -> [proplists:property()].
+
+%% If this mechanism is enabled, should it be offered for a given socket?
+%% (primarily so EXTERNAL can be SSL-only)
+-callback should_offer(rabbit_net:socket()) -> boolean().
+
+%% Called before authentication starts. Should create a state
+%% object to be passed through all the stages of authentication.
+-callback init(rabbit_net:socket()) -> any().
+
+%% Handle a stage of authentication. Possible responses:
+%% {ok, User}
+%% Authentication succeeded, and here's the user record.
+%% {challenge, Challenge, NextState}
+%% Another round is needed. Here's the state I want next time.
+%% {protocol_error, Msg, Args}
+%% Client got the protocol wrong. Log and die.
+%% {refused, Msg, Args}
+%% Client failed authentication. Log and die.
+-callback handle_response(binary(), any()) ->
+ {'ok', rabbit_types:user()} |
+ {'challenge', binary(), any()} |
+ {'protocol_error', string(), [any()]} |
+ {'refused', string(), [any()]}.
+
+-else.
+
+-export([behaviour_info/1]).
+
+%% Pre-R15 fallback enumerating this behaviour's required callbacks
+%% when the -callback attribute form is unavailable.
+behaviour_info(callbacks) ->
+ [{description, 0}, {should_offer, 1}, {init, 1}, {handle_response, 2}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_amqplain).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism amqplain"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"AMQPLAIN">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% AMQPLAIN, as used by Qpid Python test suite. The 0-8 spec actually
+%% defines this as PLAIN, but in 0-9 that definition is gone, instead
+%% referring generically to "SASL security mechanism", i.e. the above.
+
%% rabbit_auth_mechanism callback: describes this mechanism.
description() ->
    [{description, <<"QPid AMQPLAIN mechanism">>}].
+
+%% Offered unconditionally, whatever the socket type.
+should_offer(_Sock) ->
+ true.
+
+%% No state is carried between authentication stages.
+init(_Sock) ->
+ [].
+
%% Single-stage mechanism: the response is an AMQP field table that
%% must contain longstr LOGIN and PASSWORD entries; credentials are
%% handed to rabbit_access_control.
%%
%% Uses lists:keyfind/3 instead of the original lists:keysearch/3,
%% which the stdlib documents as retained for backward compatibility
%% only; behaviour is unchanged.
handle_response(Response, _State) ->
    LoginTable = rabbit_binary_parser:parse_table(Response),
    case {lists:keyfind(<<"LOGIN">>, 1, LoginTable),
          lists:keyfind(<<"PASSWORD">>, 1, LoginTable)} of
        {{_, longstr, User}, {_, longstr, Pass}} ->
            rabbit_access_control:check_user_pass_login(User, Pass);
        _ ->
            {protocol_error,
             "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field",
             [LoginTable]}
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_cr_demo).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism cr-demo"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"RABBIT-CR-DEMO">>,
+ ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-record(state, {username = undefined}).
+
+%% Provides equivalent security to PLAIN but demos use of Connection.Secure(Ok)
+%% START-OK: Username
+%% SECURE: "Please tell me your password"
+%% SECURE-OK: "My password is ~s", [Password]
+
%% rabbit_auth_mechanism callback: describes this mechanism.
description() ->
    [{description,
      <<"RabbitMQ Demo challenge-response authentication mechanism">>}].
+
+%% Offered on every connection.
+should_offer(_Sock) ->
+ true.
+
+%% The state records the username once the first response arrives.
+init(_Sock) ->
+ #state{}.
+
+%% First round: no username recorded yet, so whatever the client
+%% sent becomes the username and we challenge for the password.
+%% Clause order matters: because this clause is matched first, even
+%% a response that happens to start with "My password is " is
+%% treated as a username while the state is fresh.
+handle_response(Response, State = #state{username = undefined}) ->
+ {challenge, <<"Please tell me your password">>,
+ State#state{username = Response}};
+
+%% Second round: strip the fixed prefix and check the credentials.
+handle_response(<<"My password is ", Password/binary>>,
+ #state{username = Username}) ->
+ rabbit_access_control:check_user_pass_login(Username, Password);
+%% Anything else in the second round is a protocol error.
+handle_response(Response, _State) ->
+ {protocol_error, "Invalid response '~s'", [Response]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_plain).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism plain"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"PLAIN">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% SASL PLAIN, as used by the Qpid Java client and our clients. Also,
+%% apparently, by OpenAMQ.
+
+%% TODO: reimplement this using the binary module? - that makes use of
+%% BIFs to do binary matching and will thus be much faster.
+
%% rabbit_auth_mechanism callback: describes this mechanism.
description() ->
    [{description, <<"SASL PLAIN authentication mechanism">>}].
+
+%% Offered unconditionally, whatever the socket type.
+should_offer(_Sock) ->
+ true.
+
+%% No state is carried between authentication stages.
+init(_Sock) ->
+ [].
+
+%% Single-stage mechanism: the entire NUL-separated PLAIN response
+%% is parsed here and the credentials handed to
+%% rabbit_access_control.
+handle_response(Response, _State) ->
+ case extract_user_pass(Response) of
+ {ok, User, Pass} ->
+ rabbit_access_control:check_user_pass_login(User, Pass);
+ error ->
+ {protocol_error, "response ~p invalid", [Response]}
+ end.
+
%% Split a SASL PLAIN response into username and password. The
%% expected shape is <<0, User/binary, 0, Pass/binary>> - i.e. the
%% leading authorization identity must be empty, and exactly two
%% further NUL-introduced elements must be present.
extract_user_pass(Response) ->
    case extract_elem(Response) of
        {ok, User, AfterUser} ->
            case extract_elem(AfterUser) of
                {ok, Pass, <<>>} -> {ok, User, Pass};
                _                -> error
            end;
        error ->
            error
    end.

%% Consume one NUL-introduced element: the bytes after a leading 0
%% up to (but not including) the next 0 or the end of the binary.
extract_elem(<<0:8, Rest/binary>>) ->
    Len = next_null_pos(Rest, 0),
    <<Elem:Len/binary, Tail/binary>> = Rest,
    {ok, Elem, Tail};
extract_elem(_) ->
    error.

%% Index of the first NUL byte in the binary, or its length if none.
next_null_pos(<<>>, Count)                 -> Count;
next_null_pos(<<0:8, _/binary>>, Count)    -> Count;
next_null_pos(<<_:8, Rest/binary>>, Count) -> next_null_pos(Rest, Count + 1).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_autoheal).
+
+-export([init/0, maybe_start/1, rabbit_down/2, node_down/2, handle_msg/3]).
+
+%% The named process we are running in.
+-define(SERVER, rabbit_node_monitor).
+
+%%----------------------------------------------------------------------------
+
+%% In order to autoheal we want to:
+%%
+%% * Find the winning partition
+%% * Stop all nodes in other partitions
+%% * Wait for them all to be stopped
+%% * Start them again
+%%
+%% To keep things simple, we assume all nodes are up. We don't start
+%% unless all nodes are up, and if a node goes down we abandon the
+%% whole process. To further keep things simple we also defer the
+%% decision as to the winning node to the "leader" - arbitrarily
+%% selected as the first node in the cluster.
+%%
+%% To coordinate the restarting nodes we pick a special node from the
+%% winning partition - the "winner". Restarting nodes then stop, and
+%% wait for it to tell them it is safe to start again. The winner
+%% determines that a node has stopped just by seeing if its rabbit app
+%% stops - if a node stops for any other reason it just gets a message
+%% it will ignore, and otherwise we carry on.
+%%
+%% The winner and the leader are not necessarily the same node.
+%%
+%% Possible states:
+%%
+%% not_healing
+%% - the default
+%%
+%% {winner_waiting, OutstandingStops, Notify}
+%% - we are the winner and are waiting for all losing nodes to stop
+%% before telling them they can restart
+%%
+%% restarting
+%% - we are restarting. Of course the node monitor immediately dies
+%% then so this state does not last long. We therefore send the
+%% autoheal_safe_to_start message to the rabbit_outside_app_process
+%% instead.
+
+%%----------------------------------------------------------------------------
+
+init() -> not_healing.
+
%% Kick off an autoheal request if we are currently idle and autoheal
%% is the configured partition-handling mode; ask the leader (the
%% first node of the sorted cluster) to make a decision. Any
%% in-flight healing state is returned unchanged.
maybe_start(not_healing) ->
    maybe_request_heal(enabled());
maybe_start(Other) ->
    Other.

%% Send the {request_start, node()} message to the leader when
%% autoheal is enabled; otherwise do nothing.
maybe_request_heal(false) ->
    not_healing;
maybe_request_heal(true) ->
    [Leader | _] = lists:usort(rabbit_mnesia:cluster_nodes(all)),
    send(Leader, {request_start, node()}),
    rabbit_log:info("Autoheal request sent to ~p~n", [Leader]),
    not_healing.
+
%% True iff the cluster_partition_handling application env of the
%% rabbit app is set to 'autoheal'.
enabled() ->
    case application:get_env(rabbit, cluster_partition_handling) of
        {ok, autoheal} -> true;
        _              -> false
    end.
+
+
%% This is the winner receiving its last notification that a node has
%% stopped - all nodes can now start again
rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
    case WaitFor of
        [Node] ->
            %% That was the final loser; tell everybody to restart.
            rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]),
            notify_safe(Notify),
            not_healing;
        _ ->
            {winner_waiting, WaitFor -- [Node], Notify}
    end;
rabbit_down(Node, {leader_waiting, WaitFor}) ->
    case WaitFor of
        [Node] -> not_healing;
        _      -> {leader_waiting, WaitFor -- [Node]}
    end;
rabbit_down(_Node, State) ->
    %% ignore, we already cancelled the autoheal process
    State.
+
%% A node left the cluster entirely: abandon any in-progress heal,
%% since the protocol assumes all nodes stay up throughout.
node_down(_Node, not_healing) ->
    not_healing;
node_down(Node, State) ->
    rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
    case State of
        {winner_waiting, _WaitFor, Notify} ->
            %% Make sure any nodes waiting for us start - it won't necessarily
            %% heal the partition but at least they won't get stuck.
            notify_safe(Notify);
        _ ->
            ok
    end,
    not_healing.
+
%% By receiving this message we become the leader
%% TODO should we try to debounce this?
%%
%% Protocol dispatch for the autoheal messages exchanged between the
%% leader, the winner and the losers. Clause order matters: the
%% not_healing clauses must precede their catch-all counterparts.
handle_msg({request_start, Node},
           not_healing, Partitions) ->
    rabbit_log:info("Autoheal request received from ~p~n", [Node]),
    rabbit_node_monitor:ping_all(),
    %% Only decide when every node is up; otherwise abandon silently.
    case rabbit_node_monitor:all_rabbit_nodes_up() of
        false -> not_healing;
        true  -> AllPartitions = all_partitions(Partitions),
                 {Winner, Losers} = make_decision(AllPartitions),
                 rabbit_log:info("Autoheal decision~n"
                                 " * Partitions: ~p~n"
                                 " * Winner: ~p~n"
                                 " * Losers: ~p~n",
                                 [AllPartitions, Winner, Losers]),
                 %% Tell every loser who won; losers will then stop and
                 %% wait for the winner's safe-to-start notification.
                 [send(L, {winner_is, Winner}) || L <- Losers],
                 Continue = fun(Msg) ->
                                    handle_msg(Msg, not_healing, Partitions)
                            end,
                 %% If we ourselves are the winner (or a loser), handle
                 %% the corresponding message locally rather than
                 %% round-tripping it through send/2.
                 case node() =:= Winner of
                     true  -> Continue({become_winner, Losers});
                     false -> send(Winner, {become_winner, Losers}), %% [0]
                              case lists:member(node(), Losers) of
                                  true  -> Continue({winner_is, Winner});
                                  false -> {leader_waiting, Losers}
                              end
                 end
    end;
%% [0] If we are a loser we will never receive this message - but it
%% won't stick in the mailbox as we are restarting anyway

handle_msg({request_start, Node},
           State, _Partitions) ->
    rabbit_log:info("Autoheal request received from ~p when in state ~p; "
                    "ignoring~n", [Node, State]),
    State;

handle_msg({become_winner, Losers},
           not_healing, _Partitions) ->
    %% Notify the same set of nodes that we are waiting for.
    rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n",
                    [Losers]),
    {winner_waiting, Losers, Losers};

handle_msg({become_winner, Losers},
           {winner_waiting, WaitFor, Notify}, _Partitions) ->
    %% A second decision arrived while we were already waiting; merge
    %% the new losers with the outstanding ones instead of dropping
    %% either set.
    rabbit_log:info("Autoheal: I am the winner, waiting additionally for "
                    "~p to stop~n", [Losers]),
    {winner_waiting, lists:usort(Losers ++ WaitFor),
     lists:usort(Losers ++ Notify)};

handle_msg({winner_is, Winner},
           not_healing, _Partitions) ->
    rabbit_log:warning(
      "Autoheal: we were selected to restart; winner is ~p~n", [Winner]),
    rabbit_node_monitor:run_outside_applications(
      fun () ->
              %% Monitor the winner's node monitor so we do not wait
              %% forever if the winner dies before giving the
              %% go-ahead.
              MRef = erlang:monitor(process, {?SERVER, Winner}),
              rabbit:stop(),
              receive
                  {'DOWN', MRef, process, {?SERVER, Winner}, _Reason} -> ok;
                  autoheal_safe_to_start                              -> ok
              end,
              erlang:demonitor(MRef, [flush]),
              rabbit:start()
      end),
    restarting;

handle_msg(_, restarting, _Partitions) ->
    %% ignore, we can contribute no further
    restarting.
+
+%%----------------------------------------------------------------------------
+
+send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
+
%% Tell each node in Notify that it may start rabbit again. The
%% message goes to rabbit_outside_app_process because the target
%% node's rabbit app (and node monitor) is stopped at this point.
notify_safe(Notify) ->
    [{rabbit_outside_app_process, N} ! autoheal_safe_to_start || N <- Notify].
+
%% Rank all partitions by partition_value/1 (most client connections,
%% then most nodes). The first node of the best partition is the
%% winner; every node of the remaining partitions is a loser.
make_decision(AllPartitions) ->
    Ranked = lists:sort([{partition_value(P), P} || P <- AllPartitions]),
    [Best | Rest] = [P || {_Value, P} <- lists:reverse(Ranked)],
    [Winner | _] = Best,
    {Winner, lists:append(Rest)}.
+
%% Value a partition as {number of client connections, number of
%% nodes}; bigger tuples win. Nodes whose rpc fails (badrpc) simply
%% contribute nothing to the connection count.
partition_value(Partition) ->
    Conns = lists:foldr(
              fun (Node, Acc) ->
                      case rpc:call(Node, rabbit_networking,
                                    connections_local, []) of
                          Res when is_list(Res) -> [Res | Acc];
                          _Bad                  -> Acc
                      end
              end, [], Partition),
    {length(lists:append(Conns)), length(Partition)}.
+
%% We have our local understanding of what partitions exist; but we
%% only know which nodes we have been partitioned from, not which
%% nodes are partitioned from each other.
all_partitions(PartitionedWith) ->
    %% Start from a single partition holding every cluster node, then
    %% let each node's partition report split it progressively.
    Nodes = rabbit_mnesia:cluster_nodes(all),
    Partitions = [{node(), PartitionedWith} |
                  rabbit_node_monitor:partitions(Nodes -- [node()])],
    all_partitions(Partitions, [Nodes]).
+
%% Refine the partition list: for each {Node, CantSee} report, split
%% the partition containing Node into the nodes it can see and the
%% nodes it cannot - unless one of the two halves would be empty, in
%% which case the report adds no information and is ignored.
all_partitions([], Partitions) ->
    Partitions;
all_partitions([{Node, CantSee} | Rest], Partitions) ->
    {[Home], Others} =
        lists:partition(fun (Part) -> lists:member(Node, Part) end,
                        Partitions),
    Visible = Home -- CantSee,
    Hidden  = Home -- Visible,
    Partitions1 = case Visible =/= [] andalso Hidden =/= [] of
                      true  -> [Visible, Hidden | Others];
                      false -> Partitions
                  end,
    all_partitions(Rest, Partitions1).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_backing_queue).
+
+-ifdef(use_specs).
+
+%% We can't specify a per-queue ack/state with callback signatures
+-type(ack() :: any()).
+-type(state() :: any()).
+
+-type(msg_ids() :: [rabbit_types:msg_id()]).
+-type(fetch_result(Ack) ::
+ ('empty' | {rabbit_types:basic_message(), boolean(), Ack})).
+-type(drop_result(Ack) ::
+ ('empty' | {rabbit_types:msg_id(), Ack})).
+-type(recovery_terms() :: [term()] | 'non_clean_shutdown').
+-type(recovery_info() :: 'new' | recovery_terms()).
+-type(purged_msg_count() :: non_neg_integer()).
+-type(async_callback() ::
+ fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok')).
+-type(duration() :: ('undefined' | 'infinity' | number())).
+
+-type(msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A)).
+-type(msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean())).
+
+%% Called on startup with a list of durable queue names. The queues
+%% aren't being started at this point, but this call allows the
+%% backing queue to perform any checking necessary for the consistency
+%% of those queues, or initialise any other shared resources.
+%%
+%% The list of queue recovery terms returned as {ok, Terms} must be given
+%% in the same order as the list of queue names supplied.
+-callback start([rabbit_amqqueue:name()]) -> rabbit_types:ok(recovery_terms()).
+
+%% Called to tear down any state/resources. NB: Implementations should
+%% not depend on this function being called on shutdown and instead
+%% should hook into the rabbit supervision hierarchy.
+-callback stop() -> 'ok'.
+
+%% Initialise the backing queue and its state.
+%%
+%% Takes
+%% 1. the amqqueue record
+%% 2. a term indicating whether the queue is an existing queue that
+%% should be recovered or not. When 'new' is given, no recovery is
+%% taking place, otherwise a list of recovery terms is given, or
+%% the atom 'non_clean_shutdown' if no recovery terms are available.
+%% 3. an asynchronous callback which accepts a function of type
+%% backing-queue-state to backing-queue-state. This callback
+%% function can be safely invoked from any process, which makes it
+%% useful for passing messages back into the backing queue,
+%% especially as the backing queue does not have control of its own
+%% mailbox.
+-callback init(rabbit_types:amqqueue(), recovery_info(),
+ async_callback()) -> state().
+
+%% Called on queue shutdown when queue isn't being deleted.
+-callback terminate(any(), state()) -> state().
+
+%% Called when the queue is terminating and needs to delete all its
+%% content.
+-callback delete_and_terminate(any(), state()) -> state().
+
+%% Remove all 'fetchable' messages from the queue, i.e. all messages
+%% except those that have been fetched already and are pending acks.
+-callback purge(state()) -> {purged_msg_count(), state()}.
+
+%% Remove all messages in the queue which have been fetched and are
+%% pending acks.
+-callback purge_acks(state()) -> state().
+
+%% Publish a message.
+-callback publish(rabbit_types:basic_message(),
+ rabbit_types:message_properties(), boolean(), pid(),
+ state()) -> state().
+
+%% Called for messages which have already been passed straight
+%% out to a client. The queue will be empty for these calls
+%% (i.e. saves the round trip through the backing queue).
+-callback publish_delivered(rabbit_types:basic_message(),
+ rabbit_types:message_properties(), pid(), state())
+ -> {ack(), state()}.
+
+%% Called to inform the BQ about messages which have reached the
+%% queue, but are not going to be further passed to BQ.
+-callback discard(rabbit_types:msg_id(), pid(), state()) -> state().
+
+%% Return ids of messages which have been confirmed since the last
+%% invocation of this function (or initialisation).
+%%
+%% Message ids should only appear in the result of drain_confirmed
+%% under the following circumstances:
+%%
+%% 1. The message appears in a call to publish_delivered/4 and the
+%% first argument (ack_required) is false; or
+%% 2. The message is fetched from the queue with fetch/2 and the first
+%% argument (ack_required) is false; or
+%% 3. The message is acked (ack/2 is called for the message); or
+%% 4. The message is fully fsync'd to disk in such a way that the
+%% recovery of the message is guaranteed in the event of a crash of
+%% this rabbit node (excluding hardware failure).
+%%
+%% In addition to the above conditions, a message id may only appear
+%% in the result of drain_confirmed if
+%% #message_properties.needs_confirming = true when the msg was
+%% published (through whichever means) to the backing queue.
+%%
+%% It is legal for the same message id to appear in the results of
+%% multiple calls to drain_confirmed, which means that the backing
+%% queue is not required to keep track of which messages it has
+%% already confirmed. The confirm will be issued to the publisher the
+%% first time the message id appears in the result of
+%% drain_confirmed. All subsequent appearances of that message id will
+%% be ignored.
+-callback drain_confirmed(state()) -> {msg_ids(), state()}.
+
+%% Drop messages from the head of the queue while the supplied
+%% predicate on message properties returns true. Returns the first
%% message properties for which the predicate returned false, or
+%% 'undefined' if the whole backing queue was traversed w/o the
+%% predicate ever returning false.
+-callback dropwhile(msg_pred(), state())
+ -> {rabbit_types:message_properties() | undefined, state()}.
+
+%% Like dropwhile, except messages are fetched in "require
+%% acknowledgement" mode and are passed, together with their ack tag,
+%% to the supplied function. The function is also fed an
+%% accumulator. The result of fetchwhile is as for dropwhile plus the
+%% accumulator.
+-callback fetchwhile(msg_pred(), msg_fun(A), A, state())
+ -> {rabbit_types:message_properties() | undefined,
+ A, state()}.
+
+%% Produce the next message.
+-callback fetch(true, state()) -> {fetch_result(ack()), state()};
+ (false, state()) -> {fetch_result(undefined), state()}.
+
+%% Remove the next message.
+-callback drop(true, state()) -> {drop_result(ack()), state()};
+ (false, state()) -> {drop_result(undefined), state()}.
+
+%% Acktags supplied are for messages which can now be forgotten
+%% about. Must return 1 msg_id per Ack, in the same order as Acks.
+-callback ack([ack()], state()) -> {msg_ids(), state()}.
+
+%% Reinsert messages into the queue which have already been delivered
+%% and were pending acknowledgement.
+-callback requeue([ack()], state()) -> {msg_ids(), state()}.
+
+%% Fold over messages by ack tag. The supplied function is called with
+%% each message, its ack tag, and an accumulator.
+-callback ackfold(msg_fun(A), A, state(), [ack()]) -> {A, state()}.
+
+%% Fold over all the messages in a queue and return the accumulated
+%% results, leaving the queue undisturbed.
+-callback fold(fun((rabbit_types:basic_message(),
+ rabbit_types:message_properties(),
+ boolean(), A) -> {('stop' | 'cont'), A}),
+ A, state()) -> {A, state()}.
+
+%% How long is my queue?
+-callback len(state()) -> non_neg_integer().
+
+%% Is my queue empty?
+-callback is_empty(state()) -> boolean().
+
+%% What's the queue depth, where depth = length + number of pending acks
+-callback depth(state()) -> non_neg_integer().
+
+%% For the next three functions, the assumption is that you're
+%% monitoring something like the ingress and egress rates of the
+%% queue. The RAM duration is thus the length of time represented by
+%% the messages held in RAM given the current rates. If you want to
+%% ignore all of this stuff, then do so, and return 0 in
+%% ram_duration/1.
+
+%% The target is to have no more messages in RAM than indicated by the
+%% duration and the current queue rates.
+-callback set_ram_duration_target(duration(), state()) -> state().
+
+%% Optionally recalculate the duration internally (likely to be just
+%% update your internal rates), and report how many seconds the
+%% messages in RAM represent given the current rates of the queue.
+-callback ram_duration(state()) -> {duration(), state()}.
+
+%% Should 'timeout' be called as soon as the queue process can manage
+%% (either on an empty mailbox, or when a timer fires)?
+-callback needs_timeout(state()) -> 'false' | 'timed' | 'idle'.
+
+%% Called (eventually) after needs_timeout returns 'idle' or 'timed'.
+%% Note this may be called more than once for each 'idle' or 'timed'
+%% returned from needs_timeout
+-callback timeout(state()) -> state().
+
+%% Called immediately before the queue hibernates.
+-callback handle_pre_hibernate(state()) -> state().
+
+%% Called when more credit has become available for credit_flow.
+-callback resume(state()) -> state().
+
+%% Used to help prioritisation in rabbit_amqqueue_process. The rate of
+%% inbound messages and outbound messages at the moment.
+-callback msg_rates(state()) -> {float(), float()}.
+
+%% Exists for debugging purposes, to be able to expose state via
+%% rabbitmqctl list_queues backing_queue_status
+-callback status(state()) -> [{atom(), any()}].
+
+%% Passed a function to be invoked with the relevant backing queue's
+%% state. Useful for when the backing queue or other components need
+%% to pass functions into the backing queue.
+-callback invoke(atom(), fun ((atom(), A) -> A), state()) -> state().
+
+%% Called prior to a publish or publish_delivered call. Allows the BQ
+%% to signal that it's already seen this message, (e.g. it was published
+%% or discarded previously) and thus the message should be dropped.
+-callback is_duplicate(rabbit_types:basic_message(), state())
+ -> {boolean(), state()}.
+
+-else.
+
+-export([behaviour_info/1]).
+
%% Fallback callback listing for compilers that do not understand
%% -callback attributes. This list must stay in sync with the
%% -callback declarations in the use_specs branch above.
%%
%% Fix: {drop, 2} is declared as a -callback above but was missing
%% here, so implementations compiled without use_specs were never
%% checked for drop/2. Also removed the stray space before ';'.
behaviour_info(callbacks) ->
    [{start, 1}, {stop, 0}, {init, 3}, {terminate, 2},
     {delete_and_terminate, 2}, {purge, 1}, {purge_acks, 1}, {publish, 5},
     {publish_delivered, 4}, {discard, 3}, {drain_confirmed, 1},
     {dropwhile, 2}, {fetchwhile, 4},
     {fetch, 2}, {drop, 2}, {ack, 2}, {requeue, 2}, {ackfold, 4}, {fold, 3},
     {len, 1}, {is_empty, 1}, {depth, 1}, {set_ram_duration_target, 2},
     {ram_duration, 1}, {needs_timeout, 1}, {timeout, 1},
     {handle_pre_hibernate, 1}, {resume, 1}, {msg_rates, 1}, {status, 1},
     {invoke, 3}, {is_duplicate, 2}];
behaviour_info(_Other) ->
    undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_backing_queue_qc).
+-ifdef(use_proper_qc).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include_lib("proper/include/proper.hrl").
+
+-behaviour(proper_statem).
+
+-define(BQMOD, rabbit_variable_queue).
+-define(QUEUE_MAXLEN, 10000).
+-define(TIMEOUT_LIMIT, 100).
+
+-define(RECORD_INDEX(Key, Record),
+ proplists:get_value(
+ Key, lists:zip(record_info(fields, Record),
+ lists:seq(2, record_info(size, Record))))).
+
+-export([initial_state/0, command/1, precondition/2, postcondition/3,
+ next_state/3]).
+
+-export([prop_backing_queue_test/0, publish_multiple/1,
+ timeout/2, bump_credit/1]).
+
+-record(state, {bqstate,
+ len, %% int
+ next_seq_id, %% int
+ messages, %% gb_trees of seqid => {msg_props, basic_msg}
+ acks, %% [{acktag, {seqid, {msg_props, basic_msg}}}]
+ confirms, %% set of msgid
+ publishing}).%% int
+
%% Initialise model

%% Symbolic initial state: a freshly-initialised backing queue plus an
%% empty model (no messages, no pending acks, no expected confirms).
initial_state() ->
    #state{bqstate     = qc_variable_queue_init(qc_test_queue()),
           len         = 0,
           next_seq_id = 0,
           messages    = gb_trees:empty(),
           acks        = [],
           confirms    = gb_sets:new(),
           publishing  = 0}.
+
%% Property

%% Top-level property: every generated command sequence must run to
%% completion with all postconditions holding.
prop_backing_queue_test() ->
    ?FORALL(Cmds, commands(?MODULE, initial_state()),
            backing_queue_test(Cmds)).
+
%% Run one generated command sequence. Temporarily shrinks the message
%% store file size and queue index journal limits so that file
%% roll-over and journal flushing are exercised with modest queue
%% sizes; the original settings are restored afterwards.
backing_queue_test(Cmds) ->
    {ok, FileSizeLimit} =
        application:get_env(rabbit, msg_store_file_size_limit),
    application:set_env(rabbit, msg_store_file_size_limit, 512,
                        infinity),
    {ok, MaxJournal} =
        application:get_env(rabbit, queue_index_max_journal_entries),
    application:set_env(rabbit, queue_index_max_journal_entries, 128,
                        infinity),

    {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds),

    application:set_env(rabbit, msg_store_file_size_limit,
                        FileSizeLimit, infinity),
    application:set_env(rabbit, queue_index_max_journal_entries,
                        MaxJournal, infinity),

    %% Dispose of whatever queue contents the run left behind.
    ?BQMOD:delete_and_terminate(shutdown, BQ),
    ?WHENFAIL(
       io:format("Result: ~p~n", [Res]),
       aggregate(command_names(Cmds), Res =:= ok)).
+
%% Commands

%% Command frequencies are tuned so that queues are normally
%% reasonably short, but they may sometimes exceed
%% ?QUEUE_MAXLEN. Publish-multiple and purging cause extreme queue
%% lengths, so these have lower probabilities. Fetches/drops are
%% sufficiently frequent so that commands that need acktags get decent
%% coverage.

%% Weighted generator of the next symbolic command for state S.
command(S) ->
    frequency([{10, qc_publish(S)},
               {1,  qc_publish_delivered(S)},
               {1,  qc_publish_multiple(S)},  %% very slow
               {9,  qc_fetch(S)},             %% needed for ack and requeue
               {6,  qc_drop(S)},              %%
               {15, qc_ack(S)},
               {15, qc_requeue(S)},
               {3,  qc_set_ram_duration_target(S)},
               {1,  qc_ram_duration(S)},
               {1,  qc_drain_confirmed(S)},
               {1,  qc_dropwhile(S)},
               {1,  qc_is_empty(S)},
               {1,  qc_timeout(S)},
               {1,  qc_bump_credit(S)},
               {1,  qc_purge(S)},
               {1,  qc_fold(S)}]).
+
%% Symbolic command generators - one per backing queue operation. Each
%% returns a {call, Mod, Fun, Args} term for proper_statem.

%% Publish with occasional confirm tracking and short expiries.
qc_publish(#state{bqstate = BQ}) ->
    {call, ?BQMOD, publish,
     [qc_message(),
      #message_properties{needs_confirming = frequency([{1, true},
                                                        {20, false}]),
                          expiry = oneof([undefined | lists:seq(1, 10)])},
      false, self(), BQ]}.

qc_publish_multiple(#state{}) ->
    {call, ?MODULE, publish_multiple, [resize(?QUEUE_MAXLEN, pos_integer())]}.

qc_publish_delivered(#state{bqstate = BQ}) ->
    {call, ?BQMOD, publish_delivered,
     [qc_message(), #message_properties{}, self(), BQ]}.

qc_fetch(#state{bqstate = BQ}) ->
    {call, ?BQMOD, fetch, [boolean(), BQ]}.

qc_drop(#state{bqstate = BQ}) ->
    {call, ?BQMOD, drop, [boolean(), BQ]}.

%% ack/requeue draw a random subset of the model's outstanding acktags.
qc_ack(#state{bqstate = BQ, acks = Acks}) ->
    {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), BQ]}.

qc_requeue(#state{bqstate = BQ, acks = Acks}) ->
    {call, ?BQMOD, requeue, [rand_choice(proplists:get_keys(Acks)), BQ]}.

qc_set_ram_duration_target(#state{bqstate = BQ}) ->
    {call, ?BQMOD, set_ram_duration_target,
     [oneof([0, 1, 2, resize(1000, pos_integer()), infinity]), BQ]}.

qc_ram_duration(#state{bqstate = BQ}) ->
    {call, ?BQMOD, ram_duration, [BQ]}.

qc_drain_confirmed(#state{bqstate = BQ}) ->
    {call, ?BQMOD, drain_confirmed, [BQ]}.

qc_dropwhile(#state{bqstate = BQ}) ->
    {call, ?BQMOD, dropwhile, [fun dropfun/1, BQ]}.

qc_is_empty(#state{bqstate = BQ}) ->
    {call, ?BQMOD, is_empty, [BQ]}.

qc_timeout(#state{bqstate = BQ}) ->
    {call, ?MODULE, timeout, [BQ, ?TIMEOUT_LIMIT]}.

qc_bump_credit(#state{bqstate = BQ}) ->
    {call, ?MODULE, bump_credit, [BQ]}.

qc_purge(#state{bqstate = BQ}) ->
    {call, ?BQMOD, purge, [BQ]}.

qc_fold(#state{bqstate = BQ}) ->
    {call, ?BQMOD, fold, [makefoldfun(pos_integer()), foldacc(), BQ]}.
+
%% Preconditions

%% Create long queues by only allowing publishing
%% (while the publish_multiple budget is outstanding, every command
%% except publish is vetoed).
precondition(#state{publishing = Count}, {call, _Mod, Fun, _Arg})
  when Count > 0, Fun /= publish ->
    false;
%% ack/requeue only make sense once we hold at least one acktag.
precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg})
  when Fun =:= ack; Fun =:= requeue ->
    length(Acks) > 0;
%% publish_delivered requires an empty queue (the broker only bypasses
%% the queue when there is nothing ahead of the message).
precondition(#state{messages = Messages},
             {call, ?BQMOD, publish_delivered, _Arg}) ->
    gb_trees:is_empty(Messages);
precondition(_S, {call, ?BQMOD, _Fun, _Arg}) ->
    true;
precondition(_S, {call, ?MODULE, timeout, _Arg}) ->
    true;
precondition(_S, {call, ?MODULE, bump_credit, _Arg}) ->
    true;
%% Cap queue growth from publish_multiple.
precondition(#state{len = Len}, {call, ?MODULE, publish_multiple, _Arg}) ->
    Len < ?QUEUE_MAXLEN.
+
%% Model updates

%% publish: insert at the next sequence id, remember the message id if
%% it needs confirming, and count down the publish_multiple budget
%% (clamped at zero).
next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Del, _Pid, _BQ]}) ->
    #state{len         = Len,
           messages    = Messages,
           confirms    = Confirms,
           publishing  = PublishCount,
           next_seq_id = NextSeq} = S,
    MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
    NeedsConfirm =
        {call, erlang, element,
         [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
    S#state{bqstate     = BQ,
            len         = Len + 1,
            next_seq_id = NextSeq + 1,
            messages    = gb_trees:insert(NextSeq, {MsgProps, Msg}, Messages),
            publishing  = {call, erlang, max, [0, {call, erlang, '-',
                                                   [PublishCount, 1]}]},
            confirms    = case eval(NeedsConfirm) of
                              true -> gb_sets:add(MsgId, Confirms);
                              _    -> Confirms
                          end};

%% publish_multiple only arms the publishing counter; see the
%% precondition that then forces a run of publish commands.
next_state(S, _BQ, {call, ?MODULE, publish_multiple, [PublishCount]}) ->
    S#state{publishing = PublishCount};

%% publish_delivered: the message bypasses the queue and goes straight
%% to the pending-ack list.
next_state(S, Res,
           {call, ?BQMOD, publish_delivered,
            [Msg, MsgProps, _Pid, _BQ]}) ->
    #state{confirms = Confirms, acks = Acks, next_seq_id = NextSeq} = S,
    AckTag = {call, erlang, element, [1, Res]},
    BQ1 = {call, erlang, element, [2, Res]},
    MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
    NeedsConfirm =
        {call, erlang, element,
         [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
    S#state{bqstate     = BQ1,
            next_seq_id = NextSeq + 1,
            confirms    = case eval(NeedsConfirm) of
                              true -> gb_sets:add(MsgId, Confirms);
                              _    -> Confirms
                          end,
            acks = [{AckTag, {NextSeq, {MsgProps, Msg}}}|Acks]
           };

%% fetch/drop share their model update; 3 and 2 are the positions of
%% the ack tag in their respective result tuples.
next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) ->
    next_state_fetch_and_drop(S, Res, AckReq, 3);

next_state(S, Res, {call, ?BQMOD, drop, [AckReq, _BQ]}) ->
    next_state_fetch_and_drop(S, Res, AckReq, 2);

%% ack: forget the acked tags.
next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) ->
    #state{acks = AcksState} = S,
    BQ1 = {call, erlang, element, [2, Res]},
    S#state{bqstate = BQ1,
            acks    = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};

%% requeue: move acked messages back into the model queue at their
%% original sequence ids.
next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _V]}) ->
    #state{messages = Messages, acks = AcksState} = S,
    BQ1 = {call, erlang, element, [2, Res]},
    Messages1 = lists:foldl(fun (AckTag, Msgs) ->
                                    {SeqId, MsgPropsMsg} =
                                        proplists:get_value(AckTag, AcksState),
                                    gb_trees:insert(SeqId, MsgPropsMsg, Msgs)
                            end, Messages, AcksArg),
    S#state{bqstate  = BQ1,
            len      = gb_trees:size(Messages1),
            messages = Messages1,
            acks     = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};

next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) ->
    S#state{bqstate = BQ};

next_state(S, Res, {call, ?BQMOD, ram_duration, _Args}) ->
    BQ1 = {call, erlang, element, [2, Res]},
    S#state{bqstate = BQ1};

next_state(S, Res, {call, ?BQMOD, drain_confirmed, _Args}) ->
    BQ1 = {call, erlang, element, [2, Res]},
    S#state{bqstate = BQ1};

%% dropwhile: mirror the queue's drop in the model via drop_messages/1.
next_state(S, Res, {call, ?BQMOD, dropwhile, _Args}) ->
    BQ = {call, erlang, element, [2, Res]},
    #state{messages = Messages} = S,
    Msgs1 = drop_messages(Messages),
    S#state{bqstate = BQ, len = gb_trees:size(Msgs1), messages = Msgs1};

next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) ->
    S;

next_state(S, BQ, {call, ?MODULE, timeout, _Args}) ->
    S#state{bqstate = BQ};
next_state(S, BQ, {call, ?MODULE, bump_credit, _Args}) ->
    S#state{bqstate = BQ};

next_state(S, Res, {call, ?BQMOD, purge, _Args}) ->
    BQ1 = {call, erlang, element, [2, Res]},
    S#state{bqstate = BQ1, len = 0, messages = gb_trees:empty()};

next_state(S, Res, {call, ?BQMOD, fold, _Args}) ->
    BQ1 = {call, erlang, element, [2, Res]},
    S#state{bqstate = BQ1}.
+
%% Postconditions

%% fetch: the fetched message must equal the model's oldest message,
%% the ack tag must be fresh, and 'empty' may only be returned when
%% the model queue is empty.
postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) ->
    #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
    case Res of
        {{MsgFetched, _IsDelivered, AckTag}, _BQ} ->
            {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
            MsgFetched =:= Msg andalso
            not proplists:is_defined(AckTag, Acks) andalso
            not gb_sets:is_element(AckTag, Confrms) andalso
            Len =/= 0;
        {empty, _BQ} ->
            Len =:= 0
    end;

%% drop: as fetch, but only the message id comes back.
postcondition(S, {call, ?BQMOD, drop, _Args}, Res) ->
    #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
    case Res of
        {{MsgIdFetched, AckTag}, _BQ} ->
            {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
            MsgId = eval({call, erlang, element,
                          [?RECORD_INDEX(id, basic_message), Msg]}),
            MsgIdFetched =:= MsgId andalso
            not proplists:is_defined(AckTag, Acks) andalso
            not gb_sets:is_element(AckTag, Confrms) andalso
            Len =/= 0;
        {empty, _BQ} ->
            Len =:= 0
    end;

%% publish_delivered: the returned ack tag must be fresh.
postcondition(S, {call, ?BQMOD, publish_delivered, _Args}, {AckTag, _BQ}) ->
    #state{acks = Acks, confirms = Confrms} = S,
    not proplists:is_defined(AckTag, Acks) andalso
    not gb_sets:is_element(AckTag, Confrms);

%% purge must report exactly the model's queue length.
postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) ->
    {PurgeCount, _BQ} = Res,
    Len =:= PurgeCount;

postcondition(#state{len = Len}, {call, ?BQMOD, is_empty, _Args}, Res) ->
    (Len =:= 0) =:= Res;

%% Every confirm reported must be one the model was expecting.
postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) ->
    #state{confirms = Confirms} = S,
    {ReportedConfirmed, _BQ} = Res,
    lists:all(fun (M) -> gb_sets:is_element(M, Confirms) end,
              ReportedConfirmed);

%% fold must agree with folding the same function over the model's
%% message list (messages are never unacked in the model, hence the
%% 'false' third argument).
postcondition(S, {call, ?BQMOD, fold, [FoldFun, Acc0, _BQ0]}, {Res, _BQ1}) ->
    #state{messages = Messages} = S,
    {_, Model} = lists:foldl(fun ({_SeqId, {_MsgProps, _Msg}}, {stop, Acc}) ->
                                     {stop, Acc};
                                 ({_SeqId, {MsgProps, Msg}}, {cont, Acc}) ->
                                     FoldFun(Msg, MsgProps, false, Acc)
                             end, {cont, Acc0}, gb_trees:to_list(Messages)),
    true = Model =:= Res;

%% Default: whatever the command, the real queue length must track the
%% model's.
postcondition(#state{bqstate = BQ, len = Len}, {call, _M, _F, _A}, _Res) ->
    ?BQMOD:len(BQ) =:= Len.
+
%% Helpers

%% No-op: publish_multiple merely arms the model's 'publishing'
%% counter (see next_state/3); the actual publishes are generated as
%% subsequent publish commands.
publish_multiple(_C) ->
    ok.

%% Drive the backing queue's timeout callback until it reports no
%% timeout is needed, but at most AtMost iterations.
timeout(BQ, 0) ->
    BQ;
timeout(BQ, AtMost) ->
    case ?BQMOD:needs_timeout(BQ) of
        false -> BQ;
        _     -> timeout(?BQMOD:timeout(BQ), AtMost - 1)
    end.

%% If credit_flow has blocked this process, consume one bump message
%% from the mailbox and resume the backing queue; otherwise no-op.
bump_credit(BQ) ->
    case credit_flow:blocked() of
        false -> BQ;
        true  -> receive
                     {bump_credit, Msg} ->
                         credit_flow:handle_bump_msg(Msg),
                         ?BQMOD:resume(BQ)
                 end
    end.
+
%% Payload size grows quadratically with the generation size, so both
%% tiny and message-store-worthy payloads are covered.
qc_message_payload() -> ?SIZED(Size, resize(Size * Size, binary())).

qc_routing_key() -> noshrink(binary(10)).

%% 1 = transient, 2 = persistent.
qc_delivery_mode() -> oneof([1, 2]).

qc_message() -> qc_message(qc_delivery_mode()).

%% Symbolic rabbit_basic:message/4 call on the default exchange.
qc_message(DeliveryMode) ->
    {call, rabbit_basic, message, [qc_default_exchange(),
                                   qc_routing_key(),
                                   #'P_basic'{delivery_mode = DeliveryMode},
                                   qc_message_payload()]}.

qc_default_exchange() ->
    {call, rabbit_misc, r, [<<>>, exchange, <<>>]}.

%% Initialise a fresh (non-recovered) variable queue; the async
%% callback is a stub generated by PropEr.
qc_variable_queue_init(Q) ->
    {call, ?BQMOD, init,
     [Q, new, function(2, {ok, []})]}.

qc_test_q() -> {call, rabbit_misc, r, [<<"/">>, queue, noshrink(binary(16))]}.

qc_test_queue() -> qc_test_queue(boolean()).

%% A minimal amqqueue record owned by the test process itself.
qc_test_queue(Durable) ->
    #amqqueue{name        = qc_test_q(),
              durable     = Durable,
              auto_delete = false,
              arguments   = [],
              pid         = self()}.
+
%% Pick a uniformly-sized random selection (without replacement) from
%% List; [] for an empty list. The sequence of random:uniform/1 calls
%% is identical to the original implementation, so results match for
%% a given seed.
rand_choice([]) ->
    [];
rand_choice(List) ->
    HowMany = random:uniform(length(List)),
    take_random(List, HowMany, []).

%% Remove N random elements from Remaining, accumulating them.
take_random(_Remaining, 0, Acc) ->
    Acc;
take_random(Remaining, N, Acc) ->
    Picked = lists:nth(random:uniform(length(Remaining)), Remaining),
    take_random(Remaining -- [Picked], N - 1, [Picked | Acc]).
+
%% Build a fold function for ?BQMOD:fold/3 that collects acked
%% messages until the accumulator has grown past Size, then stops.
makefoldfun(Size) ->
    fun (Msg, _MsgProps, Unacked, Acc) ->
            if
                length(Acc) > Size -> {stop, Acc};
                Unacked            -> {cont, Acc};
                true               -> {cont, [Msg | Acc]}
            end
    end.

%% Initial fold accumulator.
foldacc() -> [].
+
%% Predicate for dropwhile: keep dropping until a message with
%% expiry =:= 1 is reached.
dropfun(Props) ->
    Expiry = eval({call, erlang, element,
                   [?RECORD_INDEX(expiry, message_properties), Props]}),
    Expiry =/= 1.
+
%% Model counterpart of dropwhile: remove leading messages from the
%% gb_tree for as long as dropfun/1 accepts their properties.
drop_messages(Messages) ->
    case gb_trees:is_empty(Messages) of
        true ->
            Messages;
        false -> {_Seq, MsgProps_Msg, M2} = gb_trees:take_smallest(Messages),
                 MsgProps = {call, erlang, element, [1, MsgProps_Msg]},
                 case dropfun(MsgProps) of
                     true  -> drop_messages(M2);
                     false -> Messages
                 end
    end.
+
%% Shared model update for fetch/2 and drop/2. AckTagIdx is the
%% position of the ack tag inside the result tuple: 3 for fetch's
%% {Msg, IsDelivered, AckTag}, 2 for drop's {MsgId, AckTag}. If
%% acking was requested the removed message moves to the acks list.
next_state_fetch_and_drop(S, Res, AckReq, AckTagIdx) ->
    #state{len = Len, messages = Messages, acks = Acks} = S,
    ResultInfo = {call, erlang, element, [1, Res]},
    BQ1 = {call, erlang, element, [2, Res]},
    AckTag = {call, erlang, element, [AckTagIdx, ResultInfo]},
    S1 = S#state{bqstate = BQ1},
    case gb_trees:is_empty(Messages) of
        true  -> S1;
        false -> {SeqId, MsgProp_Msg, M2} = gb_trees:take_smallest(Messages),
                 S2 = S1#state{len = Len - 1, messages = M2},
                 case AckReq of
                     true ->
                         S2#state{acks = [{AckTag, {SeqId, MsgProp_Msg}}|Acks]};
                     false ->
                         S2
                 end
    end.
+
+-else.
+
+-export([prop_disabled/0]).
+
+prop_disabled() ->
+ exit({compiled_without_proper,
+ "PropEr was not present during compilation of the test module. "
+ "Hence all tests are disabled."}).
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_basic).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([publish/4, publish/5, publish/1,
+ message/3, message/4, properties/1, prepend_table_header/3,
+ extract_headers/1, map_headers/2, delivery/4, header_routes/1,
+ parse_expiration/1]).
+-export([build_content/2, from_content/1, msg_size/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(properties_input() ::
+ (rabbit_framing:amqp_property_record() | [{atom(), any()}])).
+-type(publish_result() ::
+ ({ok, [pid()]} | rabbit_types:error('not_found'))).
+-type(headers() :: rabbit_framing:amqp_table() | 'undefined').
+
+-type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())).
+-type(body_input() :: (binary() | [binary()])).
+
+-spec(publish/4 ::
+ (exchange_input(), rabbit_router:routing_key(), properties_input(),
+ body_input()) -> publish_result()).
+-spec(publish/5 ::
+ (exchange_input(), rabbit_router:routing_key(), boolean(),
+ properties_input(), body_input()) -> publish_result()).
+-spec(publish/1 ::
+ (rabbit_types:delivery()) -> publish_result()).
+-spec(delivery/4 ::
+ (boolean(), boolean(), rabbit_types:message(), undefined | integer()) ->
+ rabbit_types:delivery()).
+-spec(message/4 ::
+ (rabbit_exchange:name(), rabbit_router:routing_key(),
+ properties_input(), binary()) -> rabbit_types:message()).
+-spec(message/3 ::
+ (rabbit_exchange:name(), rabbit_router:routing_key(),
+ rabbit_types:decoded_content()) ->
+ rabbit_types:ok_or_error2(rabbit_types:message(), any())).
+-spec(properties/1 ::
+ (properties_input()) -> rabbit_framing:amqp_property_record()).
+
+-spec(prepend_table_header/3 ::
+ (binary(), rabbit_framing:amqp_table(), headers()) -> headers()).
+
+-spec(extract_headers/1 :: (rabbit_types:content()) -> headers()).
+
+-spec(map_headers/2 :: (fun((headers()) -> headers()), rabbit_types:content())
+ -> rabbit_types:content()).
+
+-spec(header_routes/1 ::
+ (undefined | rabbit_framing:amqp_table()) -> [string()]).
+-spec(build_content/2 :: (rabbit_framing:amqp_property_record(),
+ binary() | [binary()]) -> rabbit_types:content()).
+-spec(from_content/1 :: (rabbit_types:content()) ->
+ {rabbit_framing:amqp_property_record(), binary()}).
+-spec(parse_expiration/1 ::
+ (rabbit_framing:amqp_property_record())
+ -> rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any())).
+
+-spec(msg_size/1 :: (rabbit_types:content() | rabbit_types:message()) ->
+ non_neg_integer()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Convenience function, for avoiding round-trips in calls across the
+%% erlang distributed network. Publishes without the mandatory flag.
+publish(Exchange, RoutingKeyBin, Properties, Body) ->
+ publish(Exchange, RoutingKeyBin, false, Properties, Body).
+
+%% Convenience function, for avoiding round-trips in calls across the
+%% erlang distributed network. Accepts either an #exchange{} record
+%% (no lookup needed) or an exchange name (resolved via publish/1).
+publish(X = #exchange{name = XName}, RKey, Mandatory, Props, Body) ->
+ Message = message(XName, RKey, properties(Props), Body),
+ publish(X, delivery(Mandatory, false, Message, undefined));
+publish(XName, RKey, Mandatory, Props, Body) ->
+ Message = message(XName, RKey, properties(Props), Body),
+ publish(delivery(Mandatory, false, Message, undefined)).
+
+%% Publish a pre-built #delivery{}: look up the exchange named in the
+%% message, then route and deliver. Returns {ok, QPids} or the lookup
+%% error.
+publish(Delivery = #delivery{
+ message = #basic_message{exchange_name = XName}}) ->
+ case rabbit_exchange:lookup(XName) of
+ {ok, X} -> publish(X, Delivery);
+ Err -> Err
+ end.
+
+%% Route Delivery through exchange X and deliver to the matched queues.
+publish(X, Delivery) ->
+ Qs = rabbit_amqqueue:lookup(rabbit_exchange:route(X, Delivery)),
+ DeliveredQPids = rabbit_amqqueue:deliver(Qs, Delivery),
+ {ok, DeliveredQPids}.
+
+%% Build a #delivery{} record originating from the calling process.
+delivery(Mandatory, Confirm, Message, MsgSeqNo) ->
+ #delivery{mandatory = Mandatory, confirm = Confirm, sender = self(),
+ message = Message, msg_seq_no = MsgSeqNo}.
+
+%% Build an unencoded #content{} from decoded properties and a body;
+%% the body may be a binary or a list of fragments (stored reversed).
+build_content(Properties, BodyBin) when is_binary(BodyBin) ->
+ build_content(Properties, [BodyBin]);
+
+build_content(Properties, PFR) ->
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ #content{class_id = ClassId,
+ properties = Properties,
+ properties_bin = none,
+ protocol = none,
+ payload_fragments_rev = PFR}.
+
+%% Inverse of build_content/2: decode a #content{} into its properties
+%% record and a single flat body binary.
+from_content(Content) ->
+ #content{class_id = ClassId,
+ properties = Props,
+ payload_fragments_rev = FragmentsRev} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ {Props, list_to_binary(lists:reverse(FragmentsRev))}.
+
+%% This breaks the spec rule forbidding message modification: remove
+%% the header Key (if present) from the decoded content, clearing any
+%% cached encoded properties so they are re-encoded on demand.
+strip_header(#content{properties = #'P_basic'{headers = undefined}}
+ = DecodedContent, _Key) ->
+ DecodedContent;
+strip_header(#content{properties = Props = #'P_basic'{headers = Headers}}
+ = DecodedContent, Key) ->
+ %% lists:keyfind/3 is the modern, cheaper replacement for the
+ %% old lists:keysearch/3 ({value, T} wrapper avoided)
+ case lists:keyfind(Key, 1, Headers) of
+ false -> DecodedContent;
+ Found -> Headers0 = lists:delete(Found, Headers),
+ rabbit_binary_generator:clear_encoded_content(
+ DecodedContent#content{
+ properties = Props#'P_basic'{
+ headers = Headers0}})
+ end.
+
+%% Build a #basic_message{} from decoded content. Returns {ok, Msg} or
+%% the {error, _} thrown by is_message_persistent/1 or header_routes/1.
+message(XName, RoutingKey, #content{properties = Props} = DecodedContent) ->
+ try
+ {ok, #basic_message{
+ exchange_name = XName,
+ content = strip_header(DecodedContent, ?DELETED_HEADER),
+ id = rabbit_guid:gen(),
+ is_persistent = is_message_persistent(DecodedContent),
+ routing_keys = [RoutingKey |
+ header_routes(Props#'P_basic'.headers)]}}
+ catch
+ {error, _Reason} = Error -> Error
+ end.
+
+%% As message/3 but builds the content first; invalid content crashes
+%% with badmatch rather than returning an error tuple.
+message(XName, RoutingKey, RawProperties, Body) ->
+ Properties = properties(RawProperties),
+ Content = build_content(Properties, Body),
+ {ok, Msg} = message(XName, RoutingKey, Content),
+ Msg.
+
+%% Normalise properties input: a #'P_basic'{} passes through; a
+%% [{FieldName, Value}] list is folded into a fresh #'P_basic'{}.
+properties(P = #'P_basic'{}) ->
+ P;
+properties(P) when is_list(P) ->
+ %% Yes, this is O(length(P) * record_info(size, 'P_basic') / 2),
+ %% i.e. slow. Use the definition of 'P_basic' directly if
+ %% possible!
+ lists:foldl(fun ({Key, Value}, Acc) ->
+ case indexof(record_info(fields, 'P_basic'), Key) of
+ 0 -> throw({unknown_basic_property, Key});
+ N -> setelement(N + 1, Acc, Value)
+ end
+ end, #'P_basic'{}, P).
+
+%% Prepend {table, Info} to the array stored under Name in Headers,
+%% creating the header if absent. A pre-existing non-array value under
+%% Name is preserved inside the ?INVALID_HEADERS_KEY table.
+prepend_table_header(Name, Info, undefined) ->
+ prepend_table_header(Name, Info, []);
+prepend_table_header(Name, Info, Headers) ->
+ case rabbit_misc:table_lookup(Headers, Name) of
+ {array, Existing} ->
+ prepend_table(Name, Info, Existing, Headers);
+ undefined ->
+ prepend_table(Name, Info, [], Headers);
+ Other ->
+ Headers2 = prepend_table(Name, Info, [], Headers),
+ set_invalid_header(Name, Other, Headers2)
+ end.
+
+prepend_table(Name, Info, Prior, Headers) ->
+ rabbit_misc:set_table_value(Headers, Name, array, [{table, Info} | Prior]).
+
+%% Record an invalid header value under ?INVALID_HEADERS_KEY,
+%% accumulating multiple invalid values per name in an array.
+set_invalid_header(Name, {_, _}=Value, Headers) when is_list(Headers) ->
+ case rabbit_misc:table_lookup(Headers, ?INVALID_HEADERS_KEY) of
+ undefined ->
+ set_invalid([{Name, array, [Value]}], Headers);
+ {table, ExistingHdr} ->
+ update_invalid(Name, Value, ExistingHdr, Headers);
+ Other ->
+ %% somehow the x-invalid-headers header is corrupt
+ Invalid = [{?INVALID_HEADERS_KEY, array, [Other]}],
+ set_invalid_header(Name, Value, set_invalid(Invalid, Headers))
+ end.
+
+set_invalid(NewHdr, Headers) ->
+ rabbit_misc:set_table_value(Headers, ?INVALID_HEADERS_KEY, table, NewHdr).
+
+%% Prepend Value to the invalid-values array for Name inside the
+%% existing ?INVALID_HEADERS_KEY table.
+update_invalid(Name, Value, ExistingHdr, Header) ->
+ Values = case rabbit_misc:table_lookup(ExistingHdr, Name) of
+ undefined -> [Value];
+ {array, Prior} -> [Value | Prior]
+ end,
+ NewHdr = rabbit_misc:set_table_value(ExistingHdr, Name, array, Values),
+ set_invalid(NewHdr, Header).
+
+%% Return the decoded headers table of Content (may be 'undefined').
+extract_headers(Content) ->
+ #content{properties = #'P_basic'{headers = Headers}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ Headers.
+
+%% Apply F to Content's headers, clearing the cached encoded form so
+%% the new headers take effect when re-encoded.
+map_headers(F, Content) ->
+ Content1 = rabbit_binary_parser:ensure_content_decoded(Content),
+ #content{properties = #'P_basic'{headers = Headers} = Props} = Content1,
+ Headers1 = F(Headers),
+ rabbit_binary_generator:clear_encoded_content(
+ Content1#content{properties = Props#'P_basic'{headers = Headers1}}).
+
+%% 1-based position of Item in List, or 0 when Item is absent.
+indexof(List, Item) -> indexof(List, Item, 1).
+
+indexof([Item | _], Item, Pos) -> Pos;
+indexof([_ | Tail], Item, Pos) -> indexof(Tail, Item, Pos + 1);
+indexof([], _Item, _Pos) -> 0.
+
+%% Map AMQP delivery_mode (1 = transient, 2 = persistent, undefined
+%% defaults to transient) to a boolean; unknown modes are rejected via
+%% a thrown {error, _} (caught in message/3).
+is_message_persistent(#content{properties = #'P_basic'{
+ delivery_mode = Mode}}) ->
+ case Mode of
+ 1 -> false;
+ 2 -> true;
+ undefined -> false;
+ Other -> throw({error, {delivery_mode_unknown, Other}})
+ end.
+
+%% Extract CC routes from headers
+header_routes(undefined) ->
+ [];
+header_routes(HeadersTable) ->
+ lists:append(
+ [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of
+ {array, Routes} -> [Route || {longstr, Route} <- Routes];
+ undefined -> [];
+ {Type, _Val} -> throw({error, {unacceptable_type_in_header,
+ binary_to_list(HeaderKey), Type}})
+ end || HeaderKey <- ?ROUTING_HEADERS]).
+
+%% Parse the 'expiration' property into an integer (checked by
+%% rabbit_misc:check_expiry/1); 'undefined' passes through. A
+%% non-numeric value or trailing garbage yields an error tuple.
+parse_expiration(#'P_basic'{expiration = undefined}) ->
+ {ok, undefined};
+parse_expiration(#'P_basic'{expiration = Expiration}) ->
+ case string:to_integer(binary_to_list(Expiration)) of
+ {error, no_integer} = E ->
+ E;
+ {N, ""} ->
+ case rabbit_misc:check_expiry(N) of
+ ok -> {ok, N};
+ E = {error, _} -> E
+ end;
+ {_, S} ->
+ {error, {leftover_string, S}}
+ end.
+
+%% Payload size in bytes of a content or message.
+msg_size(#content{payload_fragments_rev = PFR}) -> iolist_size(PFR);
+msg_size(#basic_message{content = Content}) -> msg_size(Content).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_binary_generator).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([build_simple_method_frame/3,
+ build_simple_content_frames/4,
+ build_heartbeat_frame/0]).
+-export([generate_table/1]).
+-export([check_empty_frame_size/0]).
+-export([ensure_content_encoded/2, clear_encoded_content/1]).
+-export([map_exception/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(frame() :: [binary()]).
+
+-spec(build_simple_method_frame/3 ::
+ (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(),
+ rabbit_types:protocol())
+ -> frame()).
+-spec(build_simple_content_frames/4 ::
+ (rabbit_channel:channel_number(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol())
+ -> [frame()]).
+-spec(build_heartbeat_frame/0 :: () -> frame()).
+-spec(generate_table/1 :: (rabbit_framing:amqp_table()) -> binary()).
+-spec(check_empty_frame_size/0 :: () -> 'ok').
+-spec(ensure_content_encoded/2 ::
+ (rabbit_types:content(), rabbit_types:protocol()) ->
+ rabbit_types:encoded_content()).
+-spec(clear_encoded_content/1 ::
+ (rabbit_types:content()) -> rabbit_types:unencoded_content()).
+-spec(map_exception/3 :: (rabbit_channel:channel_number(),
+ rabbit_types:amqp_error() | any(),
+ rabbit_types:protocol()) ->
+ {rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record()}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Encode a method record into a single AMQP method frame (type 1).
+build_simple_method_frame(ChannelInt, MethodRecord, Protocol) ->
+ MethodFields = Protocol:encode_method_fields(MethodRecord),
+ MethodName = rabbit_misc:method_record_type(MethodRecord),
+ {ClassId, MethodId} = Protocol:method_id(MethodName),
+ create_frame(1, ChannelInt, [<<ClassId:16, MethodId:16>>, MethodFields]).
+
+%% Encode content into a header frame (type 2) followed by body frames,
+%% each no larger than FrameMax bytes on the wire.
+build_simple_content_frames(ChannelInt, Content, FrameMax, Protocol) ->
+ #content{class_id = ClassId,
+ properties_bin = ContentPropertiesBin,
+ payload_fragments_rev = PayloadFragmentsRev} =
+ ensure_content_encoded(Content, Protocol),
+ {BodySize, ContentFrames} =
+ build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
+ HeaderFrame = create_frame(2, ChannelInt,
+ [<<ClassId:16, 0:16, BodySize:64>>,
+ ContentPropertiesBin]),
+ [HeaderFrame | ContentFrames].
+
+%% FrameMax == 0 means "unlimited": the whole payload fits one frame.
+build_content_frames(FragsRev, FrameMax, ChannelInt) ->
+ BodyPayloadMax = if FrameMax == 0 -> iolist_size(FragsRev);
+ true -> FrameMax - ?EMPTY_FRAME_SIZE
+ end,
+ build_content_frames(0, [], BodyPayloadMax, [],
+ lists:reverse(FragsRev), BodyPayloadMax, ChannelInt).
+
+%% Slice the (forward-order) fragment list into body frames (type 3)
+%% of at most BodyPayloadMax bytes each, returning {BodySize, Frames}.
+build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
+ [], _BodyPayloadMax, _ChannelInt) ->
+ {SizeAcc, lists:reverse(FramesAcc)};
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+ Frags, BodyPayloadMax, ChannelInt)
+ when FragSizeRem == 0 orelse Frags == [] ->
+ %% current frame is full, or input is exhausted: flush accumulator
+ Frame = create_frame(3, ChannelInt, lists:reverse(FragAcc)),
+ FrameSize = BodyPayloadMax - FragSizeRem,
+ build_content_frames(SizeAcc + FrameSize, [Frame | FramesAcc],
+ BodyPayloadMax, [], Frags, BodyPayloadMax, ChannelInt);
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+ [Frag | Frags], BodyPayloadMax, ChannelInt) ->
+ %% byte_size/1 is preferred over the ambiguous size/1 for binaries
+ Size = byte_size(Frag),
+ {NewFragSizeRem, NewFragAcc, NewFrags} =
+ if Size == 0 -> {FragSizeRem, FragAcc, Frags};
+ Size =< FragSizeRem -> {FragSizeRem - Size, [Frag | FragAcc], Frags};
+ true -> <<Head:FragSizeRem/binary, Tail/binary>> =
+ Frag,
+ {0, [Head | FragAcc], [Tail | Frags]}
+ end,
+ build_content_frames(SizeAcc, FramesAcc, NewFragSizeRem, NewFragAcc,
+ NewFrags, BodyPayloadMax, ChannelInt).
+
+%% A heartbeat is an empty frame of type ?FRAME_HEARTBEAT on channel 0.
+build_heartbeat_frame() ->
+ create_frame(?FRAME_HEARTBEAT, 0, <<>>).
+
+%% Generic AMQP frame as an iolist: type octet, 16-bit channel, 32-bit
+%% payload size, payload, frame-end octet.
+create_frame(TypeInt, ChannelInt, Payload) ->
+ [<<TypeInt:8, ChannelInt:16, (iolist_size(Payload)):32>>, Payload,
+ ?FRAME_END].
+
+%% table_field_to_binary supports the AMQP 0-8/0-9 standard types, S,
+%% I, D, T and F, as well as the QPid extensions b, d, f, l, s, t, x,
+%% and V.
+table_field_to_binary({FName, T, V}) ->
+ [short_string_to_binary(FName) | field_value_to_binary(T, V)].
+
+%% One clause per wire type: [TypeOctet | EncodedValue] as an iolist.
+field_value_to_binary(longstr, V) -> [$S | long_string_to_binary(V)];
+field_value_to_binary(signedint, V) -> [$I, <<V:32/signed>>];
+field_value_to_binary(decimal, V) -> {Before, After} = V,
+ [$D, Before, <<After:32>>];
+field_value_to_binary(timestamp, V) -> [$T, <<V:64>>];
+field_value_to_binary(table, V) -> [$F | table_to_binary(V)];
+field_value_to_binary(array, V) -> [$A | array_to_binary(V)];
+field_value_to_binary(byte, V) -> [$b, <<V:8/signed>>];
+field_value_to_binary(double, V) -> [$d, <<V:64/float>>];
+field_value_to_binary(float, V) -> [$f, <<V:32/float>>];
+field_value_to_binary(long, V) -> [$l, <<V:64/signed>>];
+field_value_to_binary(short, V) -> [$s, <<V:16/signed>>];
+field_value_to_binary(bool, V) -> [$t, if V -> 1; true -> 0 end];
+field_value_to_binary(binary, V) -> [$x | long_string_to_binary(V)];
+field_value_to_binary(void, _V) -> [$V].
+
+%% Tables and arrays are prefixed with their encoded size in bytes.
+table_to_binary(Table) when is_list(Table) ->
+ BinTable = generate_table_iolist(Table),
+ [<<(iolist_size(BinTable)):32>> | BinTable].
+
+array_to_binary(Array) when is_list(Array) ->
+ BinArray = generate_array_iolist(Array),
+ [<<(iolist_size(BinArray)):32>> | BinArray].
+
+%% As generate_table_iolist/1 but flattened to a single binary.
+generate_table(Table) when is_list(Table) ->
+ list_to_binary(generate_table_iolist(Table)).
+
+generate_table_iolist(Table) ->
+ lists:map(fun table_field_to_binary/1, Table).
+
+generate_array_iolist(Array) ->
+ lists:map(fun ({T, V}) -> field_value_to_binary(T, V) end, Array).
+
+%% shortstr has an 8-bit length prefix; anything >= 256 bytes cannot
+%% be represented, so exit rather than emit a corrupt frame.
+short_string_to_binary(String) ->
+ Len = string_length(String),
+ if Len < 256 -> [<<Len:8>>, String];
+ true -> exit(content_properties_shortstr_overflow)
+ end.
+
+%% longstr has a 32-bit length prefix; no practical limit checked here.
+long_string_to_binary(String) ->
+ Len = string_length(String),
+ [<<Len:32>>, String].
+
+%% byte_size/1 is preferred over the ambiguous size/1 for binaries.
+string_length(String) when is_binary(String) -> byte_size(String);
+string_length(String) -> length(String).
+
+%% Sanity check: an empty body frame must occupy exactly
+%% ?EMPTY_FRAME_SIZE bytes on the wire, otherwise frame-splitting
+%% arithmetic elsewhere would be wrong.
+check_empty_frame_size() ->
+ %% Intended to ensure that EMPTY_FRAME_SIZE is defined correctly.
+ case iolist_size(create_frame(?FRAME_BODY, 0, <<>>)) of
+ ?EMPTY_FRAME_SIZE -> ok;
+ ComputedSize -> exit({incorrect_empty_frame_size,
+ ComputedSize, ?EMPTY_FRAME_SIZE})
+ end.
+
+%% Ensure Content carries properties_bin encoded with Protocol,
+%% re-encoding (decoding first if necessary) when the cached encoding
+%% belongs to a different protocol.
+ensure_content_encoded(Content = #content{properties_bin = PropBin,
+ protocol = Protocol}, Protocol)
+ when PropBin =/= none ->
+ Content;
+ensure_content_encoded(Content = #content{properties = none,
+ properties_bin = PropBin,
+ protocol = Protocol}, Protocol1)
+ when PropBin =/= none ->
+ Props = Protocol:decode_properties(Content#content.class_id, PropBin),
+ Content#content{properties = Props,
+ properties_bin = Protocol1:encode_properties(Props),
+ protocol = Protocol1};
+ensure_content_encoded(Content = #content{properties = Props}, Protocol)
+ when Props =/= none ->
+ Content#content{properties_bin = Protocol:encode_properties(Props),
+ protocol = Protocol}.
+
+%% Drop the cached encoded properties, provided the decoded form
+%% remains available to rebuild them.
+clear_encoded_content(Content = #content{properties_bin = none,
+ protocol = none}) ->
+ Content;
+clear_encoded_content(Content = #content{properties = none}) ->
+ %% Only clear when we can rebuild the properties_bin later in
+ %% accordance to the content record definition comment - maximum
+ %% one of properties and properties_bin can be 'none'
+ Content;
+clear_encoded_content(Content = #content{}) ->
+ Content#content{properties_bin = none, protocol = none}.
+
+%% NB: this function is also used by the Erlang client
+%% Map an exit Reason onto {Channel, CloseMethod}: a "hard" error (or
+%% any error on channel 0) becomes connection.close on channel 0,
+%% otherwise channel.close on the offending channel.
+map_exception(Channel, Reason, Protocol) ->
+ {SuggestedClose, ReplyCode, ReplyText, FailedMethod} =
+ lookup_amqp_exception(Reason, Protocol),
+ {ClassId, MethodId} = case FailedMethod of
+ {_, _} -> FailedMethod;
+ none -> {0, 0};
+ _ -> Protocol:method_id(FailedMethod)
+ end,
+ case SuggestedClose orelse (Channel == 0) of
+ true -> {0, #'connection.close'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ class_id = ClassId,
+ method_id = MethodId}};
+ false -> {Channel, #'channel.close'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ class_id = ClassId,
+ method_id = MethodId}}
+ end.
+
+%% Non-#amqp_error{} exit reasons are logged and reported as
+%% internal_error.
+lookup_amqp_exception(#amqp_error{name = Name,
+ explanation = Expl,
+ method = Method},
+ Protocol) ->
+ {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(Name),
+ ExplBin = amqp_exception_explanation(Text, Expl),
+ {ShouldClose, Code, ExplBin, Method};
+lookup_amqp_exception(Other, Protocol) ->
+ rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
+ {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
+ {ShouldClose, Code, Text, none}.
+
+%% Compose "<Text> - <Explanation>" as a binary, truncated to 255
+%% bytes (the shortstr limit for reply_text) with a "..." marker.
+amqp_exception_explanation(Text, Expl) ->
+ ExplBin = list_to_binary(Expl),
+ CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
+ %% byte_size/1 is preferred over the ambiguous size/1 for binaries
+ if byte_size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
+ true -> CompleteTextBin
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_binary_parser).
+
+-include("rabbit.hrl").
+
+-export([parse_table/1]).
+-export([ensure_content_decoded/1, clear_decoded_content/1]).
+-export([validate_utf8/1, assert_utf8/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(parse_table/1 :: (binary()) -> rabbit_framing:amqp_table()).
+-spec(ensure_content_decoded/1 ::
+ (rabbit_types:content()) -> rabbit_types:decoded_content()).
+-spec(clear_decoded_content/1 ::
+ (rabbit_types:content()) -> rabbit_types:undecoded_content()).
+-spec(validate_utf8/1 :: (binary()) -> 'ok' | 'error').
+-spec(assert_utf8/1 :: (binary()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% parse_table supports the AMQP 0-8/0-9 standard types, S, I, D, T
+%% and F, as well as the QPid extensions b, d, f, l, s, t, x, and V.
+
+%% Decode a field-table binary into [{Name, Type, Value}].
+parse_table(<<>>) ->
+ [];
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary, ValueAndRest/binary>>) ->
+ {Type, Value, Rest} = parse_field_value(ValueAndRest),
+ [{NameString, Type, Value} | parse_table(Rest)].
+
+%% Decode a field-array binary into [{Type, Value}] (no names).
+parse_array(<<>>) ->
+ [];
+parse_array(<<ValueAndRest/binary>>) ->
+ {Type, Value, Rest} = parse_field_value(ValueAndRest),
+ [{Type, Value} | parse_array(Rest)].
+
+%% Decode one typed field value; returns {Type, Value, Rest}. There is
+%% deliberately no catch-all clause: malformed input fails with
+%% function_clause.
+parse_field_value(<<$S, VLen:32/unsigned, V:VLen/binary, R/binary>>) ->
+ {longstr, V, R};
+
+parse_field_value(<<$I, V:32/signed, R/binary>>) ->
+ {signedint, V, R};
+
+parse_field_value(<<$D, Before:8/unsigned, After:32/unsigned, R/binary>>) ->
+ {decimal, {Before, After}, R};
+
+parse_field_value(<<$T, V:64/unsigned, R/binary>>) ->
+ {timestamp, V, R};
+
+parse_field_value(<<$F, VLen:32/unsigned, Table:VLen/binary, R/binary>>) ->
+ {table, parse_table(Table), R};
+
+parse_field_value(<<$A, VLen:32/unsigned, Array:VLen/binary, R/binary>>) ->
+ {array, parse_array(Array), R};
+
+parse_field_value(<<$b, V:8/signed, R/binary>>) -> {byte, V, R};
+parse_field_value(<<$d, V:64/float, R/binary>>) -> {double, V, R};
+parse_field_value(<<$f, V:32/float, R/binary>>) -> {float, V, R};
+parse_field_value(<<$l, V:64/signed, R/binary>>) -> {long, V, R};
+parse_field_value(<<$s, V:16/signed, R/binary>>) -> {short, V, R};
+parse_field_value(<<$t, V:8/unsigned, R/binary>>) -> {bool, (V /= 0), R};
+
+parse_field_value(<<$x, VLen:32/unsigned, V:VLen/binary, R/binary>>) ->
+ {binary, V, R};
+
+parse_field_value(<<$V, R/binary>>) ->
+ {void, undefined, R}.
+
+%% Ensure Content carries decoded properties, decoding properties_bin
+%% with the content's own protocol if needed.
+ensure_content_decoded(Content = #content{properties = Props})
+ when Props =/= none ->
+ Content;
+ensure_content_decoded(Content = #content{properties_bin = PropBin,
+ protocol = Protocol})
+ when PropBin =/= none ->
+ Content#content{properties = Protocol:decode_properties(
+ Content#content.class_id, PropBin)}.
+
+%% Drop the decoded properties, provided the encoded form remains
+%% available to rebuild them.
+clear_decoded_content(Content = #content{properties = none}) ->
+ Content;
+clear_decoded_content(Content = #content{properties_bin = none}) ->
+ %% Only clear when we can rebuild the properties later in
+ %% accordance to the content record definition comment - maximum
+ %% one of properties and properties_bin can be 'none'
+ Content;
+clear_decoded_content(Content = #content{}) ->
+ Content#content{properties = none}.
+
+%% Raise a frame_error protocol error unless B is valid UTF-8.
+assert_utf8(B) ->
+ case validate_utf8(B) of
+ ok -> ok;
+ error -> rabbit_misc:protocol_error(
+ frame_error, "Malformed UTF-8 in shortstr", [])
+ end.
+
+%% 'ok' if Bin is well-formed UTF-8, 'error' otherwise; delegates the
+%% actual validation to xmerl_ucs and maps its exit to a return value.
+validate_utf8(Bin) ->
+ try
+ xmerl_ucs:from_utf8(Bin),
+ ok
+ catch exit:{ucs, _} ->
+ error
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_binding).
+-include("rabbit.hrl").
+
+-export([recover/2, exists/1, add/1, add/2, remove/1, remove/2, list/1]).
+-export([list_for_source/1, list_for_destination/1,
+ list_for_source_and_destination/2]).
+-export([new_deletions/0, combine_deletions/2, add_deletion/3,
+ process_deletions/1]).
+-export([info_keys/0, info/1, info/2, info_all/1, info_all/2]).
+%% these must all be run inside a mnesia tx
+-export([has_for_source/1, remove_for_source/1,
+ remove_for_destination/1, remove_transient_for_destination/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([key/0, deletions/0]).
+
+-type(key() :: binary()).
+
+-type(bind_errors() :: rabbit_types:error(
+ {'resources_missing',
+ [{'not_found', (rabbit_types:binding_source() |
+ rabbit_types:binding_destination())} |
+ {'absent', rabbit_types:amqqueue()}]})).
+
+-type(bind_ok_or_error() :: 'ok' | bind_errors() |
+ rabbit_types:error(
+ 'binding_not_found' |
+ {'binding_invalid', string(), [any()]})).
+-type(bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error())).
+-type(inner_fun() ::
+ fun((rabbit_types:exchange(),
+ rabbit_types:exchange() | rabbit_types:amqqueue()) ->
+ rabbit_types:ok_or_error(rabbit_types:amqp_error()))).
+-type(bindings() :: [rabbit_types:binding()]).
+
+-opaque(deletions() :: dict()).
+
+-spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
+ 'ok').
+-spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()).
+-spec(add/1 :: (rabbit_types:binding()) -> bind_res()).
+-spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()).
+-spec(remove/1 :: (rabbit_types:binding()) -> bind_res()).
+-spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()).
+-spec(list/1 :: (rabbit_types:vhost()) -> bindings()).
+-spec(list_for_source/1 ::
+ (rabbit_types:binding_source()) -> bindings()).
+-spec(list_for_destination/1 ::
+ (rabbit_types:binding_destination()) -> bindings()).
+-spec(list_for_source_and_destination/2 ::
+ (rabbit_types:binding_source(), rabbit_types:binding_destination()) ->
+ bindings()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/1 :: (rabbit_types:binding()) -> rabbit_types:infos()).
+-spec(info/2 :: (rabbit_types:binding(), rabbit_types:info_keys()) ->
+ rabbit_types:infos()).
+-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
+ -> [rabbit_types:infos()]).
+-spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()).
+-spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()).
+-spec(remove_for_destination/1 ::
+ (rabbit_types:binding_destination()) -> deletions()).
+-spec(remove_transient_for_destination/1 ::
+ (rabbit_types:binding_destination()) -> deletions()).
+-spec(process_deletions/1 :: (deletions()) -> rabbit_misc:thunk('ok')).
+-spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()).
+-spec(add_deletion/3 :: (rabbit_exchange:name(),
+ {'undefined' | rabbit_types:exchange(),
+ 'deleted' | 'not_deleted',
+ bindings()}, deletions()) -> deletions()).
+-spec(new_deletions/0 :: () -> deletions()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS, [source_name, source_kind,
+ destination_name, destination_kind,
+ routing_key, arguments]).
+
+%% Recovery: copy durable routes into the semi-durable table where
+%% missing, then (in parallel, via the worker pool) re-run add_binding
+%% callbacks for semi-durable routes whose destination is recovering.
+recover(XNames, QNames) ->
+ rabbit_misc:table_filter(
+ fun (Route) ->
+ mnesia:read({rabbit_semi_durable_route, Route}) =:= []
+ end,
+ fun (Route, true) ->
+ ok = mnesia:write(rabbit_semi_durable_route, Route, write);
+ (_Route, false) ->
+ ok
+ end, rabbit_durable_route),
+ XNameSet = sets:from_list(XNames),
+ QNameSet = sets:from_list(QNames),
+ SelectSet = fun (#resource{kind = exchange}) -> XNameSet;
+ (#resource{kind = queue}) -> QNameSet
+ end,
+ {ok, Gatherer} = gatherer:start_link(),
+ [recover_semi_durable_route(Gatherer, R, SelectSet(Dst)) ||
+ R = #route{binding = #binding{destination = Dst}} <-
+ rabbit_misc:dirty_read_all(rabbit_semi_durable_route)],
+ %% block until every forked recovery job has finished
+ empty = gatherer:out(Gatherer),
+ ok = gatherer:stop(Gatherer),
+ ok.
+
+%% Fork a worker-pool job to recover one semi-durable route, but only
+%% when its destination is in the set being recovered.
+recover_semi_durable_route(Gatherer, R = #route{binding = B}, ToRecover) ->
+ #binding{source = Src, destination = Dst} = B,
+ case sets:is_element(Dst, ToRecover) of
+ true -> {ok, X} = rabbit_exchange:lookup(Src),
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ recover_semi_durable_route_txn(R, X),
+ gatherer:finish(Gatherer)
+ end);
+ false -> ok
+ end.
+
+%% Inside a tx: re-create the transient route (if the semi-durable one
+%% still exists) and run the exchange's add_binding callback both at
+%% transaction time and post-commit.
+recover_semi_durable_route_txn(R = #route{binding = B}, X) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:match_object(rabbit_semi_durable_route, R, read) of
+ [] -> no_recover;
+ _ -> ok = sync_transient_route(R, fun mnesia:write/3),
+ rabbit_exchange:serial(X)
+ end
+ end,
+ fun (no_recover, _) -> ok;
+ (_Serial, true) -> x_callback(transaction, X, add_binding, B);
+ (Serial, false) -> x_callback(Serial, X, add_binding, B)
+ end).
+
+%% Does Binding exist in the transient route table?
+exists(Binding) ->
+ binding_action(
+ Binding, fun (_Src, _Dst, B) ->
+ rabbit_misc:const(mnesia:read({rabbit_route, B}) /= [])
+ end, fun not_found_or_absent_errs/1).
+
+add(Binding) -> add(Binding, fun (_Src, _Dst) -> ok end).
+
+%% Add a binding after exchange-level validation and the caller's
+%% InnerFun check; a no-op if the binding already exists.
+add(Binding, InnerFun) ->
+ binding_action(
+ Binding,
+ fun (Src, Dst, B) ->
+ case rabbit_exchange:validate_binding(Src, B) of
+ ok ->
+ %% this argument is used to check queue exclusivity;
+ %% in general, we want to fail on that in preference to
+ %% anything else
+ case InnerFun(Src, Dst) of
+ ok ->
+ case mnesia:read({rabbit_route, B}) of
+ [] -> add(Src, Dst, B);
+ [_] -> fun () -> ok end
+ end;
+ {error, _} = Err ->
+ rabbit_misc:const(Err)
+ end;
+ {error, _} = Err ->
+ rabbit_misc:const(Err)
+ end
+ end, fun not_found_or_absent_errs/1).
+
+%% Write the route into the tables appropriate for Src/Dst durability,
+%% run the transaction-time callback and return a post-commit thunk.
+add(Src, Dst, B) ->
+ [SrcDurable, DstDurable] = [durable(E) || E <- [Src, Dst]],
+ case (SrcDurable andalso DstDurable andalso
+ mnesia:read({rabbit_durable_route, B}) =/= []) of
+ false -> ok = sync_route(#route{binding = B}, SrcDurable, DstDurable,
+ fun mnesia:write/3),
+ x_callback(transaction, Src, add_binding, B),
+ Serial = rabbit_exchange:serial(Src),
+ fun () ->
+ x_callback(Serial, Src, add_binding, B),
+ ok = rabbit_event:notify(binding_created, info(B))
+ end;
+ true -> rabbit_misc:const({error, binding_not_found})
+ end.
+
+remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end).
+
+%% Remove a binding: 'ok' when it exists in neither table, an error
+%% when only the durable route remains.
+remove(Binding, InnerFun) ->
+ binding_action(
+ Binding,
+ fun (Src, Dst, B) ->
+ case mnesia:read(rabbit_route, B, write) of
+ [] -> case mnesia:read(rabbit_durable_route, B, write) of
+ [] -> rabbit_misc:const(ok);
+ _ -> rabbit_misc:const({error, binding_not_found})
+ end;
+ _ -> case InnerFun(Src, Dst) of
+ ok -> remove(Src, Dst, B);
+ {error, _} = Err -> rabbit_misc:const(Err)
+ end
+ end
+ end, fun absent_errs_only/1).
+
+%% Delete the route from all tables and process resulting auto-deletes.
+remove(Src, Dst, B) ->
+ ok = sync_route(#route{binding = B}, durable(Src), durable(Dst),
+ fun mnesia:delete_object/3),
+ Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()),
+ process_deletions(Deletions).
+
+%% All bindings in a vhost (dirty read).
+list(VHostPath) ->
+ VHostResource = rabbit_misc:r(VHostPath, '_'),
+ Route = #route{binding = #binding{source = VHostResource,
+ destination = VHostResource,
+ _ = '_'},
+ _ = '_'},
+ [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route,
+ Route)].
+
+list_for_source(SrcName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{source = SrcName, _ = '_'}},
+ [B || #route{binding = B}
+ <- mnesia:match_object(rabbit_route, Route, read)]
+ end).
+
+%% Uses the reverse-route table, which is keyed on destination.
+list_for_destination(DstName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{destination = DstName,
+ _ = '_'}},
+ [reverse_binding(B) ||
+ #reverse_route{reverse_binding = B} <-
+ mnesia:match_object(rabbit_reverse_route,
+ reverse_route(Route), read)]
+ end).
+
+list_for_source_and_destination(SrcName, DstName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{source = SrcName,
+ destination = DstName,
+ _ = '_'}},
+ [B || #route{binding = B} <- mnesia:match_object(rabbit_route,
+ Route, read)]
+ end).
+
+info_keys() -> ?INFO_KEYS.
+
+%% Apply F to every binding in the vhost.
+map(VHostPath, F) ->
+ %% TODO: there is scope for optimisation here, e.g. using a
+ %% cursor, parallelising the function invocation
+ lists:map(F, list(VHostPath)).
+
+infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items].
+
+%% One clause per info key; an unknown key is a caller error.
+i(source_name, #binding{source = SrcName}) -> SrcName#resource.name;
+i(source_kind, #binding{source = SrcName}) -> SrcName#resource.kind;
+i(destination_name, #binding{destination = DstName}) -> DstName#resource.name;
+i(destination_kind, #binding{destination = DstName}) -> DstName#resource.kind;
+i(routing_key, #binding{key = RoutingKey}) -> RoutingKey;
+i(arguments, #binding{args = Arguments}) -> Arguments;
+i(Item, _) -> throw({bad_argument, Item}).
+
+info(B = #binding{}) -> infos(?INFO_KEYS, B).
+
+info(B = #binding{}, Items) -> infos(Items, B).
+
+info_all(VHostPath) -> map(VHostPath, fun (B) -> info(B) end).
+
+info_all(VHostPath, Items) -> map(VHostPath, fun (B) -> info(B, Items) end).
+
%% True iff at least one route (transient or semi-durable) has
%% SrcName as its source.
has_for_source(SrcName) ->
    Match = #route{binding = #binding{source = SrcName, _ = '_'}},
    %% we need to check for semi-durable routes (which subsumes
    %% durable routes) here too in case a bunch of routes to durable
    %% queues have been removed temporarily as a result of a node
    %% failure
    contains(rabbit_route, Match) orelse
    contains(rabbit_semi_durable_route, Match).
+
%% Remove every route whose source is SrcName, returning the deleted
%% bindings. Routes may appear in both the transient and the
%% semi-durable tables, hence the usort of the concatenation.
remove_for_source(SrcName) ->
    lock_route_tables(),
    MatchHead = #route{binding = #binding{source = SrcName, _ = '_'}},
    Transient = mnesia:match_object(rabbit_route, MatchHead, write),
    SemiDurable = mnesia:match_object(rabbit_semi_durable_route,
                                      MatchHead, write),
    remove_routes(lists:usort(Transient ++ SemiDurable)).
+
%% Remove all routes terminating at DstName.
remove_for_destination(DstName) ->
    remove_for_destination(DstName, fun remove_routes/1).

%% As above but removes only the transient routes.
remove_transient_for_destination(DstName) ->
    remove_for_destination(DstName, fun remove_transient_routes/1).

%%----------------------------------------------------------------------------

%% Durability flag of either kind of binding endpoint.
durable(#exchange{durable = D}) -> D;
durable(#amqqueue{durable = D}) -> D.
+
%% Shared add/remove plumbing: resolve both endpoints inside a
%% transaction, normalise the binding arguments by sorting, and hand
%% control to Fun(Src, Dst, Binding); ErrFun handles missing endpoints.
binding_action(Binding = #binding{source      = SrcName,
                                  destination = DstName,
                                  args        = Arguments}, Fun, ErrFun) ->
    call_with_source_and_destination(
      SrcName, DstName,
      fun (Src, Dst) ->
              Sorted = rabbit_misc:sort_field_table(Arguments),
              Fun(Src, Dst, Binding#binding{args = Sorted})
      end, ErrFun).
+
%% 'Guarded' delete: only issue the mnesia delete when the record is
%% actually present, preventing unnecessary writes to the mnesia disk
%% log (each of which costs an fsync).
delete_object(Tab, Record, LockKind) ->
    case mnesia:match_object(Tab, Record, LockKind) of
        [_] -> mnesia:delete_object(Tab, Record, LockKind);
        []  -> ok
    end.
+
%% Apply Fun (a write or delete) to Route in every table implied by
%% the durability of its endpoints, cascading from most durable to
%% least: durable src+dst -> rabbit_durable_route; durable dst ->
%% rabbit_semi_durable_route; always -> the transient tables.
sync_route(Route, true, true, Fun) ->
    ok = Fun(rabbit_durable_route, Route, write),
    sync_route(Route, false, true, Fun);

sync_route(Route, false, true, Fun) ->
    ok = Fun(rabbit_semi_durable_route, Route, write),
    sync_route(Route, false, false, Fun);

sync_route(Route, _SrcDurable, false, Fun) ->
    sync_transient_route(Route, Fun).

%% The transient tables are always touched, in both directions.
sync_transient_route(Route, Fun) ->
    ok = Fun(rabbit_route, Route, write),
    ok = Fun(rabbit_reverse_route, reverse_route(Route), write).
+
%% Read both endpoint records inside a transaction-with-tail and call
%% Fun(Src, Dst) when both exist; otherwise call ErrFun with the list
%% of names that could not be found.
call_with_source_and_destination(SrcName, DstName, Fun, ErrFun) ->
    SrcTable = table_for_resource(SrcName),
    DstTable = table_for_resource(DstName),
    rabbit_misc:execute_mnesia_tx_with_tail(
      fun () ->
              case {mnesia:read({SrcTable, SrcName}),
                    mnesia:read({DstTable, DstName})} of
                  {[Src], [Dst]} -> Fun(Src, Dst);
                  {[], [_] } -> ErrFun([SrcName]);
                  {[_], [] } -> ErrFun([DstName]);
                  {[], [] } -> ErrFun([SrcName, DstName])
              end
      end).
+
%% Classify every missing name and wrap the result in a thunked
%% {error, {resources_missing, ...}}.
not_found_or_absent_errs(Names) ->
    Errs = lists:map(fun not_found_or_absent/1, Names),
    rabbit_misc:const({error, {resources_missing, Errs}}).
+
%% Like not_found_or_absent_errs/1 but reports only 'absent' queues;
%% plain not_found entities are dropped, and with nothing left the
%% result is a thunked 'ok'.
absent_errs_only(Names) ->
    Errs = lists:filtermap(
             fun (Name) ->
                     case not_found_or_absent(Name) of
                         {absent, _Q} = E -> {true, E};
                         _                -> false
                     end
             end, Names),
    case Errs of
        [] -> rabbit_misc:const(ok);
        _  -> rabbit_misc:const({error, {resources_missing, Errs}})
    end.
+
%% The mnesia table holding records for the given resource kind.
table_for_resource(#resource{kind = exchange}) -> rabbit_exchange;
table_for_resource(#resource{kind = queue}) -> rabbit_queue.

%% Classify a missing endpoint: exchanges are simply not_found;
%% queues may instead be reported 'absent' by rabbit_amqqueue.
not_found_or_absent(#resource{kind = exchange} = Name) ->
    {not_found, Name};
not_found_or_absent(#resource{kind = queue} = Name) ->
    case rabbit_amqqueue:not_found_or_absent(Name) of
        not_found -> {not_found, Name};
        {absent, _Q} = R -> R
    end.
+
%% True iff Table holds at least one row matching MatchHead; fetches
%% at most one row at a time via mnesia:select/4.
contains(Table, MatchHead) ->
    First = mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read),
    continue(First).
+
%% Walk a mnesia:select/4 result/continuation until either a row
%% turns up (true) or the table is exhausted (false).
continue({[_|_], _})      -> true;
continue({[], Cont})      -> continue(mnesia:select(Cont));
continue('$end_of_table') -> false.
+
%% For bulk operations we take table locks up front to reduce time
%% complexity: without them we accumulate num_tables *
%% num_bulk_bindings row-level locks, and acquiring each lock costs
%% time proportional to the locks already held -- i.e.
%% O(num_bulk_bindings^2) overall.
%%
%% These must be write locks since we ultimately remove the rows.
%%
%% The downside is that no binding operations other than
%% lookup/routing (which uses dirty ops) can run concurrently.
%% However that is already the case, since the bulk operations use
%% mnesia:match_object with a partial key, which itself takes a
%% table lock.
lock_route_tables() ->
    Tables = [rabbit_route,
              rabbit_reverse_route,
              rabbit_semi_durable_route,
              rabbit_durable_route],
    [mnesia:lock({table, T}, write) || T <- Tables].
+
%% Delete each route from every table it can occupy and return the
%% corresponding bindings.
remove_routes(Routes) ->
    %% This partitioning allows us to suppress unnecessary delete
    %% operations on disk tables, which require an fsync.
    {RamRoutes, DiskRoutes} =
        lists:partition(fun (R) -> mnesia:match_object(
                                     rabbit_durable_route, R, write) == [] end,
                        Routes),
    %% Of course the destination might not really be durable but it's
    %% just as easy to try to delete it from the semi-durable table
    %% than check first
    [ok = sync_route(R, false, true, fun mnesia:delete_object/3) ||
        R <- RamRoutes],
    [ok = sync_route(R, true, true, fun mnesia:delete_object/3) ||
        R <- DiskRoutes],
    [R#route.binding || R <- Routes].
+
%% Delete only the transient (rabbit_route / rabbit_reverse_route)
%% rows for each route, returning the affected bindings. Uses the
%% guarded delete_object/3 to avoid writes for rows already gone.
remove_transient_routes(Routes) ->
    lists:map(fun (R) ->
                      ok = sync_transient_route(R, fun delete_object/3),
                      R#route.binding
              end, Routes).
+
%% Remove all routes ending at DstName using Fun (remove_routes/1 or
%% remove_transient_routes/1), then fold the auto-delete check over
%% the removed bindings, grouped by source exchange.
remove_for_destination(DstName, Fun) ->
    lock_route_tables(),
    Match = reverse_route(
              #route{binding = #binding{destination = DstName, _ = '_'}}),
    Routes = [reverse_route(R) || R <- mnesia:match_object(
                                         rabbit_reverse_route, Match, write)],
    Bindings = Fun(Routes),
    %% keysort groups bindings by source, as group_bindings_fold needs
    group_bindings_fold(fun maybe_auto_delete/3, new_deletions(),
                        lists:keysort(#binding.source, Bindings)).
+
%% Fold Fun(SrcName, Bindings, Acc) over the bindings, one call per
%% distinct source exchange. Requires that its input binding list is
%% sorted in exchange-name order, so that the grouping of bindings
%% (performed by the accumulating group_bindings_fold/5 below) works
%% properly.
group_bindings_fold(_Fun, Acc, []) ->
    Acc;
group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs]) ->
    group_bindings_fold(Fun, SrcName, Acc, Bs, [B]).

%% Accumulate consecutive bindings sharing SrcName into Bindings,
%% then flush the group through Fun and continue with the remainder.
group_bindings_fold(
  Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings) ->
    group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings]);
group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings) ->
    %% Either Removed is [], or its head has a non-matching SrcName.
    group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc), Removed).
+
%% Record Bindings as removed from exchange XName in the Deletions
%% dict, triggering auto-delete of the exchange where applicable.
%% Reads via mnesia:read/1, so must run inside a transaction.
maybe_auto_delete(XName, Bindings, Deletions) ->
    {Entry, Deletions1} =
        case mnesia:read({rabbit_exchange, XName}) of
            %% exchange already gone: record bindings with no record
            [] -> {{undefined, not_deleted, Bindings}, Deletions};
            [X] -> case rabbit_exchange:maybe_auto_delete(X) of
                       not_deleted ->
                           {{X, not_deleted, Bindings}, Deletions};
                       {deleted, Deletions2} ->
                           %% deleting X removed further bindings too
                           {{X, deleted, Bindings},
                            combine_deletions(Deletions, Deletions2)}
                   end
        end,
    add_deletion(XName, Entry, Deletions1).
+
%% Bijective conversion between #route and #reverse_route (the latter
%% is stored in rabbit_reverse_route for destination-keyed lookups).
reverse_route(#route{binding = Binding}) ->
    #reverse_route{reverse_binding = reverse_binding(Binding)};

reverse_route(#reverse_route{reverse_binding = Binding}) ->
    #route{binding = reverse_binding(Binding)}.

%% Field-for-field swap between #binding and #reverse_binding.
reverse_binding(#reverse_binding{source = SrcName,
                                 destination = DstName,
                                 key = Key,
                                 args = Args}) ->
    #binding{source = SrcName,
             destination = DstName,
             key = Key,
             args = Args};

reverse_binding(#binding{source = SrcName,
                         destination = DstName,
                         key = Key,
                         args = Args}) ->
    #reverse_binding{source = SrcName,
                     destination = DstName,
                     key = Key,
                     args = Args}.
+
+%% ----------------------------------------------------------------------------
+%% Binding / exchange deletion abstraction API
+%% ----------------------------------------------------------------------------
+
%% Return whichever of the last two arguments differs from NotThis;
%% when both equal NotThis that value is returned, and when neither
%% does they must be equal to each other.
anything_but(NotThis, A, B) when A =:= NotThis, B =:= NotThis -> NotThis;
anything_but(NotThis, A, B) when A =:= NotThis                -> B;
anything_but(NotThis, A, B) when B =:= NotThis                -> A;
anything_but(_NotThis, A, A)                                  -> A.
+
%% Deletions dict: exchange name -> {X | undefined,
%%                                   deleted | not_deleted,
%%                                   Bindings (possibly nested lists)}.
new_deletions() -> dict:new().

%% Insert one entry, merging with any existing entry for XName.
add_deletion(XName, Entry, Deletions) ->
    dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end,
                Entry, Deletions).

%% Union of two deletions dicts; entries present on both sides are
%% merged with merge_entry/2.
combine_deletions(Deletions1, Deletions2) ->
    dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end,
               Deletions1, Deletions2).
+
%% Merge two deletion entries: prefer a known exchange record over
%% 'undefined', 'deleted' over 'not_deleted', and nest the binding
%% lists (they are flattened later, in process_deletions/1).
merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) ->
    {anything_but(undefined, X1, X2),
     anything_but(not_deleted, Deleted1, Deleted2),
     [Bindings1 | Bindings2]}.
+
%% Two-phase processing of a deletions dict. Inside the current
%% transaction: flatten each entry's bindings and run the
%% 'transaction' exchange callbacks. The returned thunk runs after
%% commit: it emits the deletion events and invokes the post-commit
%% callbacks -- with a serial for exchanges that survive, and 'none'
%% for deleted ones.
process_deletions(Deletions) ->
    AugmentedDeletions =
        dict:map(fun (_XName, {X, deleted, Bindings}) ->
                         Bs = lists:flatten(Bindings),
                         x_callback(transaction, X, delete, Bs),
                         {X, deleted, Bs, none};
                     (_XName, {X, not_deleted, Bindings}) ->
                         Bs = lists:flatten(Bindings),
                         x_callback(transaction, X, remove_bindings, Bs),
                         {X, not_deleted, Bs, rabbit_exchange:serial(X)}
                 end, Deletions),
    fun() ->
            dict:fold(fun (XName, {X, deleted, Bs, Serial}, ok) ->
                              ok = rabbit_event:notify(
                                     exchange_deleted, [{name, XName}]),
                              del_notify(Bs),
                              x_callback(Serial, X, delete, Bs);
                          (_XName, {X, not_deleted, Bs, Serial}, ok) ->
                              del_notify(Bs),
                              x_callback(Serial, X, remove_bindings, Bs)
                      end, ok, AugmentedDeletions)
    end.
+
+del_notify(Bs) -> [rabbit_event:notify(binding_deleted, info(B)) || B <- Bs].
+
%% Delegate the exchange callback F (delete / remove_bindings) for X
%% to rabbit_exchange:callback/4 with the given serial.
x_callback(Serial, X, F, Bs) ->
    ok = rabbit_exchange:callback(X, F, Serial, [X, Bs]).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_channel).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/11, do/2, do/3, do_flow/3, flush/1, shutdown/1]).
+-export([send_command/2, deliver/4, send_credit_reply/2, send_drained/2]).
+-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]).
+-export([refresh_config_local/0, ready_for_close/1]).
+-export([force_event_refresh/1]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+%% Internal
+-export([list_local/0]).
+
%% Complete state of a channel process. Notable fields (values as
%% seen elsewhere in this module):
%%   state    - starting | running | closing
%%   tx       - none | {MsgsQueue, Acks} | committing | failed
%%   next_tag - delivery tag assigned to the next delivered message
%%   unconfirmed / mandatory - dtrees of publishes awaiting
%%                             confirm / mandatory notification
-record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid,
             conn_name, limiter, tx, next_tag, unacked_message_q, user,
             virtual_host, most_recently_declared_queue,
             queue_names, queue_monitors, consumer_mapping,
             queue_consumers, delivering_queues,
             queue_collector_pid, stats_timer, confirm_enabled, publish_seqno,
             unconfirmed, confirmed, mandatory, capabilities, trace_state,
             consumer_prefetch}).

%% Cap on the per-process permission memoisation cache; see
%% check_resource_access/3.
-define(MAX_PERMISSION_CACHE_SIZE, 12).

%% Items included in fine-grained channel stats events.
-define(STATISTICS_KEYS,
        [pid,
         transactional,
         confirm,
         consumer_count,
         messages_unacknowledged,
         messages_unconfirmed,
         messages_uncommitted,
         acks_uncommitted,
         prefetch_count,
         global_prefetch_count,
         state]).

%% Items included in the channel_created event.
-define(CREATION_EVENT_KEYS,
        [pid,
         name,
         connection,
         number,
         user,
         vhost]).

%% NOTE: '++' and '--' are right-associative with equal precedence,
%% so this reads CREATION ++ (STATISTICS -- [pid]); 'pid' therefore
%% appears exactly once, contributed by the creation keys.
-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).

%% Record stats only when the event level is 'fine'.
-define(INCR_STATS(Incs, Measure, State),
        case rabbit_event:stats_level(State, #ch.stats_timer) of
            fine -> incr_stats(Incs, Measure);
            _ -> ok
        end).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([channel_number/0]).
+
+-type(channel_number() :: non_neg_integer()).
+
+-spec(start_link/11 ::
+ (channel_number(), pid(), pid(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid(), pid()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:maybe(rabbit_types:content())) -> 'ok').
+-spec(do_flow/3 :: (pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:maybe(rabbit_types:content())) -> 'ok').
+-spec(flush/1 :: (pid()) -> 'ok').
+-spec(shutdown/1 :: (pid()) -> 'ok').
+-spec(send_command/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(deliver/4 ::
+ (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg())
+ -> 'ok').
+-spec(send_credit_reply/2 :: (pid(), non_neg_integer()) -> 'ok').
+-spec(send_drained/2 :: (pid(), [{rabbit_types:ctag(), non_neg_integer()}])
+ -> 'ok').
+-spec(list/0 :: () -> [pid()]).
+-spec(list_local/0 :: () -> [pid()]).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/1 :: (pid()) -> rabbit_types:infos()).
+-spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
+-spec(info_all/0 :: () -> [rabbit_types:infos()]).
+-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
+-spec(refresh_config_local/0 :: () -> 'ok').
+-spec(ready_for_close/1 :: (pid()) -> 'ok').
+-spec(force_event_refresh/1 :: (reference()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% Start a channel process linked to the caller; the argument list is
%% handed verbatim to init/1.
start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
           VHost, Capabilities, CollectorPid, Limiter) ->
    Args = [Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol,
            User, VHost, Capabilities, CollectorPid, Limiter],
    gen_server2:start_link(?MODULE, Args, []).
+
%% Submit an AMQP method with no content to the channel.
do(Pid, Method) ->
    do(Pid, Method, none).

%% Submit an AMQP method plus optional content, without flow control.
do(Pid, Method, Content) ->
    gen_server2:cast(Pid, {method, Method, Content, noflow}).

%% As do/3, but registers with credit_flow first; the channel acks
%% the credit when it dequeues the method (see handle_cast).
do_flow(Pid, Method, Content) ->
    credit_flow:send(Pid),
    gen_server2:cast(Pid, {method, Method, Content, flow}).

%% Synchronous no-op: returns once all previously cast messages have
%% been processed.
flush(Pid) ->
    gen_server2:call(Pid, flush, infinity).

%% Ask the channel to terminate; it flushes the writer first.
shutdown(Pid) ->
    gen_server2:cast(Pid, terminate).

%% Forward a protocol command to the client via this channel.
send_command(Pid, Msg) ->
    gen_server2:cast(Pid, {command, Msg}).

%% Deliver a message to a consumer on this channel.
deliver(Pid, ConsumerTag, AckRequired, Msg) ->
    gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}).

%% Send a basic.credit_ok carrying the queue length.
send_credit_reply(Pid, Len) ->
    gen_server2:cast(Pid, {send_credit_reply, Len}).

%% Send basic.credit_drained for each {CTag, CreditDrained} pair.
send_drained(Pid, CTagCredit) ->
    gen_server2:cast(Pid, {send_drained, CTagCredit}).

%% Channel processes on every running cluster node.
list() ->
    rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
                                     rabbit_channel, list_local, []).

%% Channel processes on this node only.
list_local() ->
    pg_local:get_members(rabbit_channels).
+
%% Names of the items produced by info/1,2.
info_keys() -> ?INFO_KEYS.

%% All info items for one channel (synchronous).
info(Pid) ->
    gen_server2:call(Pid, info, infinity).

%% Selected info items; unknown items are thrown back to the caller.
info(Pid, Items) ->
    case gen_server2:call(Pid, {info, Items}, infinity) of
        {ok, Res} -> Res;
        {error, Error} -> throw(Error)
    end.

%% Info for every channel in the cluster; channels that exit while
%% being queried are filtered out.
info_all() ->
    rabbit_misc:filter_exit_map(fun (C) -> info(C) end, list()).

info_all(Items) ->
    rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).

%% Make all local channels re-initialise their tracing state.
refresh_config_local() ->
    rabbit_misc:upmap(
      fun (C) -> gen_server2:call(C, refresh_config, infinity) end,
      list_local()),
    ok.

%% Second half of the reader/channel close handshake; see
%% handle_cast(ready_for_close, ...).
ready_for_close(Pid) ->
    gen_server2:cast(Pid, ready_for_close).

%% Ask every channel to re-emit its channel_created event, tagged
%% with Ref.
force_event_refresh(Ref) ->
    [gen_server2:cast(C, {force_event_refresh, Ref}) || C <- list()],
    ok.
+
+%%---------------------------------------------------------------------------
+
%% gen_server2 init: trap exits (so terminate/2 runs and queues get
%% notified), join the local channel registry, build the initial
%% state, announce creation and arm the stats timer.
init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
      Capabilities, CollectorPid, LimiterPid]) ->
    process_flag(trap_exit, true),
    ?store_proc_name({ConnName, Channel}),
    ok = pg_local:join(rabbit_channels, self()),
    State = #ch{state                   = starting,
                protocol                = Protocol,
                channel                 = Channel,
                reader_pid              = ReaderPid,
                writer_pid              = WriterPid,
                conn_pid                = ConnPid,
                conn_name               = ConnName,
                limiter                 = rabbit_limiter:new(LimiterPid),
                tx                      = none,
                next_tag                = 1,
                unacked_message_q       = queue:new(),
                user                    = User,
                virtual_host            = VHost,
                most_recently_declared_queue = <<>>,
                queue_names             = dict:new(),
                queue_monitors          = pmon:new(),
                consumer_mapping        = dict:new(),
                queue_consumers         = dict:new(),
                delivering_queues       = sets:new(),
                queue_collector_pid     = CollectorPid,
                confirm_enabled         = false,
                publish_seqno           = 1,
                unconfirmed             = dtree:empty(),
                confirmed               = [],
                mandatory               = dtree:empty(),
                capabilities            = Capabilities,
                trace_state             = rabbit_trace:init(VHost),
                consumer_prefetch       = 0},
    State1 = rabbit_event:init_stats_timer(State, #ch.stats_timer),
    rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State1)),
    %% emit an initial stats event if fine-grained stats are enabled
    rabbit_event:if_enabled(State1, #ch.stats_timer,
                            fun() -> emit_stats(State1) end),
    {ok, State1, hibernate,
     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
%% Prioritise info requests so monitoring stays responsive even when
%% the channel is busy.
prioritise_call(info,           _From, _Len, _State) -> 9;
prioritise_call({info, _Items}, _From, _Len, _State) -> 9;
prioritise_call(_Msg,           _From, _Len, _State) -> 0.
+
%% Prioritise confirm and mandatory notifications ahead of ordinary
%% casts.
%%
%% Fix: the mandatory_received cast is a 2-tuple ({mandatory_received,
%% MsgSeqNo} -- see the matching handle_cast clause below), but the
%% old pattern here was the 3-tuple {mandatory_received, _MsgSeqNo,
%% _QPid}, which never matched, so those casts silently got default
%% priority 0.
prioritise_cast(Msg, _Len, _State) ->
    case Msg of
        {confirm, _MsgSeqNos, _QPid}    -> 5;
        {mandatory_received, _MsgSeqNo} -> 5;
        _                               -> 0
    end.
+
%% Stats emission jumps ahead of ordinary info messages.
prioritise_info(emit_stats, _Len, _State) -> 7;
prioritise_info(_Msg,       _Len, _State) -> 0.
+
%% flush is a synchronous no-op: by the time we reply, every earlier
%% cast has been processed.
handle_call(flush, _From, State) ->
    reply(ok, State);

handle_call(info, _From, State) ->
    reply(infos(?INFO_KEYS, State), State);

%% i/2 throws {bad_argument, Item} on unknown items; convert that to
%% an {error, _} reply.
handle_call({info, Items}, _From, State) ->
    try
        reply({ok, infos(Items, State)}, State)
    catch Error -> reply({error, Error}, State)
    end;

%% Re-initialise the per-vhost tracing state.
handle_call(refresh_config, _From, State = #ch{virtual_host = VHost}) ->
    reply(ok, State#ch{trace_state = rabbit_trace:init(VHost)});

%% Unknown calls get no reply, so the caller's gen_server2:call will
%% eventually time out.
handle_call(_Request, _From, State) ->
    noreply(State).
+
%% Main dispatch for AMQP methods arriving from the reader. Ack the
%% flow credit (if the method came via do_flow/3), expand AMQP
%% shortcuts, run interceptors, then handle the method. AMQP errors
%% raised via exit become channel/connection errors; anything else
%% crashes the channel with a stacktrace.
handle_cast({method, Method, Content, Flow},
            State = #ch{reader_pid = Reader,
                        virtual_host = VHost}) ->
    case Flow of
        flow -> credit_flow:ack(Reader);
        noflow -> ok
    end,
    try handle_method(rabbit_channel_interceptor:intercept_method(
                        expand_shortcuts(Method, State), VHost),
                      Content, State) of
        {reply, Reply, NewState} ->
            ok = send(Reply, NewState),
            noreply(NewState);
        {noreply, NewState} ->
            noreply(NewState);
        stop ->
            {stop, normal, State}
    catch
        exit:Reason = #amqp_error{} ->
            MethodName = rabbit_misc:method_record_type(Method),
            handle_exception(Reason#amqp_error{method = MethodName}, State);
        _:Reason ->
            {stop, {Reason, erlang:get_stacktrace()}, State}
    end;

%% Close handshake: the reader has forgotten about us, so we may now
%% send channel.close_ok and stop.
handle_cast(ready_for_close, State = #ch{state = closing,
                                         writer_pid = WriterPid}) ->
    ok = rabbit_writer:send_command_sync(WriterPid, #'channel.close_ok'{}),
    {stop, normal, State};

handle_cast(terminate, State = #ch{writer_pid = WriterPid}) ->
    ok = rabbit_writer:flush(WriterPid),
    {stop, normal, State};

%% basic.consume_ok also requires us to start monitoring the queue.
handle_cast({command, #'basic.consume_ok'{consumer_tag = CTag} = Msg}, State) ->
    ok = send(Msg, State),
    noreply(consumer_monitor(CTag, State));

handle_cast({command, Msg}, State) ->
    ok = send(Msg, State),
    noreply(State);

%% Deliveries arriving while closing are dropped.
handle_cast({deliver, _CTag, _AckReq, _Msg}, State = #ch{state = closing}) ->
    noreply(State);
%% Send basic.deliver to the client (tagged with next_tag) and record
%% the message as sent so acks can find it.
handle_cast({deliver, ConsumerTag, AckRequired,
             Msg = {_QName, QPid, _MsgId, Redelivered,
                    #basic_message{exchange_name = ExchangeName,
                                   routing_keys = [RoutingKey | _CcRoutes],
                                   content = Content}}},
            State = #ch{writer_pid = WriterPid,
                        next_tag = DeliveryTag}) ->
    ok = rabbit_writer:send_command_and_notify(
           WriterPid, QPid, self(),
           #'basic.deliver'{consumer_tag = ConsumerTag,
                            delivery_tag = DeliveryTag,
                            redelivered = Redelivered,
                            exchange = ExchangeName#resource.name,
                            routing_key = RoutingKey},
           Content),
    noreply(record_sent(ConsumerTag, AckRequired, Msg, State));

handle_cast({send_credit_reply, Len}, State = #ch{writer_pid = WriterPid}) ->
    ok = rabbit_writer:send_command(
           WriterPid, #'basic.credit_ok'{available = Len}),
    noreply(State);

handle_cast({send_drained, CTagCredit}, State = #ch{writer_pid = WriterPid}) ->
    [ok = rabbit_writer:send_command(
            WriterPid, #'basic.credit_drained'{consumer_tag = ConsumerTag,
                                               credit_drained = CreditDrained})
     || {ConsumerTag, CreditDrained} <- CTagCredit],
    noreply(State);

%% Re-announce channel_created, tagged with Ref (for stats refresh).
handle_cast({force_event_refresh, Ref}, State) ->
    rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State),
                        Ref),
    noreply(State);

handle_cast({mandatory_received, MsgSeqNo}, State = #ch{mandatory = Mand}) ->
    %% NB: don't call noreply/1 since we don't want to send confirms.
    noreply_coalesce(State#ch{mandatory = dtree:drop(MsgSeqNo, Mand)});

handle_cast({confirm, MsgSeqNos, QPid}, State = #ch{unconfirmed = UC}) ->
    {MXs, UC1} = dtree:take(MsgSeqNos, QPid, UC),
    %% NB: don't call noreply/1 since we don't want to send confirms.
    noreply_coalesce(record_confirms(MXs, State#ch{unconfirmed = UC1})).
+
%% Flow-control credit arriving from a downstream process.
handle_info({bump_credit, Msg}, State) ->
    credit_flow:handle_bump_msg(Msg),
    noreply(State);

%% Timeout 0 set by noreply_coalesce/1: noreply/1 -> next_state/1
%% sends the coalesced confirms now.
handle_info(timeout, State) ->
    noreply(State);

handle_info(emit_stats, State) ->
    emit_stats(State),
    State1 = rabbit_event:reset_stats_timer(State, #ch.stats_timer),
    %% NB: don't call noreply/1 since we don't want to kick off the
    %% stats timer.
    {noreply, send_confirms(State1), hibernate};

%% A queue we interact with died: fail/forget its publishes,
%% consumers and deliveries, release flow credit and drop it from
%% our name/monitor maps.
handle_info({'DOWN', _MRef, process, QPid, Reason}, State) ->
    State1 = handle_publishing_queue_down(QPid, Reason, State),
    State3 = handle_consuming_queue_down(QPid, State1),
    State4 = handle_delivering_queue_down(QPid, State3),
    credit_flow:peer_down(QPid),
    #ch{queue_names = QNames, queue_monitors = QMons} = State4,
    case dict:find(QPid, QNames) of
        {ok, QName} -> erase_queue_stats(QName);
        error       -> ok
    end,
    noreply(State4#ch{queue_names    = dict:erase(QPid, QNames),
                      queue_monitors = pmon:erase(QPid, QMons)});

%% We trap exits (see init/1); propagate the linked process's reason.
handle_info({'EXIT', _Pid, Reason}, State) ->
    {stop, Reason, State}.
+
%% Before hibernating: drop the permission cache (cheap to rebuild on
%% wake-up), emit a final stats event carrying idle_since, and stop
%% the stats timer.
%% NOTE(review): now/0 is deprecated in modern OTP; presumably this
%% predates erlang:timestamp/0 -- confirm before changing.
handle_pre_hibernate(State) ->
    ok = clear_permission_cache(),
    rabbit_event:if_enabled(
      State, #ch.stats_timer,
      fun () -> emit_stats(State, [{idle_since, now()}]) end),
    {hibernate, rabbit_event:stop_stats_timer(State, #ch.stats_timer)}.
+
%% gen_server2 terminate: notify the queues we interacted with. On a
%% clean shutdown we insist the notification succeeded; on a crash we
%% carry on regardless. Then deregister, emit final stats and the
%% channel_closed event.
terminate(Reason, State) ->
    {Res, _State1} = notify_queues(State),
    case Reason of
        normal -> ok = Res;
        shutdown -> ok = Res;
        {shutdown, _Term} -> ok = Res;
        _ -> ok
    end,
    pg_local:leave(rabbit_channels, self()),
    rabbit_event:if_enabled(State, #ch.stats_timer,
                            fun() -> emit_stats(State) end),
    rabbit_event:notify(channel_closed, [{pid, self()}]).
+
%% No state migration between code versions.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% Pretty-print the mailbox in sys/crash reports.
format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%%---------------------------------------------------------------------------
+
%% Standard callback returns: both send pending confirms and re-arm
%% the stats timer (via next_state/1), then hibernate when idle.
reply(Reply, NewState) -> {reply, Reply, next_state(NewState), hibernate}.

noreply(NewState) -> {noreply, next_state(NewState), hibernate}.

%% Housekeeping applied on every normal state transition.
next_state(State) -> ensure_stats_timer(send_confirms(State)).

%% Like noreply/1 but defers confirm sending: with confirms pending
%% we return timeout 0 so they are coalesced and flushed from the
%% 'timeout' clause of handle_info/2.
noreply_coalesce(State = #ch{confirmed = C}) ->
    Timeout = case C of [] -> hibernate; _ -> 0 end,
    {noreply, ensure_stats_timer(State), Timeout}.

ensure_stats_timer(State) ->
    rabbit_event:ensure_stats_timer(State, #ch.stats_timer, emit_stats).
+
%% For methods carrying a no-wait flag: suppress the reply when the
%% client asked not to wait for it.
return_ok(State, false, Msg)  -> {reply, Msg, State};
return_ok(State, true, _Msg)  -> {noreply, State}.

%% The ok response to send, or 'undefined' under no-wait.
ok_msg(false, Msg)  -> Msg;
ok_msg(true, _Msg)  -> undefined.
+
%% Send a command to the client, unless the channel is closing, in
%% which case it is silently dropped.
send(_Command, #ch{state = closing}) ->
    ok;
send(Command, #ch{writer_pid = WriterPid}) ->
    ok = rabbit_writer:send_command(WriterPid, Command).
+
%% Turn an #amqp_error{} into either a soft channel error (send
%% channel.close, keep the process alive awaiting close_ok) or a hard
%% connection error (hand off to the reader and stop).
handle_exception(Reason, State = #ch{protocol = Protocol,
                                     channel = Channel,
                                     writer_pid = WriterPid,
                                     reader_pid = ReaderPid,
                                     conn_pid = ConnPid}) ->
    %% something bad's happened: notify_queues may not be 'ok'
    {_Result, State1} = notify_queues(State),
    case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
        {Channel, CloseMethod} ->
            rabbit_log:error("connection ~p, channel ~p - soft error:~n~p~n",
                             [ConnPid, Channel, Reason]),
            ok = rabbit_writer:send_command(WriterPid, CloseMethod),
            {noreply, State1};
        %% channel 0 means a connection-level error
        {0, _} ->
            ReaderPid ! {channel_exit, Channel, Reason},
            {stop, normal, State1}
    end.
+
-ifdef(use_specs).
-spec(precondition_failed/1 :: (string()) -> no_return()).
-endif.
%% Raise a precondition_failed protocol error; never returns.
precondition_failed(Format) -> precondition_failed(Format, []).

-ifdef(use_specs).
-spec(precondition_failed/2 :: (string(), [any()]) -> no_return()).
-endif.
%% As above, with format arguments.
precondition_failed(Format, Params) ->
    rabbit_misc:protocol_error(precondition_failed, Format, Params).
+
%% Reply to queue.declare (honouring no-wait), remembering the queue
%% as the channel's most recently declared one for AMQP shortcuts.
return_queue_declare_ok(#resource{name = ActualName},
                        NoWait, MessageCount, ConsumerCount, State) ->
    return_ok(State#ch{most_recently_declared_queue = ActualName}, NoWait,
              #'queue.declare_ok'{queue = ActualName,
                                  message_count = MessageCount,
                                  consumer_count = ConsumerCount}).
+
%% Check User's Perm on Resource, memoising successful checks in the
%% process dictionary (a bounded cache of at most
%% ?MAX_PERMISSION_CACHE_SIZE entries, newest first).
check_resource_access(User, Resource, Perm) ->
    Key = {Resource, Perm},
    Cache = case get(permission_cache) of
                undefined -> [];
                Cached    -> Cached
            end,
    case lists:member(Key, Cache) of
        true ->
            ok;
        false ->
            %% fails (exits) if access is denied
            ok = rabbit_access_control:check_resource_access(
                   User, Resource, Perm),
            Tail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE - 1),
            put(permission_cache, [Key | Tail])
    end.
+
%% Drop the memoised permission checks (called before hibernating).
clear_permission_cache() -> erase(permission_cache),
                            ok.

%% Permission checks against the channel's user, one per AMQP
%% permission class.
check_configure_permitted(Resource, #ch{user = User}) ->
    check_resource_access(User, Resource, configure).

check_write_permitted(Resource, #ch{user = User}) ->
    check_resource_access(User, Resource, write).

check_read_permitted(Resource, #ch{user = User}) ->
    check_resource_access(User, Resource, read).
+
%% Validate the user_id message property: allowed when unset, when it
%% matches the authenticated user, when the dummy auth backend is in
%% use, or when the user carries the 'impersonator' tag.
check_user_id_header(#'P_basic'{user_id = undefined}, _) ->
    ok;
check_user_id_header(#'P_basic'{user_id = Username},
                     #ch{user = #user{username = Username}}) ->
    ok;
check_user_id_header(
  #'P_basic'{}, #ch{user = #user{auth_backend = rabbit_auth_backend_dummy}}) ->
    ok;
check_user_id_header(#'P_basic'{user_id = Claimed},
                     #ch{user = #user{username = Actual,
                                      tags = Tags}}) ->
    case lists:member(impersonator, Tags) of
        true -> ok;
        false -> precondition_failed(
                   "user_id property set to '~s' but authenticated user was "
                   "'~s'", [Claimed, Actual])
    end.
+
%% Reject messages with an unparseable expiration property.
check_expiration_header(Props) ->
    case rabbit_basic:parse_expiration(Props) of
        {ok, _} -> ok;
        {error, E} -> precondition_failed("invalid expiration '~s': ~p",
                                          [Props#'P_basic'.expiration, E])
    end.

%% Clients may not publish directly to internal exchanges.
check_internal_exchange(#exchange{name = Name, internal = true}) ->
    rabbit_misc:protocol_error(access_refused,
                               "cannot publish to internal ~s",
                               [rabbit_misc:rs(Name)]);
check_internal_exchange(_) ->
    ok.
+
%% Reject messages exceeding the server-wide ?MAX_MSG_SIZE limit.
check_msg_size(Content) ->
    Size = rabbit_basic:msg_size(Content),
    if
        Size > ?MAX_MSG_SIZE ->
            precondition_failed("message size ~B larger than max size ~B",
                                [Size, ?MAX_MSG_SIZE]);
        true ->
            ok
    end.
+
%% Interpret a queue name binary as a resource in this channel's vhost.
qbin_to_resource(QueueNameBin, State) ->
    name_to_resource(queue, QueueNameBin, State).

name_to_resource(Type, NameBin, #ch{virtual_host = VHostPath}) ->
    rabbit_misc:r(VHostPath, Type, NameBin).

%% AMQP shortcut: an empty queue name means the most recently
%% declared queue on this channel; fails if none has been declared.
expand_queue_name_shortcut(<<>>, #ch{most_recently_declared_queue = <<>>}) ->
    rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
expand_queue_name_shortcut(<<>>, #ch{most_recently_declared_queue = MRDQ}) ->
    MRDQ;
expand_queue_name_shortcut(QueueNameBin, _) ->
    QueueNameBin.

%% Likewise, an empty queue name plus empty routing key defaults the
%% key to the most recently declared queue's name.
expand_routing_key_shortcut(<<>>, <<>>,
                            #ch{most_recently_declared_queue = <<>>}) ->
    rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
expand_routing_key_shortcut(<<>>, <<>>,
                            #ch{most_recently_declared_queue = MRDQ}) ->
    MRDQ;
expand_routing_key_shortcut(_QueueNameBin, RoutingKey, _State) ->
    RoutingKey.
+
%% Apply the empty-queue-name / empty-routing-key shortcuts to every
%% method that supports them; other methods pass through unchanged.
expand_shortcuts(#'basic.get' {queue = Q} = M, State) ->
    M#'basic.get' {queue = expand_queue_name_shortcut(Q, State)};
expand_shortcuts(#'basic.consume'{queue = Q} = M, State) ->
    M#'basic.consume'{queue = expand_queue_name_shortcut(Q, State)};
expand_shortcuts(#'queue.delete' {queue = Q} = M, State) ->
    M#'queue.delete' {queue = expand_queue_name_shortcut(Q, State)};
expand_shortcuts(#'queue.purge' {queue = Q} = M, State) ->
    M#'queue.purge' {queue = expand_queue_name_shortcut(Q, State)};
expand_shortcuts(#'queue.bind' {queue = Q, routing_key = K} = M, State) ->
    M#'queue.bind' {queue       = expand_queue_name_shortcut(Q, State),
                    routing_key = expand_routing_key_shortcut(Q, K, State)};
expand_shortcuts(#'queue.unbind' {queue = Q, routing_key = K} = M, State) ->
    M#'queue.unbind' {queue       = expand_queue_name_shortcut(Q, State),
                      routing_key = expand_routing_key_shortcut(Q, K, State)};
expand_shortcuts(M, _State) ->
    M.
+
+check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) ->
+ rabbit_misc:protocol_error(
+ access_refused, "operation not permitted on the default exchange", []);
+check_not_default_exchange(_) ->
+ ok.
+
+check_exchange_deletion(XName = #resource{name = <<"amq.rabbitmq.", _/binary>>,
+ kind = exchange}) ->
+ rabbit_misc:protocol_error(
+ access_refused, "deletion of system ~s not allowed",
+ [rabbit_misc:rs(XName)]);
+check_exchange_deletion(_) ->
+ ok.
+
%% check that an exchange/queue name does not contain the reserved
%% "amq." prefix.
%%
%% As per the AMQP 0-9-1 spec, the exclusion of "amq." prefixed names
%% only applies on actual creation, and not in the cases where the
%% entity already exists or passive=true.
%%
%% NB: We deliberately do not enforce the other constraints on names
%% required by the spec.
%%
%% Returns NameBin unchanged when it is acceptable.
check_name(Kind, NameBin = <<"amq.", _/binary>>) ->
    rabbit_misc:protocol_error(
      access_refused,
      "~s name '~s' contains reserved prefix 'amq.*'",[Kind, NameBin]);
check_name(_Kind, NameBin) ->
    NameBin.
+
%% Accumulate newly confirmed {MsgSeqNo, XName} pairs; they are
%% drained by send_confirms (invoked from next_state/1).
record_confirms([], State) ->
    State;
record_confirms(MXs, State = #ch{confirmed = C}) ->
    State#ch{confirmed = [MXs | C]}.
+
+handle_method(#'channel.open'{}, _, State = #ch{state = starting}) ->
+ %% Don't leave "starting" as the state for 5s. TODO is this TRTTD?
+ State1 = State#ch{state = running},
+ rabbit_event:if_enabled(State1, #ch.stats_timer,
+ fun() -> emit_stats(State1) end),
+ {reply, #'channel.open_ok'{}, State1};
+
+handle_method(#'channel.open'{}, _, _State) ->
+ rabbit_misc:protocol_error(
+ command_invalid, "second 'channel.open' seen", []);
+
+handle_method(_Method, _, #ch{state = starting}) ->
+ rabbit_misc:protocol_error(channel_error, "expected 'channel.open'", []);
+
+handle_method(#'channel.close_ok'{}, _, #ch{state = closing}) ->
+ stop;
+
+handle_method(#'channel.close'{}, _, State = #ch{writer_pid = WriterPid,
+ state = closing}) ->
+ ok = rabbit_writer:send_command(WriterPid, #'channel.close_ok'{}),
+ {noreply, State};
+
+handle_method(_Method, _, State = #ch{state = closing}) ->
+ {noreply, State};
+
+handle_method(#'channel.close'{}, _, State = #ch{reader_pid = ReaderPid}) ->
+ {ok, State1} = notify_queues(State),
+ %% We issue the channel.close_ok response after a handshake with
+ %% the reader, the other half of which is ready_for_close. That
+ %% way the reader forgets about the channel before we send the
+ %% response (and this channel process terminates). If we didn't do
+ %% that, a channel.open for the same channel number, which a
+ %% client is entitled to send as soon as it has received the
+ %% close_ok, might be received by the reader before it has seen
+ %% the termination and hence be sent to the old, now dead/dying
+ %% channel process, instead of a new process, and thus lost.
+ ReaderPid ! {channel_closing, self()},
+ {noreply, State1};
+
+%% Even though the spec prohibits the client from sending commands
+%% while waiting for the reply to a synchronous command, we generally
+%% do allow this...except in the case of a pending tx.commit, where
+%% it could wreak havoc.
+handle_method(_Method, _, #ch{tx = Tx})
+ when Tx =:= committing orelse Tx =:= failed ->
+ rabbit_misc:protocol_error(
+ channel_error, "unexpected command while processing 'tx.commit'", []);
+
+handle_method(#'access.request'{},_, State) ->
+ {reply, #'access.request_ok'{ticket = 1}, State};
+
+handle_method(#'basic.publish'{immediate = true}, _Content, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "immediate=true", []);
+
+handle_method(#'basic.publish'{exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ mandatory = Mandatory},
+ Content, State = #ch{virtual_host = VHostPath,
+ tx = Tx,
+ confirm_enabled = ConfirmEnabled,
+ trace_state = TraceState}) ->
+ check_msg_size(Content),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ check_write_permitted(ExchangeName, State),
+ Exchange = rabbit_exchange:lookup_or_die(ExchangeName),
+ check_internal_exchange(Exchange),
+ %% We decode the content's properties here because we're almost
+ %% certain to want to look at delivery-mode and priority.
+ DecodedContent = #content {properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ check_user_id_header(Props, State),
+ check_expiration_header(Props),
+ DoConfirm = Tx =/= none orelse ConfirmEnabled,
+ {MsgSeqNo, State1} =
+ case DoConfirm orelse Mandatory of
+ false -> {undefined, State};
+ true -> SeqNo = State#ch.publish_seqno,
+ {SeqNo, State#ch{publish_seqno = SeqNo + 1}}
+ end,
+ case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of
+ {ok, Message} ->
+ rabbit_trace:tap_in(Message, TraceState),
+ Delivery = rabbit_basic:delivery(
+ Mandatory, DoConfirm, Message, MsgSeqNo),
+ QNames = rabbit_exchange:route(Exchange, Delivery),
+ DQ = {Delivery, QNames},
+ {noreply, case Tx of
+ none -> deliver_to_queues(DQ, State1);
+ {Msgs, Acks} -> Msgs1 = queue:in(DQ, Msgs),
+ State1#ch{tx = {Msgs1, Acks}}
+ end};
+ {error, Reason} ->
+ precondition_failed("invalid message: ~p", [Reason])
+ end;
+
+handle_method(#'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = Multiple,
+ requeue = Requeue}, _, State) ->
+ reject(DeliveryTag, Requeue, Multiple, State);
+
+handle_method(#'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = Multiple},
+ _, State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
+ {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
+ State1 = State#ch{unacked_message_q = Remaining},
+ {noreply, case Tx of
+ none -> ack(Acked, State1),
+ State1;
+ {Msgs, Acks} -> Acks1 = ack_cons(ack, Acked, Acks),
+ State1#ch{tx = {Msgs, Acks1}}
+ end};
+
+handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck},
+ _, State = #ch{writer_pid = WriterPid,
+ conn_pid = ConnPid,
+ limiter = Limiter,
+ next_tag = DeliveryTag}) ->
+ QueueName = qbin_to_resource(QueueNameBin, State),
+ check_read_permitted(QueueName, State),
+ case rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) -> rabbit_amqqueue:basic_get(
+ Q, self(), NoAck, rabbit_limiter:pid(Limiter))
+ end) of
+ {ok, MessageCount,
+ Msg = {QName, QPid, _MsgId, Redelivered,
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}}} ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.get_ok'{delivery_tag = DeliveryTag,
+ redelivered = Redelivered,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey,
+ message_count = MessageCount},
+ Content),
+ State1 = monitor_delivering_queue(NoAck, QPid, QName, State),
+ {noreply, record_sent(none, not(NoAck), Msg, State1)};
+ empty ->
+ {reply, #'basic.get_empty'{}, State}
+ end;
+
+handle_method(#'basic.consume'{queue = QueueNameBin,
+ consumer_tag = ConsumerTag,
+ no_local = _, % FIXME: implement
+ no_ack = NoAck,
+ exclusive = ExclusiveConsume,
+ nowait = NoWait,
+ arguments = Args},
+ _, State = #ch{consumer_prefetch = ConsumerPrefetch,
+ consumer_mapping = ConsumerMapping}) ->
+ case dict:find(ConsumerTag, ConsumerMapping) of
+ error ->
+ QueueName = qbin_to_resource(QueueNameBin, State),
+ check_read_permitted(QueueName, State),
+ ActualConsumerTag =
+ case ConsumerTag of
+ <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
+ "amq.ctag");
+ Other -> Other
+ end,
+ case basic_consume(
+ QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args, NoWait, State) of
+ {ok, State1} ->
+ {noreply, State1};
+ {error, exclusive_consume_unavailable} ->
+ rabbit_misc:protocol_error(
+ access_refused, "~s in exclusive use",
+ [rabbit_misc:rs(QueueName)])
+ end;
+ {ok, _} ->
+ %% Attempted reuse of consumer tag.
+ rabbit_misc:protocol_error(
+ not_allowed, "attempt to reuse consumer tag '~s'", [ConsumerTag])
+ end;
+
+handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
+ _, State = #ch{consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons}) ->
+ OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
+ case dict:find(ConsumerTag, ConsumerMapping) of
+ error ->
+ %% Spec requires we ignore this situation.
+ return_ok(State, NoWait, OkMsg);
+ {ok, {Q = #amqqueue{pid = QPid}, _CParams}} ->
+ ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping),
+ QCons1 =
+ case dict:find(QPid, QCons) of
+ error -> QCons;
+ {ok, CTags} -> CTags1 = gb_sets:delete(ConsumerTag, CTags),
+ case gb_sets:is_empty(CTags1) of
+ true -> dict:erase(QPid, QCons);
+ false -> dict:store(QPid, CTags1, QCons)
+ end
+ end,
+ NewState = State#ch{consumer_mapping = ConsumerMapping1,
+ queue_consumers = QCons1},
+ %% In order to ensure that no more messages are sent to
+ %% the consumer after the cancel_ok has been sent, we get
+ %% the queue process to send the cancel_ok on our
+ %% behalf. If we were sending the cancel_ok ourselves it
+ %% might overtake a message sent previously by the queue.
+ case rabbit_misc:with_exit_handler(
+ fun () -> {error, not_found} end,
+ fun () ->
+ rabbit_amqqueue:basic_cancel(
+ Q, self(), ConsumerTag, ok_msg(NoWait, OkMsg))
+ end) of
+ ok ->
+ {noreply, NewState};
+ {error, not_found} ->
+ %% Spec requires we ignore this situation.
+ return_ok(NewState, NoWait, OkMsg)
+ end
+ end;
+
+handle_method(#'basic.qos'{prefetch_size = Size}, _, _State) when Size /= 0 ->
+ rabbit_misc:protocol_error(not_implemented,
+ "prefetch_size!=0 (~w)", [Size]);
+
+handle_method(#'basic.qos'{global = false,
+ prefetch_count = PrefetchCount}, _, State) ->
+ {reply, #'basic.qos_ok'{}, State#ch{consumer_prefetch = PrefetchCount}};
+
+handle_method(#'basic.qos'{global = true,
+ prefetch_count = 0},
+ _, State = #ch{limiter = Limiter}) ->
+ Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
+
+handle_method(#'basic.qos'{global = true,
+ prefetch_count = PrefetchCount},
+ _, State = #ch{limiter = Limiter, unacked_message_q = UAMQ}) ->
+ %% TODO queue:len(UAMQ) is not strictly right since that counts
+ %% unacked messages from basic.get too. Pretty obscure though.
+ Limiter1 = rabbit_limiter:limit_prefetch(Limiter,
+ PrefetchCount, queue:len(UAMQ)),
+ case ((not rabbit_limiter:is_active(Limiter)) andalso
+ rabbit_limiter:is_active(Limiter1)) of
+ true -> rabbit_amqqueue:activate_limit_all(
+ consumer_queues(State#ch.consumer_mapping), self());
+ false -> ok
+ end,
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
+
+handle_method(#'basic.recover_async'{requeue = true},
+ _, State = #ch{unacked_message_q = UAMQ, limiter = Limiter}) ->
+ OkFun = fun () -> ok end,
+ UAMQL = queue:to_list(UAMQ),
+ foreach_per_queue(
+ fun (QPid, MsgIds) ->
+ rabbit_misc:with_exit_handler(
+ OkFun,
+ fun () -> rabbit_amqqueue:requeue(QPid, MsgIds, self()) end)
+ end, lists:reverse(UAMQL)),
+ ok = notify_limiter(Limiter, UAMQL),
+ %% No answer required - basic.recover is the newer, synchronous
+ %% variant of this method
+ {noreply, State#ch{unacked_message_q = queue:new()}};
+
+handle_method(#'basic.recover_async'{requeue = false}, _, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "requeue=false", []);
+
+handle_method(#'basic.recover'{requeue = Requeue}, Content, State) ->
+ {noreply, State1} = handle_method(#'basic.recover_async'{requeue = Requeue},
+ Content, State),
+ {reply, #'basic.recover_ok'{}, State1};
+
+handle_method(#'basic.reject'{delivery_tag = DeliveryTag, requeue = Requeue},
+ _, State) ->
+ reject(DeliveryTag, Requeue, false, State);
+
+handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
+ type = TypeNameBin,
+ passive = false,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ nowait = NoWait,
+ arguments = Args},
+ _, State = #ch{virtual_host = VHostPath}) ->
+ CheckedType = rabbit_exchange:check_type(TypeNameBin),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ check_not_default_exchange(ExchangeName),
+ check_configure_permitted(ExchangeName, State),
+ X = case rabbit_exchange:lookup(ExchangeName) of
+ {ok, FoundX} -> FoundX;
+ {error, not_found} ->
+ check_name('exchange', ExchangeNameBin),
+ AeKey = <<"alternate-exchange">>,
+ case rabbit_misc:r_arg(VHostPath, exchange, Args, AeKey) of
+ undefined -> ok;
+ {error, {invalid_type, Type}} ->
+ precondition_failed(
+ "invalid type '~s' for arg '~s' in ~s",
+ [Type, AeKey, rabbit_misc:rs(ExchangeName)]);
+ AName -> check_read_permitted(ExchangeName, State),
+ check_write_permitted(AName, State),
+ ok
+ end,
+ rabbit_exchange:declare(ExchangeName,
+ CheckedType,
+ Durable,
+ AutoDelete,
+ Internal,
+ Args)
+ end,
+ ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable,
+ AutoDelete, Internal, Args),
+ return_ok(State, NoWait, #'exchange.declare_ok'{});
+
+handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
+ passive = true,
+ nowait = NoWait},
+ _, State = #ch{virtual_host = VHostPath}) ->
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ check_not_default_exchange(ExchangeName),
+ _ = rabbit_exchange:lookup_or_die(ExchangeName),
+ return_ok(State, NoWait, #'exchange.declare_ok'{});
+
+handle_method(#'exchange.delete'{exchange = ExchangeNameBin,
+ if_unused = IfUnused,
+ nowait = NoWait},
+ _, State = #ch{virtual_host = VHostPath}) ->
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ check_not_default_exchange(ExchangeName),
+ check_exchange_deletion(ExchangeName),
+ check_configure_permitted(ExchangeName, State),
+ case rabbit_exchange:delete(ExchangeName, IfUnused) of
+ {error, not_found} ->
+ return_ok(State, NoWait, #'exchange.delete_ok'{});
+ {error, in_use} ->
+ precondition_failed("~s in use", [rabbit_misc:rs(ExchangeName)]);
+ ok ->
+ return_ok(State, NoWait, #'exchange.delete_ok'{})
+ end;
+
+handle_method(#'exchange.bind'{destination = DestinationNameBin,
+ source = SourceNameBin,
+ routing_key = RoutingKey,
+ nowait = NoWait,
+ arguments = Arguments}, _, State) ->
+ binding_action(fun rabbit_binding:add/2,
+ SourceNameBin, exchange, DestinationNameBin, RoutingKey,
+ Arguments, #'exchange.bind_ok'{}, NoWait, State);
+
+handle_method(#'exchange.unbind'{destination = DestinationNameBin,
+ source = SourceNameBin,
+ routing_key = RoutingKey,
+ nowait = NoWait,
+ arguments = Arguments}, _, State) ->
+ binding_action(fun rabbit_binding:remove/2,
+ SourceNameBin, exchange, DestinationNameBin, RoutingKey,
+ Arguments, #'exchange.unbind_ok'{}, NoWait, State);
+
+handle_method(#'queue.declare'{queue = QueueNameBin,
+ passive = false,
+ durable = DurableDeclare,
+ exclusive = ExclusiveDeclare,
+ auto_delete = AutoDelete,
+ nowait = NoWait,
+ arguments = Args} = Declare,
+ _, State = #ch{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ queue_collector_pid = CollectorPid}) ->
+ Owner = case ExclusiveDeclare of
+ true -> ConnPid;
+ false -> none
+ end,
+ Durable = DurableDeclare andalso not ExclusiveDeclare,
+ ActualNameBin = case QueueNameBin of
+ <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
+ "amq.gen");
+ Other -> check_name('queue', Other)
+ end,
+ QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin),
+ check_configure_permitted(QueueName, State),
+ case rabbit_amqqueue:with(
+ QueueName,
+ fun (Q) -> ok = rabbit_amqqueue:assert_equivalence(
+ Q, Durable, AutoDelete, Args, Owner),
+ rabbit_amqqueue:stat(Q)
+ end) of
+ {ok, MessageCount, ConsumerCount} ->
+ return_queue_declare_ok(QueueName, NoWait, MessageCount,
+ ConsumerCount, State);
+ {error, not_found} ->
+ DlxKey = <<"x-dead-letter-exchange">>,
+ case rabbit_misc:r_arg(VHostPath, exchange, Args, DlxKey) of
+ undefined ->
+ ok;
+ {error, {invalid_type, Type}} ->
+ precondition_failed(
+ "invalid type '~s' for arg '~s' in ~s",
+ [Type, DlxKey, rabbit_misc:rs(QueueName)]);
+ DLX ->
+ check_read_permitted(QueueName, State),
+ check_write_permitted(DLX, State),
+ ok
+ end,
+ case rabbit_amqqueue:declare(QueueName, Durable, AutoDelete,
+ Args, Owner) of
+ {new, #amqqueue{pid = QPid}} ->
+ %% We need to notify the reader within the channel
+ %% process so that we can be sure there are no
+ %% outstanding exclusive queues being declared as
+ %% the connection shuts down.
+ ok = case Owner of
+ none -> ok;
+ _ -> rabbit_queue_collector:register(
+ CollectorPid, QPid)
+ end,
+ return_queue_declare_ok(QueueName, NoWait, 0, 0, State);
+ {existing, _Q} ->
+ %% must have been created between the stat and the
+ %% declare. Loop around again.
+ handle_method(Declare, none, State);
+ {absent, Q} ->
+ rabbit_misc:absent(Q);
+ {owner_died, _Q} ->
+ %% Presumably our own days are numbered since the
+ %% connection has died. Pretend the queue exists though,
+ %% just so nothing fails.
+ return_queue_declare_ok(QueueName, NoWait, 0, 0, State)
+ end;
+ {error, {absent, Q}} ->
+ rabbit_misc:absent(Q)
+ end;
+
+handle_method(#'queue.declare'{queue = QueueNameBin,
+ passive = true,
+ nowait = NoWait},
+ _, State = #ch{virtual_host = VHostPath,
+ conn_pid = ConnPid}) ->
+ QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} =
+ rabbit_amqqueue:with_or_die(
+ QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end),
+ ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
+ return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount,
+ State);
+
+handle_method(#'queue.delete'{queue = QueueNameBin,
+ if_unused = IfUnused,
+ if_empty = IfEmpty,
+ nowait = NoWait},
+ _, State = #ch{conn_pid = ConnPid}) ->
+ QueueName = qbin_to_resource(QueueNameBin, State),
+ check_configure_permitted(QueueName, State),
+ case rabbit_amqqueue:with(
+ QueueName,
+ fun (Q) ->
+ rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
+ rabbit_amqqueue:delete(Q, IfUnused, IfEmpty)
+ end,
+ fun (not_found) -> {ok, 0};
+ ({absent, Q}) -> rabbit_misc:absent(Q)
+ end) of
+ {error, in_use} ->
+ precondition_failed("~s in use", [rabbit_misc:rs(QueueName)]);
+ {error, not_empty} ->
+ precondition_failed("~s not empty", [rabbit_misc:rs(QueueName)]);
+ {ok, PurgedMessageCount} ->
+ return_ok(State, NoWait,
+ #'queue.delete_ok'{message_count = PurgedMessageCount})
+ end;
+
+handle_method(#'queue.bind'{queue = QueueNameBin,
+ exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ nowait = NoWait,
+ arguments = Arguments}, _, State) ->
+ binding_action(fun rabbit_binding:add/2,
+ ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments,
+ #'queue.bind_ok'{}, NoWait, State);
+
+handle_method(#'queue.unbind'{queue = QueueNameBin,
+ exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments}, _, State) ->
+ binding_action(fun rabbit_binding:remove/2,
+ ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments,
+ #'queue.unbind_ok'{}, false, State);
+
+handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait},
+ _, State = #ch{conn_pid = ConnPid}) ->
+ QueueName = qbin_to_resource(QueueNameBin, State),
+ check_read_permitted(QueueName, State),
+ {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) -> rabbit_amqqueue:purge(Q) end),
+ return_ok(State, NoWait,
+ #'queue.purge_ok'{message_count = PurgedMessageCount});
+
+handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) ->
+ precondition_failed("cannot switch from confirm to tx mode");
+
+handle_method(#'tx.select'{}, _, State = #ch{tx = none}) ->
+ {reply, #'tx.select_ok'{}, State#ch{tx = new_tx()}};
+
+handle_method(#'tx.select'{}, _, State) ->
+ {reply, #'tx.select_ok'{}, State};
+
+handle_method(#'tx.commit'{}, _, #ch{tx = none}) ->
+ precondition_failed("channel is not transactional");
+
+handle_method(#'tx.commit'{}, _, State = #ch{tx = {Msgs, Acks},
+ limiter = Limiter}) ->
+ State1 = rabbit_misc:queue_fold(fun deliver_to_queues/2, State, Msgs),
+ Rev = fun (X) -> lists:reverse(lists:sort(X)) end,
+ lists:foreach(fun ({ack, A}) -> ack(Rev(A), State1);
+ ({Requeue, A}) -> reject(Requeue, Rev(A), Limiter)
+ end, lists:reverse(Acks)),
+ {noreply, maybe_complete_tx(State1#ch{tx = committing})};
+
+handle_method(#'tx.rollback'{}, _, #ch{tx = none}) ->
+ precondition_failed("channel is not transactional");
+
+handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ,
+ tx = {_Msgs, Acks}}) ->
+ AcksL = lists:append(lists:reverse([lists:reverse(L) || {_, L} <- Acks])),
+ UAMQ1 = queue:from_list(lists:usort(AcksL ++ queue:to_list(UAMQ))),
+ {reply, #'tx.rollback_ok'{}, State#ch{unacked_message_q = UAMQ1,
+ tx = new_tx()}};
+
+handle_method(#'confirm.select'{}, _, #ch{tx = {_, _}}) ->
+ precondition_failed("cannot switch from tx to confirm mode");
+
+handle_method(#'confirm.select'{nowait = NoWait}, _, State) ->
+ return_ok(State#ch{confirm_enabled = true},
+ NoWait, #'confirm.select_ok'{});
+
+handle_method(#'channel.flow'{active = true}, _, State) ->
+ {reply, #'channel.flow_ok'{active = true}, State};
+
+handle_method(#'channel.flow'{active = false}, _, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "active=false", []);
+
+handle_method(#'basic.credit'{consumer_tag = CTag,
+ credit = Credit,
+ drain = Drain},
+ _, State = #ch{consumer_mapping = Consumers}) ->
+ case dict:find(CTag, Consumers) of
+ {ok, {Q, _CParams}} -> ok = rabbit_amqqueue:credit(
+ Q, self(), CTag, Credit, Drain),
+ {noreply, State};
+ error -> precondition_failed(
+ "unknown consumer tag '~s'", [CTag])
+ end;
+
+handle_method(_MethodRecord, _Content, _State) ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unimplemented method", []).
+
+%%----------------------------------------------------------------------------
+
+%% We get the queue process to send the consume_ok on our behalf. This
+%% is for symmetry with basic.cancel - see the comment in that method
+%% for why.
+basic_consume(QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args, NoWait,
+ State = #ch{conn_pid = ConnPid,
+ limiter = Limiter,
+ consumer_mapping = ConsumerMapping}) ->
+ case rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) ->
+ {rabbit_amqqueue:basic_consume(
+ Q, NoAck, self(),
+ rabbit_limiter:pid(Limiter),
+ rabbit_limiter:is_active(Limiter),
+ ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args,
+ ok_msg(NoWait, #'basic.consume_ok'{
+ consumer_tag = ActualConsumerTag})),
+ Q}
+ end) of
+ {ok, Q = #amqqueue{pid = QPid, name = QName}} ->
+ CM1 = dict:store(
+ ActualConsumerTag,
+ {Q, {NoAck, ConsumerPrefetch, ExclusiveConsume, Args}},
+ ConsumerMapping),
+ State1 = monitor_delivering_queue(
+ NoAck, QPid, QName,
+ State#ch{consumer_mapping = CM1}),
+ {ok, case NoWait of
+ true -> consumer_monitor(ActualConsumerTag, State1);
+ false -> State1
+ end};
+ {{error, exclusive_consume_unavailable} = E, _Q} ->
+ E
+ end.
+
+consumer_monitor(ConsumerTag,
+ State = #ch{consumer_mapping = ConsumerMapping,
+ queue_monitors = QMons,
+ queue_consumers = QCons}) ->
+ {#amqqueue{pid = QPid}, _CParams} =
+ dict:fetch(ConsumerTag, ConsumerMapping),
+ QCons1 = dict:update(QPid, fun (CTags) ->
+ gb_sets:insert(ConsumerTag, CTags)
+ end,
+ gb_sets:singleton(ConsumerTag), QCons),
+ State#ch{queue_monitors = pmon:monitor(QPid, QMons),
+ queue_consumers = QCons1}.
+
+monitor_delivering_queue(NoAck, QPid, QName,
+ State = #ch{queue_names = QNames,
+ queue_monitors = QMons,
+ delivering_queues = DQ}) ->
+ State#ch{queue_names = dict:store(QPid, QName, QNames),
+ queue_monitors = pmon:monitor(QPid, QMons),
+ delivering_queues = case NoAck of
+ true -> DQ;
+ false -> sets:add_element(QPid, DQ)
+ end}.
+
+handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed = UC,
+ mandatory = Mand}) ->
+ {MMsgs, Mand1} = dtree:take(QPid, Mand),
+ [basic_return(Msg, State, no_route) || {_, Msg} <- MMsgs],
+ State1 = State#ch{mandatory = Mand1},
+ case rabbit_misc:is_abnormal_exit(Reason) of
+ true -> {MXs, UC1} = dtree:take_all(QPid, UC),
+ send_nacks(MXs, State1#ch{unconfirmed = UC1});
+ false -> {MXs, UC1} = dtree:take(QPid, UC),
+ record_confirms(MXs, State1#ch{unconfirmed = UC1})
+
+ end.
+
+handle_consuming_queue_down(QPid, State = #ch{queue_consumers = QCons,
+ queue_names = QNames}) ->
+ ConsumerTags = case dict:find(QPid, QCons) of
+ error -> gb_sets:new();
+ {ok, CTags} -> CTags
+ end,
+ gb_sets:fold(
+ fun (CTag, StateN = #ch{consumer_mapping = CMap}) ->
+ QName = dict:fetch(QPid, QNames),
+ case queue_down_consumer_action(CTag, CMap) of
+ remove ->
+ cancel_consumer(CTag, QName, StateN);
+ {recover, {NoAck, ConsumerPrefetch, Exclusive, Args}} ->
+ case catch basic_consume( %% [0]
+ QName, NoAck, ConsumerPrefetch, CTag,
+ Exclusive, Args, true, StateN) of
+ {ok, StateN1} -> StateN1;
+ _ -> cancel_consumer(CTag, QName, StateN)
+ end
+ end
+ end, State#ch{queue_consumers = dict:erase(QPid, QCons)}, ConsumerTags).
+
+%% [0] There is a slight danger here that if a queue is deleted and
+%% then recreated again the reconsume will succeed even though it was
+%% not an HA failover. But the likelihood is not great and most users
+%% are unlikely to care.
+
+cancel_consumer(CTag, QName, State = #ch{capabilities = Capabilities,
+ consumer_mapping = CMap}) ->
+ case rabbit_misc:table_lookup(
+ Capabilities, <<"consumer_cancel_notify">>) of
+ {bool, true} -> ok = send(#'basic.cancel'{consumer_tag = CTag,
+ nowait = true}, State);
+ _ -> ok
+ end,
+ rabbit_event:notify(consumer_deleted, [{consumer_tag, CTag},
+ {channel, self()},
+ {queue, QName}]),
+ State#ch{consumer_mapping = dict:erase(CTag, CMap)}.
+
+queue_down_consumer_action(CTag, CMap) ->
+ {_, {_, _, _, Args} = ConsumeSpec} = dict:fetch(CTag, CMap),
+ case rabbit_misc:table_lookup(Args, <<"x-cancel-on-ha-failover">>) of
+ {bool, true} -> remove;
+ _ -> {recover, ConsumeSpec}
+ end.
+
+handle_delivering_queue_down(QPid, State = #ch{delivering_queues = DQ}) ->
+ State#ch{delivering_queues = sets:del_element(QPid, DQ)}.
+
+binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin,
+ RoutingKey, Arguments, ReturnMethod, NoWait,
+ State = #ch{virtual_host = VHostPath,
+ conn_pid = ConnPid }) ->
+ DestinationName = name_to_resource(DestinationType, DestinationNameBin, State),
+ check_write_permitted(DestinationName, State),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ [check_not_default_exchange(N) || N <- [DestinationName, ExchangeName]],
+ check_read_permitted(ExchangeName, State),
+ case Fun(#binding{source = ExchangeName,
+ destination = DestinationName,
+ key = RoutingKey,
+ args = Arguments},
+ fun (_X, Q = #amqqueue{}) ->
+ try rabbit_amqqueue:check_exclusive_access(Q, ConnPid)
+ catch exit:Reason -> {error, Reason}
+ end;
+ (_X, #exchange{}) ->
+ ok
+ end) of
+ {error, {resources_missing, [{not_found, Name} | _]}} ->
+ rabbit_misc:not_found(Name);
+ {error, {resources_missing, [{absent, Q} | _]}} ->
+ rabbit_misc:absent(Q);
+ {error, binding_not_found} ->
+ rabbit_misc:protocol_error(
+ not_found, "no binding ~s between ~s and ~s",
+ [RoutingKey, rabbit_misc:rs(ExchangeName),
+ rabbit_misc:rs(DestinationName)]);
+ {error, {binding_invalid, Fmt, Args}} ->
+ rabbit_misc:protocol_error(precondition_failed, Fmt, Args);
+ {error, #amqp_error{} = Error} ->
+ rabbit_misc:protocol_error(Error);
+ ok -> return_ok(State, NoWait, ReturnMethod)
+ end.
+
+basic_return(#basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content},
+ State = #ch{protocol = Protocol, writer_pid = WriterPid},
+ Reason) ->
+ ?INCR_STATS([{exchange_stats, ExchangeName, 1}], return_unroutable, State),
+ {_Close, ReplyCode, ReplyText} = Protocol:lookup_amqp_exception(Reason),
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.return'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
+ Content).
+
+reject(DeliveryTag, Requeue, Multiple,
+ State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
+ {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
+ State1 = State#ch{unacked_message_q = Remaining},
+ {noreply, case Tx of
+ none -> reject(Requeue, Acked, State1#ch.limiter),
+ State1;
+ {Msgs, Acks} -> Acks1 = ack_cons(Requeue, Acked, Acks),
+ State1#ch{tx = {Msgs, Acks1}}
+ end}.
+
+%% NB: Acked is in youngest-first order
+reject(Requeue, Acked, Limiter) ->
+ foreach_per_queue(
+ fun (QPid, MsgIds) ->
+ rabbit_amqqueue:reject(QPid, Requeue, MsgIds, self())
+ end, Acked),
+ ok = notify_limiter(Limiter, Acked).
+
+record_sent(ConsumerTag, AckRequired,
+ Msg = {QName, QPid, MsgId, Redelivered, _Message},
+ State = #ch{unacked_message_q = UAMQ,
+ next_tag = DeliveryTag,
+ trace_state = TraceState}) ->
+ ?INCR_STATS([{queue_stats, QName, 1}], case {ConsumerTag, AckRequired} of
+ {none, true} -> get;
+ {none, false} -> get_no_ack;
+ {_ , true} -> deliver;
+ {_ , false} -> deliver_no_ack
+ end, State),
+ case Redelivered of
+ true -> ?INCR_STATS([{queue_stats, QName, 1}], redeliver, State);
+ false -> ok
+ end,
+ rabbit_trace:tap_out(Msg, TraceState),
+ UAMQ1 = case AckRequired of
+ true -> queue:in({DeliveryTag, ConsumerTag, {QPid, MsgId}},
+ UAMQ);
+ false -> UAMQ
+ end,
+ State#ch{unacked_message_q = UAMQ1, next_tag = DeliveryTag + 1}.
+
+%% NB: returns acks in youngest-first order
+collect_acks(Q, 0, true) ->
+ {lists:reverse(queue:to_list(Q)), queue:new()};
+collect_acks(Q, DeliveryTag, Multiple) ->
+ collect_acks([], [], Q, DeliveryTag, Multiple).
+
+collect_acks(ToAcc, PrefixAcc, Q, DeliveryTag, Multiple) ->
+ case queue:out(Q) of
+ {{value, UnackedMsg = {CurrentDeliveryTag, _ConsumerTag, _Msg}},
+ QTail} ->
+ if CurrentDeliveryTag == DeliveryTag ->
+ {[UnackedMsg | ToAcc],
+ case PrefixAcc of
+ [] -> QTail;
+ _ -> queue:join(
+ queue:from_list(lists:reverse(PrefixAcc)),
+ QTail)
+ end};
+ Multiple ->
+ collect_acks([UnackedMsg | ToAcc], PrefixAcc,
+ QTail, DeliveryTag, Multiple);
+ true ->
+ collect_acks(ToAcc, [UnackedMsg | PrefixAcc],
+ QTail, DeliveryTag, Multiple)
+ end;
+ {empty, _} ->
+ precondition_failed("unknown delivery tag ~w", [DeliveryTag])
+ end.
+
+%% NB: Acked is in youngest-first order
+ack(Acked, State = #ch{queue_names = QNames}) ->
+ foreach_per_queue(
+ fun (QPid, MsgIds) ->
+ ok = rabbit_amqqueue:ack(QPid, MsgIds, self()),
+ ?INCR_STATS(case dict:find(QPid, QNames) of
+ {ok, QName} -> Count = length(MsgIds),
+ [{queue_stats, QName, Count}];
+ error -> []
+ end, ack, State)
+ end, Acked),
+ ok = notify_limiter(State#ch.limiter, Acked).
+
+%% {Msgs, Acks}
+%%
+%% Msgs is a queue.
+%%
+%% Acks looks s.t. like this:
+%% [{false,[5,4]},{true,[3]},{ack,[2,1]}, ...]
+%%
+%% Each element is a pair consisting of a tag and a list of
+%% ack'ed/reject'ed msg ids. The tag is one of 'ack' (to ack), 'true'
+%% (reject w requeue), 'false' (reject w/o requeue). The msg ids, as
+%% well as the list overall, are in "most-recent (generally youngest)
+%% ack first" order.
+new_tx() -> {queue:new(), []}.
+
+notify_queues(State = #ch{state = closing}) ->
+ {ok, State};
+notify_queues(State = #ch{consumer_mapping = Consumers,
+ delivering_queues = DQ }) ->
+ QPids = sets:to_list(
+ sets:union(sets:from_list(consumer_queues(Consumers)), DQ)),
+ {rabbit_amqqueue:notify_down_all(QPids, self()), State#ch{state = closing}}.
+
+foreach_per_queue(_F, []) ->
+ ok;
+foreach_per_queue(F, [{_DTag, _CTag, {QPid, MsgId}}]) -> %% common case
+ F(QPid, [MsgId]);
+%% NB: UAL should be in youngest-first order; the tree values will
+%% then be in oldest-first order
+foreach_per_queue(F, UAL) ->
+ T = lists:foldl(fun ({_DTag, _CTag, {QPid, MsgId}}, T) ->
+ rabbit_misc:gb_trees_cons(QPid, MsgId, T)
+ end, gb_trees:empty(), UAL),
+ rabbit_misc:gb_trees_foreach(F, T).
+
+consumer_queues(Consumers) ->
+ lists:usort([QPid || {_Key, {#amqqueue{pid = QPid}, _CParams}}
+ <- dict:to_list(Consumers)]).
+
+%% tell the limiter about the number of acks that have been received
+%% for messages delivered to subscribed consumers, but not acks for
+%% messages sent in a response to a basic.get (identified by their
+%% 'none' consumer tag)
+notify_limiter(Limiter, Acked) ->
+ %% optimisation: avoid the potentially expensive 'foldl' in the
+ %% common case.
+ case rabbit_limiter:is_active(Limiter) of
+ false -> ok;
+ true -> case lists:foldl(fun ({_, none, _}, Acc) -> Acc;
+ ({_, _, _}, Acc) -> Acc + 1
+ end, 0, Acked) of
+ 0 -> ok;
+ Count -> rabbit_limiter:ack(Limiter, Count)
+ end
+ end.
+
+deliver_to_queues({#delivery{message = #basic_message{exchange_name = XName},
+ confirm = false,
+ mandatory = false},
+ []}, State) -> %% optimisation
+ ?INCR_STATS([{exchange_stats, XName, 1}], publish, State),
+ State;
+deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
+ exchange_name = XName},
+ mandatory = Mandatory,
+ confirm = Confirm,
+ msg_seq_no = MsgSeqNo},
+ DelQNames}, State = #ch{queue_names = QNames,
+ queue_monitors = QMons}) ->
+ Qs = rabbit_amqqueue:lookup(DelQNames),
+ DeliveredQPids = rabbit_amqqueue:deliver_flow(Qs, Delivery),
+ %% The pmon:monitor_all/2 monitors all queues to which we
+ %% delivered. But we want to monitor even queues we didn't deliver
+ %% to, since we need their 'DOWN' messages to clean
+ %% queue_names. So we also need to monitor each QPid from
+ %% queues. But that only gets the masters (which is fine for
+ %% cleaning queue_names), so we need the union of both.
+ %%
+ %% ...and we need to add even non-delivered queues to queue_names
+ %% since alternative algorithms to update queue_names less
+ %% frequently would in fact be more expensive in the common case.
+ {QNames1, QMons1} =
+ lists:foldl(fun (#amqqueue{pid = QPid, name = QName},
+ {QNames0, QMons0}) ->
+ {case dict:is_key(QPid, QNames0) of
+ true -> QNames0;
+ false -> dict:store(QPid, QName, QNames0)
+ end, pmon:monitor(QPid, QMons0)}
+ end, {QNames, pmon:monitor_all(DeliveredQPids, QMons)}, Qs),
+ State1 = State#ch{queue_names = QNames1,
+ queue_monitors = QMons1},
+ %% NB: the order here is important since basic.returns must be
+ %% sent before confirms.
+ State2 = process_routing_mandatory(Mandatory, DeliveredQPids, MsgSeqNo,
+ Message, State1),
+ State3 = process_routing_confirm( Confirm, DeliveredQPids, MsgSeqNo,
+ XName, State2),
+ ?INCR_STATS([{exchange_stats, XName, 1} |
+ [{queue_exchange_stats, {QName, XName}, 1} ||
+ QPid <- DeliveredQPids,
+ {ok, QName} <- [dict:find(QPid, QNames1)]]],
+ publish, State3),
+ State3.
+
+process_routing_mandatory(false, _, _MsgSeqNo, _Msg, State) ->
+ State;
+process_routing_mandatory(true, [], _MsgSeqNo, Msg, State) ->
+ ok = basic_return(Msg, State, no_route),
+ State;
+process_routing_mandatory(true, QPids, MsgSeqNo, Msg, State) ->
+ State#ch{mandatory = dtree:insert(MsgSeqNo, QPids, Msg,
+ State#ch.mandatory)}.
+
+process_routing_confirm(false, _, _MsgSeqNo, _XName, State) ->
+ State;
+process_routing_confirm(true, [], MsgSeqNo, XName, State) ->
+ record_confirms([{MsgSeqNo, XName}], State);
+process_routing_confirm(true, QPids, MsgSeqNo, XName, State) ->
+ State#ch{unconfirmed = dtree:insert(MsgSeqNo, QPids, XName,
+ State#ch.unconfirmed)}.
+
+%% Send basic.nack frames for the given [{MsgSeqNo, Msg}] list.
+%% Outside a transaction the seq nos are coalesced into as few frames
+%% as possible; inside a transaction the tx is marked 'failed'
+%% instead. When the channel is closing no frames are sent (the
+%% "optimisation" clauses). Clause order is significant: the
+%% closing+tx=none clause must precede the generic tx=none clause.
+send_nacks([], State) ->
+ State;
+send_nacks(_MXs, State = #ch{state = closing,
+ tx = none}) -> %% optimisation
+ State;
+send_nacks(MXs, State = #ch{tx = none}) ->
+ coalesce_and_send([MsgSeqNo || {MsgSeqNo, _} <- MXs],
+ fun(MsgSeqNo, Multiple) ->
+ #'basic.nack'{delivery_tag = MsgSeqNo,
+ multiple = Multiple}
+ end, State);
+send_nacks(_MXs, State = #ch{state = closing}) -> %% optimisation
+ State#ch{tx = failed};
+send_nacks(_, State) ->
+ maybe_complete_tx(State#ch{tx = failed}).
+
+%% Flush State#ch.confirmed as basic.ack frames (confirm mode, tx =
+%% none), bumping the per-exchange 'confirm' stat for each entry, or
+%% (with a tx) try to complete the pending transaction. Nothing
+%% happens while rabbit_node_monitor:pause_minority_guard() reports
+%% 'pausing'. Note 'confirmed' is a list of lists, hence lists:append.
+send_confirms(State = #ch{tx = none, confirmed = []}) ->
+ State;
+send_confirms(State = #ch{tx = none, confirmed = C}) ->
+ case rabbit_node_monitor:pause_minority_guard() of
+ ok -> MsgSeqNos =
+ lists:foldl(
+ fun ({MsgSeqNo, XName}, MSNs) ->
+ ?INCR_STATS([{exchange_stats, XName, 1}],
+ confirm, State),
+ [MsgSeqNo | MSNs]
+ end, [], lists:append(C)),
+ send_confirms(MsgSeqNos, State#ch{confirmed = []});
+ pausing -> State
+ end;
+send_confirms(State) ->
+ case rabbit_node_monitor:pause_minority_guard() of
+ ok -> maybe_complete_tx(State);
+ pausing -> State
+ end.
+
+%% Send basic.ack for each seq no in the list: a singleton gets one
+%% non-multiple ack, longer lists are coalesced. Skipped entirely
+%% when the channel is closing (optimisation).
+send_confirms([], State) ->
+ State;
+send_confirms(_Cs, State = #ch{state = closing}) -> %% optimisation
+ State;
+send_confirms([MsgSeqNo], State) ->
+ ok = send(#'basic.ack'{delivery_tag = MsgSeqNo}, State),
+ State;
+send_confirms(Cs, State) ->
+ coalesce_and_send(Cs, fun(MsgSeqNo, Multiple) ->
+ #'basic.ack'{delivery_tag = MsgSeqNo,
+ multiple = Multiple}
+ end, State).
+
+%% Send acks/nacks for MsgSeqNos with as few frames as possible.
+%% Everything below the smallest still-unconfirmed seq no (or
+%% everything, when nothing is pending) is covered by one frame with
+%% multiple=true for the highest such seq no; the remainder are sent
+%% individually. MkMsgFun(SeqNo, Multiple) builds the frame record.
+coalesce_and_send(MsgSeqNos, MkMsgFun, State = #ch{unconfirmed = UC}) ->
+ SMsgSeqNos = lists:usort(MsgSeqNos),
+ CutOff = case dtree:is_empty(UC) of
+ true -> lists:last(SMsgSeqNos) + 1;
+ false -> {SeqNo, _XName} = dtree:smallest(UC), SeqNo
+ end,
+ {Ms, Ss} = lists:splitwith(fun(X) -> X < CutOff end, SMsgSeqNos),
+ case Ms of
+ [] -> ok;
+ _ -> ok = send(MkMsgFun(lists:last(Ms), true), State)
+ end,
+ [ok = send(MkMsgFun(SeqNo, false), State) || SeqNo <- Ss],
+ State.
+
+%% Prepend newly acked messages to the pending-ack list, merging with
+%% the most recent entry when it already carries the same tag.
+ack_cons(Tag, Acked, Acks) ->
+ case Acks of
+ [{Tag, Prev} | Rest] -> [{Tag, Acked ++ Prev} | Rest];
+ _ -> [{Tag, Acked} | Acks]
+ end.
+
+ack_len(Acks) -> lists:sum([length(L) || {ack, L} <- Acks]).
+
+%% A transaction still accumulating messages/acks ({Msgs, Acks}) is
+%% left alone; otherwise (committing/failed) the tx completes as soon
+%% as no unconfirmed messages remain outstanding.
+maybe_complete_tx(State = #ch{tx = {_, _}}) ->
+ State;
+maybe_complete_tx(State = #ch{unconfirmed = UC}) ->
+ case dtree:is_empty(UC) of
+ true -> complete_tx(State#ch{confirmed = []});
+ false -> State
+ end.
+
+%% Finish a tx.commit: a committing tx replies tx.commit_ok; a failed
+%% tx raises precondition_failed ("partial tx completion") back to the
+%% client. Either way a fresh transaction state is installed.
+complete_tx(State = #ch{tx = committing}) ->
+ ok = send(#'tx.commit_ok'{}, State),
+ State#ch{tx = new_tx()};
+complete_tx(State = #ch{tx = failed}) ->
+ {noreply, State1} = handle_exception(
+ rabbit_misc:amqp_error(
+ precondition_failed, "partial tx completion", [],
+ 'tx.commit'),
+ State),
+ State1#ch{tx = new_tx()}.
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+%% Value of a single channel info item. Clause order matters: the
+%% tx-specific clauses for messages_uncommitted / acks_uncommitted
+%% must precede their zero-valued fallbacks, and the running-state
+%% clause must precede the generic state clause. Unknown items throw
+%% {bad_argument, Item}.
+i(pid, _) -> self();
+i(connection, #ch{conn_pid = ConnPid}) -> ConnPid;
+i(number, #ch{channel = Channel}) -> Channel;
+i(user, #ch{user = User}) -> User#user.username;
+i(vhost, #ch{virtual_host = VHost}) -> VHost;
+i(transactional, #ch{tx = Tx}) -> Tx =/= none;
+i(confirm, #ch{confirm_enabled = CE}) -> CE;
+i(name, State) -> name(State);
+i(consumer_count, #ch{consumer_mapping = CM}) -> dict:size(CM);
+i(messages_unconfirmed, #ch{unconfirmed = UC}) -> dtree:size(UC);
+i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> queue:len(UAMQ);
+i(messages_uncommitted, #ch{tx = {Msgs, _Acks}}) -> queue:len(Msgs);
+i(messages_uncommitted, #ch{}) -> 0;
+i(acks_uncommitted, #ch{tx = {_Msgs, Acks}}) -> ack_len(Acks);
+i(acks_uncommitted, #ch{}) -> 0;
+i(state, #ch{state = running}) -> credit_flow:state();
+i(state, #ch{state = State}) -> State;
+i(prefetch_count, #ch{consumer_prefetch = C}) -> C;
+i(global_prefetch_count, #ch{limiter = Limiter}) ->
+ rabbit_limiter:get_prefetch_limit(Limiter);
+i(Item, _) ->
+ throw({bad_argument, Item}).
+
+%% Human-readable channel name: "<connection name> (<channel number>)".
+name(#ch{conn_name = ConnName, channel = Channel}) ->
+ Formatted = rabbit_misc:format("~s (~p)", [ConnName, Channel]),
+ list_to_binary(Formatted).
+
+%% Apply each {Type, Key, Inc} increment for Measure to the stats
+%% counters kept in the process dictionary (see update_measures/4).
+incr_stats(Incs, Measure) ->
+ [update_measures(Type, Key, Inc, Measure) || {Type, Key, Inc} <- Incs].
+
+%% Add Inc to the counter for Measure stored in the {Type, Key}
+%% process dictionary entry, creating the orddict and/or the counter
+%% on first use.
+update_measures(Type, Key, Inc, Measure) ->
+ PDKey = {Type, Key},
+ Measures = case get(PDKey) of
+ undefined -> orddict:new();
+ Existing -> Existing
+ end,
+ put(PDKey, orddict:update_counter(Measure, Inc, Measures)).
+
+emit_stats(State) -> emit_stats(State, []).
+
+%% Emit a channel_stats event: coarse items (?STATISTICS_KEYS) always,
+%% plus per-queue / per-exchange / per-queue-exchange fine-grained
+%% counters harvested from the process dictionary when the configured
+%% stats level is 'fine'.
+emit_stats(State, Extra) ->
+ Coarse = infos(?STATISTICS_KEYS, State),
+ case rabbit_event:stats_level(State, #ch.stats_timer) of
+ coarse -> rabbit_event:notify(channel_stats, Extra ++ Coarse);
+ fine -> Fine = [{channel_queue_stats,
+ [{QName, Stats} ||
+ {{queue_stats, QName}, Stats} <- get()]},
+ {channel_exchange_stats,
+ [{XName, Stats} ||
+ {{exchange_stats, XName}, Stats} <- get()]},
+ {channel_queue_exchange_stats,
+ [{QX, Stats} ||
+ {{queue_exchange_stats, QX}, Stats} <- get()]}],
+ rabbit_event:notify(channel_stats, Extra ++ Coarse ++ Fine)
+ end.
+
+%% Drop all process-dictionary stats for QName: its queue_stats entry
+%% and every queue_exchange_stats entry whose queue component matches.
+erase_queue_stats(QName) ->
+ erase({queue_stats, QName}),
+ [erase({queue_exchange_stats, QX}) ||
+ {{queue_exchange_stats, QX = {QName0, _}}, _} <- get(),
+ QName0 =:= QName].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Since the AMQP methods used here are queue related,
+%% maybe we want this to be a queue_interceptor.
+
+-module(rabbit_channel_interceptor).
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([intercept_method/2]).
+
+-ifdef(use_specs).
+
+-type(intercept_method() :: rabbit_framing:amqp_method_name()).
+-type(original_method() :: rabbit_framing:amqp_method_record()).
+-type(processed_method() :: rabbit_framing:amqp_method_record()).
+
+-callback description() -> [proplists:property()].
+
+-callback intercept(original_method(), rabbit_types:vhost()) ->
+ rabbit_types:ok_or_error2(processed_method(), any()).
+
+%% Whether the interceptor wishes to intercept the amqp method
+-callback applies_to(intercept_method()) -> boolean().
+
+-else.
+
+-export([behaviour_info/1]).
+
+%% Fallback for compilers without -callback attribute support (see the
+%% -ifdef(use_specs) branch above): enumerate the behaviour's required
+%% callbacks explicitly.
+behaviour_info(callbacks) ->
+ [{description, 0}, {intercept, 2}, {applies_to, 1}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% basic.publish/ack/nack/reject/credit are always passed through
+%% untouched (these clauses must precede the catch-all); every other
+%% method is dispatched to whichever registered interceptor applies.
+intercept_method(#'basic.publish'{} = M, _VHost) -> M;
+intercept_method(#'basic.ack'{} = M, _VHost) -> M;
+intercept_method(#'basic.nack'{} = M, _VHost) -> M;
+intercept_method(#'basic.reject'{} = M, _VHost) -> M;
+intercept_method(#'basic.credit'{} = M, _VHost) -> M;
+intercept_method(M, VHost) ->
+ intercept_method(M, VHost, select(rabbit_misc:method_record_type(M))).
+
+%% Apply the selected interceptor. At most one interceptor may apply
+%% to a method; zero passes the method through unchanged, more than
+%% one is an internal error. The interceptor's result must be a
+%% method record of the same type as its input, otherwise the channel
+%% fails with an internal error.
+intercept_method(M, _VHost, []) ->
+ M;
+intercept_method(M, VHost, [I]) ->
+ case I:intercept(M, VHost) of
+ {ok, M2} ->
+ case validate_method(M, M2) of
+ true ->
+ M2;
+ _ ->
+ internal_error("Interceptor: ~p expected "
+ "to return method: ~p but returned: ~p",
+ [I, rabbit_misc:method_record_type(M),
+ rabbit_misc:method_record_type(M2)])
+ end;
+ {error, Reason} ->
+ internal_error("Interceptor: ~p failed with reason: ~p",
+ [I, Reason])
+ end;
+intercept_method(M, _VHost, Is) ->
+ internal_error("More than one interceptor for method: ~p -- ~p",
+ [rabbit_misc:method_record_type(M), Is]).
+
+%% select the interceptors that apply to intercept_method().
+select(Method) ->
+ [M || {_, M} <- rabbit_registry:lookup_all(channel_interceptor),
+ code:which(M) =/= non_existing,
+ M:applies_to(Method)].
+
+%% An interceptor must return a method record of the same type as the
+%% one it was given.
+validate_method(Original, Returned) ->
+ OrigType = rabbit_misc:method_record_type(Original),
+ RetType = rabbit_misc:method_record_type(Returned),
+ OrigType =:= RetType.
+
+%% keep dialyzer happy
+-spec internal_error(string(), [any()]) -> no_return().
+%% Abort the current operation with an internal_error protocol error.
+internal_error(Format, Args) ->
+ rabbit_misc:protocol_error(internal_error, Format, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_channel_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([start_link_args/0]).
+
+-type(start_link_args() ::
+ {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid()} |
+ {'direct', rabbit_channel:channel_number(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid()}).
+
+-spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start a channel supervisor and its children. The 'tcp' variant
+%% (network connections) gets a limiter and a writer from its child
+%% specs, then starts the channel and a command-assembler state; the
+%% 'direct' variant gets only a limiter, starts the channel, and
+%% returns 'none' in place of an assembler state. In the tcp case
+%% ReaderPid doubles as the channel's connection pid.
+start_link({tcp, Sock, Channel, FrameMax, ReaderPid, ConnName, Protocol, User,
+ VHost, Capabilities, Collector}) ->
+ {ok, SupPid} = supervisor2:start_link(
+ ?MODULE, {tcp, Sock, Channel, FrameMax,
+ ReaderPid, Protocol, {ConnName, Channel}}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ [WriterPid] = supervisor2:find_child(SupPid, writer),
+ {ok, ChannelPid} =
+ supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_channel, start_link,
+ [Channel, ReaderPid, WriterPid, ReaderPid, ConnName,
+ Protocol, User, VHost, Capabilities, Collector,
+ LimiterPid]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}),
+ {ok, AState} = rabbit_command_assembler:init(Protocol),
+ {ok, SupPid, {ChannelPid, AState}};
+start_link({direct, Channel, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, Collector}) ->
+ {ok, SupPid} = supervisor2:start_link(
+ ?MODULE, {direct, {ConnName, Channel}}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ {ok, ChannelPid} =
+ supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_channel, start_link,
+ [Channel, ClientChannelPid, ClientChannelPid, ConnPid,
+ ConnName, Protocol, User, VHost, Capabilities, Collector,
+ LimiterPid]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}),
+ {ok, SupPid, {ChannelPid, none}}.
+
+%%----------------------------------------------------------------------------
+
+%% one_for_all with zero allowed restarts: any child death takes down
+%% the whole channel supervision tree.
+init(Type) ->
+ {ok, {{one_for_all, 0, 1}, child_specs(Type)}}.
+
+%% Child specs for init/1: the tcp case prepends a writer to the
+%% limiter spec shared with the direct case.
+child_specs({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, Identity}) ->
+ [{writer, {rabbit_writer, start_link,
+ [Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, true]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_writer]}
+ | child_specs({direct, Identity})];
+child_specs({direct, Identity}) ->
+ [{limiter, {rabbit_limiter, start_link, [Identity]},
+ transient, ?MAX_WAIT, worker, [rabbit_limiter]}].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_channel_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_channel/2]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_channel/2 :: (pid(), rabbit_channel_sup:start_link_args()) ->
+ {'ok', pid(), {pid(), any()}}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the (unregistered) supervisor of channel supervisors.
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+%% Start one rabbit_channel_sup child with the given start_link args.
+start_channel(Pid, Args) ->
+ supervisor2:start_child(Pid, [Args]).
+
+%%----------------------------------------------------------------------------
+
+%% simple_one_for_one: one temporary rabbit_channel_sup per
+%% start_channel/2 call; children are supervisors, so shutdown is
+%% 'infinity'.
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{channel_sup, {rabbit_channel_sup, start_link, []},
+ temporary, infinity, supervisor, [rabbit_channel_sup]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_client_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/1, start_link/2, start_link_worker/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 :: (rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(start_link/2 :: ({'local', atom()}, rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(start_link_worker/2 :: ({'local', atom()}, rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start an unregistered client supervisor whose children are started
+%% via the given {M, F, A} callback.
+start_link(Callback) ->
+ supervisor2:start_link(?MODULE, Callback).
+
+%% As start_link/1, but registered under SupName.
+start_link(SupName, Callback) ->
+ supervisor2:start_link(SupName, ?MODULE, Callback).
+
+%% As start_link/2, but children are workers rather than supervisors
+%% (selects the worker clause of init/1 below).
+start_link_worker(SupName, Callback) ->
+ supervisor2:start_link(SupName, ?MODULE, {Callback, worker}).
+
+%% Supervisor flavour: each child is itself a supervisor (shutdown
+%% 'infinity'); worker flavour: each child is a worker with a bounded
+%% ?MAX_WAIT shutdown. Both are simple_one_for_one with temporary
+%% children.
+init({M,F,A}) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}};
+init({{M,F,A}, worker}) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{client, {M,F,A}, temporary, ?MAX_WAIT, worker, [M]}]}}.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_command_assembler).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([analyze_frame/3, init/1, process/2]).
+
+%%----------------------------------------------------------------------------
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([frame/0]).
+
+-type(frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
+ ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
+ ?FRAME_TRACE | ?FRAME_HEARTBEAT).
+-type(protocol() :: rabbit_framing:protocol()).
+-type(method() :: rabbit_framing:amqp_method_record()).
+-type(class_id() :: rabbit_framing:amqp_class_id()).
+-type(weight() :: non_neg_integer()).
+-type(body_size() :: non_neg_integer()).
+-type(content() :: rabbit_types:undecoded_content()).
+
+-type(frame() ::
+ {'method', rabbit_framing:amqp_method_name(), binary()} |
+ {'content_header', class_id(), weight(), body_size(), binary()} |
+ {'content_body', binary()}).
+
+-type(state() ::
+ {'method', protocol()} |
+ {'content_header', method(), class_id(), protocol()} |
+ {'content_body', method(), body_size(), class_id(), protocol()}).
+
+-spec(analyze_frame/3 :: (frame_type(), binary(), protocol()) ->
+ frame() | 'heartbeat' | 'error').
+
+-spec(init/1 :: (protocol()) -> {ok, state()}).
+-spec(process/2 :: (frame(), state()) ->
+ {ok, state()} |
+ {ok, method(), state()} |
+ {ok, method(), content(), state()} |
+ {error, rabbit_types:amqp_error()}).
+
+-endif.
+
+%%--------------------------------------------------------------------
+
+%% Classify a raw frame payload by frame type. Returns a tagged tuple
+%% for method / content-header / content-body frames, 'heartbeat' for
+%% an empty heartbeat frame, and 'error' for anything else (including
+%% payloads that fail the binary patterns).
+analyze_frame(?FRAME_METHOD,
+ <<ClassId:16, MethodId:16, MethodFields/binary>>,
+ Protocol) ->
+ MethodName = Protocol:lookup_method_name({ClassId, MethodId}),
+ {method, MethodName, MethodFields};
+analyze_frame(?FRAME_HEADER,
+ <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>,
+ _Protocol) ->
+ {content_header, ClassId, Weight, BodySize, Properties};
+analyze_frame(?FRAME_BODY, Body, _Protocol) ->
+ {content_body, Body};
+analyze_frame(?FRAME_HEARTBEAT, <<>>, _Protocol) ->
+ heartbeat;
+analyze_frame(_Type, _Body, _Protocol) ->
+ error.
+
+init(Protocol) -> {ok, {method, Protocol}}.
+
+%% Frame-assembly state machine: method -> (optionally) content_header
+%% -> content_body* -> method. Clause order is significant: for each
+%% state the happy-path clauses must precede their unexpected_frame
+%% fallbacks. Note the content_header clauses only accept weight 0,
+%% and a zero BodySize completes the content immediately.
+process({method, MethodName, FieldsBin}, {method, Protocol}) ->
+ try
+ Method = Protocol:decode_method_fields(MethodName, FieldsBin),
+ case Protocol:method_has_content(MethodName) of
+ true -> {ClassId, _MethodId} = Protocol:method_id(MethodName),
+ {ok, {content_header, Method, ClassId, Protocol}};
+ false -> {ok, Method, {method, Protocol}}
+ end
+ catch exit:#amqp_error{} = Reason -> {error, Reason}
+ end;
+process(_Frame, {method, _Protocol}) ->
+ unexpected_frame("expected method frame, "
+ "got non method frame instead", [], none);
+process({content_header, ClassId, 0, 0, PropertiesBin},
+ {content_header, Method, ClassId, Protocol}) ->
+ Content = empty_content(ClassId, PropertiesBin, Protocol),
+ {ok, Method, Content, {method, Protocol}};
+process({content_header, ClassId, 0, BodySize, PropertiesBin},
+ {content_header, Method, ClassId, Protocol}) ->
+ Content = empty_content(ClassId, PropertiesBin, Protocol),
+ {ok, {content_body, Method, BodySize, Content, Protocol}};
+process({content_header, HeaderClassId, 0, _BodySize, _PropertiesBin},
+ {content_header, Method, ClassId, _Protocol}) ->
+ unexpected_frame("expected content header for class ~w, "
+ "got one for class ~w instead",
+ [ClassId, HeaderClassId], Method);
+process(_Frame, {content_header, Method, ClassId, _Protocol}) ->
+ unexpected_frame("expected content header for class ~w, "
+ "got non content header frame instead", [ClassId], Method);
+process({content_body, FragmentBin},
+ {content_body, Method, RemainingSize,
+ Content = #content{payload_fragments_rev = Fragments}, Protocol}) ->
+ NewContent = Content#content{
+ payload_fragments_rev = [FragmentBin | Fragments]},
+ case RemainingSize - size(FragmentBin) of
+ 0 -> {ok, Method, NewContent, {method, Protocol}};
+ Sz -> {ok, {content_body, Method, Sz, NewContent, Protocol}}
+ end;
+process(_Frame, {content_body, Method, _RemainingSize, _Content, _Protocol}) ->
+ unexpected_frame("expected content body, "
+ "got non content body frame instead", [], Method).
+
+%%--------------------------------------------------------------------
+
+%% Fresh #content{} with as-yet-undecoded properties and no body
+%% fragments accumulated.
+empty_content(ClassId, PropertiesBin, Protocol) ->
+ #content{class_id = ClassId,
+ properties = none,
+ properties_bin = PropertiesBin,
+ protocol = Protocol,
+ payload_fragments_rev = []}.
+
+%% Build an {error, #amqp_error{}} for an out-of-order frame; a method
+%% record argument is first reduced to its method name atom.
+unexpected_frame(Format, Params, Method) when is_atom(Method) ->
+ {error, rabbit_misc:amqp_error(unexpected_frame, Format, Params, Method)};
+unexpected_frame(Format, Params, Method) ->
+ unexpected_frame(Format, Params, rabbit_misc:method_record_type(Method)).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_connection_helper_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0]).
+-export([start_channel_sup_sup/1,
+ start_queue_collector/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_channel_sup_sup/1 :: (pid()) -> rabbit_types:ok_pid_or_error()).
+-spec(start_queue_collector/2 :: (pid(), rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error()).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the (initially childless) per-connection helper supervisor.
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+%% Dynamically add the channel_sup_sup supervisor under SupPid.
+start_channel_sup_sup(SupPid) ->
+ supervisor2:start_child(
+ SupPid,
+ {channel_sup_sup, {rabbit_channel_sup_sup, start_link, []},
+ intrinsic, infinity, supervisor, [rabbit_channel_sup_sup]}).
+
+%% Dynamically add the queue collector worker under SupPid.
+start_queue_collector(SupPid, Identity) ->
+ supervisor2:start_child(
+ SupPid,
+ {collector, {rabbit_queue_collector, start_link, [Identity]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_queue_collector]}).
+
+%%----------------------------------------------------------------------------
+
+%% one_for_one with no static children; children are added on demand
+%% by the start_* functions above.
+init([]) ->
+ {ok, {{one_for_one, 10, 10}, []}}.
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_connection_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, reader/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> {'ok', pid(), pid()}).
+-spec(reader/1 :: (pid()) -> pid()).
+
+-endif.
+
+%%--------------------------------------------------------------------------
+
+%% Start the per-connection supervision tree: a helper supervisor
+%% first, then the reader, returning both the top supervisor pid and
+%% the reader pid.
+start_link() ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ %% We need to get channels in the hierarchy here so they get shut
+ %% down after the reader, so the reader gets a chance to terminate
+ %% them cleanly. But for 1.0 readers we can't start the real
+ %% ch_sup_sup (because we don't know if we will be 0-9-1 or 1.0) -
+ %% so we add another supervisor into the hierarchy.
+ %%
+ %% This supervisor also acts as an intermediary for heartbeaters and
+ %% the queue collector process, since these must not be siblings of the
+ %% reader due to the potential for deadlock if they are added/restarted
+ %% whilst the supervision tree is shutting down.
+ {ok, HelperSup} =
+ supervisor2:start_child(
+ SupPid,
+ {helper_sup, {rabbit_connection_helper_sup, start_link, []},
+ intrinsic, infinity, supervisor, [rabbit_connection_helper_sup]}),
+ {ok, ReaderPid} =
+ supervisor2:start_child(
+ SupPid,
+ {reader, {rabbit_reader, start_link, [HelperSup]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}),
+ {ok, SupPid, ReaderPid}.
+
+%% Pid of this connection supervisor's reader child.
+reader(Pid) ->
+ Children = supervisor2:find_child(Pid, reader),
+ hd(Children).
+
+%%--------------------------------------------------------------------------
+
+%% one_for_all, no restarts: children are added dynamically in
+%% start_link/0.
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_control_main).
+-include("rabbit.hrl").
+
+-export([start/0, stop/0, parse_arguments/2, action/5,
+ sync_queue/1, cancel_sync_queue/1]).
+
+-define(RPC_TIMEOUT, infinity).
+-define(EXTERNAL_CHECK_INTERVAL, 1000).
+
+-define(QUIET_OPT, "-q").
+-define(NODE_OPT, "-n").
+-define(VHOST_OPT, "-p").
+-define(PRIORITY_OPT, "--priority").
+-define(APPLY_TO_OPT, "--apply-to").
+-define(RAM_OPT, "--ram").
+-define(OFFLINE_OPT, "--offline").
+
+-define(QUIET_DEF, {?QUIET_OPT, flag}).
+-define(NODE_DEF(Node), {?NODE_OPT, {option, Node}}).
+-define(VHOST_DEF, {?VHOST_OPT, {option, "/"}}).
+-define(PRIORITY_DEF, {?PRIORITY_OPT, {option, "0"}}).
+-define(APPLY_TO_DEF, {?APPLY_TO_OPT, {option, "all"}}).
+-define(RAM_DEF, {?RAM_OPT, flag}).
+-define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
+
+-define(GLOBAL_DEFS(Node), [?QUIET_DEF, ?NODE_DEF(Node)]).
+
+-define(COMMANDS,
+ [stop,
+ stop_app,
+ start_app,
+ wait,
+ reset,
+ force_reset,
+ rotate_logs,
+
+ {join_cluster, [?RAM_DEF]},
+ change_cluster_node_type,
+ update_cluster_nodes,
+ {forget_cluster_node, [?OFFLINE_DEF]},
+ cluster_status,
+ {sync_queue, [?VHOST_DEF]},
+ {cancel_sync_queue, [?VHOST_DEF]},
+
+ add_user,
+ delete_user,
+ change_password,
+ clear_password,
+ set_user_tags,
+ list_users,
+
+ add_vhost,
+ delete_vhost,
+ list_vhosts,
+ {set_permissions, [?VHOST_DEF]},
+ {clear_permissions, [?VHOST_DEF]},
+ {list_permissions, [?VHOST_DEF]},
+ list_user_permissions,
+
+ {set_parameter, [?VHOST_DEF]},
+ {clear_parameter, [?VHOST_DEF]},
+ {list_parameters, [?VHOST_DEF]},
+
+ {set_policy, [?VHOST_DEF, ?PRIORITY_DEF, ?APPLY_TO_DEF]},
+ {clear_policy, [?VHOST_DEF]},
+ {list_policies, [?VHOST_DEF]},
+
+ {list_queues, [?VHOST_DEF]},
+ {list_exchanges, [?VHOST_DEF]},
+ {list_bindings, [?VHOST_DEF]},
+ {list_connections, [?VHOST_DEF]},
+ list_channels,
+ {list_consumers, [?VHOST_DEF]},
+ status,
+ environment,
+ report,
+ set_cluster_name,
+ eval,
+
+ close_connection,
+ {trace_on, [?VHOST_DEF]},
+ {trace_off, [?VHOST_DEF]},
+ set_vm_memory_high_watermark
+ ]).
+
+-define(GLOBAL_QUERIES,
+ [{"Connections", rabbit_networking, connection_info_all,
+ connection_info_keys},
+ {"Channels", rabbit_channel, info_all, info_keys}]).
+
+-define(VHOST_QUERIES,
+ [{"Queues", rabbit_amqqueue, info_all, info_keys},
+ {"Exchanges", rabbit_exchange, info_all, info_keys},
+ {"Bindings", rabbit_binding, info_all, info_keys},
+ {"Consumers", rabbit_amqqueue, consumers_all, consumer_info_keys},
+ {"Permissions", rabbit_auth_backend_internal, list_vhost_permissions,
+ vhost_perms_info_keys},
+ {"Policies", rabbit_policy, list_formatted, info_keys},
+ {"Parameters", rabbit_runtime_parameters, list_formatted, info_keys}]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start/0 :: () -> no_return()).
+-spec(stop/0 :: () -> 'ok').
+-spec(action/5 ::
+ (atom(), node(), [string()], [{string(), any()}],
+ fun ((string(), [any()]) -> 'ok'))
+ -> 'ok').
+-spec(usage/0 :: () -> no_return()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% rabbitmqctl entry point: parse the command line, run the requested
+%% action against the target node, report the outcome and exit with an
+%% appropriate status code (0 success, 1 usage, 2 failure).
+start() ->
+ {ok, [[NodeStr|_]|_]} = init:get_argument(nodename),
+ {Command, Opts, Args} =
+ case parse_arguments(init:get_plain_arguments(), NodeStr) of
+ {ok, Res} -> Res;
+ no_command -> print_error("could not recognise command", []),
+ usage()
+ end,
+ Quiet = proplists:get_bool(?QUIET_OPT, Opts),
+ Node = proplists:get_value(?NODE_OPT, Opts),
+ %% -q suppresses progress messages entirely.
+ Inform = case Quiet of
+ true -> fun (_Format, _Args1) -> ok end;
+ false -> fun (Format, Args1) ->
+ io:format(Format ++ " ...~n", Args1)
+ end
+ end,
+ PrintInvalidCommandError =
+ fun () ->
+ print_error("invalid command '~s'",
+ [string:join([atom_to_list(Command) | Args], " ")])
+ end,
+
+ %% The reason we don't use a try/catch here is that rpc:call turns
+ %% thrown errors into normal return values
+ case catch action(Command, Node, Args, Opts, Inform) of
+ ok ->
+ case Quiet of
+ true -> ok;
+ false -> io:format("...done.~n")
+ end,
+ rabbit_misc:quit(0);
+ {ok, Info} ->
+ case Quiet of
+ true -> ok;
+ false -> io:format("...done (~p).~n", [Info])
+ end,
+ rabbit_misc:quit(0);
+ {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> %% < R15
+ PrintInvalidCommandError(),
+ usage();
+ {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} -> %% >= R15
+ PrintInvalidCommandError(),
+ usage();
+ {'EXIT', {badarg, _}} ->
+ print_error("invalid parameter: ~p", [Args]),
+ usage();
+ {error, {Problem, Reason}} when is_atom(Problem), is_binary(Reason) ->
+ %% We handle this common case specially to avoid ~p since
+ %% that has i18n issues
+ print_error("~s: ~s", [Problem, Reason]),
+ rabbit_misc:quit(2);
+ {error, Reason} ->
+ print_error("~p", [Reason]),
+ rabbit_misc:quit(2);
+ {error_string, Reason} ->
+ print_error("~s", [Reason]),
+ rabbit_misc:quit(2);
+ {badrpc, {'EXIT', Reason}} ->
+ print_error("~p", [Reason]),
+ rabbit_misc:quit(2);
+ {badrpc, Reason} ->
+ print_error("unable to connect to node ~w: ~w", [Node, Reason]),
+ print_badrpc_diagnostics([Node]),
+ rabbit_misc:quit(2);
+ {badrpc_multi, Reason, Nodes} ->
+ print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
+ print_badrpc_diagnostics(Nodes),
+ rabbit_misc:quit(2);
+ Other ->
+ print_error("~p", [Other]),
+ rabbit_misc:quit(2)
+ end.
+
+fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args).
+
+%% Print one titled report section with no vhost scoping.
+print_report(Node, {Descr, Module, InfoFun, KeysFun}) ->
+ io:format("~s:~n", [Descr]),
+ print_report0(Node, {Module, InfoFun, KeysFun}, []).
+
+%% Print one titled report section scoped to the given vhost argument.
+print_report(Node, {Descr, Module, InfoFun, KeysFun}, VHostArg) ->
+ io:format("~s on ~s:~n", [Descr, VHostArg]),
+ print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg).
+
+%% Fetch InfoFun's results from Node and tabulate them under a header
+%% row of key names; any empty or non-list result (including rpc
+%% failures) prints nothing but the trailing blank line.
+print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg) ->
+ case rpc_call(Node, Module, InfoFun, VHostArg) of
+ [_|_] = Results -> InfoItems = rpc_call(Node, Module, KeysFun, []),
+ display_row([atom_to_list(I) || I <- InfoItems]),
+ display_info_list(Results, InfoItems);
+ _ -> ok
+ end,
+ io:nl().
+
+print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args).
+
+%% Print connectivity diagnostics for the given nodes to stderr.
+print_badrpc_diagnostics(Nodes) ->
+ fmt_stderr(rabbit_nodes:diagnostics(Nodes), []).
+
+%% Nothing to do on stop.
+stop() ->
+ ok.
+
+%% Print the generated usage text and exit with status 1.
+usage() ->
+ io:format("~s", [rabbit_ctl_usage:usage()]),
+ rabbit_misc:quit(1).
+
+%% Parse the rabbitmqctl command line against ?COMMANDS, normalising
+%% the -n option's value into a proper node name via
+%% rabbit_nodes:make/1; all other options pass through unchanged.
+parse_arguments(CmdLine, NodeStr) ->
+ case rabbit_misc:parse_arguments(
+ ?COMMANDS, ?GLOBAL_DEFS(NodeStr), CmdLine) of
+ {ok, {Cmd, Opts0, Args}} ->
+ Opts = [case K of
+ ?NODE_OPT -> {?NODE_OPT, rabbit_nodes:make(V)};
+ _ -> {K, V}
+ end || {K, V} <- Opts0],
+ {ok, {Cmd, Opts, Args}};
+ E ->
+ E
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% action(Command, Node, Args, Opts, Inform) — one clause per CLI command.
+%% Inform/2 echoes what is about to happen; clauses then delegate to the
+%% target node either via call/2 (list args converted to UTF-8 binaries)
+%% or rpc_call/4 (args passed through unchanged). Returns ok, an rpc
+%% result, or an {error*, _} tuple handled by the caller.
+action(stop, Node, Args, _Opts, Inform) ->
+ Inform("Stopping and halting node ~p", [Node]),
+ Res = call(Node, {rabbit, stop_and_halt, []}),
+ %% With an optional pid-file argument, also wait for the OS process to
+ %% die; more than one extra argument is a usage error.
+ case {Res, Args} of
+ {ok, [PidFile]} -> wait_for_process_death(
+ read_pid_file(PidFile, false));
+ {ok, [_, _| _]} -> exit({badarg, Args});
+ _ -> ok
+ end,
+ Res;
+
+action(stop_app, Node, [], _Opts, Inform) ->
+ Inform("Stopping node ~p", [Node]),
+ call(Node, {rabbit, stop, []});
+
+action(start_app, Node, [], _Opts, Inform) ->
+ Inform("Starting node ~p", [Node]),
+ call(Node, {rabbit, start, []});
+
+action(reset, Node, [], _Opts, Inform) ->
+ Inform("Resetting node ~p", [Node]),
+ call(Node, {rabbit_mnesia, reset, []});
+
+action(force_reset, Node, [], _Opts, Inform) ->
+ Inform("Forcefully resetting node ~p", [Node]),
+ call(Node, {rabbit_mnesia, force_reset, []});
+
+%% Clustering commands: node type defaults to disc unless --ram is given.
+action(join_cluster, Node, [ClusterNodeS], Opts, Inform) ->
+ ClusterNode = list_to_atom(ClusterNodeS),
+ NodeType = case proplists:get_bool(?RAM_OPT, Opts) of
+ true -> ram;
+ false -> disc
+ end,
+ Inform("Clustering node ~p with ~p", [Node, ClusterNode]),
+ rpc_call(Node, rabbit_mnesia, join_cluster, [ClusterNode, NodeType]);
+
+%% "disk" is accepted as a synonym for "disc".
+action(change_cluster_node_type, Node, ["ram"], _Opts, Inform) ->
+ Inform("Turning ~p into a ram node", [Node]),
+ rpc_call(Node, rabbit_mnesia, change_cluster_node_type, [ram]);
+action(change_cluster_node_type, Node, [Type], _Opts, Inform)
+ when Type =:= "disc" orelse Type =:= "disk" ->
+ Inform("Turning ~p into a disc node", [Node]),
+ rpc_call(Node, rabbit_mnesia, change_cluster_node_type, [disc]);
+
+action(update_cluster_nodes, Node, [ClusterNodeS], _Opts, Inform) ->
+ ClusterNode = list_to_atom(ClusterNodeS),
+ Inform("Updating cluster nodes for ~p from ~p", [Node, ClusterNode]),
+ rpc_call(Node, rabbit_mnesia, update_cluster_nodes, [ClusterNode]);
+
+action(forget_cluster_node, Node, [ClusterNodeS], Opts, Inform) ->
+ ClusterNode = list_to_atom(ClusterNodeS),
+ RemoveWhenOffline = proplists:get_bool(?OFFLINE_OPT, Opts),
+ Inform("Removing node ~p from cluster", [ClusterNode]),
+ rpc_call(Node, rabbit_mnesia, forget_cluster_node,
+ [ClusterNode, RemoveWhenOffline]);
+
+%% Mirror-sync commands resolve the queue name in the vhost given by
+%% --vhost (or its default) and run via this module's sync_queue/1 /
+%% cancel_sync_queue/1 on the remote node.
+action(sync_queue, Node, [Q], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ QName = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
+ Inform("Synchronising ~s", [rabbit_misc:rs(QName)]),
+ rpc_call(Node, rabbit_control_main, sync_queue, [QName]);
+
+action(cancel_sync_queue, Node, [Q], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ QName = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
+ Inform("Stopping synchronising ~s", [rabbit_misc:rs(QName)]),
+ rpc_call(Node, rabbit_control_main, cancel_sync_queue, [QName]);
+
+%% wait: block until rabbit (and plugins) — or a named application — is
+%% running on Node, while the OS process from the pid file stays alive.
+action(wait, Node, [PidFile], _Opts, Inform) ->
+ Inform("Waiting for ~p", [Node]),
+ wait_for_application(Node, PidFile, rabbit_and_plugins, Inform);
+action(wait, Node, [PidFile, App], _Opts, Inform) ->
+ Inform("Waiting for ~p on ~p", [App, Node]),
+ wait_for_application(Node, PidFile, list_to_atom(App), Inform);
+
+action(status, Node, [], _Opts, Inform) ->
+ Inform("Status of node ~p", [Node]),
+ display_call_result(Node, {rabbit, status, []});
+
+action(cluster_status, Node, [], _Opts, Inform) ->
+ Inform("Cluster status of node ~p", [Node]),
+ display_call_result(Node, {rabbit_mnesia, status, []});
+
+action(environment, Node, _App, _Opts, Inform) ->
+ Inform("Application environment of node ~p", [Node]),
+ display_call_result(Node, {rabbit, environment, []});
+
+%% Without a suffix this reopens the current logs rather than rotating.
+action(rotate_logs, Node, [], _Opts, Inform) ->
+ Inform("Reopening logs for node ~p", [Node]),
+ call(Node, {rabbit, rotate_logs, [""]});
+action(rotate_logs, Node, Args = [Suffix], _Opts, Inform) ->
+ Inform("Rotating logs to files with suffix \"~s\"", [Suffix]),
+ call(Node, {rabbit, rotate_logs, Args});
+
+action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) ->
+ Inform("Closing connection \"~s\"", [PidStr]),
+ rpc_call(Node, rabbit_networking, close_connection,
+ [rabbit_misc:string_to_pid(PidStr), Explanation]);
+
+%% User management.
+action(add_user, Node, Args = [Username, _Password], _Opts, Inform) ->
+ Inform("Creating user \"~s\"", [Username]),
+ call(Node, {rabbit_auth_backend_internal, add_user, Args});
+
+action(delete_user, Node, Args = [_Username], _Opts, Inform) ->
+ Inform("Deleting user \"~s\"", Args),
+ call(Node, {rabbit_auth_backend_internal, delete_user, Args});
+
+action(change_password, Node, Args = [Username, _Newpassword], _Opts, Inform) ->
+ Inform("Changing password for user \"~s\"", [Username]),
+ call(Node, {rabbit_auth_backend_internal, change_password, Args});
+
+action(clear_password, Node, Args = [Username], _Opts, Inform) ->
+ Inform("Clearing password for user \"~s\"", [Username]),
+ call(Node, {rabbit_auth_backend_internal, clear_password, Args});
+
+action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) ->
+ Tags = [list_to_atom(T) || T <- TagsStr],
+ Inform("Setting tags for user \"~s\" to ~p", [Username, Tags]),
+ rpc_call(Node, rabbit_auth_backend_internal, set_tags,
+ [list_to_binary(Username), Tags]);
+
+action(list_users, Node, [], _Opts, Inform) ->
+ Inform("Listing users", []),
+ display_info_list(
+ call(Node, {rabbit_auth_backend_internal, list_users, []}),
+ rabbit_auth_backend_internal:user_info_keys());
+
+%% Vhost management.
+action(add_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
+ Inform("Creating vhost \"~s\"", Args),
+ call(Node, {rabbit_vhost, add, Args});
+
+action(delete_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
+ Inform("Deleting vhost \"~s\"", Args),
+ call(Node, {rabbit_vhost, delete, Args});
+
+action(list_vhosts, Node, Args, _Opts, Inform) ->
+ Inform("Listing vhosts", []),
+ ArgAtoms = default_if_empty(Args, [name]),
+ display_info_list(call(Node, {rabbit_vhost, info_all, []}), ArgAtoms);
+
+action(list_user_permissions, Node, Args = [_Username], _Opts, Inform) ->
+ Inform("Listing permissions for user ~p", Args),
+ display_info_list(call(Node, {rabbit_auth_backend_internal,
+ list_user_permissions, Args}),
+ rabbit_auth_backend_internal:user_perms_info_keys());
+
+%% The list_* commands share a pattern: optional column names as Args
+%% (with per-command defaults), then a tabular dump of the info proplists.
+action(list_queues, Node, Args, Opts, Inform) ->
+ Inform("Listing queues", []),
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ ArgAtoms = default_if_empty(Args, [name, messages]),
+ display_info_list(rpc_call(Node, rabbit_amqqueue, info_all,
+ [VHostArg, ArgAtoms]),
+ ArgAtoms);
+
+action(list_exchanges, Node, Args, Opts, Inform) ->
+ Inform("Listing exchanges", []),
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ ArgAtoms = default_if_empty(Args, [name, type]),
+ display_info_list(rpc_call(Node, rabbit_exchange, info_all,
+ [VHostArg, ArgAtoms]),
+ ArgAtoms);
+
+action(list_bindings, Node, Args, Opts, Inform) ->
+ Inform("Listing bindings", []),
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ ArgAtoms = default_if_empty(Args, [source_name, source_kind,
+ destination_name, destination_kind,
+ routing_key, arguments]),
+ display_info_list(rpc_call(Node, rabbit_binding, info_all,
+ [VHostArg, ArgAtoms]),
+ ArgAtoms);
+
+action(list_connections, Node, Args, _Opts, Inform) ->
+ Inform("Listing connections", []),
+ ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
+ display_info_list(rpc_call(Node, rabbit_networking, connection_info_all,
+ [ArgAtoms]),
+ ArgAtoms);
+
+action(list_channels, Node, Args, _Opts, Inform) ->
+ Inform("Listing channels", []),
+ ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
+ messages_unacknowledged]),
+ display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms]),
+ ArgAtoms);
+
+action(list_consumers, Node, _Args, Opts, Inform) ->
+ Inform("Listing consumers", []),
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ display_info_list(rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]),
+ rabbit_amqqueue:consumer_info_keys());
+
+action(trace_on, Node, [], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ Inform("Starting tracing for vhost \"~s\"", [VHost]),
+ rpc_call(Node, rabbit_trace, start, [list_to_binary(VHost)]);
+
+action(trace_off, Node, [], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ Inform("Stopping tracing for vhost \"~s\"", [VHost]),
+ rpc_call(Node, rabbit_trace, stop, [list_to_binary(VHost)]);
+
+action(set_vm_memory_high_watermark, Node, [Arg], _Opts, Inform) ->
+ %% Append ".0" when no decimal point, since list_to_float/1 rejects
+ %% plain integer strings like "1".
+ Frac = list_to_float(case string:chr(Arg, $.) of
+ 0 -> Arg ++ ".0";
+ _ -> Arg
+ end),
+ Inform("Setting memory threshold on ~p to ~p", [Node, Frac]),
+ rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, [Frac]);
+
+%% Permission management (scoped by --vhost).
+action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ Inform("Setting permissions for user \"~s\" in vhost \"~s\"",
+ [Username, VHost]),
+ call(Node, {rabbit_auth_backend_internal, set_permissions,
+ [Username, VHost, CPerm, WPerm, RPerm]});
+
+action(clear_permissions, Node, [Username], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ Inform("Clearing permissions for user \"~s\" in vhost \"~s\"",
+ [Username, VHost]),
+ call(Node, {rabbit_auth_backend_internal, clear_permissions,
+ [Username, VHost]});
+
+action(list_permissions, Node, [], Opts, Inform) ->
+ VHost = proplists:get_value(?VHOST_OPT, Opts),
+ Inform("Listing permissions in vhost \"~s\"", [VHost]),
+ display_info_list(call(Node, {rabbit_auth_backend_internal,
+ list_vhost_permissions, [VHost]}),
+ rabbit_auth_backend_internal:vhost_perms_info_keys());
+
+%% Runtime parameters and policies.
+action(set_parameter, Node, [Component, Key, Value], Opts, Inform) ->
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ Inform("Setting runtime parameter ~p for component ~p to ~p",
+ [Key, Component, Value]),
+ rpc_call(
+ Node, rabbit_runtime_parameters, parse_set,
+ [VHostArg, list_to_binary(Component), list_to_binary(Key), Value, none]);
+
+action(clear_parameter, Node, [Component, Key], Opts, Inform) ->
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ Inform("Clearing runtime parameter ~p for component ~p", [Key, Component]),
+ rpc_call(Node, rabbit_runtime_parameters, clear, [VHostArg,
+ list_to_binary(Component),
+ list_to_binary(Key)]);
+
+action(list_parameters, Node, [], Opts, Inform) ->
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ Inform("Listing runtime parameters", []),
+ display_info_list(
+ rpc_call(Node, rabbit_runtime_parameters, list_formatted, [VHostArg]),
+ rabbit_runtime_parameters:info_keys());
+
+action(set_policy, Node, [Key, Pattern, Defn], Opts, Inform) ->
+ Msg = "Setting policy ~p for pattern ~p to ~p with priority ~p",
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ PriorityArg = proplists:get_value(?PRIORITY_OPT, Opts),
+ ApplyToArg = list_to_binary(proplists:get_value(?APPLY_TO_OPT, Opts)),
+ Inform(Msg, [Key, Pattern, Defn, PriorityArg]),
+ rpc_call(
+ Node, rabbit_policy, parse_set,
+ [VHostArg, list_to_binary(Key), Pattern, Defn, PriorityArg, ApplyToArg]);
+
+action(clear_policy, Node, [Key], Opts, Inform) ->
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ Inform("Clearing policy ~p", [Key]),
+ rpc_call(Node, rabbit_policy, delete, [VHostArg, list_to_binary(Key)]);
+
+action(list_policies, Node, [], Opts, Inform) ->
+ VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+ Inform("Listing policies", []),
+ display_info_list(rpc_call(Node, rabbit_policy, list_formatted, [VHostArg]),
+ rabbit_policy:info_keys());
+
+%% report: status + cluster_status + environment for every running
+%% cluster node, then the global and per-vhost report queries.
+action(report, Node, _Args, _Opts, Inform) ->
+ Inform("Reporting server status on ~p~n~n", [erlang:universaltime()]),
+ [begin ok = action(Action, N, [], [], Inform), io:nl() end ||
+ N <- unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]),
+ Action <- [status, cluster_status, environment]],
+ VHosts = unsafe_rpc(Node, rabbit_vhost, list, []),
+ [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES],
+ [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts],
+ ok;
+
+action(set_cluster_name, Node, [Name], _Opts, Inform) ->
+ Inform("Setting cluster name to ~s", [Name]),
+ rpc_call(Node, rabbit_nodes, set_cluster_name, [list_to_binary(Name)]);
+
+%% eval: scan/parse the expression locally, evaluate it on the remote
+%% node, and print the resulting value.
+action(eval, Node, [Expr], _Opts, _Inform) ->
+ case erl_scan:string(Expr) of
+ {ok, Scanned, _} ->
+ case erl_parse:parse_exprs(Scanned) of
+ {ok, Parsed} -> {value, Value, _} =
+ unsafe_rpc(
+ Node, erl_eval, exprs, [Parsed, []]),
+ io:format("~p~n", [Value]),
+ ok;
+ {error, E} -> {error_string, format_parse_error(E)}
+ end;
+ {error, E, _} ->
+ {error_string, format_parse_error(E)}
+ end.
+
+%% Flatten an erl_scan/erl_parse error tuple into a plain error string.
+format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
+
+%% Invoked via rpc from the sync_queue CLI action; triggers mirror
+%% synchronisation on the queue process.
+sync_queue(Q) ->
+ rabbit_amqqueue:with(
+ Q, fun(#amqqueue{pid = QPid}) -> rabbit_amqqueue:sync_mirrors(QPid) end).
+
+%% Invoked via rpc from the cancel_sync_queue CLI action; aborts a
+%% running mirror synchronisation on the queue process.
+cancel_sync_queue(Q) ->
+ rabbit_amqqueue:with(
+ Q, fun(#amqqueue{pid = QPid}) ->
+ rabbit_amqqueue:cancel_sync_mirrors(QPid)
+ end).
+
+%%----------------------------------------------------------------------------
+
+%% Read the OS pid from PidFile (waiting for the file to appear), report
+%% it, then poll until Application is up on Node.
+wait_for_application(Node, PidFile, Application, Inform) ->
+ Pid = read_pid_file(PidFile, true),
+ Inform("pid is ~s", [Pid]),
+ wait_for_application(Node, Pid, Application).
+
+%% rabbit_and_plugins means "full broker startup"; any other atom is
+%% treated as a named OTP application to wait for.
+wait_for_application(Node, Pid, rabbit_and_plugins) ->
+ wait_for_startup(Node, Pid);
+wait_for_application(Node, Pid, Application) ->
+ while_process_is_alive(
+ Node, Pid, fun() -> rabbit_nodes:is_running(Node, Application) end).
+
+%% Poll rabbit:await_startup/0 on Node while the OS process stays alive.
+wait_for_startup(Node, Pid) ->
+ while_process_is_alive(
+ Node, Pid, fun() -> rpc:call(Node, rabbit, await_startup, []) =:= ok end).
+
+%% Repeat Activity/0 every ?EXTERNAL_CHECK_INTERVAL until it returns true,
+%% giving up with {error, process_not_running} if the OS process (Pid is
+%% an OS pid string) disappears first.
+while_process_is_alive(Node, Pid, Activity) ->
+ case process_up(Pid) of
+ true -> case Activity() of
+ true -> ok;
+ false -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
+ while_process_is_alive(Node, Pid, Activity)
+ end;
+ false -> {error, process_not_running}
+ end.
+
+%% Block (polling) until the OS process with the given pid string exits.
+wait_for_process_death(Pid) ->
+ case process_up(Pid) of
+ true -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
+ wait_for_process_death(Pid);
+ false -> ok
+ end.
+
+%% Read the first whitespace-delimited token from PidFile and return it
+%% as a *string* (callers feed it to ps/tasklist). The list_to_integer
+%% call is validation only — its result is discarded; a non-numeric
+%% token exits with garbage_in_pid_file. With Wait = true, a missing
+%% file is retried until it appears; other read errors always exit.
+read_pid_file(PidFile, Wait) ->
+ case {file:read_file(PidFile), Wait} of
+ {{ok, Bin}, _} ->
+ S = binary_to_list(Bin),
+ {match, [PidS]} = re:run(S, "[^\\s]+",
+ [{capture, all, list}]),
+ try list_to_integer(PidS)
+ catch error:badarg ->
+ exit({error, {garbage_in_pid_file, PidFile}})
+ end,
+ PidS;
+ {{error, enoent}, true} ->
+ timer:sleep(?EXTERNAL_CHECK_INTERVAL),
+ read_pid_file(PidFile, Wait);
+ {{error, _} = E, _} ->
+ exit({error, {could_not_read_pid, E}})
+ end.
+
+% Test using some OS clunkiness since we shouldn't trust
+% rpc:call(os, getpid, []) at this point
+%% Is the OS process with pid string Pid alive? Unix: ps exit status 0.
+%% Windows: tasklist output mentioning erl.exe for that pid.
+process_up(Pid) ->
+ with_os([{unix, fun () ->
+ run_ps(Pid) =:= 0
+ end},
+ {win32, fun () ->
+ Cmd = "tasklist /nh /fi \"pid eq " ++ Pid ++ "\" ",
+ Res = rabbit_misc:os_cmd(Cmd ++ "2>&1"),
+ case re:run(Res, "erl\\.exe", [{capture, none}]) of
+ match -> true;
+ _ -> false
+ end
+ end}]).
+
+%% Run the handler fun registered for the current OS family
+%% ({unix, _} | {win32, _}); throw if none is registered.
+with_os(Handlers) ->
+    {OsFamily, _OsName} = os:type(),
+    case lists:keyfind(OsFamily, 1, Handlers) of
+        {OsFamily, Handler} -> Handler();
+        false               -> throw({unsupported_os, OsFamily})
+    end.
+
+%% Run "ps -p <Pid>" via a port and return its exit status (0 = alive).
+run_ps(Pid) ->
+ Port = erlang:open_port({spawn, "ps -p " ++ Pid},
+ [exit_status, {line, 16384},
+ use_stdio, stderr_to_stdout]),
+ exit_loop(Port).
+
+%% Drain port output until the exit_status message arrives; return the
+%% exit code. No 'after' clause: relies on the port always terminating.
+exit_loop(Port) ->
+ receive
+ {Port, {exit_status, Rc}} -> Rc;
+ {Port, _} -> exit_loop(Port)
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% Column-name handling for the list_* commands: no arguments means use
+%% the command's default columns, otherwise convert each name to an atom.
+default_if_empty([], Default) ->
+    Default;
+default_if_empty(List, _Default) when is_list(List) ->
+    [list_to_atom(Item) || Item <- List].
+
+%% Render one tab-separated row per info proplist, sorted; columns are
+%% selected and ordered by InfoItemKeys. Non-list results (e.g.
+%% {badrpc, _}) are passed through untouched for the caller to handle.
+display_info_list(Results, InfoItemKeys) when is_list(Results) ->
+    RenderRow =
+        fun (Result) ->
+                Cells = [format_info_item(proplists:get_value(Key, Result))
+                         || Key <- InfoItemKeys],
+                display_row(Cells)
+        end,
+    lists:foreach(RenderRow, lists:sort(Results)),
+    ok;
+display_info_list(Other, _InfoItemKeys) ->
+    Other.
+
+%% Print a list of pre-formatted cells as one tab-separated line.
+display_row(Row) ->
+ io:fwrite(string:join(Row, "\t")),
+ io:nl().
+
+%% Guard macros for recognising IPv4 (4 octets) and IPv6 (8 groups)
+%% address tuples below.
+-define(IS_U8(X), (X >= 0 andalso X =< 255)).
+-define(IS_U16(X), (X >= 0 andalso X =< 65535)).
+
+%% Render a single info value as a display string (or iolist), by shape:
+%% resource names and binaries are escaped; IP tuples are printed in
+%% address notation; pids/atoms get their natural text form; AMQP tables
+%% are prettified; other lists are rendered element-wise; anything else
+%% falls back to ~w.
+format_info_item(#resource{name = Name}) ->
+ escape(Name);
+format_info_item({N1, N2, N3, N4} = Value) when
+ ?IS_U8(N1), ?IS_U8(N2), ?IS_U8(N3), ?IS_U8(N4) ->
+ rabbit_misc:ntoa(Value);
+format_info_item({K1, K2, K3, K4, K5, K6, K7, K8} = Value) when
+ ?IS_U16(K1), ?IS_U16(K2), ?IS_U16(K3), ?IS_U16(K4),
+ ?IS_U16(K5), ?IS_U16(K6), ?IS_U16(K7), ?IS_U16(K8) ->
+ rabbit_misc:ntoa(Value);
+format_info_item(Value) when is_pid(Value) ->
+ rabbit_misc:pid_to_string(Value);
+format_info_item(Value) when is_binary(Value) ->
+ escape(Value);
+format_info_item(Value) when is_atom(Value) ->
+ escape(atom_to_list(Value));
+%% An AMQP table: list of {Key :: binary(), Type :: atom(), Value}.
+format_info_item([{TableEntryKey, TableEntryType, _TableEntryValue} | _] =
+ Value) when is_binary(TableEntryKey) andalso
+ is_atom(TableEntryType) ->
+ io_lib:format("~1000000000000p", [prettify_amqp_table(Value)]);
+%% A general list of structured values: format each element and join
+%% with ", " (nthtail(2, ...) strips the leading separator).
+format_info_item([T | _] = Value)
+ when is_tuple(T) orelse is_pid(T) orelse is_binary(T) orelse is_atom(T) orelse
+ is_list(T) ->
+ "[" ++
+ lists:nthtail(2, lists:append(
+ [", " ++ format_info_item(E) || E <- Value])) ++ "]";
+format_info_item(Value) ->
+ io_lib:format("~w", [Value]).
+
+%% Execute MFA on Node and pretty-print the result; badrpc is thrown so
+%% the top-level error handler reports it.
+display_call_result(Node, MFA) ->
+ case call(Node, MFA) of
+ {badrpc, _} = Res -> throw(Res);
+ Res -> io:format("~p~n", [Res]),
+ ok
+ end.
+
+%% Like rpc_call/4, but a {badrpc, _} result is thrown rather than
+%% returned ("unsafe" = may throw).
+unsafe_rpc(Node, Mod, Fun, Args) ->
+    Result = rpc_call(Node, Mod, Fun, Args),
+    case Result of
+        {badrpc, _Reason} -> throw(Result);
+        _                 -> Result
+    end.
+
+%% rpc wrapper for commands whose arguments are CLI strings: each
+%% argument is converted to a validated UTF-8 binary first.
+call(Node, {Mod, Fun, Args}) ->
+ rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)).
+
+%% Convert a string to a binary, throwing {error, {not_utf_8, L}} if the
+%% bytes are not valid UTF-8.
+list_to_binary_utf8(L) ->
+ B = list_to_binary(L),
+ case rabbit_binary_parser:validate_utf8(B) of
+ ok -> B;
+ error -> throw({error, {not_utf_8, L}})
+ end.
+
+%% All remote calls go through here so they share ?RPC_TIMEOUT.
+rpc_call(Node, Mod, Fun, Args) ->
+ rpc:call(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
+
+%% escape does C-style backslash escaping of non-printable ASCII
+%% characters. We don't escape characters above 127, since they may
+%% form part of UTF-8 strings.
+
+%% Normalise atoms/binaries to a char list, then escape. The list is
+%% reversed here because escape_char/2 builds its accumulator by
+%% prepending, restoring the original order.
+escape(Atom) when is_atom(Atom) -> escape(atom_to_list(Atom));
+escape(Bin) when is_binary(Bin) -> escape(binary_to_list(Bin));
+escape(L) when is_list(L) -> escape_char(lists:reverse(L), []).
+
+%% Escape a *reversed* char list, prepending each escaped char onto Acc
+%% so the output comes out in the original order. Backslash is doubled;
+%% printable ASCII (>= 32, except DEL) passes through; everything else
+%% becomes a C-style three-digit octal escape. Bytes above 127 are left
+%% alone upstream since they may be part of UTF-8 sequences.
+escape_char(Chars, Acc) ->
+    lists:foldl(fun (C, A) -> escape_one(C) ++ A end, Acc, Chars).
+
+%% Escape a single character into its output character sequence.
+escape_one($\\) ->
+    [$\\, $\\];
+escape_one(C) when C >= 32, C /= 127 ->
+    [C];
+escape_one(C) ->
+    [$\\, $0 + (C bsr 6), $0 + (C band 8#070 bsr 3), $0 + (C band 7)].
+
+%% Turn an AMQP table ({Key, Type, Value} triples) into readable
+%% {Key, Value} pairs, escaping keys and prettifying values by type.
+prettify_amqp_table(Table) ->
+ [{escape(K), prettify_typed_amqp_value(T, V)} || {K, T, V} <- Table].
+
+%% Prettify one typed AMQP value; tables and arrays recurse, longstrs
+%% are escaped, all other types pass through unchanged.
+prettify_typed_amqp_value(longstr, Value) -> escape(Value);
+prettify_typed_amqp_value(table, Value) -> prettify_amqp_table(Value);
+prettify_typed_amqp_value(array, Value) -> [prettify_typed_amqp_value(T, V) ||
+ {T, V} <- Value];
+prettify_typed_amqp_value(_Type, Value) -> Value.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_dead_letter).
+
+-export([publish/5]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type reason() :: 'expired' | 'rejected' | 'maxlen'.
+
+-spec publish(rabbit_types:message(), reason(), rabbit_types:exchange(),
+ 'undefined' | binary(), rabbit_amqqueue:name()) -> 'ok'.
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Dead-letter Msg from queue QName to exchange X (optionally overriding
+%% the routing key with RK). Routes the annotated message, drops targets
+%% that would form an automatic dead-letter cycle (logging each cycle at
+%% most once per process), and delivers to the remaining queues.
+publish(Msg, Reason, X, RK, QName) ->
+ DLMsg = make_msg(Msg, Reason, X#exchange.name, RK, QName),
+ Delivery = rabbit_basic:delivery(false, false, DLMsg, undefined),
+ {Queues, Cycles} = detect_cycles(Reason, DLMsg,
+ rabbit_exchange:route(X, Delivery)),
+ lists:foreach(fun log_cycle_once/1, Cycles),
+ rabbit_amqqueue:deliver(rabbit_amqqueue:lookup(Queues), Delivery),
+ ok.
+
+%% Build the dead-lettered copy of Msg: retarget it at the DLX exchange,
+%% record death metadata in an "x-death" header entry, and strip the
+%% per-message TTL (preserving it in "original-expiration"). When an
+%% explicit dead-letter routing key RK is given it replaces the original
+%% keys and any CC header is removed so it cannot re-route the message.
+make_msg(Msg = #basic_message{content = Content,
+ exchange_name = Exchange,
+ routing_keys = RoutingKeys},
+ Reason, DLX, RK, #resource{name = QName}) ->
+ {DeathRoutingKeys, HeadersFun1} =
+ case RK of
+ undefined -> {RoutingKeys, fun (H) -> H end};
+ _ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end}
+ end,
+ ReasonBin = list_to_binary(atom_to_list(Reason)),
+ TimeSec = rabbit_misc:now_ms() div 1000,
+ PerMsgTTL = per_msg_ttl_header(Content#content.properties),
+ HeadersFun2 =
+ fun (Headers) ->
+ %% The first routing key is the one specified in the
+ %% basic.publish; all others are CC or BCC keys.
+ RKs = [hd(RoutingKeys) | rabbit_basic:header_routes(Headers)],
+ RKs1 = [{longstr, Key} || Key <- RKs],
+ Info = [{<<"reason">>, longstr, ReasonBin},
+ {<<"queue">>, longstr, QName},
+ {<<"time">>, timestamp, TimeSec},
+ {<<"exchange">>, longstr, Exchange#resource.name},
+ {<<"routing-keys">>, array, RKs1}] ++ PerMsgTTL,
+ HeadersFun1(rabbit_basic:prepend_table_header(<<"x-death">>,
+ Info, Headers))
+ end,
+ Content1 = #content{properties = Props} =
+ rabbit_basic:map_headers(HeadersFun2, Content),
+ %% Clear per-message TTL so the dead-lettered copy does not expire
+ %% again on the same timer.
+ Content2 = Content1#content{properties =
+ Props#'P_basic'{expiration = undefined}},
+ Msg#basic_message{exchange_name = DLX,
+ id = rabbit_guid:gen(),
+ routing_keys = DeathRoutingKeys,
+ content = Content2}.
+
+%% Preserve a per-message TTL (if any) as an "original-expiration" entry
+%% for the x-death record; the final clause is a defensive catch-all for
+%% non-#'P_basic' properties.
+per_msg_ttl_header(#'P_basic'{expiration = undefined}) ->
+ [];
+per_msg_ttl_header(#'P_basic'{expiration = Expiration}) ->
+ [{<<"original-expiration">>, longstr, Expiration}];
+per_msg_ttl_header(_) ->
+ [].
+
+%% Split routed Queues into {DeliverTo, Cycles}. A rejection involves a
+%% consumer decision, so it can never form a fully-automatic cycle and
+%% all queues are delivered to. Otherwise a queue already present in the
+%% message's x-death history marks a cycle; each cycle is reported as
+%% [CyclingQueue | QueuesFromDeathHistory] for logging.
+detect_cycles(rejected, _Msg, Queues) ->
+ {Queues, []};
+
+detect_cycles(_Reason, #basic_message{content = Content}, Queues) ->
+ #content{properties = #'P_basic'{headers = Headers}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ NoCycles = {Queues, []},
+ case Headers of
+ undefined ->
+ NoCycles;
+ _ ->
+ case rabbit_misc:table_lookup(Headers, <<"x-death">>) of
+ {array, Deaths} ->
+ {Cycling, NotCycling} =
+ lists:partition(fun (#resource{name = Queue}) ->
+ is_cycle(Queue, Deaths)
+ end, Queues),
+ OldQueues = [rabbit_misc:table_lookup(D, <<"queue">>) ||
+ {table, D} <- Deaths],
+ OldQueues1 = [QName || {longstr, QName} <- OldQueues],
+ {NotCycling, [[QName | OldQueues1] ||
+ #resource{name = QName} <- Cycling]};
+ _ ->
+ NoCycles
+ end
+ end.
+
+%% Does delivering to Queue close a dead-letter cycle? splitwith scans
+%% the x-death entries up to the first one recorded for this queue; if
+%% none exists (Rest == []) there is no cycle. Otherwise it is only
+%% treated as a cycle when every entry on the path (inclusive) has a
+%% reason other than "rejected", i.e. the loop is "fully automatic" and
+%% would spin forever without consumer involvement. Non-table entries
+%% are skipped by the scan but veto the cycle in the all/2 check.
+is_cycle(Queue, Deaths) ->
+ {Cycle, Rest} =
+ lists:splitwith(
+ fun ({table, D}) ->
+ {longstr, Queue} =/= rabbit_misc:table_lookup(D, <<"queue">>);
+ (_) ->
+ true
+ end, Deaths),
+ %% Is there a cycle, and if so, is it "fully automatic", i.e. with
+ %% no reject in it?
+ case Rest of
+ [] -> false;
+ [H|_] -> lists:all(
+ fun ({table, D}) ->
+ {longstr, <<"rejected">>} =/=
+ rabbit_misc:table_lookup(D, <<"reason">>);
+ (_) ->
+ false
+ end, Cycle ++ [H])
+ end.
+
+%% Warn about a dead-letter cycle at most once per queue list, using the
+%% process dictionary of the calling (queue) process as the dedup cache.
+log_cycle_once(Queues) ->
+ Key = {queue_cycle, Queues},
+ case get(Key) of
+ true -> ok;
+ undefined -> rabbit_log:warning(
+ "Message dropped. Dead-letter queues cycle detected" ++
+ ": ~p~nThis cycle will NOT be reported again.~n",
+ [Queues]),
+ put(Key, true)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_diagnostics).
+
+-define(PROCESS_INFO,
+ [current_stacktrace, initial_call, dictionary, message_queue_len,
+ links, monitors, monitored_by, heap_size]).
+
+-export([maybe_stuck/0, maybe_stuck/1]).
+
+%% Hunt for stuck processes with the default 5 second observation window.
+maybe_stuck() -> maybe_stuck(5000).
+
+%% Start the narrowing scan over every process on this node; Timeout is
+%% the total observation time in milliseconds.
+maybe_stuck(Timeout) ->
+ Pids = processes(),
+ io:format("There are ~p processes.~n", [length(Pids)]),
+ maybe_stuck(Pids, Timeout).
+
+%% Repeatedly (every 500ms) keep only the processes that still look
+%% stuck; whatever survives the whole window is printed as suspicious.
+maybe_stuck(Pids, Timeout) when Timeout =< 0 ->
+ io:format("Found ~p suspicious processes.~n", [length(Pids)]),
+ [io:format("~p~n", [info(Pid)]) || Pid <- Pids],
+ ok;
+maybe_stuck(Pids, Timeout) ->
+ Pids2 = [P || P <- Pids, looks_stuck(P)],
+ io:format("Investigated ~p processes this round, ~pms to go.~n",
+ [length(Pids2), Timeout]),
+ timer:sleep(500),
+ maybe_stuck(Pids2, Timeout - 500).
+
+%% A process "looks stuck" when it is waiting in a receive whose top
+%% stack frame is not one of the known-benign idle loops (dead processes
+%% and other statuses are never stuck).
+looks_stuck(Pid) ->
+ case process_info(Pid, status) of
+ {status, waiting} ->
+ %% It's tempting to just check for message_queue_len > 0
+ %% here rather than mess around with stack traces and
+ %% heuristics. But really, sometimes freshly stuck
+ %% processes can have 0 messages...
+ case erlang:process_info(Pid, current_stacktrace) of
+ {current_stacktrace, [H|_]} ->
+ maybe_stuck_stacktrace(H);
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end.
+
+%% Heuristic over the top stack frame: frames parked in these well-known
+%% receive loops are healthy and never reported.
+maybe_stuck_stacktrace({gen_server2, process_next_msg, _}) -> false;
+maybe_stuck_stacktrace({gen_event, fetch_msg, _}) -> false;
+maybe_stuck_stacktrace({prim_inet, accept0, _}) -> false;
+maybe_stuck_stacktrace({prim_inet, recv0, _}) -> false;
+maybe_stuck_stacktrace({rabbit_heartbeat, heartbeater, _}) -> false;
+maybe_stuck_stacktrace({rabbit_net, recv, _}) -> false;
+maybe_stuck_stacktrace({mochiweb_http, request, _}) -> false;
+maybe_stuck_stacktrace({group, _, _}) -> false;
+maybe_stuck_stacktrace({shell, _, _}) -> false;
+maybe_stuck_stacktrace({io, _, _}) -> false;
+%% Stack frames may carry a trailing location element; strip and retry.
+maybe_stuck_stacktrace({Mod, Fun, Arity, _Location}) ->
+    maybe_stuck_stacktrace({Mod, Fun, Arity});
+%% Anything else is suspect unless the function name mentions "loop".
+maybe_stuck_stacktrace({_Mod, Fun, _Arity}) ->
+    string:str(atom_to_list(Fun), "loop") =:= 0.
+
+%% Gather the ?PROCESS_INFO items for Pid, tagged with the pid itself.
+info(Pid) ->
+ [{pid, Pid} | process_info(Pid, ?PROCESS_INFO)].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_direct).
+
+-export([boot/0, force_event_refresh/1, list/0, connect/5,
+ start_channel/9, disconnect/2]).
+%% Internal
+-export([list_local/0]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(boot/0 :: () -> 'ok').
+-spec(force_event_refresh/1 :: (reference()) -> 'ok').
+-spec(list/0 :: () -> [pid()]).
+-spec(list_local/0 :: () -> [pid()]).
+-spec(connect/5 :: (({'none', 'none'} | {rabbit_types:username(), 'none'} |
+ {rabbit_types:username(), rabbit_types:password()}),
+ rabbit_types:vhost(), rabbit_types:protocol(), pid(),
+ rabbit_event:event_props()) ->
+ rabbit_types:ok_or_error2(
+ {rabbit_types:user(), rabbit_framing:amqp_table()},
+ 'broker_not_found_on_node' |
+ {'auth_failure', string()} | 'access_refused')).
+-spec(start_channel/9 ::
+ (rabbit_channel:channel_number(), pid(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}).
+-spec(disconnect/2 :: (pid(), rabbit_event:event_props()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the supervisor that owns channels created for direct (in-VM)
+%% connections; children are rabbit_channel_sup instances.
+boot() -> rabbit_sup:start_supervisor_child(
+ rabbit_direct_client_sup, rabbit_client_sup,
+ [{local, rabbit_direct_client_sup},
+ {rabbit_channel_sup, start_link, []}]).
+
+%% Ask every direct connection in the cluster to re-emit its
+%% connection_created event, tagged with Ref.
+force_event_refresh(Ref) ->
+    lists:foreach(fun (ConnPid) -> ConnPid ! {force_event_refresh, Ref} end,
+                  list()),
+    ok.
+
+%% Direct connections on this node only (membership is kept in pg_local
+%% by connect1/disconnect).
+list_local() ->
+ pg_local:get_members(rabbit_direct).
+
+%% Direct connections across all running cluster nodes, collected via
+%% rpc to each node's list_local/0.
+list() ->
+ rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
+ rabbit_direct, list_local, []).
+
+%%----------------------------------------------------------------------------
+
+%% Establish a direct connection. The first element selects the auth
+%% scheme: {none, _} trusts the caller (dummy user), {User, none}
+%% authenticates by name only, {User, Pass} checks the password.
+connect({none, _}, VHost, Protocol, Pid, Infos) ->
+ connect0(fun () -> {ok, rabbit_auth_backend_dummy:user()} end,
+ VHost, Protocol, Pid, Infos);
+
+connect({Username, none}, VHost, Protocol, Pid, Infos) ->
+ connect0(fun () -> rabbit_access_control:check_user_login(Username, []) end,
+ VHost, Protocol, Pid, Infos);
+
+connect({Username, Password}, VHost, Protocol, Pid, Infos) ->
+ connect0(fun () -> rabbit_access_control:check_user_pass_login(
+ Username, Password) end,
+ VHost, Protocol, Pid, Infos).
+
+%% Run the authentication fun, but only when the broker is actually
+%% running on this node; auth refusals are mapped to {auth_failure, _}.
+connect0(AuthFun, VHost, Protocol, Pid, Infos) ->
+ case rabbit:is_running() of
+ true -> case AuthFun() of
+ {ok, User} ->
+ connect1(User, VHost, Protocol, Pid, Infos);
+ {refused, _M, _A} ->
+ {error, {auth_failure, "Refused"}}
+ end;
+ false -> {error, broker_not_found_on_node}
+ end.
+
+%% Authorise the vhost, register the connection in pg_local, emit the
+%% connection_created event, and hand back server properties. A vhost
+%% access refusal surfaces as an exit with #amqp_error{} and is mapped
+%% to {error, access_refused}.
+connect1(User, VHost, Protocol, Pid, Infos) ->
+ try rabbit_access_control:check_vhost_access(User, VHost) of
+ ok -> ok = pg_local:join(rabbit_direct, Pid),
+ rabbit_event:notify(connection_created, Infos),
+ {ok, {User, rabbit_reader:server_properties(Protocol)}}
+ catch
+ exit:#amqp_error{name = access_refused} ->
+ {error, access_refused}
+ end.
+
+%% Spawn a channel under the direct-client supervisor for an existing
+%% direct connection; returns {ok, ChannelPid}.
+start_channel(Number, ClientChannelPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, Collector) ->
+ {ok, _, {ChannelPid, _}} =
+ supervisor2:start_child(
+ rabbit_direct_client_sup,
+ [{direct, Number, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, Collector}]),
+ {ok, ChannelPid}.
+
+%% Undo connect1's registration and emit the connection_closed event.
+disconnect(Pid, Infos) ->
+ pg_local:leave(rabbit_direct, Pid),
+ rabbit_event:notify(connection_closed, Infos).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_disk_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([get_disk_free_limit/0, set_disk_free_limit/1,
+ get_min_check_interval/0, set_min_check_interval/1,
+ get_max_check_interval/0, set_max_check_interval/1,
+ get_disk_free/0]).
+
+-define(SERVER, ?MODULE).
+-define(DEFAULT_MIN_DISK_CHECK_INTERVAL, 100).
+-define(DEFAULT_MAX_DISK_CHECK_INTERVAL, 10000).
+%% 250MB/s i.e. 250kB/ms
+-define(FAST_RATE, (250 * 1000)).
+
+-record(state, {dir,
+ limit,
+ actual,
+ min_interval,
+ max_interval,
+ timer,
+ alarmed
+ }).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(disk_free_limit() :: (integer() | {'mem_relative', float()})).
+-spec(start_link/1 :: (disk_free_limit()) -> rabbit_types:ok_pid_or_error()).
+-spec(get_disk_free_limit/0 :: () -> integer()).
+-spec(set_disk_free_limit/1 :: (disk_free_limit()) -> 'ok').
+-spec(get_min_check_interval/0 :: () -> integer()).
+-spec(set_min_check_interval/1 :: (integer()) -> 'ok').
+-spec(get_max_check_interval/0 :: () -> integer()).
+-spec(set_max_check_interval/1 :: (integer()) -> 'ok').
+-spec(get_disk_free/0 :: () -> (integer() | 'unknown')).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Synchronous client API. Every operation is forwarded to the monitor
+%% process registered locally as ?SERVER (an alias for ?MODULE); the
+%% 'infinity' timeout means callers block until the server replies.
+get_disk_free_limit() ->
+    gen_server:call(?SERVER, get_disk_free_limit, infinity).
+
+set_disk_free_limit(Limit) ->
+    gen_server:call(?SERVER, {set_disk_free_limit, Limit}, infinity).
+
+get_min_check_interval() ->
+    gen_server:call(?SERVER, get_min_check_interval, infinity).
+
+set_min_check_interval(Interval) ->
+    gen_server:call(?SERVER, {set_min_check_interval, Interval}, infinity).
+
+get_max_check_interval() ->
+    gen_server:call(?SERVER, get_max_check_interval, infinity).
+
+set_max_check_interval(Interval) ->
+    gen_server:call(?SERVER, {set_max_check_interval, Interval}, infinity).
+
+get_disk_free() ->
+    gen_server:call(?SERVER, get_disk_free, infinity).
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+%% Start the monitor under a local registered name; Args is the
+%% configured disk free limit handed through to init/1.
+start_link(Args) ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [Args], []).
+
+%% Probe free disk space and total memory once at startup. If either
+%% probe fails (thrown error or non-integer result) the platform is
+%% treated as unsupported and the server refuses to start rather than
+%% running with meaningless readings.
+init([Limit]) ->
+ Dir = dir(),
+ State = #state{dir = Dir,
+ min_interval = ?DEFAULT_MIN_DISK_CHECK_INTERVAL,
+ max_interval = ?DEFAULT_MAX_DISK_CHECK_INTERVAL,
+ alarmed = false},
+ case {catch get_disk_free(Dir),
+ vm_memory_monitor:get_total_memory()} of
+ {N1, N2} when is_integer(N1), is_integer(N2) ->
+ {ok, start_timer(set_disk_limits(State, Limit))};
+ Err ->
+ rabbit_log:info("Disabling disk free space monitoring "
+ "on unsupported platform:~n~p~n", [Err]),
+ {stop, unsupported_platform}
+ end.
+
+%% Getter/setter calls mirror the public API one-to-one.
+handle_call(get_disk_free_limit, _From, State = #state{limit = Limit}) ->
+ {reply, Limit, State};
+
+%% Setting a new limit re-evaluates the alarm state immediately.
+handle_call({set_disk_free_limit, Limit}, _From, State) ->
+ {reply, ok, set_disk_limits(State, Limit)};
+
+handle_call(get_min_check_interval, _From, State) ->
+ {reply, State#state.min_interval, State};
+
+handle_call(get_max_check_interval, _From, State) ->
+ {reply, State#state.max_interval, State};
+
+handle_call({set_min_check_interval, MinInterval}, _From, State) ->
+ {reply, ok, State#state{min_interval = MinInterval}};
+
+handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
+ {reply, ok, State#state{max_interval = MaxInterval}};
+
+%% 'actual' is the free-byte figure cached by the last poll, not a
+%% fresh reading.
+handle_call(get_disk_free, _From, State = #state { actual = Actual }) ->
+ {reply, Actual, State};
+
+%% NOTE(review): this catch-all never replies, so an unrecognised call
+%% combined with the 'infinity' timeout used by the API above would
+%% block its caller forever; presumably unknown calls cannot occur.
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+%% No casts are part of this server's protocol; drain them silently.
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+%% 'update' is the self-sent timer tick: poll the disk, adjust the
+%% alarm if needed, then re-arm the timer for the next interval.
+handle_info(update, State) ->
+ {noreply, start_timer(internal_update(State))};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Server Internals
+%%----------------------------------------------------------------------------
+
+% the partition / drive containing this directory will be monitored
+dir() -> rabbit_mnesia:dir().
+
+%% Translate the configured limit into bytes, log it (in decimal MB),
+%% and immediately re-check the disk against the new value.
+set_disk_limits(State, Limit0) ->
+ Limit = interpret_limit(Limit0),
+ State1 = State#state { limit = Limit },
+ rabbit_log:info("Disk free limit set to ~pMB~n",
+ [trunc(Limit / 1000000)]),
+ internal_update(State1).
+
+%% Poll free space and raise/clear the disk resource alarm only on a
+%% state transition; steady states (still ok / still alarmed) are
+%% silent to avoid log and alarm churn.
+internal_update(State = #state { limit = Limit,
+ dir = Dir,
+ alarmed = Alarmed}) ->
+ CurrentFree = get_disk_free(Dir),
+ NewAlarmed = CurrentFree < Limit,
+ case {Alarmed, NewAlarmed} of
+ {false, true} ->
+ emit_update_info("insufficient", CurrentFree, Limit),
+ rabbit_alarm:set_alarm({{resource_limit, disk, node()}, []});
+ {true, false} ->
+ emit_update_info("sufficient", CurrentFree, Limit),
+ rabbit_alarm:clear_alarm({resource_limit, disk, node()});
+ _ ->
+ ok
+ end,
+ State #state {alarmed = NewAlarmed, actual = CurrentFree}.
+
+get_disk_free(Dir) ->
+ get_disk_free(Dir, os:type()).
+
+%% Shell out to the platform's disk-usage tool. SunOS derivatives lack
+%% df's POSIX -P flag, hence the separate clause; Dir comes from
+%% rabbit_mnesia:dir() so it is trusted, not user input.
+get_disk_free(Dir, {unix, Sun})
+ when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris ->
+ parse_free_unix(rabbit_misc:os_cmd("/usr/bin/df -k " ++ Dir));
+get_disk_free(Dir, {unix, _}) ->
+ parse_free_unix(rabbit_misc:os_cmd("/bin/df -kP " ++ Dir));
+get_disk_free(Dir, {win32, _}) ->
+ parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ Dir ++ "\"")).
+
+%% Pull the "Available" column (4th field, in kB) from the second line
+%% of `df -k` output and convert it to bytes. Output of any other shape
+%% terminates the process with {unparseable, Str}.
+parse_free_unix(Str) ->
+    case string:tokens(Str, "\n") of
+        [_Header, DataLine | _] ->
+            case string:tokens(DataLine, " \t") of
+                [_Fs, _Blocks, _Used, Avail | _] ->
+                    1024 * list_to_integer(Avail);
+                _ ->
+                    exit({unparseable, Str})
+            end;
+        _ ->
+            exit({unparseable, Str})
+    end.
+
+%% The final line of `dir` output looks like
+%% "        10 Dir(s)  748093440 bytes free"; the free-byte count is
+%% the last run of digits on that line, so take the last global match.
+parse_free_win32(CommandResult) ->
+    LastLine = lists:last(string:tokens(CommandResult, "\r\n")),
+    {match, Matches} = re:run(LastLine, "\\d+",
+                              [global, {capture, first, list}]),
+    [FreeStr] = lists:last(Matches),
+    list_to_integer(FreeStr).
+
+%% Resolve a configured limit to an absolute byte count: either a
+%% fraction of total system memory, or an already-absolute number
+%% passed through unchanged.
+interpret_limit({mem_relative, Relative}) ->
+    round(Relative * vm_memory_monitor:get_total_memory());
+interpret_limit(Absolute) ->
+    Absolute.
+
+%% Log an alarm transition; StateStr is "sufficient" or "insufficient".
+emit_update_info(StateStr, CurrentFree, Limit) ->
+ rabbit_log:info(
+ "Disk free space ~s. Free bytes:~p Limit:~p~n",
+ [StateStr, CurrentFree, Limit]).
+
+%% Arm the next poll; the tick arrives as the 'update' info message.
+start_timer(State) ->
+ State#state{timer = erlang:send_after(interval(State), self(), update)}.
+
+%% Pick the next poll delay (ms). While alarmed, poll at the slow
+%% maximum interval. Otherwise aim to poll again in roughly twice the
+%% time a writer at ?FAST_RATE would need to consume the remaining
+%% headroom, clamped between min_interval and max_interval.
+interval(#state{alarmed = true, max_interval = Max}) ->
+    Max;
+interval(#state{limit        = Limit,
+                actual       = Free,
+                min_interval = Min,
+                max_interval = Max}) ->
+    Ideal = 2 * (Free - Limit) / ?FAST_RATE,
+    trunc(erlang:max(Min, erlang:min(Max, Ideal))).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_error_logger).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>).
+
+-behaviour(gen_event).
+
+-export([start/0, stop/0]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/2, handle_event/2,
+ handle_info/2]).
+
+-import(rabbit_error_logger_file_h, [safe_handle_event/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start/0 :: () -> 'ok').
+-spec(stop/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Install this module as an error_logger report handler, publishing
+%% log entries to the amq.rabbitmq.log topic exchange in the default
+%% vhost (declared in init/1).
+start() ->
+    {ok, DefaultVHost} = application:get_env(default_vhost),
+    ok = error_logger:add_report_handler(?MODULE, [DefaultVHost]).
+
+%% Remove the handler installed by start/0. delete_report_handler/1
+%% returns the value of this module's terminate/2, i.e. terminated_ok.
+stop() ->
+    %% Use ?MODULE rather than a hard-coded module name so the
+    %% add/delete pair cannot drift apart; start/0 already uses ?MODULE.
+    terminated_ok = error_logger:delete_report_handler(?MODULE),
+    ok.
+
+%%----------------------------------------------------------------------------
+
+%% Declare the amq.rabbitmq.log topic exchange (durable, not
+%% auto-delete, internal) in the default vhost; the handler state is
+%% the exchange's resource record, used as the publish target.
+init([DefaultVHost]) ->
+ #exchange{} = rabbit_exchange:declare(
+ rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME),
+ topic, true, false, true, []),
+ {ok, #resource{virtual_host = DefaultVHost,
+ kind = exchange,
+ name = ?LOG_EXCH_NAME}}.
+
+%% terminated_ok is matched by stop/0 to confirm an orderly removal.
+terminate(_Arg, _State) ->
+ terminated_ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+%% All events go through the crash-isolating wrapper shared with
+%% rabbit_error_logger_file_h.
+handle_event(Event, State) ->
+ safe_handle_event(fun handle_event0/2, Event, State).
+
+%% Format-style reports ({Kind, Gleader, {Pid, Format, Data}}) are
+%% republished to the log exchange; anything else is ignored.
+handle_event0({Kind, _Gleader, {_Pid, Format, Data}}, State) ->
+ ok = publish(Kind, Format, Data, State),
+ {ok, State};
+handle_event0(_Event, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%% Map an error_logger event kind onto the routing key used on the log
+%% exchange; kinds with no mapping (e.g. *_report events) are dropped.
+publish(Kind, Format, Data, State) ->
+    Severity = case Kind of
+                   error       -> <<"error">>;
+                   warning_msg -> <<"warning">>;
+                   info_msg    -> <<"info">>;
+                   _           -> none
+               end,
+    case Severity of
+        none       -> ok;
+        RoutingKey -> publish1(RoutingKey, Format, Data, State)
+    end.
+
+%% Format the log message (with each argument truncated to bound the
+%% message size) and publish it as text/plain to the log exchange with
+%% a whole-second timestamp.
+publish1(RoutingKey, Format, Data, LogExch) ->
+ %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's
+ %% second resolution, not millisecond.
+ Timestamp = rabbit_misc:now_ms() div 1000,
+
+ Args = [truncate:term(A, ?LOG_TRUNC) || A <- Data],
+ {ok, _DeliveredQPids} =
+ rabbit_basic:publish(LogExch, RoutingKey,
+ #'P_basic'{content_type = <<"text/plain">>,
+ timestamp = Timestamp},
+ list_to_binary(io_lib:format(Format, Args))),
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_error_logger_file_h).
+-include("rabbit.hrl").
+
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([safe_handle_event/3]).
+
+%% rabbit_error_logger_file_h is a wrapper around the error_logger_file_h
+%% module because the original's init/1 does not match properly
+%% with the result of closing the old handler when swapping handlers.
+%% The first init/1 additionally allows for simple log rotation
+%% when the suffix is not the empty string.
+%% The original init/2 also opened the file in 'write' mode, thus
+%% overwriting old logs. To remedy this, init/2 from
+%% lib/stdlib/src/error_logger_file_h.erl from R14B3 was copied as
+%% init_file/2 and changed so that it opens the file in 'append' mode.
+
+%% Used only when swapping handlers in log rotation
+%% Roll the current log: append its contents onto File ++ Suffix, then
+%% delete it so the re-opened file starts empty. An append failure is
+%% logged but rotation still proceeds with the old file in place.
+init({{File, Suffix}, []}) ->
+ case rabbit_file:append_file(File, Suffix) of
+ ok -> file:delete(File),
+ ok;
+ {error, Error} ->
+ rabbit_log:error("Failed to append contents of "
+ "log file '~s' to '~s':~n~p~n",
+ [File, [File, Suffix], Error])
+ end,
+ init(File);
+%% Used only when swapping handlers and the original handler
+%% failed to terminate or was never installed
+init({{File, _}, error}) ->
+ init(File);
+%% Used only when swapping handlers without performing
+%% log rotation
+init({File, []}) ->
+ init(File);
+%% Used only when taking over from the tty handler
+%% NB: clause order matters; this head would also match the rotation
+%% tuple above were it not placed after it.
+init({{File, []}, _}) ->
+ init(File);
+%% Taking over from a previous handler that buffered events: open the
+%% file, then replay the buffered events into it (see init_file/2).
+init({File, {error_logger, Buf}}) ->
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file(File, {error_logger, Buf});
+%% Base case: plain file name.
+init(File) ->
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file(File, []).
+
+%% Replay variant: open the file, then write out the buffered events
+%% (stored newest-first, hence the reverse) before returning the state.
+init_file(File, {error_logger, Buf}) ->
+ case init_file(File, error_logger) of
+ {ok, {Fd, File, PrevHandler}} ->
+ [handle_event(Event, {Fd, File, PrevHandler}) ||
+ {_, Event} <- lists:reverse(Buf)],
+ {ok, {Fd, File, PrevHandler}};
+ Error ->
+ Error
+ end;
+%% Copied from OTP's error_logger_file_h:init/2 but opens the file in
+%% 'append' mode so existing log contents are preserved.
+init_file(File, PrevHandler) ->
+ process_flag(trap_exit, true),
+ case file:open(File, [append]) of
+ {ok,Fd} -> {ok, {Fd, File, PrevHandler}};
+ Error -> Error
+ end.
+
+handle_event(Event, State) ->
+ safe_handle_event(fun handle_event0/2, Event, State).
+
+%% Run a handler function, but never let a crash while formatting an
+%% event take down the gen_event manager: report to stdout (depth-
+%% limited with ~P) and keep the current state.
+safe_handle_event(HandleEvent, Event, State) ->
+ try
+ HandleEvent(Event, State)
+ catch
+ _:Error ->
+ io:format("Event crashed log handler:~n~P~n~P~n",
+ [Event, 30, Error, 30]),
+ {ok, State}
+ end.
+
+%% filter out "application: foo; exited: stopped; type: temporary"
+handle_event0({info_report, _, {_, std_info, _}}, State) ->
+ {ok, State};
+%% When a node restarts quickly it is possible the rest of the cluster
+%% will not have had the chance to remove its queues from
+%% Mnesia. That's why rabbit_amqqueue:recover/0 invokes
+%% on_node_down(node()). But before we get there we can receive lots
+%% of messages intended for the old version of the node. The emulator
+%% logs an event for every one of those messages; in extremis this can
+%% bring the server to its knees just logging "Discarding..."
+%% again and again. So just log the first one, then go silent.
+%% The seen-flag lives in the process dictionary of the gen_event
+%% manager process hosting this handler.
+handle_event0(Event = {error, _, {emulator, _, ["Discarding message" ++ _]}},
+ State) ->
+ case get(discarding_message_seen) of
+ true -> {ok, State};
+ undefined -> put(discarding_message_seen, true),
+ error_logger_file_h:handle_event(t(Event), State)
+ end;
+%% Clear this state if we log anything else (but not a progress report).
+handle_event0(Event = {info_msg, _, _}, State) ->
+ erase(discarding_message_seen),
+ error_logger_file_h:handle_event(t(Event), State);
+%% Everything else is delegated (truncated) to the stock OTP handler.
+handle_event0(Event, State) ->
+ error_logger_file_h:handle_event(t(Event), State).
+
+handle_info(Info, State) ->
+ error_logger_file_h:handle_info(Info, State).
+
+handle_call(Call, State) ->
+ error_logger_file_h:handle_call(Call, State).
+
+terminate(Reason, State) ->
+ error_logger_file_h:terminate(Reason, State).
+
+code_change(OldVsn, State, Extra) ->
+ error_logger_file_h:code_change(OldVsn, State, Extra).
+
+%%----------------------------------------------------------------
+
+%% Bound the size of any term before it reaches the log file.
+t(Term) -> truncate:log_event(Term, ?LOG_TRUNC).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_event).
+
+-include("rabbit.hrl").
+
+-export([start_link/0]).
+-export([init_stats_timer/2, init_disabled_stats_timer/2,
+ ensure_stats_timer/3, stop_stats_timer/2, reset_stats_timer/2]).
+-export([stats_level/2, if_enabled/3]).
+-export([notify/2, notify/3, notify_if/3]).
+
+%%----------------------------------------------------------------------------
+
+-record(state, {level, interval, timer}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([event_type/0, event_props/0, event_timestamp/0, event/0]).
+
+-type(event_type() :: atom()).
+-type(event_props() :: term()).
+-type(event_timestamp() ::
+ {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
+
+-type(event() :: #event { type :: event_type(),
+ props :: event_props(),
+ reference :: 'none' | reference(),
+ timestamp :: event_timestamp() }).
+
+-type(level() :: 'none' | 'coarse' | 'fine').
+
+-type(timer_fun() :: fun (() -> 'ok')).
+-type(container() :: tuple()).
+-type(pos() :: non_neg_integer()).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(init_stats_timer/2 :: (container(), pos()) -> container()).
+-spec(init_disabled_stats_timer/2 :: (container(), pos()) -> container()).
+-spec(ensure_stats_timer/3 :: (container(), pos(), term()) -> container()).
+-spec(stop_stats_timer/2 :: (container(), pos()) -> container()).
+-spec(reset_stats_timer/2 :: (container(), pos()) -> container()).
+-spec(stats_level/2 :: (container(), pos()) -> level()).
+-spec(if_enabled/3 :: (container(), pos(), timer_fun()) -> 'ok').
+-spec(notify/2 :: (event_type(), event_props()) -> 'ok').
+-spec(notify/3 :: (event_type(), event_props(), reference() | 'none') -> 'ok').
+-spec(notify_if/3 :: (boolean(), event_type(), event_props()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the rabbit_event gen_event manager under the module name.
+start_link() ->
+ gen_event:start_link({local, ?MODULE}).
+
+%% The idea is, for each stat-emitting object:
+%%
+%% On startup:
+%% init_stats_timer(State)
+%% notify(created event)
+%% if_enabled(internal_emit_stats) - so we immediately send something
+%%
+%% On wakeup:
+%% ensure_stats_timer(State, emit_stats)
+%% (Note we can't emit stats immediately, the timer may have fired 1ms ago.)
+%%
+%% emit_stats:
+%% if_enabled(internal_emit_stats)
+%% reset_stats_timer(State) - just bookkeeping
+%%
+%% Pre-hibernation:
+%% if_enabled(internal_emit_stats)
+%% stop_stats_timer(State)
+%%
+%% internal_emit_stats:
+%% notify(stats)
+
+%% The timer #state{} is embedded at position P of the caller's own
+%% state tuple C (hence the element/setelement gymnastics below).
+init_stats_timer(C, P) ->
+ {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+ {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+ setelement(P, C, #state{level = StatsLevel, interval = Interval,
+ timer = undefined}).
+
+%% Variant that never emits: level 'none' disables if_enabled/3 and
+%% ensure_stats_timer/3 alike.
+init_disabled_stats_timer(C, P) ->
+ setelement(P, C, #state{level = none, interval = 0, timer = undefined}).
+
+%% Arm the timer (sending Msg to self after the configured interval)
+%% unless stats are disabled or a timer is already pending.
+ensure_stats_timer(C, P, Msg) ->
+ case element(P, C) of
+ #state{level = Level, interval = Interval, timer = undefined} = State
+ when Level =/= none ->
+ TRef = erlang:send_after(Interval, self(), Msg),
+ setelement(P, C, State#state{timer = TRef});
+ #state{} ->
+ C
+ end.
+
+%% Cancel a pending timer. If cancel_timer/1 returns false the timer
+%% already fired (its message is in flight), so the timer ref is
+%% deliberately left in place and C is returned unchanged; the ref is
+%% cleared later by reset_stats_timer/2 when the message is handled.
+stop_stats_timer(C, P) ->
+ case element(P, C) of
+ #state{timer = TRef} = State when TRef =/= undefined ->
+ case erlang:cancel_timer(TRef) of
+ false -> C;
+ _ -> setelement(P, C, State#state{timer = undefined})
+ end;
+ #state{} ->
+ C
+ end.
+
+%% Bookkeeping after a timer message has been consumed: forget the
+%% (now spent) timer ref so ensure_stats_timer/3 can arm a new one.
+reset_stats_timer(C, P) ->
+    State = element(P, C),
+    case State#state.timer of
+        undefined -> C;
+        _TRef     -> setelement(P, C, State#state{timer = undefined})
+    end.
+
+%% Read back the configured collection level ('none'|'coarse'|'fine').
+stats_level(C, P) ->
+ #state{level = Level} = element(P, C),
+ Level.
+
+%% Run Fun (a stats-emitting thunk) only when collection is enabled.
+if_enabled(C, P, Fun) ->
+ case element(P, C) of
+ #state{level = none} -> ok;
+ #state{} -> Fun(), ok
+ end.
+
+%% Conditional notify, e.g. to suppress events inside a transaction.
+notify_if(true, Type, Props) -> notify(Type, Props);
+notify_if(false, _Type, _Props) -> ok.
+
+notify(Type, Props) -> notify(Type, Props, none).
+
+%% Broadcast an #event{} (stamped with the current wall-clock time) to
+%% all handlers attached to the rabbit_event manager.
+notify(Type, Props, Ref) ->
+ gen_event:notify(?MODULE, #event{type = Type,
+ props = Props,
+ reference = Ref,
+ timestamp = os:timestamp()}).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([recover/0, policy_changed/2, callback/4, declare/6,
+ assert_equivalence/6, assert_args_equivalence/2, check_type/1,
+ lookup/1, lookup_or_die/1, list/1, lookup_scratch/2, update_scratch/3,
+ info_keys/0, info/1, info/2, info_all/1, info_all/2,
+ route/2, delete/2, validate_binding/2]).
+%% these must be run inside a mnesia tx
+-export([maybe_auto_delete/1, serial/1, peek_serial/1, update/2]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([name/0, type/0]).
+
+-type(name() :: rabbit_types:r('exchange')).
+-type(type() :: atom()).
+-type(fun_name() :: atom()).
+
+-spec(recover/0 :: () -> [name()]).
+-spec(callback/4::
+ (rabbit_types:exchange(), fun_name(),
+ fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok').
+-spec(policy_changed/2 ::
+ (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok').
+-spec(declare/6 ::
+ (name(), type(), boolean(), boolean(), boolean(),
+ rabbit_framing:amqp_table())
+ -> rabbit_types:exchange()).
+-spec(check_type/1 ::
+ (binary()) -> atom() | rabbit_types:connection_exit()).
+-spec(assert_equivalence/6 ::
+ (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(),
+ rabbit_framing:amqp_table())
+ -> 'ok' | rabbit_types:connection_exit()).
+-spec(assert_args_equivalence/2 ::
+ (rabbit_types:exchange(), rabbit_framing:amqp_table())
+ -> 'ok' | rabbit_types:connection_exit()).
+-spec(lookup/1 ::
+ (name()) -> rabbit_types:ok(rabbit_types:exchange()) |
+ rabbit_types:error('not_found')).
+-spec(lookup_or_die/1 ::
+ (name()) -> rabbit_types:exchange() |
+ rabbit_types:channel_exit()).
+-spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]).
+-spec(lookup_scratch/2 :: (name(), atom()) ->
+ rabbit_types:ok(term()) |
+ rabbit_types:error('not_found')).
+-spec(update_scratch/3 :: (name(), atom(), fun((any()) -> any())) -> 'ok').
+-spec(update/2 ::
+ (name(),
+ fun((rabbit_types:exchange()) -> rabbit_types:exchange()))
+ -> not_found | rabbit_types:exchange()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()).
+-spec(info/2 ::
+ (rabbit_types:exchange(), rabbit_types:info_keys())
+ -> rabbit_types:infos()).
+-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
+ -> [rabbit_types:infos()]).
+-spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
+ -> [rabbit_amqqueue:name()]).
+-spec(delete/2 ::
+ (name(), 'true') -> 'ok' | rabbit_types:error('not_found' | 'in_use');
+ (name(), 'false') -> 'ok' | rabbit_types:error('not_found')).
+-spec(validate_binding/2 ::
+ (rabbit_types:exchange(), rabbit_types:binding())
+ -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]})).
+-spec(maybe_auto_delete/1::
+ (rabbit_types:exchange())
+ -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}).
+-spec(serial/1 :: (rabbit_types:exchange()) ->
+ fun((boolean()) -> 'none' | pos_integer())).
+-spec(peek_serial/1 :: (name()) -> pos_integer() | 'undefined').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments,
+ policy]).
+
+%% Recreate, at boot, every durable exchange that is not already
+%% present in the transient rabbit_exchange table, firing each
+%% exchange type's 'create' callback; returns the recovered names.
+recover() ->
+ Xs = rabbit_misc:table_filter(
+ fun (#exchange{name = XName}) ->
+ %% only recover exchanges missing from the transient table
+ mnesia:read({rabbit_exchange, XName}) =:= []
+ end,
+ fun (X, Tx) ->
+ case Tx of
+ true -> store(X);
+ false -> ok
+ end,
+ callback(X, create, map_create_tx(Tx), [X])
+ end,
+ rabbit_durable_exchange),
+ report_missing_decorators(Xs),
+ [XName || #exchange{name = XName} <- Xs].
+
+%% Warn (once, with the full list) about decorator modules referenced
+%% by recovered exchanges whose code is not present on this node.
+report_missing_decorators(Xs) ->
+    Referenced = lists:append([rabbit_exchange_decorator:select(raw, D) ||
+                                  #exchange{decorators = D} <- Xs]),
+    Missing = [Mod || Mod <- lists:usort(Referenced),
+                      code:which(Mod) =:= non_existing],
+    case Missing of
+        [] -> ok;
+        _  -> rabbit_log:warning("Missing exchange decorators: ~p~n",
+                                 [Missing])
+    end.
+
+%% Invoke Fun on every decorator of X and then on its type module.
+%% Serial0 may be a ready-made serial fun or a plain atom that is
+%% wrapped into a constant fun; each callee gets the serial appropriate
+%% to whether it serialises events.
+callback(X = #exchange{type = XType,
+ decorators = Decorators}, Fun, Serial0, Args) ->
+ Serial = if is_function(Serial0) -> Serial0;
+ is_atom(Serial0) -> fun (_Bool) -> Serial0 end
+ end,
+ [ok = apply(M, Fun, [Serial(M:serialise_events(X)) | Args]) ||
+ M <- rabbit_exchange_decorator:select(all, Decorators)],
+ Module = type_to_module(XType),
+ apply(Module, Fun, [Serial(Module:serialise_events()) | Args]).
+
+%% Notify the type module and the union of old/new decorators that the
+%% exchange's policy changed from X to X1.
+policy_changed(X = #exchange{type = XType,
+ decorators = Decorators},
+ X1 = #exchange{decorators = Decorators1}) ->
+ D = rabbit_exchange_decorator:select(all, Decorators),
+ D1 = rabbit_exchange_decorator:select(all, Decorators1),
+ DAll = lists:usort(D ++ D1),
+ [ok = M:policy_changed(X, X1) || M <- [type_to_module(XType) | DAll]],
+ ok.
+
+%% An exchange serialises events if its type or any decorator does.
+serialise_events(X = #exchange{type = Type, decorators = Decorators}) ->
+ lists:any(fun (M) -> M:serialise_events(X) end,
+ rabbit_exchange_decorator:select(all, Decorators))
+ orelse (type_to_module(Type)):serialise_events().
+
+%% Build the serial fun handed to callback/4: callees that serialise
+%% events (arg 'true') get the next serial number, others get 'none'.
+%% NB: next_serial/1 must run inside a mnesia transaction.
+serial(#exchange{name = XName} = X) ->
+ Serial = case serialise_events(X) of
+ true -> next_serial(XName);
+ false -> none
+ end,
+ fun (true) -> Serial;
+ (false) -> none
+ end.
+
+%% Create the exchange (idempotently: an existing one is returned
+%% as-is), persisting durable exchanges and firing the 'create'
+%% callback / exchange_created event outside the transaction.
+declare(XName, Type, Durable, AutoDelete, Internal, Args) ->
+ X = rabbit_policy:set(#exchange{name = XName,
+ type = Type,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Args}),
+ XT = type_to_module(Type),
+ %% We want to upset things if it isn't ok
+ ok = XT:validate(X),
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_exchange, XName}) of
+ [] ->
+ store(X),
+ ok = case Durable of
+ true -> mnesia:write(rabbit_durable_exchange,
+ X, write);
+ false -> ok
+ end,
+ {new, X};
+ [ExistingX] ->
+ {existing, ExistingX}
+ end
+ end,
+ fun ({new, Exchange}, Tx) ->
+ ok = callback(X, create, map_create_tx(Tx), [Exchange]),
+ rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)),
+ Exchange;
+ ({existing, Exchange}, _Tx) ->
+ Exchange;
+ (Err, _Tx) ->
+ Err
+ end).
+
+%% Translate the in-transaction flag into the serial atom callbacks see.
+map_create_tx(true) -> transaction;
+map_create_tx(false) -> none.
+
+store(X) -> ok = mnesia:write(rabbit_exchange, X, write).
+
+%% Used with binaries sent over the wire; the type may not exist.
+check_type(TypeBin) ->
+ case rabbit_registry:binary_to_type(TypeBin) of
+ {error, not_found} ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unknown exchange type '~s'", [TypeBin]);
+ T ->
+ case rabbit_registry:lookup_module(exchange, T) of
+ {error, not_found} -> rabbit_misc:protocol_error(
+ command_invalid,
+ "invalid exchange type '~s'", [T]);
+ {ok, _Module} -> T
+ end
+ end.
+
+%% Check a client's redeclaration against the existing exchange X.
+%% The first clause matches only when type, durable, auto_delete and
+%% internal all coincide with X's fields, leaving just the arguments
+%% for the type module to compare; any mismatch falls through to the
+%% protocol error below.
+assert_equivalence(X = #exchange{ durable     = Durable,
+                                  auto_delete = AutoDelete,
+                                  internal    = Internal,
+                                  type        = Type},
+                   Type, Durable, AutoDelete, Internal, RequiredArgs) ->
+    (type_to_module(Type)):assert_args_equivalence(X, RequiredArgs);
+%% NB: the argument order is (Type, Durable, AutoDelete, Internal,
+%% Args); the underscore names below now follow it — previously
+%% _Internal/_AutoDelete were swapped, harmless (unused) but misleading.
+assert_equivalence(#exchange{ name = Name },
+                   _Type, _Durable, _AutoDelete, _Internal, _Args) ->
+    rabbit_misc:protocol_error(
+      precondition_failed,
+      "cannot redeclare ~s with different type, durable, "
+      "internal or autodelete value",
+      [rabbit_misc:rs(Name)]).
+
+assert_args_equivalence(#exchange{ name = Name, arguments = Args },
+ RequiredArgs) ->
+ %% The spec says "Arguments are compared for semantic
+ %% equivalence". The only arg we care about is
+ %% "alternate-exchange".
+ rabbit_misc:assert_args_equivalence(Args, RequiredArgs, Name,
+ [<<"alternate-exchange">>]).
+
+%% Dirty read of an exchange record; {error, not_found} when absent.
+lookup(Name) ->
+ rabbit_misc:dirty_read({rabbit_exchange, Name}).
+
+%% As lookup/1, but raises a channel-level not_found error instead of
+%% returning an error tuple.
+lookup_or_die(Name) ->
+ case lookup(Name) of
+ {ok, X} -> X;
+ {error, not_found} -> rabbit_misc:not_found(Name)
+ end.
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+list(VHostPath) ->
+ mnesia:async_dirty(
+ fun () ->
+ mnesia:match_object(
+ rabbit_exchange,
+ #exchange{name = rabbit_misc:r(VHostPath, exchange), _ = '_'},
+ read)
+ end).
+
+%% Fetch the per-application scratch value stored on an exchange.
+%% {error, not_found} covers all failure modes: unknown exchange, no
+%% scratch dict at all, or no entry for App.
+lookup_scratch(Name, App) ->
+    case lookup(Name) of
+        {ok, #exchange{scratches = Scratches}} when Scratches =/= undefined ->
+            case orddict:find(App, Scratches) of
+                {ok, Value} -> {ok, Value};
+                error       -> {error, not_found}
+            end;
+        _ ->
+            {error, not_found}
+    end.
+
+update_scratch(Name, App, Fun) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ update(Name,
+ fun(X = #exchange{scratches = Scratches0}) ->
+ Scratches1 = case Scratches0 of
+ undefined -> orddict:new();
+ _ -> Scratches0
+ end,
+ Scratch = case orddict:find(App, Scratches1) of
+ {ok, S} -> S;
+ error -> undefined
+ end,
+ Scratches2 = orddict:store(
+ App, Fun(Scratch), Scratches1),
+ X#exchange{scratches = Scratches2}
+ end),
+ ok
+ end).
+
+update(Name, Fun) ->
+ case mnesia:wread({rabbit_exchange, Name}) of
+ [X = #exchange{durable = Durable}] ->
+ X1 = Fun(X),
+ ok = mnesia:write(rabbit_exchange, X1, write),
+ case Durable of
+ true -> ok = mnesia:write(rabbit_durable_exchange, X1, write);
+ _ -> ok
+ end,
+ X1;
+ [] ->
+ not_found
+ end.
+
+info_keys() -> ?INFO_KEYS.
+
+map(VHostPath, F) ->
+ %% TODO: there is scope for optimisation here, e.g. using a
+ %% cursor, parallelising the function invocation
+ lists:map(F, list(VHostPath)).
+
+infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
+
+i(name, #exchange{name = Name}) -> Name;
+i(type, #exchange{type = Type}) -> Type;
+i(durable, #exchange{durable = Durable}) -> Durable;
+i(auto_delete, #exchange{auto_delete = AutoDelete}) -> AutoDelete;
+i(internal, #exchange{internal = Internal}) -> Internal;
+i(arguments, #exchange{arguments = Arguments}) -> Arguments;
+i(policy, X) -> case rabbit_policy:name(X) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(Item, _) -> throw({bad_argument, Item}).
+
+info(X = #exchange{}) -> infos(?INFO_KEYS, X).
+
+info(X = #exchange{}, Items) -> infos(Items, X).
+
+info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end).
+
+info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end).
+
+route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName,
+ decorators = Decorators} = X,
+ #delivery{message = #basic_message{routing_keys = RKs}} = Delivery) ->
+ case {RName, rabbit_exchange_decorator:select(route, Decorators)} of
+ {<<"">>, []} ->
+ %% Optimisation
+ [rabbit_misc:r(VHost, queue, RK) || RK <- lists:usort(RKs)];
+ {_, SelectedDecorators} ->
+ lists:usort(route1(Delivery, SelectedDecorators, {[X], XName, []}))
+ end.
+
+route1(_, _, {[], _, QNames}) ->
+ QNames;
+route1(Delivery, Decorators,
+ {[X = #exchange{type = Type} | WorkList], SeenXs, QNames}) ->
+ ExchangeDests = (type_to_module(Type)):route(X, Delivery),
+ DecorateDests = process_decorators(X, Decorators, Delivery),
+ AlternateDests = process_alternate(X, ExchangeDests),
+ route1(Delivery, Decorators,
+ lists:foldl(fun process_route/2, {WorkList, SeenXs, QNames},
+ AlternateDests ++ DecorateDests ++ ExchangeDests)).
+
+process_alternate(X = #exchange{name = XName}, []) ->
+ case rabbit_policy:get_arg(
+ <<"alternate-exchange">>, <<"alternate-exchange">>, X) of
+ undefined -> [];
+ AName -> [rabbit_misc:r(XName, exchange, AName)]
+ end;
+process_alternate(_X, _Results) ->
+ [].
+
+process_decorators(_, [], _) -> %% optimisation
+ [];
+process_decorators(X, Decorators, Delivery) ->
+ lists:append([Decorator:route(X, Delivery) || Decorator <- Decorators]).
+
+process_route(#resource{kind = exchange} = XName,
+ {_WorkList, XName, _QNames} = Acc) ->
+ Acc;
+process_route(#resource{kind = exchange} = XName,
+ {WorkList, #resource{kind = exchange} = SeenX, QNames}) ->
+ {cons_if_present(XName, WorkList),
+ gb_sets:from_list([SeenX, XName]), QNames};
+process_route(#resource{kind = exchange} = XName,
+ {WorkList, SeenXs, QNames} = Acc) ->
+ case gb_sets:is_element(XName, SeenXs) of
+ true -> Acc;
+ false -> {cons_if_present(XName, WorkList),
+ gb_sets:add_element(XName, SeenXs), QNames}
+ end;
+process_route(#resource{kind = queue} = QName,
+ {WorkList, SeenXs, QNames}) ->
+ {WorkList, SeenXs, [QName | QNames]}.
+
+cons_if_present(XName, L) ->
+ case lookup(XName) of
+ {ok, X} -> [X | L];
+ {error, not_found} -> L
+ end.
+
+call_with_exchange(XName, Fun) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () -> case mnesia:read({rabbit_exchange, XName}) of
+ [] -> rabbit_misc:const({error, not_found});
+ [X] -> Fun(X)
+ end
+ end).
+
+delete(XName, IfUnused) ->
+ Fun = case IfUnused of
+ true -> fun conditional_delete/1;
+ false -> fun unconditional_delete/1
+ end,
+ call_with_exchange(
+ XName,
+ fun (X) ->
+ case Fun(X) of
+ {deleted, X, Bs, Deletions} ->
+ rabbit_binding:process_deletions(
+ rabbit_binding:add_deletion(
+ XName, {X, deleted, Bs}, Deletions));
+ {error, _InUseOrNotFound} = E ->
+ rabbit_misc:const(E)
+ end
+ end).
+
+validate_binding(X = #exchange{type = XType}, Binding) ->
+ Module = type_to_module(XType),
+ Module:validate_binding(X, Binding).
+
+%% Delete X if it is marked auto_delete and is no longer in use. A
+%% successful conditional delete of an auto-delete exchange is expected
+%% to find no remaining source bindings (hence the [] match).
+maybe_auto_delete(#exchange{auto_delete = false}) ->
+ not_deleted;
+maybe_auto_delete(#exchange{auto_delete = true} = X) ->
+ case conditional_delete(X) of
+ {error, in_use} -> not_deleted;
+ {deleted, X, [], Deletions} -> {deleted, Deletions}
+ end.
+
+%% Delete only if no binding uses XName as its source.
+conditional_delete(X = #exchange{name = XName}) ->
+ case rabbit_binding:has_for_source(XName) of
+ false -> unconditional_delete(X);
+ true -> {error, in_use}
+ end.
+
+%% Remove the exchange and everything keyed on it: the durable copy,
+%% the transient row, its serial, and bindings in both directions.
+unconditional_delete(X = #exchange{name = XName}) ->
+ %% this 'guarded' delete prevents unnecessary writes to the mnesia
+ %% disk log
+ case mnesia:wread({rabbit_durable_exchange, XName}) of
+ [] -> ok;
+ [_] -> ok = mnesia:delete({rabbit_durable_exchange, XName})
+ end,
+ ok = mnesia:delete({rabbit_exchange, XName}),
+ ok = mnesia:delete({rabbit_exchange_serial, XName}),
+ Bindings = rabbit_binding:remove_for_source(XName),
+ {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}.
+
+%% Fetch the current serial under a write lock and bump the stored
+%% next-serial by one; returns the pre-increment value.
+next_serial(XName) ->
+ Serial = peek_serial(XName, write),
+ ok = mnesia:write(rabbit_exchange_serial,
+ #exchange_serial{name = XName, next = Serial + 1}, write),
+ Serial.
+
+%% Read the next serial for XName without advancing it; a missing row
+%% means the serial sequence has not started, so default to 1.
+peek_serial(XName) -> peek_serial(XName, read).
+
+peek_serial(XName, LockType) ->
+ case mnesia:read(rabbit_exchange_serial, XName, LockType) of
+ [#exchange_serial{next = Serial}] -> Serial;
+ _ -> 1
+ end.
+
+%% Unknown exchange type: warn, cache the 'invalid' placeholder module
+%% in the process dictionary (so the warning fires once per process),
+%% and return it.
+invalid_module(T) ->
+ rabbit_log:warning("Could not find exchange type ~s.~n", [T]),
+ put({xtype_to_module, T}, rabbit_exchange_type_invalid),
+ rabbit_exchange_type_invalid.
+
+%% Used with atoms from records; e.g., the type is expected to exist.
+%% Maps an exchange type atom to its implementation module, memoised
+%% in the process dictionary; unknown types resolve to the 'invalid'
+%% placeholder (see invalid_module/1).
+type_to_module(T) ->
+ case get({xtype_to_module, T}) of
+ undefined ->
+ case rabbit_registry:lookup_module(exchange, T) of
+ {ok, Module} -> put({xtype_to_module, T}, Module),
+ Module;
+ {error, not_found} -> invalid_module(T)
+ end;
+ Module ->
+ Module
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_decorator).
+
+-include("rabbit.hrl").
+
+-export([select/2, set/1]).
+
+%% This is like an exchange type except that:
+%%
+%% 1) It applies to all exchanges as soon as it is installed, therefore
+%% 2) It is not allowed to affect validation, so no validate/1 or
+%% assert_args_equivalence/2
+%%
+%% It's possible in the future we might make decorators
+%% able to manipulate messages as they are published.
+
+-ifdef(use_specs).
+
+-type(tx() :: 'transaction' | 'none').
+-type(serial() :: pos_integer() | tx()).
+
+-callback description() -> [proplists:property()].
+
+%% Should Rabbit ensure that all binding events that are
+%% delivered to an individual exchange can be serialised? (they
+%% might still be delivered out of order, but there'll be a
+%% serial number).
+-callback serialise_events(rabbit_types:exchange()) -> boolean().
+
+%% called after declaration and recovery
+-callback create(tx(), rabbit_types:exchange()) -> 'ok'.
+
+%% called after exchange (auto)deletion.
+-callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
+ 'ok'.
+
+%% called when the policy attached to this exchange changes.
+-callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
+ 'ok'.
+
+%% called after a binding has been added or recovered
+-callback add_binding(serial(), rabbit_types:exchange(),
+ rabbit_types:binding()) -> 'ok'.
+
+%% called after bindings have been deleted.
+-callback remove_bindings(serial(), rabbit_types:exchange(),
+ [rabbit_types:binding()]) -> 'ok'.
+
+%% Allows additional destinations to be added to the routing decision.
+-callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
+ [rabbit_amqqueue:name() | rabbit_exchange:name()].
+
+%% Whether the decorator wishes to receive callbacks for the exchange
+%% none:no callbacks, noroute:all callbacks except route, all:all callbacks
+-callback active_for(rabbit_types:exchange()) -> 'none' | 'noroute' | 'all'.
+
+-else.
+
+-export([behaviour_info/1]).
+
+%% Pre-R15 fallback: enumerate the decorator callbacks when the
+%% -callback attributes (use_specs) are unavailable.
+behaviour_info(callbacks) ->
+ [{description, 0}, {serialise_events, 1}, {create, 2}, {delete, 3},
+ {policy_changed, 2}, {add_binding, 3}, {remove_bindings, 3},
+ {route, 2}, {active_for, 1}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% select a subset of active decorators
+%% 'all' and 'route' also drop decorators whose module can no longer
+%% be found by the code server; 'raw' returns the stored lists as-is.
+select(all, {Route, NoRoute}) -> filter(Route ++ NoRoute);
+select(route, {Route, _NoRoute}) -> filter(Route);
+select(raw, {Route, NoRoute}) -> Route ++ NoRoute.
+
+%% Keep only modules that the code server can actually locate.
+filter(Mods) ->
+ lists:filter(fun (Mod) -> code:which(Mod) =/= non_existing end, Mods).
+
+%% Recompute X's decorator lists by asking every registered decorator
+%% whether it is active for X: 'all' joins the route list, 'noroute'
+%% joins the no-route list, anything else (e.g. 'none') joins neither.
+set(X) ->
+ Decs = lists:foldl(fun (D, {Route, NoRoute}) ->
+ ActiveFor = D:active_for(X),
+ {cons_if_eq(all, ActiveFor, D, Route),
+ cons_if_eq(noroute, ActiveFor, D, NoRoute)}
+ end, {[], []}, list()),
+ X#exchange{decorators = Decs}.
+
+%% All registered exchange decorator modules.
+list() -> [M || {_, M} <- rabbit_registry:lookup_all(exchange_decorator)].
+
+%% Prepend Item to List only when the two selector terms are equal.
+cons_if_eq(Same, Same, Item, List) -> [Item | List];
+cons_if_eq(_Want, _Got, _Item, List) -> List.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type).
+
+-ifdef(use_specs).
+
+-type(tx() :: 'transaction' | 'none').
+-type(serial() :: pos_integer() | tx()).
+
+-callback description() -> [proplists:property()].
+
+%% Should Rabbit ensure that all binding events that are
+%% delivered to an individual exchange can be serialised? (they
+%% might still be delivered out of order, but there'll be a
+%% serial number).
+-callback serialise_events() -> boolean().
+
+%% The no_return is there so that we can have an "invalid" exchange
+%% type (see rabbit_exchange_type_invalid).
+-callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
+ rabbit_router:match_result().
+
+%% called BEFORE declaration, to check args etc; may exit with #amqp_error{}
+-callback validate(rabbit_types:exchange()) -> 'ok'.
+
+%% called BEFORE declaration, to check args etc
+-callback validate_binding(rabbit_types:exchange(), rabbit_types:binding()) ->
+ rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
+
+%% called after declaration and recovery
+-callback create(tx(), rabbit_types:exchange()) -> 'ok'.
+
+%% called after exchange (auto)deletion.
+-callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
+ 'ok'.
+
+%% called when the policy attached to this exchange changes.
+-callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
+ 'ok'.
+
+%% called after a binding has been added or recovered
+-callback add_binding(serial(), rabbit_types:exchange(),
+ rabbit_types:binding()) -> 'ok'.
+
+%% called after bindings have been deleted.
+-callback remove_bindings(serial(), rabbit_types:exchange(),
+ [rabbit_types:binding()]) -> 'ok'.
+
+%% called when comparing exchanges for equivalence - should return ok or
+%% exit with #amqp_error{}
+-callback assert_args_equivalence(rabbit_types:exchange(),
+ rabbit_framing:amqp_table()) ->
+ 'ok' | rabbit_types:connection_exit().
+
+-else.
+
+-export([behaviour_info/1]).
+
+%% Pre-R15 fallback: enumerate the exchange-type callbacks when the
+%% -callback attributes (use_specs) are unavailable.
+behaviour_info(callbacks) ->
+ [{description, 0}, {serialise_events, 0}, {route, 2},
+ {validate, 1}, {validate_binding, 2}, {policy_changed, 2},
+ {create, 2}, {delete, 3}, {add_binding, 3}, {remove_bindings, 3},
+ {assert_args_equivalence, 2}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_direct).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+
+%% Register this module as the implementation of the built-in
+%% <<"direct">> exchange type at boot.
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type direct"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"direct">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"AMQP direct exchange, as per the AMQP specification">>}].
+
+%% Binding events need no serialisation for this type.
+serialise_events() -> false.
+
+%% Route to every binding whose key exactly matches one of the
+%% message's routing keys.
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ rabbit_router:match_routing_key(Name, Routes).
+
+%% The direct type imposes no extra constraints; remaining callbacks
+%% are no-ops apart from argument equivalence, which is delegated to
+%% the generic check.
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_fanout).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+
+%% Register this module as the implementation of the built-in
+%% <<"fanout">> exchange type at boot.
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type fanout"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"fanout">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"AMQP fanout exchange, as per the AMQP specification">>}].
+
+%% Binding events need no serialisation for this type.
+serialise_events() -> false.
+
+%% Route to every binding regardless of routing key: fanout bindings
+%% are all stored under the wildcard key '_'.
+route(#exchange{name = Name}, _Delivery) ->
+ rabbit_router:match_routing_key(Name, ['_']).
+
+%% The fanout type imposes no extra constraints; remaining callbacks
+%% are no-ops apart from argument equivalence, which is delegated to
+%% the generic check.
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_headers).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type headers"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"headers">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-ifdef(use_specs).
+-spec(headers_match/2 :: (rabbit_framing:amqp_table(),
+ rabbit_framing:amqp_table()) -> boolean()).
+-endif.
+
+description() ->
+ [{description, <<"AMQP headers exchange, as per the AMQP specification">>}].
+
+%% Binding events need no serialisation for this type.
+serialise_events() -> false.
+
+%% Match each binding's argument table against the message's headers.
+%% Headers are sorted first so headers_match/2 can merge-compare both
+%% tables in linear time; absent headers behave as an empty table.
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{content = Content}}) ->
+ Headers = case (Content#content.properties)#'P_basic'.headers of
+ undefined -> [];
+ H -> rabbit_misc:sort_field_table(H)
+ end,
+ rabbit_router:match_bindings(
+ Name, fun (#binding{args = Spec}) -> headers_match(Spec, Headers) end).
+
+%% A binding's x-match argument must be the longstr "all" or "any";
+%% any other longstr value, or any other field type, is rejected.
+%% It may also be omitted entirely [0].
+validate_binding(_X, #binding{args = Args}) ->
+ case rabbit_misc:table_lookup(Args, <<"x-match">>) of
+ {longstr, <<"all">>} -> ok;
+ {longstr, <<"any">>} -> ok;
+ {longstr, Other} -> {error,
+ {binding_invalid,
+ "Invalid x-match field value ~p; "
+ "expected all or any", [Other]}};
+ {Type, Other} -> {error,
+ {binding_invalid,
+ "Invalid x-match field type ~p (value ~p); "
+ "expected longstr", [Type, Other]}};
+ undefined -> ok %% [0]
+ end.
+%% [0] spec is vague on whether it can be omitted but in practice it's
+%% useful to allow people to do this
+
+%% Map the x-match binding argument onto the match kind. Everything
+%% other than {longstr, <<"any">>} - including <<"all">> and legacy,
+%% unvalidated values - means "all".
+parse_x_match({longstr, <<"any">>}) -> any;
+parse_x_match(_Other) -> all.
+
+%% Horrendous matching algorithm. Depends for its merge-like
+%% (linear-time) behaviour on the lists:keysort
+%% (rabbit_misc:sort_field_table) that route/2 and
+%% rabbit_binding:{add,remove}/2 do.
+%%
+%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+%% In other words: REQUIRES BOTH PATTERN AND DATA TO BE SORTED ASCENDING BY KEY.
+%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+%%
+%% Entry point: derive the match kind from x-match, then walk both
+%% tables starting from "all matched so far, none matched yet".
+headers_match(Args, Data) ->
+ MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)),
+ headers_match(Args, Data, true, false, MK).
+
+%% Merge-walk pattern and data (both sorted ascending by key),
+%% tracking whether every non-"x-" pattern field matched (AllMatch)
+%% and whether at least one did (AnyMatch); the x-match kind decides
+%% which flag is the verdict. "x-"-prefixed pattern fields are
+%% skipped; data keys with no pattern entry are ignored.
+headers_match([], _Data, AllMatch, _AnyMatch, all) ->
+ AllMatch;
+headers_match([], _Data, _AllMatch, AnyMatch, any) ->
+ AnyMatch;
+headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data,
+ AllMatch, AnyMatch, MatchKind) ->
+ headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind);
+headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) ->
+ %% data exhausted but pattern fields remain: AllMatch fails
+ headers_match([], [], false, AnyMatch, MatchKind);
+headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest],
+ AllMatch, AnyMatch, MatchKind) when PK > DK ->
+ headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind);
+headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _],
+ _AllMatch, AnyMatch, MatchKind) when PK < DK ->
+ %% pattern key absent from data: AllMatch fails
+ headers_match(PRest, Data, false, AnyMatch, MatchKind);
+headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest],
+ AllMatch, AnyMatch, MatchKind) when PK == DK ->
+ {AllMatch1, AnyMatch1} =
+ case rabbit_misc:type_class(PT) == rabbit_misc:type_class(DT) of
+ %% It's not properly specified, but a "no value" in a
+ %% pattern field is supposed to mean simple presence of
+ %% the corresponding data field. I've interpreted that to
+ %% mean a type of "void" for the pattern field.
+ _ when PT == void -> {AllMatch, true};
+ false -> {false, AnyMatch};
+ _ when PV == DV -> {AllMatch, true};
+ _ -> {false, AnyMatch}
+ end,
+ headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind).
+
+%% Remaining callbacks are no-ops; argument equivalence is delegated
+%% to the generic check.
+validate(_X) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_invalid).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+
+%% Placeholder exchange type used when an exchange's declared type
+%% module cannot be found: management callbacks succeed, but routing
+%% a message always fails with a protocol error.
+description() ->
+ [{description,
+ <<"Dummy exchange type, to be used when the intended one is not found.">>
+ }].
+
+serialise_events() -> false.
+
+-ifdef(use_specs).
+-spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
+ -> no_return()).
+-endif.
+%% Routing always raises precondition_failed.
+route(#exchange{name = Name, type = Type}, _) ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "Cannot route message through ~s: exchange type ~s not found",
+ [rabbit_misc:rs(Name), Type]).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_topic).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type topic"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"topic">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%----------------------------------------------------------------------------
+
+description() ->
+ [{description, <<"AMQP topic exchange, as per the AMQP specification">>}].
+
+%% Binding events need no serialisation; the trie is maintained inside
+%% mnesia transactions.
+serialise_events() -> false.
+
+%% NB: This may return duplicate results in some situations (that's ok)
+%% Match each routing key against the per-exchange trie (dirty read
+%% for speed) and concatenate the hits.
+route(#exchange{name = X},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ lists:append([begin
+ Words = split_topic_key(RKey),
+ mnesia:async_dirty(fun trie_match/2, [X, Words])
+ end || RKey <- Routes]).
+
+%% No extra constraints on declaration, bindings or creation.
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+
+%% On transactional delete, drop this exchange's entire trie (nodes,
+%% edges and bindings); the non-transactional phase has nothing to do.
+delete(transaction, #exchange{name = X}, _Bs) ->
+ trie_remove_all_nodes(X),
+ trie_remove_all_edges(X),
+ trie_remove_all_bindings(X),
+ ok;
+delete(none, _Exchange, _Bs) ->
+ ok.
+
+policy_changed(_X1, _X2) -> ok.
+
+%% Bindings only touch the trie in the transactional phase.
+add_binding(transaction, _Exchange, Binding) ->
+ internal_add_binding(Binding);
+add_binding(none, _Exchange, _Binding) ->
+ ok.
+
+%% Remove each binding's destination from the trie and prune any
+%% trie path that is left empty.
+remove_bindings(transaction, _X, Bs) ->
+ %% See rabbit_binding:lock_route_tables for the rationale for
+ %% taking table locks.
+ case Bs of
+ [_] -> ok;
+ _ -> [mnesia:lock({table, T}, write) ||
+ T <- [rabbit_topic_trie_node,
+ rabbit_topic_trie_edge,
+ rabbit_topic_trie_binding]]
+ end,
+ [begin
+ Path = [{FinalNode, _} | _] =
+ follow_down_get_path(X, split_topic_key(K)),
+ trie_remove_binding(X, FinalNode, D, Args),
+ remove_path_if_empty(X, Path)
+ end || #binding{source = X, key = K, destination = D, args = Args} <- Bs],
+ ok;
+remove_bindings(none, _X, _Bs) ->
+ ok.
+
+%% Delegated to the generic exchange argument check.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+%%----------------------------------------------------------------------------
+
+%% Create (or extend) the trie path for the binding's key and attach
+%% the destination at the final node.
+internal_add_binding(#binding{source = X, key = K, destination = D,
+ args = Args}) ->
+ FinalNode = follow_down_create(X, split_topic_key(K)),
+ trie_add_binding(X, FinalNode, D, Args),
+ ok.
+
+%% Walk the trie from the root collecting bound destinations.
+trie_match(X, Words) ->
+ trie_match(X, root, Words, []).
+
+%% At the end of the key the current node's bindings match, and so
+%% does a trailing "#" edge (which may match zero words). Otherwise
+%% try, in order: the literal word, "*" (exactly one word) and "#"
+%% (zero or more words).
+trie_match(X, Node, [], ResAcc) ->
+ trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [],
+ trie_bindings(X, Node) ++ ResAcc);
+trie_match(X, Node, [W | RestW] = Words, ResAcc) ->
+ lists:foldl(fun ({WArg, MatchFun, RestWArg}, Acc) ->
+ trie_match_part(X, Node, WArg, MatchFun, RestWArg, Acc)
+ end, ResAcc, [{W, fun trie_match/4, RestW},
+ {"*", fun trie_match/4, RestW},
+ {"#", fun trie_match_skip_any/4, Words}]).
+
+%% Descend along edge Search if it exists and continue with MatchFun;
+%% otherwise the accumulator is returned unchanged.
+trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) ->
+ case trie_child(X, Node, Search) of
+ {ok, NextNode} -> MatchFun(X, NextNode, RestW, ResAcc);
+ error -> ResAcc
+ end.
+
+%% "#" semantics: try matching the remaining words having consumed
+%% zero words at this node, then one, then two, ... down to all of
+%% them, accumulating every hit.
+trie_match_skip_any(X, Node, [], ResAcc) ->
+ trie_match(X, Node, [], ResAcc);
+trie_match_skip_any(X, Node, [_ | RestW] = Words, ResAcc) ->
+ trie_match_skip_any(X, Node, RestW,
+ trie_match(X, Node, Words, ResAcc)).
+
+%% Follow Words down the trie, creating any missing nodes/edges, and
+%% return the id of the final node.
+follow_down_create(X, Words) ->
+ case follow_down_last_node(X, Words) of
+ {ok, FinalNode} -> FinalNode;
+ {error, Node, RestW} -> lists:foldl(
+ fun (W, CurNode) ->
+ NewNode = new_node_id(),
+ trie_add_edge(X, CurNode, NewNode, W),
+ NewNode
+ end, Node, RestW)
+ end.
+
+%% Last node reached by following Words, or {error, Node, RestWords}
+%% at the first missing edge.
+follow_down_last_node(X, Words) ->
+ follow_down(X, fun (_, Node, _) -> Node end, root, Words).
+
+%% Path from the final node back up to the root as [{NodeId, Word}];
+%% asserts that the whole path exists.
+follow_down_get_path(X, Words) ->
+ {ok, Path} =
+ follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end,
+ [{root, none}], Words),
+ Path.
+
+%% Generic descent from the root: fold AccFun over each traversed
+%% edge; stop with {error, Acc, RemainingWords} at a missing edge.
+follow_down(X, AccFun, Acc0, Words) ->
+ follow_down(X, root, AccFun, Acc0, Words).
+
+follow_down(_X, _CurNode, _AccFun, Acc, []) ->
+ {ok, Acc};
+follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) ->
+ case trie_child(X, CurNode, W) of
+ {ok, NextNode} -> follow_down(X, NextNode, AccFun,
+ AccFun(W, NextNode, Acc), RestW);
+ error -> {error, Acc, Words}
+ end.
+
+%% Walk back up towards the root deleting edges that lead to nodes
+%% whose row has disappeared (counts dropped to zero, see
+%% trie_update_node_counts/4); stop at the first surviving node or
+%% at the root.
+remove_path_if_empty(_, [{root, none}]) ->
+ ok;
+remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) ->
+ case mnesia:read(rabbit_topic_trie_node,
+ #trie_node{exchange_name = X, node_id = Node}, write) of
+ [] -> trie_remove_edge(X, Parent, Node, W),
+ remove_path_if_empty(X, RestPath);
+ _ -> ok
+ end.
+
+%% Child of Node along edge Word, or error if no such edge exists.
+trie_child(X, Node, Word) ->
+ case mnesia:read({rabbit_topic_trie_edge,
+ #trie_edge{exchange_name = X,
+ node_id = Node,
+ word = Word}}) of
+ [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode};
+ [] -> error
+ end.
+
+%% All destinations bound at Node (any binding arguments).
+trie_bindings(X, Node) ->
+ MatchHead = #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X,
+ node_id = Node,
+ destination = '$1',
+ arguments = '_'}},
+ mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]).
+
+%% Add Delta to the given counter field of Node's row, creating a
+%% zeroed row on demand. A row whose edge and binding counts both
+%% reach zero is deleted rather than written back, which is what
+%% lets remove_path_if_empty/2 detect empty nodes.
+trie_update_node_counts(X, Node, Field, Delta) ->
+ E = case mnesia:read(rabbit_topic_trie_node,
+ #trie_node{exchange_name = X,
+ node_id = Node}, write) of
+ [] -> #topic_trie_node{trie_node = #trie_node{
+ exchange_name = X,
+ node_id = Node},
+ edge_count = 0,
+ binding_count = 0};
+ [E0] -> E0
+ end,
+ case setelement(Field, E, element(Field, E) + Delta) of
+ #topic_trie_node{edge_count = 0, binding_count = 0} ->
+ ok = mnesia:delete_object(rabbit_topic_trie_node, E, write);
+ EN ->
+ ok = mnesia:write(rabbit_topic_trie_node, EN, write)
+ end.
+
+%% Adding/removing an edge also bumps the parent node's edge_count.
+trie_add_edge(X, FromNode, ToNode, W) ->
+ trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, +1),
+ trie_edge_op(X, FromNode, ToNode, W, fun mnesia:write/3).
+
+trie_remove_edge(X, FromNode, ToNode, W) ->
+ trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, -1),
+ trie_edge_op(X, FromNode, ToNode, W, fun mnesia:delete_object/3).
+
+%% Shared write/delete of the edge row itself.
+trie_edge_op(X, FromNode, ToNode, W, Op) ->
+ ok = Op(rabbit_topic_trie_edge,
+ #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
+ node_id = FromNode,
+ word = W},
+ node_id = ToNode},
+ write).
+
+%% Adding/removing a binding also bumps the node's binding_count.
+trie_add_binding(X, Node, D, Args) ->
+ trie_update_node_counts(X, Node, #topic_trie_node.binding_count, +1),
+ trie_binding_op(X, Node, D, Args, fun mnesia:write/3).
+
+trie_remove_binding(X, Node, D, Args) ->
+ trie_update_node_counts(X, Node, #topic_trie_node.binding_count, -1),
+ trie_binding_op(X, Node, D, Args, fun mnesia:delete_object/3).
+
+%% Shared write/delete of the binding row itself.
+trie_binding_op(X, Node, D, Args, Op) ->
+ ok = Op(rabbit_topic_trie_binding,
+ #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X,
+ node_id = Node,
+ destination = D,
+ arguments = Args}},
+ write).
+
+%% Bulk removal of every trie row belonging to exchange X; used when
+%% the exchange itself is deleted.
+trie_remove_all_nodes(X) ->
+ remove_all(rabbit_topic_trie_node,
+ #topic_trie_node{trie_node = #trie_node{exchange_name = X,
+ _ = '_'},
+ _ = '_'}).
+
+trie_remove_all_edges(X) ->
+ remove_all(rabbit_topic_trie_edge,
+ #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
+ _ = '_'},
+ _ = '_'}).
+
+trie_remove_all_bindings(X) ->
+ remove_all(rabbit_topic_trie_binding,
+ #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X, _ = '_'},
+ _ = '_'}).
+
+%% Delete every object in Table matching Pattern, under a write lock.
+remove_all(Table, Pattern) ->
+ lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end,
+ mnesia:match_object(Table, Pattern, write)).
+
+%% Fresh globally-unique trie node id.
+new_node_id() ->
+ rabbit_guid:gen().
+
+%% Split a routing key binary on "." into a list of word strings.
+%% <<>> yields []; empty words from leading/trailing/consecutive
+%% dots are preserved as "".
+split_topic_key(<<>>) ->
+ [];
+split_topic_key(Key) ->
+ [binary_to_list(W) || W <- binary:split(Key, <<".">>, [global])].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_file).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([is_file/1, is_dir/1, file_size/1, ensure_dir/1, wildcard/2, list_dir/1]).
+-export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]).
+-export([append_file/2, ensure_parent_dirs_exist/1]).
+-export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
+-export([lock_file/1]).
+
+-import(file_handle_cache, [with_handle/1, with_handle/2]).
+
+-define(TMP_EXT, ".tmp").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(ok_or_error() :: rabbit_types:ok_or_error(any())).
+
+-spec(is_file/1 :: ((file:filename())) -> boolean()).
+-spec(is_dir/1 :: ((file:filename())) -> boolean()).
+-spec(file_size/1 :: ((file:filename())) -> non_neg_integer()).
+-spec(ensure_dir/1 :: ((file:filename())) -> ok_or_error()).
+-spec(wildcard/2 :: (string(), file:filename()) -> [file:filename()]).
+-spec(list_dir/1 :: (file:filename()) -> rabbit_types:ok_or_error2(
+ [file:filename()], any())).
+-spec(read_term_file/1 ::
+ (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())).
+-spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()).
+-spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()).
+-spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()).
+-spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()).
+-spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok').
+-spec(rename/2 ::
+ (file:filename(), file:filename()) -> ok_or_error()).
+-spec(delete/1 :: ([file:filename()]) -> ok_or_error()).
+-spec(recursive_delete/1 ::
+ ([file:filename()])
+ -> rabbit_types:ok_or_error({file:filename(), any()})).
+-spec(recursive_copy/2 ::
+ (file:filename(), file:filename())
+ -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})).
+-spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% True if File names an existing regular file or directory.
+is_file(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{type=regular}} -> true;
+ {ok, #file_info{type=directory}} -> true;
+ _ -> false
+ end.
+
+%% True if Dir is an existing directory (via the file-handle cache).
+is_dir(Dir) -> is_dir_internal(read_file_info(Dir)).
+
+%% As is_dir/1 but bypassing the handle cache; safe to call from
+%% inside a with_handle/1 body.
+is_dir_no_handle(Dir) -> is_dir_internal(prim_file:read_file_info(Dir)).
+
+is_dir_internal({ok, #file_info{type=directory}}) -> true;
+is_dir_internal(_) -> false.
+
+%% Size of File in bytes; 0 when the file cannot be stat'ed.
+file_size(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{size=Size}} -> Size;
+ _ -> 0
+ end.
+
+%% Create all missing parent directories of File (File itself is
+%% treated as a leaf name, per filename:dirname/1).
+ensure_dir(File) -> with_handle(fun () -> ensure_dir_internal(File) end).
+
+ensure_dir_internal("/") ->
+ ok;
+ensure_dir_internal(File) ->
+ Dir = filename:dirname(File),
+ case is_dir_no_handle(Dir) of
+ true -> ok;
+ %% create ancestors first; make_dir's result is what the
+ %% caller sees
+ false -> ensure_dir_internal(Dir),
+ prim_file:make_dir(Dir)
+ end.
+
+%% Names in Dir matching the anchored regex Pattern; [] when Dir
+%% cannot be listed.
+wildcard(Pattern, Dir) ->
+ case list_dir(Dir) of
+ {ok, Files} -> {ok, RE} = re:compile(Pattern, [anchored]),
+ [File || File <- Files,
+ match =:= re:run(File, RE, [{capture, none}])];
+ {error, _} -> []
+ end.
+
+%% Directory listing / stat routed through the file-handle cache so
+%% the configured descriptor limit is respected.
+list_dir(Dir) -> with_handle(fun () -> prim_file:list_dir(Dir) end).
+
+read_file_info(File) ->
+ with_handle(fun () -> prim_file:read_file_info(File) end).
+
+%% Read a file of dot-terminated Erlang terms (as file:consult/1
+%% would) through the handle cache; any read/scan/parse failure is
+%% returned as the underlying {error, Reason}.
+%% NOTE(review): binary_to_list/1 treats the contents as raw bytes -
+%% confirm whether UTF-8 term files need to be supported here.
+read_term_file(File) ->
+ try
+ {ok, Data} = with_handle(fun () -> prim_file:read_file(File) end),
+ {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)),
+ TokenGroups = group_tokens(Tokens),
+ {ok, [begin
+ {ok, Term} = erl_parse:parse_term(Tokens1),
+ Term
+ end || Tokens1 <- TokenGroups]}
+ catch
+ error:{badmatch, Error} -> Error
+ end.
+
+%% Split a flat token list into per-term groups, each group ending
+%% with its {dot, _} token; tokens after a final dot form a trailing
+%% group of their own.
+group_tokens([]) -> [];
+group_tokens(Ts) ->
+ IsNotDot = fun ({dot, _}) -> false; (_) -> true end,
+ case lists:splitwith(IsNotDot, Ts) of
+ {Group, [Dot | Rest]} -> [Group ++ [Dot] | group_tokens(Rest)];
+ {Group, []} -> [Group]
+ end.
+
+%% Serialise Terms one per line in ~w form (each terminated by ".\n")
+%% and write them atomically via write_file/2.
+write_term_file(File, Terms) ->
+ Formatted = [io_lib:format("~w.~n", [Term]) || Term <- Terms],
+ write_file(File, list_to_binary(Formatted)).
+
+%% Write Data to Path atomically (see with_synced_copy/3); 'binary'
+%% and 'write' are always prepended to Modes, deduplicated.
+write_file(Path, Data) -> write_file(Path, Data, []).
+
+write_file(Path, Data, Modes) ->
+ Modes1 = [binary, write | (Modes -- [binary, write])],
+ case make_binary(Data) of
+ Bin when is_binary(Bin) -> write_file1(Path, Bin, Modes1);
+ {error, _} = E -> E
+ end.
+
+%% make_binary/1 is based on the corresponding function in the
+%% kernel/file.erl module of the Erlang R14B02 release, which is
+%% licensed under the EPL.
+
+%% Coerce iodata to a binary, returning {error, Reason} instead of
+%% raising when the argument is not valid iodata.
+make_binary(Data) when is_binary(Data) ->
+ Data;
+make_binary(Data) ->
+ try
+ erlang:iolist_to_binary(Data)
+ catch
+ error:Why -> {error, Why}
+ end.
+
+%% Perform the actual write through with_synced_copy/3, converting
+%% the badmatch raised by a failed 'ok = ...' assertion (and any
+%% {error, _} exit/throw) back into an {error, Reason} return.
+write_file1(Path, Bin, Modes) ->
+ try
+ with_synced_copy(Path, Modes,
+ fun (Hdl) ->
+ ok = prim_file:write(Hdl, Bin)
+ end)
+ catch
+ error:{badmatch, Error} -> Error;
+ _:{error, Error} -> {error, Error}
+ end.
+
+%% Run Fun on a handle to a temporary file (Path ++ ?TMP_EXT), fsync
+%% it, then atomically rename it over Path -- readers never observe a
+%% partially-written file. 'append' mode is incompatible with the
+%% write-then-rename scheme and is rejected up front.
+with_synced_copy(Path, Modes, Fun) ->
+ case lists:member(append, Modes) of
+ true ->
+ {error, append_not_supported, Path};
+ false ->
+ with_handle(
+ fun () ->
+ Bak = Path ++ ?TMP_EXT,
+ case prim_file:open(Bak, Modes) of
+ {ok, Hdl} ->
+ %% Order matters: sync before rename, so the
+ %% data is durable before it becomes visible
+ %% under Path; close always happens via after.
+ try
+ Result = Fun(Hdl),
+ ok = prim_file:sync(Hdl),
+ ok = prim_file:rename(Bak, Path),
+ Result
+ after
+ prim_file:close(Hdl)
+ end;
+ {error, _} = E -> E
+ end
+ end)
+ end.
+
+%% TODO the semantics of this function are rather odd. But see bug 25021.
+%% Append the *contents* of File onto the end of the file named
+%% File ++ Suffix (Suffix extends the name, not the data), creating
+%% the target if necessary. A missing source is treated as empty.
+append_file(File, Suffix) ->
+ case read_file_info(File) of
+ {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix);
+ {error, enoent} -> append_file(File, 0, Suffix);
+ Error -> Error
+ end.
+
+%% Empty suffix: the target would be File itself -- nothing to do.
+append_file(_, _, "") ->
+ ok;
+%% Empty (or absent) source: just ensure the target file exists.
+append_file(File, 0, Suffix) ->
+ with_handle(fun () ->
+ case prim_file:open([File, Suffix], [append]) of
+ {ok, Fd} -> prim_file:close(Fd);
+ Error -> Error
+ end
+ end);
+%% Non-empty source: copy it onto the end of the target.
+%% NOTE(review): with_handle(2, ...) presumably reserves two handles
+%% (source + destination) -- confirm against with_handle/2.
+append_file(File, _, Suffix) ->
+ case with_handle(2, fun () ->
+ file:copy(File, {[File, Suffix], [append]})
+ end) of
+ {ok, _BytesCopied} -> ok;
+ Error -> Error
+ end.
+
+%% Like ensure_dir/1, but escalates failure to a throw -- used on
+%% paths that the caller cannot proceed without.
+ensure_parent_dirs_exist(Filename) ->
+ case ensure_dir(Filename) of
+ ok -> ok;
+ {error, Reason} ->
+ throw({error, {cannot_create_parent_dirs, Filename, Reason}})
+ end.
+
+%% Handle-accounted wrappers for rename and delete.
+rename(Old, New) -> with_handle(fun () -> prim_file:rename(Old, New) end).
+
+delete(File) -> with_handle(fun () -> prim_file:delete(File) end).
+
+%% Recursively delete each path in Files, stopping at the first
+%% error ({error, {Path, Reason}}). The whole traversal runs under a
+%% single with_handle reservation.
+recursive_delete(Files) ->
+ with_handle(
+ fun () -> lists:foldl(fun (Path, ok) -> recursive_delete1(Path);
+ (_Path, {error, _Err} = Error) -> Error
+ end, ok, Files)
+ end).
+
+%% Delete one path. Symlinks to directories are deleted as links --
+%% never followed -- hence the explicit symlink check below.
+recursive_delete1(Path) ->
+ case is_dir_no_handle(Path) and not(is_symlink_no_handle(Path)) of
+ false -> case prim_file:delete(Path) of
+ ok -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ true -> case prim_file:list_dir(Path) of
+ {ok, FileNames} ->
+ %% Delete the children first, then the (now
+ %% empty) directory itself.
+ case lists:foldl(
+ fun (FileName, ok) ->
+ recursive_delete1(
+ filename:join(Path, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames) of
+ ok ->
+ case prim_file:del_dir(Path) of
+ ok -> ok;
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ {error, _Err} = Error ->
+ Error
+ end;
+ {error, Err} ->
+ {error, {Path, Err}}
+ end
+ end.
+
+%% True iff File is a symbolic link (read_link succeeds only on
+%% links); checked without going through the file handle cache.
+is_symlink_no_handle(File) ->
+ read_link_found(prim_file:read_link(File)).
+
+read_link_found({ok, _Target}) -> true;
+read_link_found(_) -> false.
+
+%% Recursively copy Src to Dest; errors are reported as
+%% {error, {Src, Dest, Reason}} and abort the traversal.
+recursive_copy(Src, Dest) ->
+ %% Note that this uses the 'file' module and, hence, shouldn't be
+ %% run on many processes at once.
+ case is_dir(Src) of
+ false -> case file:copy(Src, Dest) of
+ {ok, _Bytes} -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Src, Dest, Err}}
+ end;
+ true -> case file:list_dir(Src) of
+ {ok, FileNames} ->
+ %% Create the destination directory, then copy
+ %% each child, stopping at the first error.
+ case file:make_dir(Dest) of
+ ok ->
+ lists:foldl(
+ fun (FileName, ok) ->
+ recursive_copy(
+ filename:join(Src, FileName),
+ filename:join(Dest, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames);
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end;
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end
+ end.
+
+%% TODO: When we stop supporting Erlang prior to R14, this should be
+%% replaced with file:open [write, exclusive]
+%% Create Path as a lock file; {error, eexist} if it already exists.
+%% NOTE(review): the is_file/open sequence is not atomic, so two
+%% processes racing here could both "acquire" the lock -- the TODO
+%% above (file:open with [write, exclusive]) would close that window.
+lock_file(Path) ->
+ case is_file(Path) of
+ true -> {error, eexist};
+ false -> with_handle(
+ fun () -> {ok, Lock} = prim_file:open(Path, [write]),
+ ok = prim_file:close(Lock)
+ end)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% TODO auto-generate
+
+-module(rabbit_framing).
+
+-ifdef(use_specs).
+
+-export_type([protocol/0,
+ amqp_field_type/0, amqp_property_type/0,
+ amqp_table/0, amqp_array/0, amqp_value/0,
+ amqp_method_name/0, amqp_method/0, amqp_method_record/0,
+ amqp_method_field_name/0, amqp_property_record/0,
+ amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
+
+%% The two supported wire protocols, identified by their codec module.
+-type(protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1').
+
+%% Declare type T as the union of the same-named type from each
+%% protocol module, so the rest of the codebase can stay
+%% protocol-agnostic.
+-define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T |
+ rabbit_framing_amqp_0_9_1:T)).
+
+-?protocol_type(amqp_field_type()).
+-?protocol_type(amqp_property_type()).
+-?protocol_type(amqp_table()).
+-?protocol_type(amqp_array()).
+-?protocol_type(amqp_value()).
+-?protocol_type(amqp_method_name()).
+-?protocol_type(amqp_method()).
+-?protocol_type(amqp_method_record()).
+-?protocol_type(amqp_method_field_name()).
+-?protocol_type(amqp_property_record()).
+-?protocol_type(amqp_exception()).
+-?protocol_type(amqp_exception_code()).
+-?protocol_type(amqp_class_id()).
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_guid).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([filename/0]).
+-export([gen/0, gen_secure/0, string/2, binary/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+-define(SERIAL_FILENAME, "rabbit_serial").
+
+-record(state, {serial}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([guid/0]).
+
+-type(guid() :: binary()).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(filename/0 :: () -> string()).
+-spec(gen/0 :: () -> guid()).
+-spec(gen_secure/0 :: () -> guid()).
+-spec(string/2 :: (guid(), any()) -> string()).
+-spec(binary/2 :: (guid(), any()) -> binary()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the singleton GUID server. The on-disk serial is read and
+%% bumped *before* the server starts, so every node incarnation --
+%% including crash restarts -- gets a distinct serial.
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE,
+ [update_disk_serial()], []).
+
+%% We use this to detect a (possibly rather old) Mnesia directory,
+%% since it has existed since at least 1.7.0 (as far back as I cared
+%% to go).
+filename() ->
+ filename:join(rabbit_mnesia:dir(), ?SERIAL_FILENAME).
+
+%% Read the persisted serial, write back Serial + 1, and return the
+%% value read. A serial file that can be neither read nor written
+%% aborts startup via throw.
+update_disk_serial() ->
+ Filename = filename(),
+ Serial = case rabbit_file:read_term_file(Filename) of
+ {ok, [Num]} -> Num;
+ {ok, []} -> 0; %% [1]
+ {error, enoent} -> 0;
+ {error, Reason} ->
+ throw({error, {cannot_read_serial_file, Filename, Reason}})
+ end,
+ case rabbit_file:write_term_file(Filename, [Serial + 1]) of
+ ok -> ok;
+ {error, Reason1} ->
+ throw({error, {cannot_write_serial_file, Filename, Reason1}})
+ end,
+ Serial.
+%% [1] a couple of users have reported startup failures due to the
+%% file being empty, presumably as a result of filesystem
+%% corruption. While rabbit doesn't cope with that in general, in this
+%% specific case we can be more accommodating.
+
+%% Generate an un-hashed guid.
+fresh() ->
+ %% We don't use erlang:now() here because a) it may return
+ %% duplicates when the system clock has been rewound prior to a
+ %% restart, or ids were generated at a high rate (which causes
+ %% now() to move ahead of the system time), and b) it is really
+ %% slow since it takes a global lock and makes a system call.
+ %%
+ %% A persisted serial number, the node, and a unique reference
+ %% (per node incarnation) uniquely identifies a process in space
+ %% and time.
+ Serial = gen_server:call(?SERVER, serial, infinity),
+ {Serial, node(), make_ref()}.
+
+%% Rotate the four 32-bit hash blocks forward one round, returning
+%% {NewBlocks, I + 1}.
+advance_blocks({B1, B2, B3, B4}, I) ->
+ %% Hash {B1, I} into a fresh 32-bit block: it becomes the new last
+ %% block and the other three are XORed with it. XORing propagates
+ %% each round's change through every block, which conveniently
+ %% avoids cascading conflicts: a collision requires all four
+ %% blocks *and* the counter to coincide.
+ %%
+ %% The range (2^32) is given explicitly since phash2 defaults to
+ %% 2^27.
+ Fresh = erlang:phash2({B1, I}, 1 bsl 32),
+ {{B2 bxor Fresh, B3 bxor Fresh, B4 bxor Fresh, Fresh}, I + 1}.
+
+%% generate a GUID. This function should be used when performance is a
+%% priority and predictability is not an issue. Otherwise use
+%% gen_secure/0.
+%% Fast GUID: md5 of a fresh() term seeds four 32-bit blocks kept in
+%% the caller's process dictionary; later calls just rotate them via
+%% advance_blocks/2, so only the first call per process hits the
+%% guid server.
+gen() ->
+ %% We hash a fresh GUID with md5, split it in 4 blocks, and each
+ %% time we need a new guid we rotate them producing a new hash
+ %% with the aid of the counter. Look at the comments in
+ %% advance_blocks/2 for details.
+ case get(guid) of
+ undefined -> <<B1:32, B2:32, B3:32, B4:32>> = Res =
+ erlang:md5(term_to_binary(fresh())),
+ put(guid, {{B1, B2, B3, B4}, 0}),
+ Res;
+ {BS, I} -> {{B1, B2, B3, B4}, _} = S = advance_blocks(BS, I),
+ put(guid, S),
+ <<B1:32, B2:32, B3:32, B4:32>>
+ end.
+
+%% generate a non-predictable GUID.
+%%
+%% The id is only unique within a single cluster and as long as the
+%% serial store hasn't been deleted.
+%%
+%% If you are not concerned with predictability, gen/0 is faster.
+%% Non-predictable GUID: the {fresh(), Counter} seed is re-hashed with
+%% md5 on every call (per-process counter in the process dictionary),
+%% so no output reveals the next one. Slower than gen/0.
+gen_secure() ->
+ %% Here instead of hashing once we hash the GUID and the counter
+ %% each time, so that the GUID is not predictable.
+ G = case get(guid_secure) of
+ undefined -> {fresh(), 0};
+ {S, I} -> {S, I+1}
+ end,
+ put(guid_secure, G),
+ erlang:md5(term_to_binary(G)).
+
+%% generate a readable string representation of a GUID.
+%%
+%% employs base64url encoding, which is safer in more contexts than
+%% plain base64.
+%% Readable representation: "Prefix-<base64url(G)>" as a string.
+string(G, Prefix) ->
+ Prefix ++ "-" ++ rabbit_misc:base64url(G).
+
+%% Same representation as a binary.
+binary(G, Prefix) ->
+ list_to_binary(string(G, Prefix)).
+
+%%----------------------------------------------------------------------------
+
+%% The server's only state is the serial read at start_link time.
+init([Serial]) ->
+ {ok, #state{serial = Serial}}.
+
+%% The only expected call: hand out this incarnation's serial.
+handle_call(serial, _From, State = #state{serial = Serial}) ->
+ {reply, Serial, State};
+
+%% NOTE(review): unknown calls get {noreply, _}, leaving the caller
+%% blocked until its call times out -- confirm this is the intended
+%% way to surface protocol misuse.
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+%% No casts or info messages are expected; ignore them.
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_heartbeat).
+
+-export([start/6, start/7]).
+-export([start_heartbeat_sender/4, start_heartbeat_receiver/4,
+ pause_monitor/1, resume_monitor/1]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([heartbeaters/0]).
+
+-type(heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}).
+
+-type(heartbeat_callback() :: fun (() -> any())).
+
+-spec(start/6 ::
+ (pid(), rabbit_net:socket(),
+ non_neg_integer(), heartbeat_callback(),
+ non_neg_integer(), heartbeat_callback()) -> heartbeaters()).
+
+-spec(start/7 ::
+ (pid(), rabbit_net:socket(), rabbit_types:proc_name(),
+ non_neg_integer(), heartbeat_callback(),
+ non_neg_integer(), heartbeat_callback()) -> heartbeaters()).
+
+-spec(start_heartbeat_sender/4 ::
+ (rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
+ rabbit_types:proc_type_and_name()) -> rabbit_types:ok(pid())).
+-spec(start_heartbeat_receiver/4 ::
+ (rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
+ rabbit_types:proc_type_and_name()) -> rabbit_types:ok(pid())).
+
+-spec(pause_monitor/1 :: (heartbeaters()) -> 'ok').
+-spec(resume_monitor/1 :: (heartbeaters()) -> 'ok').
+
+-spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
+-spec(system_continue/3 :: (_,_,{_, _}) -> any()).
+-spec(system_terminate/4 :: (_,_,_,_) -> none()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Start a sender/receiver heartbeater pair under SupPid for Sock,
+%% with no identity attached to the worker names.
+start(SupPid, Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
+ start(SupPid, Sock, unknown,
+ SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun).
+
+%% As start/6, but tagging both workers with Identity. A timeout of 0
+%% disables the corresponding side (see start_heartbeater/7), so
+%% either element of the result may be 'none'.
+start(SupPid, Sock, Identity,
+ SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
+ {ok, Sender} =
+ start_heartbeater(SendTimeoutSec, SupPid, Sock,
+ SendFun, heartbeat_sender,
+ start_heartbeat_sender, Identity),
+ {ok, Receiver} =
+ start_heartbeater(ReceiveTimeoutSec, SupPid, Sock,
+ ReceiveFun, heartbeat_receiver,
+ start_heartbeat_receiver, Identity),
+ {Sender, Receiver}.
+
+%% Sender: watches send_oct and fires SendFun (then continues) when
+%% no bytes went out during an interval.
+start_heartbeat_sender(Sock, TimeoutSec, SendFun, Identity) ->
+ %% the 'div 2' is there so that we don't end up waiting for nearly
+ %% 2 * TimeoutSec before sending a heartbeat in the boundary case
+ %% where the last message was sent just after a heartbeat.
+ heartbeater({Sock, TimeoutSec * 1000 div 2, send_oct, 0,
+ fun () -> SendFun(), continue end}, Identity).
+
+%% Receiver: watches recv_oct and fires ReceiveFun (then stops) when
+%% nothing arrived for two consecutive intervals.
+start_heartbeat_receiver(Sock, TimeoutSec, ReceiveFun, Identity) ->
+ %% we check for incoming data every interval, and time out after
+ %% two checks with no change. As a result we will time out between
+ %% 2 and 3 intervals after the last data has been received.
+ heartbeater({Sock, TimeoutSec * 1000, recv_oct, 1,
+ fun () -> ReceiveFun(), stop end}, Identity).
+
+%% Suspend/resume the receive-side monitor; a 'none' receiver (zero
+%% timeout at start) makes both a no-op.
+pause_monitor({_Sender, none}) -> ok;
+pause_monitor({_Sender, Receiver}) -> Receiver ! pause, ok.
+
+resume_monitor({_Sender, none}) -> ok;
+resume_monitor({_Sender, Receiver}) -> Receiver ! resume, ok.
+
+%% sys callbacks: heartbeater/3 parks {Params, State} with
+%% sys:handle_system_msg/6, and control returns here afterwards.
+system_continue(_Parent, Deb, {Params, State}) ->
+ heartbeater(Params, Deb, State).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+%%----------------------------------------------------------------------------
+%% A zero timeout disables heartbeating on that side entirely.
+start_heartbeater(0, _SupPid, _Sock, _TimeoutFun, _Name, _Callback,
+ _Identity) ->
+ {ok, none};
+%% Otherwise run the heartbeater as a transient child of SupPid,
+%% entered via Callback (start_heartbeat_sender/receiver).
+start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback,
+ Identity) ->
+ supervisor2:start_child(
+ SupPid, {Name,
+ {rabbit_heartbeat, Callback,
+ [Sock, TimeoutSec, TimeoutFun, {Name, Identity}]},
+ transient, ?MAX_WAIT, worker, [rabbit_heartbeat]}).
+
+%% Spawn the heartbeat loop, starting from a zeroed
+%% {LastStatValue, SameCount} state.
+heartbeater(Params, Identity) ->
+ Deb = sys:debug_options([]),
+ {ok, proc_lib:spawn_link(fun () ->
+ rabbit_misc:store_proc_name(Identity),
+ heartbeater(Params, Deb, {0, 0})
+ end)}.
+
+%% The heartbeat loop proper. Every TimeoutMillisec it samples
+%% StatName (send_oct/recv_oct) on Sock; after Threshold+1 consecutive
+%% unchanged samples it runs Handler, which returns 'continue' (keep
+%% looping; sender side) or 'stop' (exit; receiver side). State is
+%% {LastStatValue, SameCount}.
+heartbeater({Sock, TimeoutMillisec, StatName, Threshold, Handler} = Params,
+ Deb, {StatVal, SameCount} = State) ->
+ Recurse = fun (State1) -> heartbeater(Params, Deb, State1) end,
+ System = fun (From, Req) ->
+ sys:handle_system_msg(
+ Req, From, self(), ?MODULE, Deb, {Params, State})
+ end,
+ receive
+ %% While paused we sit in a nested receive -- no 'after' clause,
+ %% so no stat sampling -- until resumed (counters reset to zero).
+ pause ->
+ receive
+ resume -> Recurse({0, 0});
+ {system, From, Req} -> System(From, Req);
+ Other -> exit({unexpected_message, Other})
+ end;
+ {system, From, Req} ->
+ System(From, Req);
+ Other ->
+ exit({unexpected_message, Other})
+ after TimeoutMillisec ->
+ case rabbit_net:getstat(Sock, [StatName]) of
+ {ok, [{StatName, NewStatVal}]} ->
+ if NewStatVal =/= StatVal ->
+ %% Activity seen: reset the idle counter.
+ Recurse({NewStatVal, 0});
+ SameCount < Threshold ->
+ Recurse({NewStatVal, SameCount + 1});
+ true ->
+ case Handler() of
+ stop -> ok;
+ continue -> Recurse({NewStatVal, 0})
+ end
+ end;
+ {error, einval} ->
+ %% the socket is dead, most likely because the
+ %% connection is being shut down -> terminate
+ ok;
+ {error, Reason} ->
+ exit({cannot_get_socket_stats, Reason})
+ end
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% The purpose of the limiter is to stem the flow of messages from
+%% queues to channels, in order to act upon various protocol-level
+%% flow control mechanisms, specifically AMQP 0-9-1's basic.qos
+%% prefetch_count, our consumer prefetch extension, and AMQP 1.0's
+%% link (aka consumer) credit mechanism.
+%%
+%% Each channel has an associated limiter process, created with
+%% start_link/1, which it passes to queues on consumer creation with
+%% rabbit_amqqueue:basic_consume/9, and rabbit_amqqueue:basic_get/4.
+%% The latter isn't strictly necessary, since basic.get is not
+%% subject to limiting, but it means that whenever a queue knows about
+%% a channel, it also knows about its limiter, which is less fiddly.
+%%
+%% The limiter process holds state that is, in effect, shared between
+%% the channel and all queues from which the channel is
+%% consuming. Essentially all these queues are competing for access to
+%% a single, limited resource - the ability to deliver messages via
+%% the channel - and it is the job of the limiter process to mediate
+%% that access.
+%%
+%% The limiter process is separate from the channel process for two
+%% reasons: separation of concerns, and efficiency. Channels can get
+%% very busy, particularly if they are also dealing with publishes.
+%% With a separate limiter process all the aforementioned access
+%% mediation can take place without touching the channel.
+%%
+%% For efficiency, both the channel and the queues keep some local
+%% state, initialised from the limiter pid with new/1 and client/1,
+%% respectively. In particular this allows them to avoid any
+%% interaction with the limiter process when it is 'inactive', i.e. no
+%% protocol-level flow control is taking place.
+%%
+%% This optimisation does come at the cost of some complexity though:
+%% when a limiter becomes active, the channel needs to inform all its
+%% consumer queues of this change in status. It does this by invoking
+%% rabbit_amqqueue:activate_limit_all/2. Note that there is no inverse
+%% transition, i.e. once a queue has been told about an active
+%% limiter, it is not subsequently told when that limiter becomes
+%% inactive. In practice it is rare for that to happen, though we
+%% could optimise this case in the future.
+%%
+%% Consumer credit (for AMQP 1.0) and per-consumer prefetch (for AMQP
+%% 0-9-1) are treated as essentially the same thing, but with the
+%% exception that per-consumer prefetch gets an auto-topup when
+%% acknowledgments come in.
+%%
+%% The bookkeeping for this is local to queues, so it is not necessary
+%% to store information about it in the limiter process. But for
+%% abstraction we hide it from the queue behind the limiter API, and
+%% it therefore becomes part of the queue local state.
+%%
+%% The interactions with the limiter are as follows:
+%%
+%% 1. Channels tell the limiter about basic.qos prefetch counts -
+%% that's what the limit_prefetch/3, unlimit_prefetch/1,
+%% get_prefetch_limit/1 API functions are about. They also tell the
+%% limiter queue state (via the queue) about consumer credit
+%% changes and message acknowledgement - that's what credit/5 and
+%% ack_from_queue/3 are for.
+%%
+%% 2. Queues also tell the limiter queue state about the queue
+%% becoming empty (via drained/1) and consumers leaving (via
+%% forget_consumer/2).
+%%
+%% 3. Queues register with the limiter - this happens as part of
+%% activate/1.
+%%
+%% 4. The limiter process maintains an internal counter of 'messages
+%% sent but not yet acknowledged', called the 'volume'.
+%%
+%% 5. Queues ask the limiter for permission (with can_send/3) whenever
+%% they want to deliver a message to a channel. The limiter checks
+%% whether a) the volume has not yet reached the prefetch limit,
+%% and b) whether the consumer has enough credit. If so it
+%% increments the volume and tells the queue to proceed. Otherwise
+%% it marks the queue as requiring notification (see below) and
+%% tells the queue not to proceed.
+%%
+%% 6. A queue that has been told to proceed (by the return value of
+%% can_send/3) sends the message to the channel. Conversely, a
+%% queue that has been told not to proceed, will not attempt to
+%% deliver that message, or any future messages, to the
+%% channel. This is accomplished by can_send/3 capturing the
+%% outcome in the local state, where it can be accessed with
+%% is_suspended/1.
+%%
+%% 7. When a channel receives an ack it tells the limiter (via ack/2)
+%% how many messages were ack'ed. The limiter process decrements
+%% the volume and if it falls below the prefetch_count then it
+%% notifies (through rabbit_amqqueue:resume/2) all the queues
+%% requiring notification, i.e. all those that had a can_send/3
+%% request denied.
+%%
+%% 8. Upon receipt of such a notification, queues resume delivery to
+%% the channel, i.e. they will once again start asking limiter, as
+%% described in (5).
+%%
+%% 9. When a queue has no more consumers associated with a particular
+%% channel, it deactivates use of the limiter with deactivate/1,
+%% which alters the local state such that no further interactions
+%% with the limiter process take place until a subsequent
+%% activate/1.
+
+-module(rabbit_limiter).
+
+-include("rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/1]).
+%% channel API
+-export([new/1, limit_prefetch/3, unlimit_prefetch/1, is_active/1,
+ get_prefetch_limit/1, ack/2, pid/1]).
+%% queue API
+-export([client/1, activate/1, can_send/3, resume/1, deactivate/1,
+ is_suspended/1, is_consumer_blocked/2, credit/5, ack_from_queue/3,
+ drained/1, forget_consumer/2]).
+%% callbacks
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, prioritise_call/4]).
+
+%%----------------------------------------------------------------------------
+
+-record(lstate, {pid, prefetch_limited}).
+-record(qstate, {pid, state, credits}).
+
+-ifdef(use_specs).
+
+-type(lstate() :: #lstate{pid :: pid(),
+ prefetch_limited :: boolean()}).
+-type(qstate() :: #qstate{pid :: pid(),
+ state :: 'dormant' | 'active' | 'suspended'}).
+
+-type(credit_mode() :: 'manual' | 'drain' | 'auto').
+
+-spec(start_link/1 :: (rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(new/1 :: (pid()) -> lstate()).
+
+-spec(limit_prefetch/3 :: (lstate(), non_neg_integer(), non_neg_integer())
+ -> lstate()).
+-spec(unlimit_prefetch/1 :: (lstate()) -> lstate()).
+-spec(is_active/1 :: (lstate()) -> boolean()).
+-spec(get_prefetch_limit/1 :: (lstate()) -> non_neg_integer()).
+-spec(ack/2 :: (lstate(), non_neg_integer()) -> 'ok').
+-spec(pid/1 :: (lstate()) -> pid()).
+
+-spec(client/1 :: (pid()) -> qstate()).
+-spec(activate/1 :: (qstate()) -> qstate()).
+-spec(can_send/3 :: (qstate(), boolean(), rabbit_types:ctag()) ->
+ {'continue' | 'suspend', qstate()}).
+-spec(resume/1 :: (qstate()) -> qstate()).
+-spec(deactivate/1 :: (qstate()) -> qstate()).
+-spec(is_suspended/1 :: (qstate()) -> boolean()).
+-spec(is_consumer_blocked/2 :: (qstate(), rabbit_types:ctag()) -> boolean()).
+-spec(credit/5 :: (qstate(), rabbit_types:ctag(), non_neg_integer(),
+ credit_mode(), boolean()) -> {boolean(), qstate()}).
+-spec(ack_from_queue/3 :: (qstate(), rabbit_types:ctag(), non_neg_integer())
+ -> {boolean(), qstate()}).
+-spec(drained/1 :: (qstate())
+ -> {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}).
+-spec(forget_consumer/2 :: (qstate(), rabbit_types:ctag()) -> qstate()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-record(lim, {prefetch_count = 0,
+ ch_pid,
+ queues = orddict:new(), % QPid -> {MonitorRef, Notify}
+ volume = 0}).
+%% 'Notify' is a boolean that indicates whether a queue should be
+%% notified of a change in the limit or volume that may allow it to
+%% deliver more messages via the limiter's channel.
+
+-record(credit, {credit = 0, mode}).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+%% Start an (anonymous) limiter process for one channel.
+start_link(ProcName) -> gen_server2:start_link(?MODULE, [ProcName], []).
+
+%% Bind the calling channel to the limiter and return the channel's
+%% local limiter state (initially unlimited).
+new(Pid) ->
+ %% this a 'call' to ensure that it is invoked at most once.
+ ok = gen_server:call(Pid, {new, self()}, infinity),
+ #lstate{pid = Pid, prefetch_limited = false}.
+
+%% Install a basic.qos prefetch limit (> 0 by guard); UnackedCount
+%% seeds the limiter's volume so already-outstanding deliveries count
+%% against the new limit.
+limit_prefetch(L, PrefetchCount, UnackedCount) when PrefetchCount > 0 ->
+ ok = gen_server:call(
+ L#lstate.pid,
+ {limit_prefetch, PrefetchCount, UnackedCount}, infinity),
+ L#lstate{prefetch_limited = true}.
+
+%% Remove the prefetch limit (and reset the limiter's volume).
+unlimit_prefetch(L) ->
+ ok = gen_server:call(L#lstate.pid, unlimit_prefetch, infinity),
+ L#lstate{prefetch_limited = false}.
+
+%% Channel-local accessors: all of these consult the cached
+%% prefetch_limited flag first, so an unlimited channel never
+%% touches the limiter process.
+is_active(#lstate{prefetch_limited = Limited}) -> Limited.
+
+get_prefetch_limit(#lstate{prefetch_limited = false}) -> 0;
+get_prefetch_limit(L) ->
+ gen_server:call(L#lstate.pid, get_prefetch_limit, infinity).
+
+ack(#lstate{prefetch_limited = false}, _AckCount) -> ok;
+ack(L, AckCount) -> gen_server:cast(L#lstate.pid, {ack, AckCount}).
+
+pid(#lstate{pid = Pid}) -> Pid.
+
+%% Build a queue's local view of the limiter: initially dormant with
+%% no per-consumer credit records.
+client(Pid) -> #qstate{pid = Pid, state = dormant, credits = gb_trees:empty()}.
+
+%% Register the queue with the limiter process on first activation;
+%% already-active (or suspended) states are left untouched.
+activate(L = #qstate{state = dormant}) ->
+ ok = gen_server:cast(L#qstate.pid, {register, self()}),
+ L#qstate{state = active};
+activate(L) -> L.
+
+%% Ask permission to deliver one message for consumer CTag. Credit is
+%% checked locally first; only an 'active' queue consults the limiter
+%% process (dormant/suspended states short-circuit to true here --
+%% dormant means no limiting applies, suspended is resolved below).
+%% On success the consumer's credit is decremented; on refusal the
+%% queue transitions to 'suspended' (see is_suspended/1).
+can_send(L = #qstate{pid = Pid, state = State, credits = Credits},
+ AckRequired, CTag) ->
+ case is_consumer_blocked(L, CTag) of
+ false -> case (State =/= active orelse
+ safe_call(Pid, {can_send, self(), AckRequired}, true)) of
+ true -> Credits1 = decrement_credit(CTag, Credits),
+ {continue, L#qstate{credits = Credits1}};
+ false -> {suspend, L#qstate{state = suspended}}
+ end;
+ true -> {suspend, L}
+ end.
+
+%% gen_server2:call that returns ExitValue instead of crashing when
+%% the limiter has died (e.g. its channel is going down).
+safe_call(Pid, Msg, ExitValue) ->
+ rabbit_misc:with_exit_handler(
+ fun () -> ExitValue end,
+ fun () -> gen_server2:call(Pid, Msg, infinity) end).
+
+%% Lift a suspension imposed by a refused can_send/3; other states
+%% are unaffected.
+resume(L = #qstate{state = suspended}) ->
+ L#qstate{state = active};
+resume(L) -> L.
+
+%% Unregister from the limiter process; a dormant queue was never
+%% registered, so there is nothing to do.
+deactivate(L = #qstate{state = dormant}) -> L;
+deactivate(L) ->
+ ok = gen_server:cast(L#qstate.pid, {unregister, self()}),
+ L#qstate{state = dormant}.
+
+is_suspended(#qstate{state = suspended}) -> true;
+is_suspended(#qstate{}) -> false.
+
+%% A consumer is blocked when it has a credit record with zero (or
+%% negative) credit; consumers with no record are never blocked.
+is_consumer_blocked(#qstate{credits = Credits}, CTag) ->
+ case gb_trees:lookup(CTag, Credits) of
+ none -> false;
+ {value, #credit{credit = C}} when C > 0 -> false;
+ {value, #credit{}} -> true
+ end.
+
+%% Record a credit grant for CTag. Draining an already-empty queue
+%% consumes the whole grant immediately (credit 0, manual mode) and
+%% returns true so the caller can emit a 'drained' notification.
+credit(Limiter = #qstate{credits = Credits}, CTag, Crd, Mode, IsEmpty) ->
+ {Res, Cr} =
+ case IsEmpty andalso Mode =:= drain of
+ true -> {true, #credit{credit = 0, mode = manual}};
+ false -> {false, #credit{credit = Crd, mode = Mode}}
+ end,
+ {Res, Limiter#qstate{credits = enter_credit(CTag, Cr, Credits)}}.
+
+%% Auto-mode consumers (per-consumer prefetch) get their credit
+%% topped back up when messages are acked. Returns whether this ack
+%% unblocked the consumer (credit went from zero to positive).
+ack_from_queue(Limiter = #qstate{credits = Credits}, CTag, Credit) ->
+ {Credits1, Unblocked} =
+ case gb_trees:lookup(CTag, Credits) of
+ {value, C = #credit{mode = auto, credit = C0}} ->
+ {update_credit(CTag, C#credit{credit = C0 + Credit}, Credits),
+ C0 =:= 0 andalso Credit =/= 0};
+ _ ->
+ {Credits, false}
+ end,
+ {Unblocked, Limiter#qstate{credits = Credits1}}.
+
+%% The queue has gone empty: zero out every consumer in 'drain' mode
+%% (switching it to manual) and report the credit each one had, so
+%% the channel can send the corresponding drain notifications.
+drained(Limiter = #qstate{credits = Credits}) ->
+ Drain = fun(C) -> C#credit{credit = 0, mode = manual} end,
+ {CTagCredits, Credits2} =
+ rabbit_misc:gb_trees_fold(
+ fun (CTag, C = #credit{credit = Crd, mode = drain}, {Acc, Creds0}) ->
+ {[{CTag, Crd} | Acc], update_credit(CTag, Drain(C), Creds0)};
+ (_CTag, #credit{credit = _Crd, mode = _Mode}, {Acc, Creds0}) ->
+ {Acc, Creds0}
+ end, {[], Credits}, Credits),
+ {CTagCredits, Limiter#qstate{credits = Credits2}}.
+
+%% Drop the credit record for a consumer that has gone away.
+forget_consumer(Limiter = #qstate{credits = Credits}, CTag) ->
+ Limiter#qstate{credits = gb_trees:delete_any(CTag, Credits)}.
+
+%%----------------------------------------------------------------------------
+%% Queue-local code
+%%----------------------------------------------------------------------------
+
+%% We want to do all the AMQP 1.0-ish link level credit calculations
+%% in the queue (to do them elsewhere introduces a ton of
+%% races). However, it's a big chunk of code that is conceptually very
+%% linked to the limiter concept. So we get the queue to hold a bit of
+%% state for us (#qstate.credits), and maintain a fiction that the
+%% limiter is making the decisions...
+
+%% Charge one unit of credit to CTag; consumers without a credit
+%% record are unlimited and left alone.
+decrement_credit(CTag, Credits) ->
+ case gb_trees:lookup(CTag, Credits) of
+ {value, C = #credit{credit = Credit}} ->
+ update_credit(CTag, C#credit{credit = Credit - 1}, Credits);
+ none ->
+ Credits
+ end.
+
+%% Insert-or-update vs. update-only (gb_trees:update crashes if the
+%% key is absent); both normalise the record first.
+enter_credit(CTag, C, Credits) ->
+ gb_trees:enter(CTag, ensure_credit_invariant(C), Credits).
+
+update_credit(CTag, C, Credits) ->
+ gb_trees:update(CTag, ensure_credit_invariant(C), Credits).
+
+%% Normalise a credit record before it is stored.
+ensure_credit_invariant(#credit{credit = 0, mode = drain} = Credit) ->
+ %% All credit used up: no 'drained' event can ever be due, so
+ %% fall back to manual mode.
+ Credit#credit{mode = manual};
+ensure_credit_invariant(Credit) ->
+ Credit.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+%% State starts empty; the owning channel attaches later via {new, _}.
+init([ProcName]) -> ?store_proc_name(ProcName),
+ {ok, #lim{}}.
+
+%% Answer prefetch-limit queries ahead of queued work so channels
+%% are not delayed behind can_send traffic.
+prioritise_call(get_prefetch_limit, _From, _Len, _State) -> 9;
+prioritise_call(_Msg, _From, _Len, _State) -> 0.
+
+%% Attach the owning channel; the #lim{ch_pid = undefined} match
+%% guarantees this succeeds at most once per limiter.
+handle_call({new, ChPid}, _From, State = #lim{ch_pid = undefined}) ->
+ {reply, ok, State#lim{ch_pid = ChPid}};
+
+%% Going from unlimited to limited: adopt the channel's count of
+%% already-unacked messages as the starting volume.
+handle_call({limit_prefetch, PrefetchCount, UnackedCount}, _From,
+ State = #lim{prefetch_count = 0}) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount,
+ volume = UnackedCount})};
+%% Changing an existing limit: keep the current volume (a raised
+%% limit may unblock queues, hence maybe_notify).
+handle_call({limit_prefetch, PrefetchCount, _UnackedCount}, _From, State) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount})};
+
+handle_call(unlimit_prefetch, _From, State) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = 0,
+ volume = 0})};
+
+handle_call(get_prefetch_limit, _From,
+ State = #lim{prefetch_count = PrefetchCount}) ->
+ {reply, PrefetchCount, State};
+
+%% Grant or refuse a delivery slot. Refusals mark the queue for
+%% notification (limit_queue); grants only bump the volume when the
+%% delivery awaits an ack (un-acked basic.consume traffic).
+handle_call({can_send, QPid, AckRequired}, _From,
+ State = #lim{volume = Volume}) ->
+ case prefetch_limit_reached(State) of
+ true -> {reply, false, limit_queue(QPid, State)};
+ false -> {reply, true, State#lim{volume = if AckRequired -> Volume + 1;
+ true -> Volume
+ end}}
+ end.
+
+%% Count acks arrived: shrink the in-flight volume and wake blocked
+%% queues if we dropped below the limit.
+%% NOTE(review): when Volume > 0 the new volume is Volume - Count with
+%% no clamping, so it could go negative if acks ever outnumber the
+%% tracked deliveries - presumably callers guarantee Count =< Volume;
+%% confirm before changing.
+handle_cast({ack, Count}, State = #lim{volume = Volume}) ->
+ NewVolume = if Volume == 0 -> 0;
+ true -> Volume - Count
+ end,
+ {noreply, maybe_notify(State, State#lim{volume = NewVolume})};
+
+%% A queue attached to this limiter; start monitoring it.
+handle_cast({register, QPid}, State) ->
+ {noreply, remember_queue(QPid, State)};
+
+%% A queue detached; drop its monitor and per-queue state.
+handle_cast({unregister, QPid}, State) ->
+ {noreply, forget_queue(QPid, State)}.
+
+%% A monitored queue died: clean up exactly as if it had unregistered.
+handle_info({'DOWN', _MonitorRef, _Type, QPid, _Info}, State) ->
+ {noreply, forget_queue(QPid, State)}.
+
+terminate(_, _) ->
+ ok.
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing
+%%----------------------------------------------------------------------------
+
+%% If moving from OldState to NewState crosses from "limit reached"
+%% to "limit not reached", resume the blocked queues; otherwise just
+%% return NewState unchanged.
+maybe_notify(OldState, NewState) ->
+ WasBlocked = prefetch_limit_reached(OldState),
+ NowBlocked = prefetch_limit_reached(NewState),
+ if WasBlocked andalso not NowBlocked -> notify_queues(NewState);
+ true -> NewState
+ end.
+
+%% A prefetch_count of 0 means "unlimited"; otherwise the limit is
+%% reached once the unacked volume meets or exceeds it.
+prefetch_limit_reached(#lim{prefetch_count = Limit, volume = Volume}) ->
+ Limit =/= 0 andalso Volume >= Limit.
+
+%% Start tracking QPid: monitor it and record it as unblocked
+%% ({MRef, false}). Registering a queue we already know is a no-op.
+remember_queue(QPid, State = #lim{queues = Queues}) ->
+ case orddict:is_key(QPid, Queues) of
+ true ->
+ State;
+ false ->
+ Ref = erlang:monitor(process, QPid),
+ State#lim{queues = orddict:store(QPid, {Ref, false}, Queues)}
+ end.
+
+%% Stop tracking QPid: drop the monitor and erase its entry. Unknown
+%% queues are ignored.
+forget_queue(QPid, State = #lim{queues = Queues}) ->
+ case orddict:find(QPid, Queues) of
+ error ->
+ State;
+ {ok, {Ref, _Blocked}} ->
+ true = erlang:demonitor(Ref),
+ State#lim{queues = orddict:erase(QPid, Queues)}
+ end.
+
+%% Mark an already-registered queue as blocked (flag 'true') so that
+%% notify_queues/1 will resume it once capacity frees up.
+limit_queue(QPid, State = #lim{queues = Queues}) ->
+ UpdateFun = fun ({MRef, _}) -> {MRef, true} end,
+ State#lim{queues = orddict:update(QPid, UpdateFun, Queues)}.
+
+%% Resume every queue currently marked as blocked, clearing each
+%% blocked flag in the stored queue map as we go.
+notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) ->
+ {QList, NewQueues} =
+ orddict:fold(fun (_QPid, {_, false}, Acc) -> Acc;
+ (QPid, {MRef, true}, {L, D}) ->
+ {[QPid | L], orddict:store(QPid, {MRef, false}, D)}
+ end, {[], Queues}, Queues),
+ case length(QList) of
+ 0 -> ok;
+ 1 -> ok = rabbit_amqqueue:resume(hd(QList), ChPid); %% common case
+ L ->
+ %% We randomly vary the position of queues in the list,
+ %% thus ensuring that each queue has an equal chance of
+ %% being notified first.
+ %% NOTE(review): the 'random' module is deprecated in
+ %% modern OTP in favour of 'rand'; left as-is for
+ %% compatibility with the OTP versions this file targets.
+ {L1, L2} = lists:split(random:uniform(L), QList),
+ [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3]
+ || L3 <- [L2, L1]],
+ ok
+ end,
+ State#lim{queues = NewQueues}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_log).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([log/3, log/4, info/1, info/2, warning/1, warning/2, error/1, error/2]).
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([level/0]).
+
+-type(category() :: atom()).
+-type(level() :: 'info' | 'warning' | 'error').
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+
+-spec(log/3 :: (category(), level(), string()) -> 'ok').
+-spec(log/4 :: (category(), level(), string(), [any()]) -> 'ok').
+
+-spec(info/1 :: (string()) -> 'ok').
+-spec(info/2 :: (string(), [any()]) -> 'ok').
+-spec(warning/1 :: (string()) -> 'ok').
+-spec(warning/2 :: (string(), [any()]) -> 'ok').
+-spec(error/1 :: (string()) -> 'ok').
+-spec(error/2 :: (string(), [any()]) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% Log with no format arguments.
+log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).
+
+%% Fire-and-forget: logging is an async cast, so callers never block
+%% on the logger process.
+log(Category, Level, Fmt, Args) when is_list(Args) ->
+ gen_server:cast(?SERVER, {log, Category, Level, Fmt, Args}).
+
+%% Convenience wrappers that log under the 'default' category.
+info(Fmt) -> log(default, info, Fmt).
+info(Fmt, Args) -> log(default, info, Fmt, Args).
+warning(Fmt) -> log(default, warning, Fmt).
+warning(Fmt, Args) -> log(default, warning, Fmt, Args).
+error(Fmt) -> log(default, error, Fmt).
+error(Fmt, Args) -> log(default, error, Fmt, Args).
+
+%%--------------------------------------------------------------------
+
+%% State is an orddict mapping category -> numeric log level, built
+%% from the application environment's 'log_levels' setting (which
+%% must be present, or init crashes).
+init([]) ->
+ {ok, CatLevelList} = application:get_env(log_levels),
+ CatLevels = [{Cat, level(Level)} || {Cat, Level} <- CatLevelList],
+ {ok, orddict:from_list(CatLevels)}.
+
+%% No synchronous API is exposed. NOTE(review): returning {noreply,_}
+%% without ever replying means a stray gen_server:call to this
+%% process will time out rather than crash the logger - presumably
+%% deliberate; confirm before changing.
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+%% Emit a log line via error_logger when the message's severity rank
+%% is at or below the configured threshold for its category.
+%% Categories without a configured threshold default to 'info'. The
+%% state (category -> level orddict) is never modified.
+handle_cast({log, Category, Level, Fmt, Args}, CatLevels) ->
+ Threshold = case orddict:find(Category, CatLevels) of
+ {ok, Configured} -> Configured;
+ error -> level(info)
+ end,
+ case level(Level) =< Threshold of
+ true ->
+ case Level of
+ info -> error_logger:info_msg(Fmt, Args);
+ warning -> error_logger:warning_msg(Fmt, Args);
+ error -> error_logger:error_msg(Fmt, Args)
+ end;
+ false ->
+ ok
+ end,
+ {noreply, CatLevels};
+%% All other casts are ignored.
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%% Unexpected messages are silently discarded.
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+
+%% Numeric rank of a log level; higher is more verbose. A message is
+%% emitted when level(MsgLevel) =< the category's configured rank, so
+%% 'none' (0) suppresses everything. Any other atom is a crash
+%% (function_clause), which is intentional.
+level(none) -> 0;
+level(error) -> 1;
+level(warning) -> 2;
+level(info) -> 3.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+
+%% This module handles the node-wide memory statistics.
+%% It receives statistics from all queues, computes the desired
+%% queue duration (in seconds), and sends this information back to
+%% the queues.
+
+-module(rabbit_memory_monitor).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, register/2, deregister/1,
+ report_ram_duration/2, stop/0, conserve_resources/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(process, {pid, reported, sent, callback, monitor}).
+
+-record(state, {timer, %% 'internal_update' timer
+ queue_durations, %% ets #process
+ queue_duration_sum, %% sum of all queue_durations
+ queue_duration_count, %% number of elements in sum
+ desired_duration, %% the desired queue duration
+ disk_alarm %% disable paging, disk alarm has fired
+ }).
+
+-define(SERVER, ?MODULE).
+-define(DEFAULT_UPDATE_INTERVAL, 2500).
+-define(TABLE_NAME, ?MODULE).
+
+%% If all queues are pushed to disk (duration 0), then the sum of
+%% their reported lengths will be 0. If memory then becomes available,
+%% unless we manually intervene, the sum will remain 0, and the queues
+%% will never get a non-zero duration. Thus when the mem use is <
+%% SUM_INC_THRESHOLD, increase the sum artificially by SUM_INC_AMOUNT.
+-define(SUM_INC_THRESHOLD, 0.95).
+-define(SUM_INC_AMOUNT, 1.0).
+
+-define(EPSILON, 0.000001). %% less than this and we clamp to 0
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(register/2 :: (pid(), {atom(),atom(),[any()]}) -> 'ok').
+-spec(deregister/1 :: (pid()) -> 'ok').
+-spec(report_ram_duration/2 ::
+ (pid(), float() | 'infinity') -> number() | 'infinity').
+-spec(stop/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% Register Pid; the {M,F,A} callback is applied (with the new
+%% desired duration appended to A) whenever the queue must be told.
+register(Pid, MFA = {_M, _F, _A}) ->
+ gen_server2:call(?SERVER, {register, Pid, MFA}, infinity).
+
+deregister(Pid) ->
+ gen_server2:cast(?SERVER, {deregister, Pid}).
+
+%% A queue reports its current ram duration and receives the current
+%% desired duration as the reply.
+report_ram_duration(Pid, QueueDuration) ->
+ gen_server2:call(?SERVER,
+ {report_ram_duration, Pid, QueueDuration}, infinity).
+
+stop() ->
+ gen_server2:cast(?SERVER, stop).
+
+%% rabbit_alarm callback: only disk alarms are of interest here; all
+%% other alarm sources are ignored.
+conserve_resources(Pid, disk, Conserve) ->
+ gen_server2:cast(Pid, {disk_alarm, Conserve});
+conserve_resources(_Pid, _Source, _Conserve) ->
+ ok.
+
+%%----------------------------------------------------------------------------
+%% Gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ %% Periodic recomputation of the desired duration.
+ {ok, TRef} = timer:send_interval(?DEFAULT_UPDATE_INTERVAL, update),
+
+ %% Per-queue #process records, keyed by pid.
+ Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]),
+ %% Subscribe to resource alarms; the returned list tells us whether
+ %% a disk alarm is already in effect at startup.
+ Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ {ok, internal_update(
+ #state { timer = TRef,
+ queue_durations = Ets,
+ queue_duration_sum = 0.0,
+ queue_duration_count = 0,
+ desired_duration = infinity,
+ disk_alarm = lists:member(disk, Alarms)})}.
+
+handle_call({report_ram_duration, Pid, QueueDuration}, From,
+ State = #state { queue_duration_sum = Sum,
+ queue_duration_count = Count,
+ queue_durations = Durations,
+ desired_duration = SendDuration }) ->
+
+ [Proc = #process { reported = PrevQueueDuration }] =
+ ets:lookup(Durations, Pid),
+
+ %% Reply early: the caller only needs the current desired duration,
+ %% not the result of the bookkeeping below.
+ gen_server2:reply(From, SendDuration),
+
+ %% Maintain the running sum/count of finite durations; 'infinity'
+ %% reports are excluded from both.
+ {Sum1, Count1} =
+ case {PrevQueueDuration, QueueDuration} of
+ {infinity, infinity} -> {Sum, Count};
+ {infinity, _} -> {Sum + QueueDuration, Count + 1};
+ {_, infinity} -> {Sum - PrevQueueDuration, Count - 1};
+ {_, _} -> {Sum - PrevQueueDuration + QueueDuration,
+ Count}
+ end,
+ true = ets:insert(Durations, Proc #process { reported = QueueDuration,
+ sent = SendDuration }),
+ {noreply, State #state { queue_duration_sum = zero_clamp(Sum1),
+ queue_duration_count = Count1 }};
+
+%% New queues start at 'infinity' so they do not perturb the running
+%% sum until they make their first report.
+handle_call({register, Pid, MFA}, _From,
+ State = #state { queue_durations = Durations }) ->
+ MRef = erlang:monitor(process, Pid),
+ true = ets:insert(Durations, #process { pid = Pid, reported = infinity,
+ sent = infinity, callback = MFA,
+ monitor = MRef }),
+ {reply, ok, State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+%% Disk alarm state unchanged: nothing to do.
+handle_cast({disk_alarm, Alarm}, State = #state{disk_alarm = Alarm}) ->
+ {noreply, State};
+
+%% Disk alarm state flipped: recompute and push out the desired
+%% duration immediately rather than waiting for the next tick.
+handle_cast({disk_alarm, Alarm}, State) ->
+ {noreply, internal_update(State#state{disk_alarm = Alarm})};
+
+handle_cast({deregister, Pid}, State) ->
+ {noreply, internal_deregister(Pid, true, State)};
+
+handle_cast(stop, State) ->
+ {stop, normal, State};
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+%% Periodic tick from the timer set up in init/1.
+handle_info(update, State) ->
+ {noreply, internal_update(State)};
+
+%% A registered queue died: remove it without demonitoring (the
+%% monitor has already fired).
+handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
+ {noreply, internal_deregister(Pid, false, State)};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% Stop the periodic update timer on shutdown.
+terminate(_Reason, #state { timer = TRef }) ->
+ timer:cancel(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+
+%% Round sums below ?EPSILON down to exactly 0.0 so accumulated
+%% floating-point error cannot keep the duration sum forever nonzero.
+zero_clamp(Sum) ->
+ case Sum < ?EPSILON of
+ true -> 0.0;
+ false -> Sum
+ end.
+
+%% Remove Pid's entry from the durations table, optionally
+%% demonitoring it (Demonitor is false when called from the 'DOWN'
+%% handler, where the monitor has already fired). The running
+%% sum/count are adjusted unless the queue only ever reported
+%% 'infinity'.
+internal_deregister(Pid, Demonitor,
+ State = #state { queue_duration_sum = Sum,
+ queue_duration_count = Count,
+ queue_durations = Durations }) ->
+ case ets:lookup(Durations, Pid) of
+ [] -> State;
+ [#process { reported = PrevQueueDuration, monitor = MRef }] ->
+ true = case Demonitor of
+ true -> erlang:demonitor(MRef);
+ false -> true
+ end,
+ {Sum1, Count1} =
+ case PrevQueueDuration of
+ infinity -> {Sum, Count};
+ _ -> {zero_clamp(Sum - PrevQueueDuration),
+ Count - 1}
+ end,
+ true = ets:delete(Durations, Pid),
+ State #state { queue_duration_sum = Sum1,
+ queue_duration_count = Count1 }
+ end.
+
+%% Recompute the desired duration and, when the change is deemed
+%% significant by the alarm-dependent predicate, push it out to all
+%% registered queues.
+internal_update(State = #state{queue_durations = Durations,
+ desired_duration = DesiredDurationAvg,
+ disk_alarm = DiskAlarm}) ->
+ DesiredDurationAvg1 = desired_duration_average(State),
+ ShouldInform = should_inform_predicate(DiskAlarm),
+ case ShouldInform(DesiredDurationAvg, DesiredDurationAvg1) of
+ true -> inform_queues(ShouldInform, DesiredDurationAvg1, Durations);
+ false -> ok
+ end,
+ State#state{desired_duration = DesiredDurationAvg1}.
+
+%% While the disk alarm is active, paging is disabled, hence an
+%% unbounded desired duration.
+desired_duration_average(#state{disk_alarm = true}) ->
+ infinity;
+desired_duration_average(#state{disk_alarm = false,
+ queue_duration_sum = Sum,
+ queue_duration_count = Count}) ->
+ {ok, LimitThreshold} =
+ application:get_env(rabbit, vm_memory_high_watermark_paging_ratio),
+ MemoryLimit = vm_memory_monitor:get_memory_limit(),
+ %% Fraction of the configured memory limit currently in use; a
+ %% non-positive limit yields 'infinity' as a sentinel.
+ MemoryRatio = case MemoryLimit > 0.0 of
+ true -> erlang:memory(total) / MemoryLimit;
+ false -> infinity
+ end,
+ if MemoryRatio =:= infinity ->
+ %% No usable memory limit: clamp the duration to zero.
+ 0.0;
+ MemoryRatio < LimitThreshold orelse Count == 0 ->
+ %% Below the paging threshold (or nothing to average):
+ %% no need to page anything out.
+ infinity;
+ MemoryRatio < ?SUM_INC_THRESHOLD ->
+ %% See ?SUM_INC_THRESHOLD above: bump the sum
+ %% artificially so queues stuck at duration 0 recover.
+ ((Sum + ?SUM_INC_AMOUNT) / Count) / MemoryRatio;
+ true ->
+ (Sum / Count) / MemoryRatio
+ end.
+
+%% Push the new desired duration to each registered queue whose last
+%% sent and last reported durations both warrant it, by applying the
+%% registered {M,F,A} callback with the duration appended to A, and
+%% recording the value as 'sent'.
+inform_queues(ShouldInform, DesiredDurationAvg, Durations) ->
+ true =
+ ets:foldl(
+ fun (Proc = #process{reported = QueueDuration,
+ sent = PrevSendDuration,
+ callback = {M, F, A}}, true) ->
+ case ShouldInform(PrevSendDuration, DesiredDurationAvg)
+ andalso ShouldInform(QueueDuration, DesiredDurationAvg) of
+ true -> ok = erlang:apply(
+ M, F, A ++ [DesiredDurationAvg]),
+ ets:insert(
+ Durations,
+ Proc#process{sent = DesiredDurationAvg});
+ false -> true
+ end
+ end, true, Durations).
+
+%% Returns a binary predicate P(Old, New) deciding whether a change
+%% from Old to New must be communicated immediately; the argument is
+%% the current disk_alarm flag.
+%%
+%% In normal use, we only inform queues immediately if the desired
+%% duration has decreased, we want to ensure timely paging.
+should_inform_predicate(false) -> fun greater_than/2;
+%% When the disk alarm has gone off though, we want to inform queues
+%% immediately if the desired duration has *increased* - we want to
+%% ensure we stop paging in a timely fashion.
+should_inform_predicate(true) -> fun (D1, D2) -> greater_than(D2, D1) end.
+
+%% Numeric 'greater than' extended to the atom 'infinity', which
+%% compares above every number but not above itself.
+greater_than(A, B) ->
+ case {A, B} of
+ {infinity, infinity} -> false;
+ {infinity, _} -> true;
+ {_, infinity} -> false;
+ {_, _} -> A > B
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_coordinator).
+
+-export([start_link/4, get_gm/1, ensure_monitoring/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([joined/2, members_changed/3, handle_msg/3]).
+
+-behaviour(gen_server2).
+-behaviour(gm).
+
+-include("rabbit.hrl").
+-include("gm_specs.hrl").
+
+-record(state, { q,
+ gm,
+ monitors,
+ death_fun,
+ depth_fun
+ }).
+
+-ifdef(use_specs).
+
+-spec(start_link/4 :: (rabbit_types:amqqueue(), pid() | 'undefined',
+ rabbit_mirror_queue_master:death_fun(),
+ rabbit_mirror_queue_master:depth_fun()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(get_gm/1 :: (pid()) -> pid()).
+-spec(ensure_monitoring/2 :: (pid(), [pid()]) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%%
+%% Mirror Queues
+%%
+%% A queue with mirrors consists of the following:
+%%
+%% #amqqueue{ pid, slave_pids }
+%% | |
+%% +----------+ +-------+--------------+-----------...etc...
+%% | | |
+%% V V V
+%% amqqueue_process---+ slave-----+ slave-----+ ...etc...
+%% | BQ = master----+ | | BQ = vq | | BQ = vq |
+%% | | BQ = vq | | +-+-------+ +-+-------+
+%% | +-+-------+ | | |
+%% +-++-----|---------+ | | (some details elided)
+%% || | | |
+%% || coordinator-+ | |
+%% || +-+---------+ | |
+%% || | | |
+%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc...
+%% || +--+ +--+ +--+
+%% ||
+%% consumers
+%%
+%% The master is merely an implementation of bq, and thus is invoked
+%% through the normal bq interface by the amqqueue_process. The slaves
+%% meanwhile are processes in their own right (as is the
+%% coordinator). The coordinator and all slaves belong to the same gm
+%% group. Every member of a gm group receives messages sent to the gm
+%% group. Because the master is the bq of amqqueue_process, it doesn't
+%% have sole control over its mailbox, and as a result, the master
+%% itself cannot be passed messages directly (well, it could be via
+%% the amqqueue:run_backing_queue callback but that would induce
+%% additional unnecessary loading on the master queue process), yet it
+%% needs to react to gm events, such as the death of slaves. Thus the
+%% master creates the coordinator, and it is the coordinator that is
+%% the gm callback module and event handler for the master.
+%%
+%% Consumers are only attached to the master. Thus the master is
+%% responsible for informing all slaves when messages are fetched from
+%% the bq, when they're acked, and when they're requeued.
+%%
+%% The basic goal is to ensure that all slaves perform actions on
+%% their bqs in the same order as the master. Thus the master
+%% intercepts all events going to its bq, and suitably broadcasts
+%% these events on the gm. The slaves thus receive two streams of
+%% events: one stream is via the gm, and one stream is from channels
+%% directly. Whilst the stream via gm is guaranteed to be consistently
+%% seen by all slaves, the same is not true of the stream via
+%% channels. For example, in the event of an unexpected death of a
+%% channel during a publish, only some of the mirrors may receive that
+%% publish. As a result of this problem, the messages broadcast over
+%% the gm contain published content, and thus slaves can operate
+%% successfully on messages that they only receive via the gm.
+%%
+%% The key purpose of also sending messages directly from the channels
+%% to the slaves is that without this, in the event of the death of
+%% the master, messages could be lost until a suitable slave is
+%% promoted. However, that is not the only reason. A slave cannot send
+%% confirms for a message until it has seen it from the
+%% channel. Otherwise, it might send a confirm to a channel for a
+%% message that it might *never* receive from that channel. This can
+%% happen because new slaves join the gm ring (and thus receive
+%% messages from the master) before inserting themselves in the
+%% queue's mnesia record (which is what channels look at for routing).
+%% As it turns out, channels will simply ignore such bogus confirms,
+%% but relying on that would introduce a dangerously tight coupling.
+%%
+%% Hence the slaves have to wait until they've seen both the publish
+%% via gm, and the publish via the channel before they issue the
+%% confirm. Either form of publish can arrive first, and a slave can
+%% be upgraded to the master at any point during this
+%% process. Confirms continue to be issued correctly, however.
+%%
+%% Because the slave is a full process, it impersonates parts of the
+%% amqqueue API. However, it does not need to implement all parts: for
+%% example, no ack or consumer-related message can arrive directly at
+%% a slave from a channel: it is only publishes that pass both
+%% directly to the slaves and go via gm.
+%%
+%% Slaves can be added dynamically. When this occurs, there is no
+%% attempt made to sync the current contents of the master with the
+%% new slave, thus the slave will start empty, regardless of the state
+%% of the master. Thus the slave needs to be able to detect and ignore
+%% operations which are for messages it has not received: because of
+%% the strict FIFO nature of queues in general, this is
+%% straightforward - all new publishes that the new slave receives via
+%% gm should be processed as normal, but fetches which are for
+%% messages the slave has never seen should be ignored. Similarly,
+%% acks for messages the slave never fetched should be
+%% ignored. Similarly, we don't republish rejected messages that we
+%% haven't seen. Eventually, as the master is consumed from, the
+%% messages at the head of the queue which were there before the slave
+%% joined will disappear, and the slave will become fully synced with
+%% the state of the master.
+%%
+%% The detection of the sync-status is based on the depth of the BQs,
+%% where the depth is defined as the sum of the length of the BQ (as
+%% per BQ:len) and the messages pending an acknowledgement. When the
+%% depth of the slave is equal to the master's, then the slave is
+%% synchronised. We only store the difference between the two for
+%% simplicity. Comparing the length is not enough since we need to
+%% take into account rejected messages which will make it back into
+%% the master queue but can't go back in the slave, since we don't
+%% want "holes" in the slave queue. Note that the depth, and the
+%% length likewise, must always be shorter on the slave - we assert
+%% that in various places. In case slaves are joined to an empty queue
+%% which only goes on to receive publishes, they start by asking the
+%% master to broadcast its depth. This is enough for slaves to always
+%% be able to work out when their head does not differ from the master
+%% (and is much simpler and cheaper than getting the master to hang on
+%% to the guid of the msg at the head of its queue). When a slave is
+%% promoted to a master, it unilaterally broadcasts its depth, in
+%% order to solve the problem of depth requests from new slaves being
+%% unanswered by a dead master.
+%%
+%% Obviously, due to the async nature of communication across gm, the
+%% slaves can fall behind. This does not matter from a sync pov: if
+%% they fall behind and the master dies then a) no publishes are lost
+%% because all publishes go to all mirrors anyway; b) the worst that
+%% happens is that acks get lost and so messages come back to
+%% life. This is no worse than normal given you never get confirmation
+%% that an ack has been received (not quite true with QoS-prefetch,
+%% but close enough for jazz).
+%%
+%% Because acktags are issued by the bq independently, and because
+%% there is no requirement for the master and all slaves to use the
+%% same bq, all references to msgs going over gm is by msg_id. Thus
+%% upon acking, the master must convert the acktags back to msg_ids
+%% (which happens to be what bq:ack returns), then sends the msg_ids
+%% over gm, the slaves must convert the msg_ids to acktags (a mapping
+%% the slaves themselves must maintain).
+%%
+%% When the master dies, a slave gets promoted. This will be the
+%% eldest slave, and thus the hope is that that slave is most likely
+%% to be sync'd with the master. The design of gm is that the
+%% notification of the death of the master will only appear once all
+%% messages in-flight from the master have been fully delivered to all
+%% members of the gm group. Thus at this point, the slave that gets
+%% promoted cannot broadcast different events in a different order
+%% than the master for the same msgs: there is no possibility for the
+%% same msg to be processed by the old master and the new master - if
+%% it was processed by the old master then it will have been processed
+%% by the slave before the slave was promoted, and vice versa.
+%%
+%% Upon promotion, all msgs pending acks are requeued as normal, the
+%% slave constructs state suitable for use in the master module, and
+%% then dynamically changes into an amqqueue_process with the master
+%% as the bq, and the slave's bq as the master's bq. Thus the very
+%% same process that was the slave is now a full amqqueue_process.
+%%
+%% It is important that we avoid memory leaks due to the death of
+%% senders (i.e. channels) and partial publications. A sender
+%% publishing a message may fail mid way through the publish and thus
+%% only some of the mirrors will receive the message. We need the
+%% mirrors to be able to detect this and tidy up as necessary to avoid
+%% leaks. If we just had the master monitoring all senders then we
+%% would have the possibility that a sender appears and only sends the
+%% message to a few of the slaves before dying. Those slaves would
+%% then hold on to the message, assuming they'll receive some
+%% instruction eventually from the master. Thus we have both slaves
+%% and the master monitor all senders they become aware of. But there
+%% is a race: if the slave receives a DOWN of a sender, how does it
+%% know whether or not the master is going to send it instructions
+%% regarding those messages?
+%%
+%% Whilst the master monitors senders, it can't access its mailbox
+%% directly, so it delegates monitoring to the coordinator. When the
+%% coordinator receives a DOWN message from a sender, it informs the
+%% master via a callback. This allows the master to do any tidying
+%% necessary, but more importantly allows the master to broadcast a
+%% sender_death message to all the slaves, saying the sender has
+%% died. Once the slaves receive the sender_death message, they know
+%% that they're not going to receive any more instructions from the gm
+%% regarding that sender. However, it is possible that the coordinator
+%% receives the DOWN and communicates that to the master before the
+%% master has finished receiving and processing publishes from the
+%% sender. This turns out not to be a problem: the sender has actually
+%% died, and so will not need to receive confirms or other feedback,
+%% and should further messages be "received" from the sender, the
+%% master will ask the coordinator to set up a new monitor, and
+%% will continue to process the messages normally. Slaves may thus
+%% receive publishes via gm from previously declared "dead" senders,
+%% but again, this is fine: should the slave have just thrown out the
+%% message it had received directly from the sender (due to receiving
+%% a sender_death message via gm), it will be able to cope with the
+%% publication purely from the master via gm.
+%%
+%% When a slave receives a DOWN message for a sender, if it has not
+%% received the sender_death message from the master via gm already,
+%% then it will wait 20 seconds before broadcasting a request for
+%% confirmation from the master that the sender really has died.
+%% Should a sender have only sent a publish to slaves, this allows
+%% slaves to inform the master of the previous existence of the
+%% sender. The master will thus monitor the sender, receive the DOWN,
+%% and subsequently broadcast the sender_death message, allowing the
+%% slaves to tidy up. This process can repeat for the same sender:
+%% consider one slave receives the publication, then the DOWN, then
+%% asks for confirmation of death, then the master broadcasts the
+%% sender_death message. Only then does another slave receive the
+%% publication and thus set up its monitoring. Eventually that slave
+%% too will receive the DOWN, ask for confirmation and the master will
+%% monitor the sender again, receive another DOWN, and send out
+%% another sender_death message. Given the 20 second delay before
+%% requesting death confirmation, this is highly unlikely, but it is a
+%% possibility.
+%%
+%% When the 20 second timer expires, the slave first checks to see
+%% whether it still needs confirmation of the death before requesting
+%% it. This prevents unnecessary traffic on gm as it allows one
+%% broadcast of the sender_death message to satisfy many slaves.
+%%
+%% If we consider the promotion of a slave at this point, we have two
+%% possibilities: that of the slave that has received the DOWN and is
+%% thus waiting for confirmation from the master that the sender
+%% really is down; and that of the slave that has not received the
+%% DOWN. In the first case, in the act of promotion to master, the new
+%% master will monitor again the dead sender, and after it has
+%% finished promoting itself, it should find another DOWN waiting,
+%% which it will then broadcast. This will allow slaves to tidy up as
+%% normal. In the second case, we have the possibility that
+%% confirmation-of-sender-death request has been broadcast, but that
+%% it was broadcast before the master failed, and that the slave being
+%% promoted does not know anything about that sender, and so will not
+%% monitor it on promotion. Thus a slave that broadcasts such a
+%% request, at the point of broadcasting it, recurses, setting another
+%% 20 second timer. As before, on expiry of the timer, the slave
+%% checks to see whether it still has not received a sender_death
+%% message for the dead sender, and if not, broadcasts a death
+%% confirmation request. Thus this ensures that even when a master
+%% dies and the new slave has no knowledge of the dead sender, it will
+%% eventually receive a death confirmation request, shall monitor the
+%% dead sender, receive the DOWN and broadcast the sender_death
+%% message.
+%%
+%% The preceding commentary deals with the possibility of slaves
+%% receiving publications from senders which the master does not, and
+%% the need to prevent memory leaks in such scenarios. The inverse is
+%% also possible: a partial publication may cause only the master to
+%% receive a publication. It will then publish the message via gm. The
+%% slaves will receive it via gm, will publish it to their BQ and will
+%% set up monitoring on the sender. They will then receive the DOWN
+%% message and the master will eventually publish the corresponding
+%% sender_death message. The slave will then be able to tidy up its
+%% state as normal.
+%%
+%% Recovery of mirrored queues is straightforward: as nodes die, the
+%% remaining nodes record this, and eventually a situation is reached
+%% in which only one node is alive, which is the master. This is the
+%% only node which, upon recovery, will resurrect a mirrored queue:
+%% nodes which die and then rejoin as a slave will start off empty as
+%% if they have no mirrored content at all. This is not surprising: to
+%% achieve anything more sophisticated would require the master and
+%% recovering slave to be able to check to see whether they agree on
+%% the last seen state of the queue: checking depth alone is not
+%% sufficient in this case.
+%%
+%% For more documentation see the comments in bug 23554.
+%%
+%%----------------------------------------------------------------------------
+
+start_link(Queue, GM, DeathFun, DepthFun) ->
+ gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, DepthFun], []).
+
+%% Return the pid of the gm group member owned by this coordinator.
+get_gm(CPid) ->
+ gen_server2:call(CPid, get_gm, infinity).
+
+%% Ask the coordinator to monitor, on the master's behalf, any of
+%% Pids it is not monitoring already.
+ensure_monitoring(CPid, Pids) ->
+ gen_server2:cast(CPid, {ensure_monitoring, Pids}).
+
+%% ---------------------------------------------------------------------------
+%% gen_server
+%% ---------------------------------------------------------------------------
+
+init([#amqqueue { name = QueueName } = Q, GM, DeathFun, DepthFun]) ->
+ ?store_proc_name(QueueName),
+ %% If no gm was handed to us, start one and block until its
+ %% 'joined' notification arrives; otherwise link to the existing
+ %% one so our fates are tied.
+ GM1 = case GM of
+ undefined ->
+ {ok, GM2} = gm:start_link(
+ QueueName, ?MODULE, [self()],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ receive {joined, GM2, _Members} ->
+ ok
+ end,
+ GM2;
+ _ ->
+ true = link(GM),
+ GM
+ end,
+ {ok, #state { q = Q,
+ gm = GM1,
+ monitors = pmon:new(),
+ death_fun = DeathFun,
+ depth_fun = DepthFun },
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Only one call is supported: return the gm pid (used by get_gm/1).
+handle_call(get_gm, _From, State = #state { gm = GM }) ->
+ reply(GM, State).
+
+%% Deaths of gm group members. The guard restricts this to the node
+%% hosting the master (MPid); note there is deliberately no other
+%% clause for gm_deaths, so receiving it elsewhere crashes us.
+handle_cast({gm_deaths, DeadGMPids},
+ State = #state { q = #amqqueue { name = QueueName, pid = MPid } })
+ when node(MPid) =:= node() ->
+ case rabbit_mirror_queue_misc:remove_from_queue(
+ QueueName, MPid, DeadGMPids) of
+ %% MPid is already bound here, so this clause also ASSERTS that
+ %% the master has not changed - a different new master would
+ %% raise case_clause.
+ {ok, MPid, DeadPids} ->
+ rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
+ DeadPids),
+ noreply(State);
+ %% Queue was deleted concurrently: nothing left to coordinate.
+ {error, not_found} ->
+ {stop, normal, State}
+ end;
+
+%% A slave asked (via gm) for the current depth: invoke the callback
+%% installed by the master (which broadcasts the depth over gm).
+handle_cast(request_depth, State = #state { depth_fun = DepthFun }) ->
+ ok = DepthFun(),
+ noreply(State);
+
+%% Idempotently add monitors for the given sender pids.
+handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) ->
+ noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) });
+
+handle_cast({delete_and_terminate, Reason}, State) ->
+ {stop, Reason, State}.
+
+%% A monitored sender died: notify the master via DeathFun and forget
+%% the monitor. The is_monitored check makes this idempotent in case
+%% the pid was already erased.
+handle_info({'DOWN', _MonitorRef, process, Pid, _Reason},
+ State = #state { monitors = Mons,
+ death_fun = DeathFun }) ->
+ noreply(case pmon:is_monitored(Pid, Mons) of
+ false -> State;
+ true -> ok = DeathFun(Pid),
+ State #state { monitors = pmon:erase(Pid, Mons) }
+ end);
+
+%% Anything else is a bug: crash loudly rather than leak mailbox.
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+%% NB: this module serves both as a gen_server2 callback module and as
+%% a gm callback module, and the two invoke terminate with different
+%% argument orders - hence the two clauses distinguished by shape.
+terminate(_Reason, #state{}) ->
+ %% gen_server case
+ ok;
+terminate([_CPid], _Reason) ->
+ %% gm case
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+%% gm callbacks. Each receives [CPid] - the coordinator pid given to
+%% gm:start_link in init/1 - as its Args.
+
+%% Unblocks the coordinator's init/1, which is waiting in a receive
+%% for exactly this message.
+joined([CPid], Members) ->
+ CPid ! {joined, self(), Members},
+ ok.
+
+%% Only deaths are interesting; an empty death list is a no-op.
+members_changed([_CPid], _Births, []) ->
+ ok;
+members_changed([CPid], _Births, Deaths) ->
+ ok = gen_server2:cast(CPid, {gm_deaths, Deaths}).
+
+%% Forward the gm messages the coordinator cares about as casts;
+%% delete_and_terminate additionally stops the gm member itself.
+%% Everything else (publishes etc.) is ignored here - those are
+%% consumed by the slaves' gm members.
+handle_msg([CPid], _From, request_depth = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
+handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
+handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
+ ok = gen_server2:cast(CPid, Msg),
+ {stop, {shutdown, ring_shutdown}};
+handle_msg([_CPid], _From, _Msg) ->
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+%% Both reply helpers request hibernation so an idle coordinator
+%% consumes minimal memory between events.
+noreply(State) ->
+ {noreply, State, hibernate}.
+
+reply(Reply, State) ->
+ {reply, Reply, State, hibernate}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_master).
+
+-export([init/3, terminate/2, delete_and_terminate/2,
+ purge/1, purge_acks/1, publish/5, publish_delivered/4,
+ discard/3, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
+ len/1, is_empty/1, depth/1, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
+ needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1,
+ msg_rates/1, status/1, invoke/3, is_duplicate/2]).
+
+-export([start/1, stop/0]).
+
+-export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]).
+
+-export([init_with_existing_bq/3, stop_mirroring/1, sync_mirrors/3]).
+
+-behaviour(rabbit_backing_queue).
+
+-include("rabbit.hrl").
+
+-record(state, { name,
+ gm,
+ coordinator,
+ backing_queue,
+ backing_queue_state,
+ seen_status,
+ confirmed,
+ known_senders
+ }).
+
+-ifdef(use_specs).
+
+-export_type([death_fun/0, depth_fun/0, stats_fun/0]).
+
+-type(death_fun() :: fun ((pid()) -> 'ok')).
+-type(depth_fun() :: fun (() -> 'ok')).
+-type(stats_fun() :: fun ((any()) -> 'ok')).
+-type(master_state() :: #state { name :: rabbit_amqqueue:name(),
+ gm :: pid(),
+ coordinator :: pid(),
+ backing_queue :: atom(),
+ backing_queue_state :: any(),
+ seen_status :: dict(),
+ confirmed :: [rabbit_guid:guid()],
+ known_senders :: set()
+ }).
+
+-spec(promote_backing_queue_state/8 ::
+ (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()], dict(),
+ [pid()]) -> master_state()).
+-spec(sender_death_fun/0 :: () -> death_fun()).
+-spec(depth_fun/0 :: () -> depth_fun()).
+-spec(init_with_existing_bq/3 :: (rabbit_types:amqqueue(), atom(), any()) ->
+ master_state()).
+-spec(stop_mirroring/1 :: (master_state()) -> {atom(), any()}).
+-spec(sync_mirrors/3 :: (stats_fun(), stats_fun(), master_state()) ->
+ {'ok', master_state()} | {stop, any(), master_state()}).
+
+-endif.
+
+%% For general documentation of HA design, see
+%% rabbit_mirror_queue_coordinator
+
+%% ---------------------------------------------------------------------------
+%% Backing queue
+%% ---------------------------------------------------------------------------
+
+%% rabbit_backing_queue callbacks that only make sense for the default
+%% (whole-vhost) backing queue: this module is always layered on top
+%% of another BQ per-queue, so both deliberately crash if invoked.
+start(_DurableQueues) ->
+ %% This will never get called as this module will never be
+ %% installed as the default BQ implementation.
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+stop() ->
+ %% Same as start/1.
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+%% BQ init for a queue that starts out mirrored: initialise the real
+%% backing queue, wrap it via init_with_existing_bq/3, then broadcast
+%% the initial depth so freshly started slaves know where they stand.
+init(Q, Recover, AsyncCallback) ->
+ {ok, BQ} = application:get_env(backing_queue_module),
+ BQS = BQ:init(Q, Recover, AsyncCallback),
+ State = #state{gm = GM} = init_with_existing_bq(Q, BQ, BQS),
+ ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
+ State.
+
+%% Layer mirroring on top of an already-initialised backing queue
+%% (used both at queue start and when a policy change turns mirroring
+%% on). Starts the coordinator (which starts a gm member), records
+%% our {GM, MasterPid} pair in the queue's gm_pids in mnesia, then
+%% starts slaves on the nodes the policy suggests.
+init_with_existing_bq(Q = #amqqueue{name = QName}, BQ, BQS) ->
+ {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
+ Q, undefined, sender_death_fun(), depth_fun()),
+ GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
+ Self = self(),
+ ok = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ [Q1 = #amqqueue{gm_pids = GMPids}]
+ = mnesia:read({rabbit_queue, QName}),
+ ok = rabbit_amqqueue:store_queue(
+ Q1#amqqueue{gm_pids = [{GM, Self} | GMPids]})
+ end),
+ {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q),
+ %% We need synchronous add here (i.e. do not return until the
+ %% slave is running) so that when queue declaration is finished
+ %% all slaves are up; we don't want to end up with unsynced slaves
+ %% just by declaring a new queue. But add can't be synchronous all
+ %% the time as it can be called by slaves and that's
+ %% deadlock-prone.
+ rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync),
+ #state { name = QName,
+ gm = GM,
+ coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = dict:new(),
+ confirmed = [],
+ known_senders = sets:new() }.
+
+%% Turn mirroring off: shut all slaves (and our gm) down and hand the
+%% bare backing queue back to the caller, which continues with it
+%% unmirrored. The coordinator is unlinked first so its exit (when
+%% the gm stops) does not take us down.
+stop_mirroring(State = #state { coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ unlink(CPid),
+ stop_all_slaves(shutdown, State),
+ {BQ, BQS}.
+
+%% Explicitly synchronise all slaves: start a syncer process, announce
+%% it to the slaves over gm ({sync_start, ...}), then drive the sync
+%% from the master side. HandleInfo/EmitStats are stats callbacks
+%% passed through to rabbit_mirror_queue_sync.
+sync_mirrors(HandleInfo, EmitStats,
+ State = #state { name = QName,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Log = fun (Fmt, Params) ->
+ rabbit_mirror_queue_misc:log_info(
+ QName, "Synchronising: " ++ Fmt ++ "~n", Params)
+ end,
+ Log("~p messages to synchronise", [BQ:len(BQS)]),
+ {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
+ Ref = make_ref(),
+ Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, QName, Log, SPids),
+ gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}),
+ %% Small helper to thread an updated BQ state into our state.
+ S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end,
+ case rabbit_mirror_queue_sync:master_go(
+ Syncer, Ref, Log, HandleInfo, EmitStats, BQ, BQS) of
+ %% shutdown propagates as a stop; a dying sync is merely logged.
+ {shutdown, R, BQS1} -> {stop, R, S(BQS1)};
+ {sync_died, R, BQS1} -> Log("~p", [R]),
+ {ok, S(BQS1)};
+ {already_synced, BQS1} -> {ok, S(BQS1)};
+ {ok, BQS1} -> Log("complete", []),
+ {ok, S(BQS1)}
+ end.
+
+terminate({shutdown, dropped} = Reason,
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ %% Backing queue termination - this node has been explicitly
+ %% dropped. Normally, non-durable queues would be tidied up on
+ %% startup, but there's a possibility that we will be added back
+ %% in without this node being restarted. Thus we must do the full
+ %% blown delete_and_terminate now, but only locally: we do not
+ %% broadcast delete_and_terminate.
+ State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)};
+
+terminate(Reason,
+ State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ %% Backing queue termination. The queue is going down but
+ %% shouldn't be deleted. Most likely safe shutdown of this
+ %% node. Thus just let some other slave take over.
+ State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
+
+%% Queue deletion: take down every slave first (broadcast over gm,
+%% wait for their deaths, clean mnesia - see stop_all_slaves/2), then
+%% delete the local backing queue contents.
+delete_and_terminate(Reason, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ stop_all_slaves(Reason, State),
+ State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
+
+%% Broadcast delete_and_terminate to the gm group and block until our
+%% own gm member and every slave have died (monitors are taken before
+%% the broadcast so no DOWN can be missed). NB: waits indefinitely on
+%% each monitored pid.
+stop_all_slaves(Reason, #state{name = QName, gm = GM}) ->
+ {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
+ MRefs = [erlang:monitor(process, Pid) || Pid <- [GM | SPids]],
+ ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
+ [receive {'DOWN', MRef, process, _Pid, _Info} -> ok end || MRef <- MRefs],
+ %% Normally when we remove a slave another slave or master will
+ %% notice and update Mnesia. But we just removed them all, and
+ %% have stopped listening ourselves. So manually clean up.
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ [Q] = mnesia:read({rabbit_queue, QName}),
+ rabbit_mirror_queue_misc:store_updated_slaves(
+ Q #amqqueue { gm_pids = [], slave_pids = [] })
+ end),
+ ok = gm:forget_group(QName).
+
+%% Purge: tell the slaves to drop everything (a {drop, NewLen=0, Len,
+%% AckRequired=false} broadcast) before purging locally.
+purge(State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, 0, BQ:len(BQS), false}),
+ {Count, BQS1} = BQ:purge(BQS),
+ {Count, State #state { backing_queue_state = BQS1 }}.
+
+%% Deliberately unsupported for mirrored queues.
+purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}).
+
+%% Publish: broadcast to slaves over gm BEFORE publishing locally,
+%% then ensure the sending channel is monitored. The assertion holds
+%% because a message already in seen_status would have been filtered
+%% out by is_duplicate/2.
+publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ false = dict:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {publish, ChPid, MsgProps, Msg},
+ rabbit_basic:msg_size(Msg)),
+ BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, BQS),
+ ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+
+%% As publish/5, but for messages going straight out to a consumer:
+%% broadcast first, publish-delivered locally, return the AckTag.
+publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
+ ChPid, State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ false = dict:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {publish_delivered, ChPid, MsgProps, Msg},
+ rabbit_basic:msg_size(Msg)),
+ {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {AckTag, ensure_monitoring(ChPid, State1)}.
+
+%% Discard a message: broadcast the discard so slaves can tidy their
+%% state, discard locally, and monitor the sender.
+discard(MsgId, ChPid, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = SS }) ->
+ false = dict:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {discard, ChPid, MsgId}),
+ ensure_monitoring(ChPid, State #state { backing_queue_state =
+ BQ:discard(MsgId, ChPid, BQS) }).
+
+%% dropwhile/fetchwhile: run the operation locally, then use the
+%% before/after length difference to tell the slaves how many messages
+%% to drop (see drop/3 below). For fetchwhile the dropped messages
+%% were fetched with acks required, hence the 'true'.
+dropwhile(Pred, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ {Next, BQS1} = BQ:dropwhile(Pred, BQS),
+ {Next, drop(Len, false, State #state { backing_queue_state = BQS1 })}.
+
+fetchwhile(Pred, Fun, Acc, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ {Next, Acc1, BQS1} = BQ:fetchwhile(Pred, Fun, Acc, BQS),
+ {Next, Acc1, drop(Len, true, State #state { backing_queue_state = BQS1 })}.
+
+%% Collect confirms from the BQ, filtering out any message that was
+%% seen as 'published' while we were still a slave: those confirms
+%% must wait until the channel's own publish arrives (handled in
+%% is_duplicate/2), so they are parked as 'confirmed' in seen_status.
+%% Confirms accumulated in #state.confirmed (by is_duplicate/2) are
+%% flushed here too.
+drain_confirmed(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = SS,
+ confirmed = Confirmed }) ->
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ {MsgIds1, SS1} =
+ lists:foldl(
+ fun (MsgId, {MsgIdsN, SSN}) ->
+ %% We will never see 'discarded' here
+ case dict:find(MsgId, SSN) of
+ error ->
+ {[MsgId | MsgIdsN], SSN};
+ {ok, published} ->
+ %% It was published when we were a slave,
+ %% and we were promoted before we saw the
+ %% publish from the channel. We still
+ %% haven't seen the channel publish, and
+ %% consequently we need to filter out the
+ %% confirm here. We will issue the confirm
+ %% when we see the publish from the channel.
+ {MsgIdsN, dict:store(MsgId, confirmed, SSN)};
+ {ok, confirmed} ->
+ %% Well, confirms are racy by definition.
+ {[MsgId | MsgIdsN], SSN}
+ end
+ end, {[], SS}, MsgIds),
+ {Confirmed ++ MsgIds1, State #state { backing_queue_state = BQS1,
+ seen_status = SS1,
+ confirmed = [] }}.
+
+%% fetch/drop of a single message: perform locally first, then (via
+%% drop_one/2) broadcast a one-message drop to the slaves, passing
+%% whether an ack is outstanding. 'empty' needs no broadcast.
+fetch(AckRequired, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:fetch(AckRequired, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {Result, case Result of
+ empty -> State1;
+ {_MsgId, _IsDelivered, AckTag} -> drop_one(AckTag, State1)
+ end}.
+
+drop(AckRequired, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:drop(AckRequired, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {Result, case Result of
+ empty -> State1;
+ {_MsgId, AckTag} -> drop_one(AckTag, State1)
+ end}.
+
+%% Ack locally, then tell the slaves which message ids were acked so
+%% they can forget them too. Skip the broadcast when nothing acked.
+ack(AckTags, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {MsgIds, BQS1} = BQ:ack(AckTags, BQS),
+ case MsgIds of
+ [] -> ok;
+ _ -> ok = gm:broadcast(GM, {ack, MsgIds})
+ end,
+ {MsgIds, State #state { backing_queue_state = BQS1 }}.
+
+%% Requeue locally and broadcast the requeued ids (always, even if
+%% empty - NOTE(review): asymmetric with ack/2 above, presumably
+%% harmless).
+requeue(AckTags, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ ok = gm:broadcast(GM, {requeue, MsgIds}),
+ {MsgIds, State #state { backing_queue_state = BQS1 }}.
+
+%% ackfold/fold are purely local - no gm traffic needed since they do
+%% not change what the slaves must track.
+ackfold(MsgFun, Acc, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }, AckTags) ->
+ {Acc1, BQS1} = BQ:ackfold(MsgFun, Acc, BQS, AckTags),
+ {Acc1, State #state { backing_queue_state = BQS1 }}.
+
+fold(Fun, Acc, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:fold(Fun, Acc, BQS),
+ {Result, State #state { backing_queue_state = BQS1 }}.
+
+%% The following callbacks are pure pass-throughs to the underlying
+%% backing queue: they neither require nor generate any gm traffic.
+len(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:len(BQS).
+
+is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:is_empty(BQS).
+
+depth(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:depth(BQS).
+
+set_ram_duration_target(Target, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state =
+ BQ:set_ram_duration_target(Target, BQS) }.
+
+ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:ram_duration(BQS),
+ {Result, State #state { backing_queue_state = BQS1 }}.
+
+needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:needs_timeout(BQS).
+
+timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:timeout(BQS) }.
+
+handle_pre_hibernate(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }.
+
+resume(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:resume(BQS) }.
+
+msg_rates(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:msg_rates(BQS).
+
+%% Status of the underlying BQ plus mirroring-specific counters.
+status(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:status(BQS) ++
+ [ {mirror_seen, dict:size(State #state.seen_status)},
+ {mirror_senders, sets:size(State #state.known_senders)} ].
+
+%% Invocations addressed to ?MODULE operate on our own state (used by
+%% sender_death_fun/depth_fun below); anything else is delegated to
+%% the wrapped BQ.
+invoke(?MODULE, Fun, State) ->
+ Fun(?MODULE, State);
+invoke(Mod, Fun, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
+
+%% Duplicate detection for a recently-promoted master: filters channel
+%% publishes of messages we already received via gm while a slave, and
+%% issues (via #state.confirmed) any confirms parked while waiting for
+%% the channel publish. Returns {IsDuplicate, State1}.
+is_duplicate(Message = #basic_message { id = MsgId },
+ State = #state { seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ confirmed = Confirmed }) ->
+ %% Here, we need to deal with the possibility that we're about to
+ %% receive a message that we've already seen when we were a slave
+ %% (we received it via gm). Thus if we do receive such message now
+ %% via the channel, there may be a confirm waiting to issue for
+ %% it.
+
+ %% We will never see {published, ChPid, MsgSeqNo} here.
+ case dict:find(MsgId, SS) of
+ error ->
+ %% We permit the underlying BQ to have a peek at it, but
+ %% only if we ourselves are not filtering out the msg.
+ {Result, BQS1} = BQ:is_duplicate(Message, BQS),
+ {Result, State #state { backing_queue_state = BQS1 }};
+ {ok, published} ->
+ %% It already got published when we were a slave and no
+ %% confirmation is waiting. amqqueue_process will have, in
+ %% its msg_id_to_channel mapping, the entry for dealing
+ %% with the confirm when that comes back in (it's added
+ %% immediately after calling is_duplicate). The msg is
+ %% invalid. We will not see this again, nor will we be
+ %% further involved in confirming this message, so erase.
+ {true, State #state { seen_status = dict:erase(MsgId, SS) }};
+ {ok, Disposition}
+ when Disposition =:= confirmed
+ %% It got published when we were a slave via gm, and
+ %% confirmed some time after that (maybe even after
+ %% promotion), but before we received the publish from the
+ %% channel, so couldn't previously know what the
+ %% msg_seq_no was (and thus confirm as a slave). So we
+ %% need to confirm now. As above, amqqueue_process will
+ %% have the entry for the msg_id_to_channel mapping added
+ %% immediately after calling is_duplicate/2.
+ orelse Disposition =:= discarded ->
+ %% Message was discarded while we were a slave. Confirm now.
+ %% As above, amqqueue_process will have the entry for the
+ %% msg_id_to_channel mapping.
+ {true, State #state { seen_status = dict:erase(MsgId, SS),
+ confirmed = [MsgId | Confirmed] }}
+ end.
+
+%% ---------------------------------------------------------------------------
+%% Other exported functions
+%% ---------------------------------------------------------------------------
+
+%% Build a master #state{} from a promoted slave's components. All
+%% pending-ack messages are requeued first (a new master cannot have
+%% inherited acks), which is why len must equal depth afterwards; the
+%% resulting depth is broadcast so remaining slaves can realign.
+promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ Len = BQ:len(BQS1),
+ Depth = BQ:depth(BQS1),
+ true = Len == Depth, %% ASSERTION: everything must have been requeued
+ ok = gm:broadcast(GM, {depth, Depth}),
+ #state { name = QName,
+ gm = GM,
+ coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS1,
+ seen_status = Seen,
+ confirmed = [],
+ known_senders = sets:from_list(KS) }.
+
+%% Closure handed to the coordinator: when a monitored sender dies,
+%% re-enter the amqqueue process (run_backing_queue routes back into
+%% invoke/3 above) to broadcast sender_death and forget the sender.
+%% Self is captured now because the fun runs in the coordinator.
+sender_death_fun() ->
+ Self = self(),
+ fun (DeadPid) ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM, known_senders = KS }) ->
+ ok = gm:broadcast(GM, {sender_death, DeadPid}),
+ KS1 = sets:del_element(DeadPid, KS),
+ State #state { known_senders = KS1 }
+ end)
+ end.
+
+%% Closure handed to the coordinator: on request_depth, broadcast our
+%% current depth to the group. State is returned unchanged.
+depth_fun() ->
+ Self = self(),
+ fun () ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
+ State
+ end)
+ end.
+
+%% ---------------------------------------------------------------------------
+%% Helpers
+%% ---------------------------------------------------------------------------
+
+%% Tell the slaves one message was fetched/dropped. BQ:len(BQS) is
+%% the length AFTER the local operation (the new length the slaves
+%% should converge on); AckTag =/= undefined means an ack is pending.
+drop_one(AckTag, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckTag =/= undefined}),
+ State.
+
+%% Tell the slaves how many messages a dropwhile/fetchwhile removed,
+%% computed as the difference from the pre-operation length. No
+%% broadcast when nothing was dropped.
+drop(PrevLen, AckRequired, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ case PrevLen - Len of
+ 0 -> State;
+ Dropped -> ok = gm:broadcast(GM, {drop, Len, Dropped, AckRequired}),
+ State
+ end.
+
+%% Make sure the coordinator monitors sender ChPid; known_senders
+%% caches which pids we have already asked about so we only pay the
+%% coordinator round-trip once per sender.
+ensure_monitoring(ChPid, State = #state { coordinator = CPid,
+ known_senders = KS }) ->
+ case sets:is_element(ChPid, KS) of
+ true -> State;
+ false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring(
+ CPid, [ChPid]),
+ State #state { known_senders = sets:add_element(ChPid, KS) }
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_misc).
+-behaviour(rabbit_policy_validator).
+
+-export([remove_from_queue/3, on_node_up/0, add_mirrors/3,
+ report_deaths/4, store_updated_slaves/1,
+ initial_queue_node/2, suggested_queue_nodes/1,
+ is_mirrored/1, update_mirrors/2, validate_policy/1,
+ maybe_auto_sync/1, maybe_drop_master_after_sync/1,
+ log_info/3, log_warning/3]).
+
+%% for testing only
+-export([module/1]).
+
+-include("rabbit.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "HA policy validation"},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-mode">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-params">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(remove_from_queue/3 ::
+ (rabbit_amqqueue:name(), pid(), [pid()])
+ -> {'ok', pid(), [pid()]} | {'error', 'not_found'}).
+-spec(on_node_up/0 :: () -> 'ok').
+-spec(add_mirrors/3 :: (rabbit_amqqueue:name(), [node()], 'sync' | 'async')
+ -> 'ok').
+-spec(store_updated_slaves/1 :: (rabbit_types:amqqueue()) ->
+ rabbit_types:amqqueue()).
+-spec(initial_queue_node/2 :: (rabbit_types:amqqueue(), node()) -> node()).
+-spec(suggested_queue_nodes/1 :: (rabbit_types:amqqueue()) ->
+ {node(), [node()]}).
+-spec(is_mirrored/1 :: (rabbit_types:amqqueue()) -> boolean()).
+-spec(update_mirrors/2 ::
+ (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
+-spec(maybe_drop_master_after_sync/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(maybe_auto_sync/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(log_info/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok').
+-spec(log_warning/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Returns {ok, NewMPid, DeadPids}
+remove_from_queue(QueueName, Self, DeadGMPids) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ %% Someone else could have deleted the queue before we
+ %% get here.
+ case mnesia:read({rabbit_queue, QueueName}) of
+ [] -> {error, not_found};
+ [Q = #amqqueue { pid = QPid,
+ slave_pids = SPids,
+ gm_pids = GMPids }] ->
+ {DeadGM, AliveGM} = lists:partition(
+ fun ({GM, _}) ->
+ lists:member(GM, DeadGMPids)
+ end, GMPids),
+ DeadPids = [Pid || {_GM, Pid} <- DeadGM],
+ AlivePids = [Pid || {_GM, Pid} <- AliveGM],
+ Alive = [Pid || Pid <- [QPid | SPids],
+ lists:member(Pid, AlivePids)],
+ {QPid1, SPids1} = promote_slave(Alive),
+ case {{QPid, SPids}, {QPid1, SPids1}} of
+ {Same, Same} ->
+ ok;
+ _ when QPid =:= QPid1 orelse QPid1 =:= Self ->
+ %% Either master hasn't changed, so
+ %% we're ok to update mnesia; or we have
+ %% become the master.
+ Q1 = Q#amqqueue{pid = QPid1,
+ slave_pids = SPids1,
+ gm_pids = AliveGM},
+ store_updated_slaves(Q1),
+ %% If we add and remove nodes at the same time we
+ %% might tell the old master we need to sync and
+ %% then shut it down. So let's check if the new
+ %% master needs to sync.
+ maybe_auto_sync(Q1);
+ _ ->
+ %% Master has changed, and we're not it.
+ %% [1].
+ Q1 = Q#amqqueue{slave_pids = Alive,
+ gm_pids = AliveGM},
+ store_updated_slaves(Q1)
+ end,
+ {ok, QPid1, DeadPids}
+ end
+ end).
+%% [1] We still update mnesia here in case the slave that is supposed
+%% to become master dies before it actually does so, in which case the dead
+%% old master might otherwise never get removed, which in turn might
+%% prevent promotion of another slave (e.g. us).
+%%
+%% Note however that we do not update the master pid. Otherwise we can
+%% have the situation where a slave updates the mnesia record for a
+%% queue, promoting another slave before that slave realises it has
+%% become the new master, which is bad because it could then mean the
+%% slave (now master) receives messages it's not ready for (for
+%% example, new consumers).
+%%
+%% We set slave_pids to Alive rather than SPids1 since otherwise we'd
+%% be removing the pid of the candidate master, which in turn would
+%% prevent it from promoting itself.
+%%
+%% We maintain gm_pids as our source of truth, i.e. it contains the
+%% most up-to-date information about which GMs and associated
+%% {M,S}Pids are alive. And all pids in slave_pids always have a
+%% corresponding entry in gm_pids. By contrast, due to the
+%% aforementioned restriction on updating the master pid, that pid may
+%% not be present in gm_pids, but only if said master has died.
+
+%% Called when this node comes up: scan all queues inside one mnesia
+%% transaction and, for each queue whose policy suggests a slave on
+%% this node, start that mirror (asynchronously, outside the txn).
+on_node_up() ->
+ QNames =
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:foldl(
+ fun (Q = #amqqueue{name = QName,
+ pid = Pid,
+ slave_pids = SPids}, QNames0) ->
+ %% We don't want to pass in the whole
+ %% cluster - we don't want a situation
+ %% where starting one node causes us to
+ %% decide to start a mirror on another
+ PossibleNodes0 = [node(P) || P <- [Pid | SPids]],
+ PossibleNodes =
+ case lists:member(node(), PossibleNodes0) of
+ true -> PossibleNodes0;
+ false -> [node() | PossibleNodes0]
+ end,
+ {_MNode, SNodes} = suggested_queue_nodes(
+ Q, PossibleNodes),
+ case lists:member(node(), SNodes) of
+ true -> [QName | QNames0];
+ false -> QNames0
+ end
+ end, [], rabbit_queue)
+ end),
+ [add_mirror(QName, node(), async) || QName <- QNames],
+ ok.
+
+%% Drop the mirror of QName on every node in Nodes, ignoring the
+%% individual results (drop_mirror/2 reports per-node errors itself).
+drop_mirrors(QName, Nodes) ->
+    lists:foreach(fun (Node) -> drop_mirror(QName, Node) end, Nodes),
+    ok.
+
+%% Stop the mirror (or master) of QName residing on MirrorNode by
+%% sending it {shutdown, dropped}. Refuses to kill the master when it
+%% is the only remaining member (clause order matters: the [QPid]
+%% clause only fires with no slaves left).
+drop_mirror(QName, MirrorNode) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, #amqqueue { name = Name, pid = QPid, slave_pids = SPids }} ->
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
+ [] ->
+ {error, {queue_not_mirrored_on_node, MirrorNode}};
+ [QPid] when SPids =:= [] ->
+ {error, cannot_drop_only_mirror};
+ [Pid] ->
+ log_info(Name, "Dropping queue mirror on node ~p~n",
+ [MirrorNode]),
+ exit(Pid, {shutdown, dropped}),
+ {ok, dropped}
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+%% Start a mirror of QName on each node in Nodes. SyncMode is 'sync'
+%% or 'async' and is passed through to add_mirror/3; per-node results
+%% are discarded.
+add_mirrors(QName, Nodes, SyncMode) ->
+    lists:foreach(fun (Node) -> add_mirror(QName, Node, SyncMode) end, Nodes),
+    ok.
+
+%% Start a mirror of QName on MirrorNode unless one is already alive
+%% there. A dead-but-registered slave pid is treated as absent and a
+%% fresh child is started.
+add_mirror(QName, MirrorNode, SyncMode) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, #amqqueue { name = Name, pid = QPid, slave_pids = SPids } = Q} ->
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
+ [] ->
+ start_child(Name, MirrorNode, Q, SyncMode);
+ [SPid] ->
+ case rabbit_misc:is_process_alive(SPid) of
+ true -> {ok, already_mirrored};
+ false -> start_child(Name, MirrorNode, Q, SyncMode)
+ end
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+%% Start a slave process for Q on MirrorNode under the slave
+%% supervisor and tell it to go. Wrapped in with_exit_handler so a
+%% race with node/queue shutdown degrades to 'ok' instead of crashing.
+start_child(Name, MirrorNode, Q, SyncMode) ->
+ rabbit_misc:with_exit_handler(
+ rabbit_misc:const(ok),
+ fun () ->
+ {ok, SPid} = rabbit_mirror_queue_slave_sup:start_child(
+ MirrorNode, [Q]),
+ log_info(Name, "Adding mirror on node ~p: ~p~n",
+ [MirrorNode, SPid]),
+ rabbit_mirror_queue_slave:go(SPid, SyncMode)
+ end).
+
+%% Log which mirrors a given member (master or slave) saw die; silent
+%% when the death list is empty. The inner [$ , ...] builds an iolist
+%% of " <pid>" fragments.
+report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
+ ok;
+report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
+ log_info(QueueName, "~s ~s saw deaths of mirrors~s~n",
+ [case IsMaster of
+ true -> "Master";
+ false -> "Slave"
+ end,
+ rabbit_misc:pid_to_string(MirrorPid),
+ [[$ , rabbit_misc:pid_to_string(P)] || P <- DeadPids]]).
+
+%% Convenience wrappers around log/4 at fixed levels.
+log_info (QName, Fmt, Args) -> log(info, QName, Fmt, Args).
+log_warning(QName, Fmt, Args) -> log(warning, QName, Fmt, Args).
+
+%% Emit a mirroring log entry, prefixing the format string with the
+%% printable name of the queue concerned.
+log(Level, QName, Fmt, Args) ->
+    FullFmt  = "Mirrored ~s: " ++ Fmt,
+    FullArgs = [rabbit_misc:rs(QName) | Args],
+    rabbit_log:log(mirroring, Level, FullFmt, FullArgs).
+
+%% Persist a queue record after a slave-set change, first pruning
+%% sync_slave_pids down to pids still present in slave_pids (a synced
+%% slave that died must not remain listed as synced). Returns the
+%% stored record.
+store_updated_slaves(Q = #amqqueue{slave_pids = SPids,
+ sync_slave_pids = SSPids}) ->
+ %% TODO now that we clear sync_slave_pids in rabbit_durable_queue,
+ %% do we still need this filtering?
+ SSPids1 = [SSPid || SSPid <- SSPids, lists:member(SSPid, SPids)],
+ Q1 = Q#amqqueue{sync_slave_pids = SSPids1},
+ ok = rabbit_amqqueue:store_queue(Q1),
+ %% Wake it up so that we emit a stats event
+ rabbit_amqqueue:notify_policy_changed(Q1),
+ Q1.
+
+%%----------------------------------------------------------------------------
+
+%% Split a non-empty list of alive members into {NewMaster,
+%% RemainingSlaves}. Crashes (function_clause) on an empty list -
+%% callers guarantee at least one survivor.
+promote_slave([SPid | SPids]) ->
+ %% The slave pids are maintained in descending order of age, so
+ %% the one to promote is the oldest.
+ {SPid, SPids}.
+
+%% Pick the node a brand-new queue's master should live on, given the
+%% declaring connection's preferred node DefNode.
+initial_queue_node(Q, DefNode) ->
+ {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, all_nodes()),
+ MNode.
+
+%% Arity-1/2 conveniences over suggested_queue_nodes/3 using this
+%% node as the default master node and/or the running cluster nodes.
+suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, all_nodes()).
+suggested_queue_nodes(Q, All) -> suggested_queue_nodes(Q, node(), All).
+
+%% The third argument exists so we can pull a call to
+%% rabbit_mnesia:cluster_nodes(running) out of a loop or transaction
+%% or both.
+%%
+%% Returns {MasterNode, SlaveNodes} as chosen by the queue's ha-mode
+%% module. Exclusive queues and queues with no mirroring policy get
+%% no slaves; a queue with no current master defaults to DefNode.
+suggested_queue_nodes(Q = #amqqueue{exclusive_owner = Owner}, DefNode, All) ->
+ {MNode0, SNodes, SSNodes} = actual_queue_nodes(Q),
+ MNode = case MNode0 of
+ none -> DefNode;
+ _ -> MNode0
+ end,
+ case Owner of
+ none -> Params = policy(<<"ha-params">>, Q),
+ case module(Q) of
+ {ok, M} -> M:suggested_queue_nodes(
+ Params, MNode, SNodes, SSNodes, All);
+ _ -> {MNode, []}
+ end;
+ _ -> {MNode, []}
+ end.
+
+%% All currently-running nodes in the cluster.
+all_nodes() -> rabbit_mnesia:cluster_nodes(running).
+
+%% Fetch policy key PolicyName for queue Q, normalising an absent
+%% value ('undefined') to the atom 'none'.
+policy(PolicyName, Q) ->
+    case rabbit_policy:get(PolicyName, Q) of
+        undefined -> none;
+        Value     -> Value
+    end.
+
+%% Resolve the ha_mode module for a queue (first clause: via its
+%% ha-mode policy) or for a mode name binary (second clause: via the
+%% registry). Returns {ok, Module} or 'not_mirrored'.
+module(#amqqueue{} = Q) ->
+ case rabbit_policy:get(<<"ha-mode">>, Q) of
+ undefined -> not_mirrored;
+ Mode -> module(Mode)
+ end;
+
+module(Mode) when is_binary(Mode) ->
+ case rabbit_registry:binary_to_type(Mode) of
+ {error, not_found} -> not_mirrored;
+ T -> case rabbit_registry:lookup_module(ha_mode, T) of
+ {ok, Module} -> {ok, Module};
+ _ -> not_mirrored
+ end
+ end.
+
+%% True iff the queue's policy resolves to a known ha_mode module.
+is_mirrored(Q) ->
+    case module(Q) of
+        {ok, _Module} -> true;
+        _NotMirrored  -> false
+    end.
+
+%% Current placement of a queue as {MasterNode | none, SlaveNodes,
+%% SyncedSlaveNodes}, derived from the pids in its record.
+actual_queue_nodes(#amqqueue{pid = MPid,
+ slave_pids = SPids,
+ sync_slave_pids = SSPids}) ->
+ Nodes = fun (L) -> [node(Pid) || Pid <- L] end,
+ {case MPid of
+ none -> none;
+ _ -> node(MPid)
+ end, Nodes(SPids), Nodes(SSPids)}.
+
+%% Kick off mirror synchronisation if the queue's ha-sync-mode policy
+%% is "automatic". Done in a throwaway process so the caller (which
+%% may be inside the queue process or a transaction) never blocks.
+maybe_auto_sync(Q = #amqqueue{pid = QPid}) ->
+ case policy(<<"ha-sync-mode">>, Q) of
+ <<"automatic">> ->
+ spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end);
+ _ ->
+ ok
+ end.
+
+%% React to a policy change on a queue: start/stop mirroring wholesale
+%% when the mirrored-ness flips, or adjust the mirror node set when it
+%% stays mirrored. Both records refer to the same queue process.
+update_mirrors(OldQ = #amqqueue{pid = QPid},
+ NewQ = #amqqueue{pid = QPid}) ->
+ case {is_mirrored(OldQ), is_mirrored(NewQ)} of
+ {false, false} -> ok;
+ {true, false} -> rabbit_amqqueue:stop_mirroring(QPid),
+ {false, true} -> rabbit_amqqueue:start_mirroring(QPid);
+ {true, true} -> update_mirrors0(OldQ, NewQ)
+ end.
+
+%% Still mirrored under both policies: add the newly-wanted mirrors
+%% BEFORE dropping the no-longer-wanted ones, so coverage never drops
+%% below the old set during the transition.
+update_mirrors0(OldQ = #amqqueue{name = QName},
+ NewQ = #amqqueue{name = QName}) ->
+ {OldMNode, OldSNodes, _} = actual_queue_nodes(OldQ),
+ {NewMNode, NewSNodes} = suggested_queue_nodes(NewQ),
+ OldNodes = [OldMNode | OldSNodes],
+ NewNodes = [NewMNode | NewSNodes],
+ add_mirrors (QName, NewNodes -- OldNodes, async),
+ drop_mirrors(QName, OldNodes -- NewNodes),
+ %% This is for the case where no extra nodes were added but we changed to
+ %% a policy requiring auto-sync.
+ maybe_auto_sync(NewQ),
+ ok.
+
+%% The arrival of a newly synced slave may cause the master to die if
+%% the policy does not want the master but it has been kept alive
+%% because there were no synced slaves.
+%%
+%% We don't just call update_mirrors/2 here since that could decide to
+%% start a slave for some other reason, and since we are the slave ATM
+%% that allows complicated deadlocks.
+%% Drop the master if the policy no longer wants it on its current
+%% node, now that a synced slave exists to take over.
+maybe_drop_master_after_sync(Q = #amqqueue{name = QName,
+                                           pid  = MPid}) ->
+    {DesiredMNode, DesiredSNodes} = suggested_queue_nodes(Q),
+    case node(MPid) of
+        DesiredMNode ->
+            ok;
+        OldMNode ->
+            %% ASSERTION - if the policy wants the master to change, it
+            %% has not just shuffled it into the slaves. All our modes
+            %% ensure this does not happen, but we should guard against
+            %% a misbehaving plugin.
+            false = lists:member(OldMNode, DesiredSNodes),
+            drop_mirror(QName, OldMNode)
+    end,
+    ok.
+
+%%----------------------------------------------------------------------------
+
+%% Validate the ha-* keys of a policy definition. ha-params and
+%% ha-sync-mode only make sense in the presence of ha-mode; validation
+%% of the params proper is delegated to the selected ha-mode module.
+validate_policy(KeyList) ->
+    Mode     = proplists:get_value(<<"ha-mode">>, KeyList, none),
+    Params   = proplists:get_value(<<"ha-params">>, KeyList, none),
+    SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none),
+    validate_policy(Mode, Params, SyncMode).
+
+%% Helper: dispatch on the three extracted values.
+validate_policy(none, none, none) ->
+    ok;
+validate_policy(none, _Params, _SyncMode) ->
+    {error, "ha-mode must be specified to specify ha-params or "
+     "ha-sync-mode", []};
+validate_policy(Mode, Params, SyncMode) ->
+    case module(Mode) of
+        {ok, M} -> case M:validate_policy(Params) of
+                       ok -> validate_sync_mode(SyncMode);
+                       E  -> E
+                   end;
+        _       -> {error, "~p is not a valid ha-mode value", [Mode]}
+    end.
+
+%% ha-sync-mode may be "automatic", "manual", or absent ('none');
+%% anything else is an error.
+validate_sync_mode(<<"automatic">>) -> ok;
+validate_sync_mode(<<"manual">>)    -> ok;
+validate_sync_mode(none)            -> ok;
+validate_sync_mode(Mode) ->
+    {error, "ha-sync-mode must be \"manual\" "
+     "or \"automatic\", got ~p", [Mode]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Behaviour implemented by ha-mode plugins (see
+%% rabbit_mirror_queue_mode_all / _exactly / _nodes): each mode decides
+%% which nodes a mirrored queue's master and slaves should occupy.
+-module(rabbit_mirror_queue_mode).
+
+-ifdef(use_specs).
+
+-type(master() :: node()).
+-type(slave() :: node()).
+-type(params() :: any()).
+
+-callback description() -> [proplists:property()].
+
+%% Called whenever we think we might need to change nodes for a
+%% mirrored queue. Note that this is called from a variety of
+%% contexts, both inside and outside Mnesia transactions. Ideally it
+%% will be pure-functional.
+%%
+%% Takes: parameters set in the policy,
+%% current master,
+%% current slaves,
+%% current synchronised slaves,
+%% all nodes to consider
+%%
+%% Returns: tuple of new master, new slaves
+%%
+-callback suggested_queue_nodes(
+ params(), master(), [slave()], [slave()], [node()]) ->
+ {master(), [slave()]}.
+
+%% Are the parameters valid for this mode?
+-callback validate_policy(params()) ->
+ rabbit_policy_validator:validate_results().
+
+-else.
+
+%% Without spec support, describe the same three callbacks via the
+%% old-style behaviour_info/1.
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+ [{description, 0}, {suggested_queue_nodes, 5}, {validate_policy, 1}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% ha-mode "all": mirror the queue onto every running cluster node.
+-module(rabbit_mirror_queue_mode_all).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+%% Register this module under ha_mode / <<"all">> in the registry at
+%% boot so policies can select it.
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode all"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"all">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% Proplist describing this ha-mode.
+description() ->
+    [{description, <<"Mirror queue to all nodes">>}].
+
+%% Mirror on every possible node other than the master's own node; the
+%% current slave lists are irrelevant, since "all" means all.
+suggested_queue_nodes(_Params, MNode, _SNodes, _SSNodes, Poss) ->
+    Slaves = lists:delete(MNode, Poss),
+    {MNode, Slaves}.
+
+%% ha-mode "all" takes no ha-params; anything other than 'none' is
+%% rejected.
+validate_policy(Params) ->
+    case Params of
+        none -> ok;
+        _    -> {error, "ha-mode=\"all\" does not take parameters", []}
+    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% ha-mode "exactly": keep the queue on a fixed total number of nodes.
+-module(rabbit_mirror_queue_mode_exactly).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+%% Register this module under ha_mode / <<"exactly">> in the registry
+%% at boot so policies can select it.
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode exactly"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"exactly">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% Proplist describing this ha-mode.
+description() ->
+    [{description, <<"Mirror queue to a specified number of nodes">>}].
+
+%% When we need to add nodes, we randomise our candidate list as a
+%% crude form of load-balancing. TODO it would also be nice to
+%% randomise the list of ones to remove when we have too many - we
+%% would have to take account of synchronisation though.
+%%
+%% Count is the total number of copies wanted (master included), so we
+%% aim for Count - 1 slaves.
+suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
+    Wanted = Count - 1,
+    Have   = length(SNodes),
+    Slaves = case Wanted > Have of
+                 true  -> Candidates = shuffle((Poss -- [MNode]) -- SNodes),
+                          SNodes ++ lists:sublist(Candidates, Wanted - Have);
+                 false -> lists:sublist(SNodes, Wanted)
+             end,
+    {MNode, Slaves}.
+
+%% Return L in a random order, reseeding this process's random state
+%% from the clock first.
+shuffle(L) ->
+    {A1, A2, A3} = now(),
+    random:seed(A1, A2, A3),
+    Keyed = [{random:uniform(), E} || E <- L],
+    [E || {_Key, E} <- lists:keysort(1, Keyed)].
+
+%% The parameter must be a positive integer: the total number of
+%% copies wanted (master included).
+validate_policy(Count) when is_integer(Count), Count > 0 ->
+    ok;
+validate_policy(Params) ->
+    {error, "ha-mode=\"exactly\" takes an integer, ~p given", [Params]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% ha-mode "nodes": mirror the queue on an explicitly listed set of
+%% nodes.
+-module(rabbit_mirror_queue_mode_nodes).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+%% Register this module under ha_mode / <<"nodes">> in the registry at
+%% boot so policies can select it.
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode nodes"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"nodes">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% Proplist describing this ha-mode.
+description() ->
+    [{description, <<"Mirror queue to specified nodes">>}].
+
+suggested_queue_nodes(PolicyNodes, MNode, _SNodes, SSNodes, Poss) ->
+    %% NOTE(review): list_to_atom/1 can grow the atom table without
+    %% bound; these names come from an admin-set policy, so this is
+    %% presumably acceptable - confirm if policies can be set by
+    %% untrusted parties.
+    Specified = [list_to_atom(binary_to_list(Node)) || Node <- PolicyNodes],
+    %% If the current master is not in the nodes specified, then what we
+    %% want to do depends on whether there are any synchronised slaves.
+    %% If there are then we can just kill the current master - the admin
+    %% has asked for a migration and we should give it to them. If there
+    %% are not however then we must keep the master around so as not to
+    %% lose messages.
+    Wanted = case SSNodes of
+                 [] -> lists:usort([MNode | Specified]);
+                 _  -> Specified
+             end,
+    Unavailable = Wanted -- Poss,
+    Available   = Wanted -- Unavailable,
+    case {Available, lists:member(MNode, Available)} of
+        {[], _} ->
+            %% We have never heard of anything? Not much we can do but
+            %% keep the master alive.
+            {MNode, []};
+        {_, true} ->
+            {MNode, Available -- [MNode]};
+        {_, false} ->
+            %% Make sure the new master is synced! In order to get
+            %% here SSNodes must not be empty.
+            [NewMNode | _] = SSNodes,
+            {NewMNode, Available -- [NewMNode]}
+    end.
+
+%% ha-params for "nodes" must be a non-empty list of node-name
+%% binaries.
+validate_policy([]) ->
+    {error, "ha-mode=\"nodes\" list must be non-empty", []};
+validate_policy(Nodes) when is_list(Nodes) ->
+    case lists:filter(fun (N) -> not is_binary(N) end, Nodes) of
+        []      -> ok;
+        Invalid -> {error, "ha-mode=\"nodes\" takes a list of strings, "
+                    "~p was not a string", [Invalid]}
+    end;
+validate_policy(Params) ->
+    {error, "ha-mode=\"nodes\" takes a list, ~p given", [Params]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_slave).
+
+%% For general documentation of HA design, see
+%% rabbit_mirror_queue_coordinator
+%%
+%% We receive messages from GM and from publishers, and the gm
+%% messages can arrive either before or after the 'actual' message.
+%% All instructions from the GM group must be processed in the order
+%% in which they're received.
+
+%% Public API.
+-export([start_link/1, set_maximum_since_use/2, info/1, go/2]).
+
+%% gen_server2 callbacks.
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+
+%% gm callbacks.
+-export([joined/2, members_changed/3, handle_msg/3]).
+
+-behaviour(gen_server2).
+-behaviour(gm).
+
+-include("rabbit.hrl").
+
+-include("gm_specs.hrl").
+
+%%----------------------------------------------------------------------------
+
+%% Items reported by info/1.
+-define(INFO_KEYS,
+ [pid,
+ name,
+ master_pid,
+ is_synchronised
+ ]).
+
+-define(SYNC_INTERVAL, 25). %% milliseconds
+-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
+-define(DEATH_TIMEOUT, 20000). %% 20 seconds
+
+-record(state, { q,
+ gm,
+ backing_queue,
+ backing_queue_state,
+ sync_timer_ref,
+ rate_timer_ref,
+
+ sender_queues, %% :: Pid -> {Q Msg, Set MsgId, ChState}
+ msg_id_ack, %% :: MsgId -> AckTag
+
+ %% MsgId -> published | {published, ChPid, MsgSeqNo} |
+ %% confirmed | discarded (see promote_me/2's commentary)
+ msg_id_status,
+ %% pmon set of channel pids we have seen deliveries from
+ known_senders,
+
+ %% Master depth - local depth
+ depth_delta
+ }).
+
+%% Start a dormant slave for queue Q; it only becomes active once go/2
+%% is called (see init/1 and handle_go/1).
+start_link(Q) -> gen_server2:start_link(?MODULE, Q, []).
+
+%% Ask the slave to close file handles unused for longer than Age (the
+%% work happens in handle_cast).
+set_maximum_since_use(QPid, Age) ->
+ gen_server2:cast(QPid, {set_maximum_since_use, Age}).
+
+%% Synchronously fetch the ?INFO_KEYS items for this slave.
+info(QPid) -> gen_server2:call(QPid, info, infinity).
+
+init(Q) ->
+ ?store_proc_name(Q#amqqueue.name),
+ %% Start as {not_started, Q}; the real startup is deferred to
+ %% handle_go/1, triggered by go/2.
+ {ok, {not_started, Q}, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN,
+ ?DESIRED_HIBERNATE}}.
+
+%% Activate the slave, either waiting for it to come up or not.
+go(SPid, sync) -> gen_server2:call(SPid, go, infinity);
+go(SPid, async) -> gen_server2:cast(SPid, go).
+
+%% Turn a {not_started, Q} slave into a running one: join the GM group,
+%% register ourselves in the queue record (inside an mnesia transaction
+%% via init_it/4) and build the initial #state{}. Returns {ok, State},
+%% or {error, Reason} for the various "someone beat us to it" outcomes.
+handle_go(Q = #amqqueue{name = QName}) ->
+ %% We join the GM group before we add ourselves to the amqqueue
+ %% record. As a result:
+ %% 1. We can receive msgs from GM that correspond to messages we will
+ %% never receive from publishers.
+ %% 2. When we receive a message from publishers, we must receive a
+ %% message from the GM group for it.
+ %% 3. However, that instruction from the GM group can arrive either
+ %% before or after the actual message. We need to be able to
+ %% distinguish between GM instructions arriving early, and case (1)
+ %% above.
+ %%
+ process_flag(trap_exit, true), %% amqqueue_process traps exits too.
+ {ok, GM} = gm:start_link(QName, ?MODULE, [self()],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ MRef = erlang:monitor(process, GM),
+ %% We ignore the DOWN message because we are also linked and
+ %% trapping exits, we just want to not get stuck and we will exit
+ %% later.
+ receive
+ {joined, GM} -> erlang:demonitor(MRef, [flush]),
+ ok;
+ {'DOWN', MRef, _, _, _} -> ok
+ end,
+ Self = self(),
+ Node = node(),
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() -> init_it(Self, GM, Node, QName) end) of
+ {new, QPid, GMPids} ->
+ ok = file_handle_cache:register_callback(
+ rabbit_amqqueue, set_maximum_since_use, [Self]),
+ ok = rabbit_memory_monitor:register(
+ Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
+ {ok, BQ} = application:get_env(backing_queue_module),
+ Q1 = Q #amqqueue { pid = QPid },
+ BQS = bq_init(BQ, Q1, new),
+ State = #state { q = Q1,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = undefined,
+ sync_timer_ref = undefined,
+
+ sender_queues = dict:new(),
+ msg_id_ack = dict:new(),
+
+ msg_id_status = dict:new(),
+ known_senders = pmon:new(delegate),
+
+ depth_delta = undefined
+ },
+ ok = gm:broadcast(GM, request_depth),
+ ok = gm:validate_members(GM, [GM | [G || {G, _} <- GMPids]]),
+ rabbit_mirror_queue_misc:maybe_auto_sync(Q1),
+ {ok, State};
+ {stale, StalePid} ->
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Detected stale HA master: ~p~n", [StalePid]),
+ gm:leave(GM),
+ {error, {stale_master_pid, StalePid}};
+ duplicate_live_master ->
+ gm:leave(GM),
+ {error, {duplicate_live_master, Node}};
+ existing ->
+ gm:leave(GM),
+ {error, normal};
+ master_in_recovery ->
+ gm:leave(GM),
+ %% The queue record vanished - we must have a master starting
+ %% concurrently with us. In that case we can safely decide to do
+ %% nothing here, and the master will start us in
+ %% master:init_with_existing_bq/3
+ {error, normal}
+ end.
+
+%% Runs inside an mnesia transaction. Work out whether this node can
+%% host a new slave for QName: returns {new, MasterPid, GMPids} when
+%% we have been added to the record, {stale, MasterPid},
+%% duplicate_live_master, existing, or master_in_recovery.
+init_it(Self, GM, Node, QName) ->
+ case mnesia:read({rabbit_queue, QName}) of
+ [Q = #amqqueue { pid = QPid, slave_pids = SPids, gm_pids = GMPids }] ->
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of
+ [] -> add_slave(Q, Self, GM),
+ {new, QPid, GMPids};
+ [QPid] -> case rabbit_misc:is_process_alive(QPid) of
+ true -> duplicate_live_master;
+ false -> {stale, QPid}
+ end;
+ [SPid] -> case rabbit_misc:is_process_alive(SPid) of
+ true -> existing;
+ false -> GMPids1 = [T || T = {_, S} <- GMPids,
+ S =/= SPid],
+ Q1 = Q#amqqueue{
+ slave_pids = SPids -- [SPid],
+ gm_pids = GMPids1},
+ add_slave(Q1, Self, GM),
+ {new, QPid, GMPids1}
+ end
+ end;
+ [] ->
+ master_in_recovery
+ end.
+
+%% Append the new slave pid at the end of slave_pids so the list stays
+%% in descending order of age (see
+%% rabbit_mirror_queue_misc:promote_slave/1), and record its GM pid.
+add_slave(Q = #amqqueue { slave_pids = SPids, gm_pids = GMPids }, New, GM) ->
+    Q1 = Q#amqqueue{slave_pids = SPids ++ [New],
+                    gm_pids    = [{GM, New} | GMPids]},
+    rabbit_mirror_queue_misc:store_updated_slaves(Q1).
+
+handle_call(go, _From, {not_started, Q} = NotStarted) ->
+ case handle_go(Q) of
+ {ok, State} -> {reply, ok, State};
+ {error, Error} -> {stop, Error, NotStarted}
+ end;
+
+%% The GM group reported dead members: work out whether the master
+%% changed, and whether we must be promoted to master ourselves.
+handle_call({gm_deaths, DeadGMPids}, From,
+ State = #state { gm = GM, q = Q = #amqqueue {
+ name = QName, pid = MPid }}) ->
+ Self = self(),
+ case rabbit_mirror_queue_misc:remove_from_queue(QName, Self, DeadGMPids) of
+ {error, not_found} ->
+ gen_server2:reply(From, ok),
+ {stop, normal, State};
+ {ok, Pid, DeadPids} ->
+ rabbit_mirror_queue_misc:report_deaths(Self, false, QName,
+ DeadPids),
+ case Pid of
+ MPid ->
+ %% master hasn't changed
+ gen_server2:reply(From, ok),
+ noreply(State);
+ Self ->
+ %% we've become master
+ QueueState = promote_me(From, State),
+ {become, rabbit_amqqueue_process, QueueState, hibernate};
+ _ ->
+ %% master has changed to not us
+ gen_server2:reply(From, ok),
+ %% Since GM is by nature lazy we need to make sure
+ %% there is some traffic when a master dies, to
+ %% make sure all slaves get informed of the
+ %% death. That is all process_death does, create
+ %% some traffic.
+ ok = gm:broadcast(GM, process_death),
+ noreply(State #state { q = Q #amqqueue { pid = Pid } })
+ end
+ end;
+
+handle_call(info, _From, State) ->
+ reply(infos(?INFO_KEYS, State), State).
+
+handle_cast(go, {not_started, Q} = NotStarted) ->
+ case handle_go(Q) of
+ {ok, State} -> {noreply, State};
+ {error, Error} -> {stop, Error, NotStarted}
+ end;
+
+handle_cast({run_backing_queue, Mod, Fun}, State) ->
+ noreply(run_backing_queue(Mod, Fun, State));
+
+%% An instruction broadcast over the GM group.
+handle_cast({gm, Instruction}, State) ->
+ handle_process_result(process_instruction(Instruction, State));
+
+handle_cast({deliver, Delivery = #delivery{sender = Sender}, true, Flow},
+ State) ->
+ %% Asynchronous, non-"mandatory", deliver mode.
+ case Flow of
+ flow -> credit_flow:ack(Sender);
+ noflow -> ok
+ end,
+ noreply(maybe_enqueue_message(Delivery, State));
+
+%% The syncer asks us to take part in a mirror sync; on success our
+%% depth delta becomes zero (i.e. fully synchronised).
+handle_cast({sync_start, Ref, Syncer},
+ State = #state { depth_delta = DD,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State1 = #state{rate_timer_ref = TRef} = ensure_rate_timer(State),
+ S = fun({MA, TRefN, BQSN}) ->
+ State1#state{depth_delta = undefined,
+ msg_id_ack = dict:from_list(MA),
+ rate_timer_ref = TRefN,
+ backing_queue_state = BQSN}
+ end,
+ case rabbit_mirror_queue_sync:slave(
+ DD, Ref, TRef, Syncer, BQ, BQS,
+ fun (BQN, BQSN) ->
+ BQSN1 = update_ram_duration(BQN, BQSN),
+ TRefN = erlang:send_after(?RAM_DURATION_UPDATE_INTERVAL,
+ self(), update_ram_duration),
+ {TRefN, BQSN1}
+ end) of
+ denied -> noreply(State1);
+ {ok, Res} -> noreply(set_delta(0, S(Res)));
+ {failed, Res} -> noreply(S(Res));
+ {stop, Reason, Res} -> {stop, Reason, S(Res)}
+ end;
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State);
+
+handle_cast({set_ram_duration_target, Duration},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ noreply(State #state { backing_queue_state = BQS1 }).
+
+handle_info(update_ram_duration, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQS1 = update_ram_duration(BQ, BQS),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(State #state {
+ rate_timer_ref = undefined,
+ backing_queue_state = BQS1 }),
+ {noreply, State1, Timeout};
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(
+ State #state { sync_timer_ref = undefined }));
+
+handle_info(timeout, State) ->
+ noreply(backing_queue_timeout(State));
+
+%% A channel (sender) we were monitoring has died locally.
+handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) ->
+ local_sender_death(ChPid, State),
+ noreply(maybe_forget_sender(ChPid, down_from_ch, State));
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State);
+
+%% Anything else is a bug: crash loudly rather than accumulate mail.
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(_Reason, {not_started, _Q}) ->
+ ok;
+terminate(_Reason, #state { backing_queue_state = undefined }) ->
+ %% We've received a delete_and_terminate from gm, thus nothing to
+ %% do here.
+ ok;
+terminate({shutdown, dropped} = R, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% See rabbit_mirror_queue_master:terminate/2
+ terminate_common(State),
+ BQ:delete_and_terminate(R, BQS);
+terminate(shutdown, State) ->
+ terminate_shutdown(shutdown, State);
+terminate({shutdown, _} = R, State) ->
+ terminate_shutdown(R, State);
+terminate(Reason, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ terminate_common(State),
+ BQ:delete_and_terminate(Reason, BQS);
+%% NOTE(review): this head takes ([SPid], Reason) - it looks like the
+%% gm behaviour's terminate callback shape rather than gen_server2's;
+%% confirm against gm_specs.hrl.
+terminate([_SPid], _Reason) ->
+ %% gm case
+ ok.
+
+%% If the Reason is shutdown, or {shutdown, _}, it is not the queue
+%% being deleted: it's just the node going down. Even though we're a
+%% slave, we have no idea whether or not we'll be the only copy coming
+%% back up. Thus we must assume we will be, and preserve anything we
+%% have on disk.
+terminate_shutdown(Reason, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ terminate_common(State),
+ BQ:terminate(Reason, BQS).
+
+%% Teardown shared by all terminate paths: deregister from the memory
+%% monitor and cancel our timers.
+terminate_common(State) ->
+ ok = rabbit_memory_monitor:deregister(self()),
+ stop_rate_timer(stop_sync_timer(State)).
+
+%% No state migration is performed between code versions.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_pre_hibernate({not_started, _Q} = State) ->
+ {hibernate, State};
+
+%% Before hibernating, report our ram duration to the memory monitor,
+%% let the backing queue prepare, and stop the rate timer until we
+%% wake up again.
+handle_pre_hibernate(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ BQS3 = BQ:handle_pre_hibernate(BQS2),
+ {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}.
+
+%% gen_server2 call priorities: info queries jump the queue, death
+%% notifications come next, everything else runs at normal priority.
+prioritise_call(info, _From, _Len, _State)               -> 9;
+prioritise_call({gm_deaths, _Dead}, _From, _Len, _State) -> 5;
+prioritise_call(_Msg, _From, _Len, _State)               -> 0.
+
+%% gen_server2 cast priorities: memory-control messages first, backing
+%% queue callbacks next, then GM instructions, then everything else.
+prioritise_cast({set_ram_duration_target, _Duration}, _Len, _State) -> 8;
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State)        -> 8;
+prioritise_cast({run_backing_queue, _Mod, _Fun}, _Len, _State)      -> 6;
+prioritise_cast({gm, _Msg}, _Len, _State)                           -> 5;
+prioritise_cast(_Msg, _Len, _State)                                 -> 0.
+
+%% gen_server2 info priorities: ram-duration updates beat sync
+%% timeouts, which beat everything else.
+prioritise_info(update_ram_duration, _Len, _State) -> 8;
+prioritise_info(sync_timeout, _Len, _State)        -> 6;
+prioritise_info(_Msg, _Len, _State)                -> 0.
+
+%% Delegate message-queue formatting to rabbit_misc.
+format_message_queue(Opt, MQ) ->
+    rabbit_misc:format_message_queue(Opt, MQ).
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+%% gm callback: tell the slave process (the sole element of Args) that
+%% the GM group join has completed.
+joined([SPid], _Members) ->
+    SPid ! {joined, self()},
+    ok.
+
+members_changed([_SPid], _Births, []) ->
+ ok;
+%% Some GM members died: ask the slave process to deal with it. If it
+%% replies that it has been promoted, we become the coordinator. The
+%% exit handler swallows the case where the slave is already gone.
+members_changed([ SPid], _Births, Deaths) ->
+ case rabbit_misc:with_exit_handler(
+ rabbit_misc:const(ok),
+ fun() ->
+ gen_server2:call(SPid, {gm_deaths, Deaths}, infinity)
+ end) of
+ ok -> ok;
+ {promote, CPid} -> {become, rabbit_mirror_queue_coordinator, [CPid]}
+ end.
+
+%% gm callback: a message broadcast over the group. Most instructions
+%% are forwarded to the slave process as a {gm, Msg} cast.
+handle_msg([_SPid], _From, request_depth) ->
+ %% This is only of value to the master
+ ok;
+handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) ->
+ %% This is only of value to the master
+ ok;
+handle_msg([_SPid], _From, process_death) ->
+ %% We must not take any notice of the master death here since it
+ %% comes without ordering guarantees - there could still be
+ %% messages from the master we have yet to receive. When we get
+ %% members_changed, then there will be no more messages.
+ ok;
+handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
+ ok = gen_server2:cast(CPid, {gm, Msg}),
+ {stop, {shutdown, ring_shutdown}};
+handle_msg([SPid], _From, {sync_start, Ref, Syncer, SPids}) ->
+ case lists:member(SPid, SPids) of
+ true -> gen_server2:cast(SPid, {sync_start, Ref, Syncer});
+ false -> ok
+ end;
+handle_msg([SPid], _From, Msg) ->
+ ok = gen_server2:cast(SPid, {gm, Msg}).
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+%% Assemble the {Key, Value} pairs for info/1.
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+%% Compute a single info item; unknown keys are an error.
+i(pid, _State) -> self();
+i(name, #state { q = #amqqueue { name = Name } }) -> Name;
+i(master_pid, #state { q = #amqqueue { pid = MPid } }) -> MPid;
+i(is_synchronised, #state { depth_delta = DD }) -> DD =:= 0;
+i(Item, _State) -> throw({bad_argument, Item}).
+
+%% Initialise the backing queue, routing its asynchronous callbacks
+%% back through this process via rabbit_amqqueue:run_backing_queue.
+bq_init(BQ, Q, Recover) ->
+ Self = self(),
+ BQ:init(Q, Recover,
+ fun (Mod, Fun) ->
+ rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
+ end).
+
+run_backing_queue(rabbit_mirror_queue_master, Fun, State) ->
+ %% Yes, this might look a little crazy, but see comments in
+ %% confirm_sender_death/1
+ Fun(?MODULE, State);
+run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
+
+%% Acknowledge receipt of a mandatory delivery back to its sender.
+send_mandatory(#delivery{mandatory = false}) ->
+ ok;
+send_mandatory(#delivery{mandatory = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}) ->
+ gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).
+
+%% For persistent messages published to durable queues, record the
+%% pending confirm in MS (keyed by msg id); otherwise confirm to the
+%% sender immediately. Returns the (possibly updated) MS.
+send_or_record_confirm(_, #delivery{ confirm = false }, MS, _State) ->
+ MS;
+send_or_record_confirm(published, #delivery { sender = ChPid,
+ confirm = true,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message {
+ id = MsgId,
+ is_persistent = true } },
+ MS, #state { q = #amqqueue { durable = true } }) ->
+ dict:store(MsgId, {published, ChPid, MsgSeqNo} , MS);
+send_or_record_confirm(_Status, #delivery { sender = ChPid,
+ confirm = true,
+ msg_seq_no = MsgSeqNo },
+ MS, _State) ->
+ ok = rabbit_misc:confirm_to_sender(ChPid, [MsgSeqNo]),
+ MS.
+
+%% The backing queue says MsgIds are confirmed; reconcile with what we
+%% have seen from the channels (msg_id_status) and send out any
+%% confirms that are now complete.
+confirm_messages(MsgIds, State = #state { msg_id_status = MS }) ->
+ {CMs, MS1} =
+ lists:foldl(
+ fun (MsgId, {CMsN, MSN} = Acc) ->
+ %% We will never see 'discarded' here
+ case dict:find(MsgId, MSN) of
+ error ->
+ %% If it needed confirming, it'll have
+ %% already been done.
+ Acc;
+ {ok, published} ->
+ %% Still not seen it from the channel, just
+ %% record that it's been confirmed.
+ {CMsN, dict:store(MsgId, confirmed, MSN)};
+ {ok, {published, ChPid, MsgSeqNo}} ->
+ %% Seen from both GM and Channel. Can now
+ %% confirm.
+ {rabbit_misc:gb_trees_cons(ChPid, MsgSeqNo, CMsN),
+ dict:erase(MsgId, MSN)};
+ {ok, confirmed} ->
+ %% It's already been confirmed. This is
+ %% probably it's been both sync'd to disk
+ %% and then delivered and ack'd before we've
+ %% seen the publish from the
+ %% channel. Nothing to do here.
+ Acc
+ end
+ end, {gb_trees:empty(), MS}, MsgIds),
+ rabbit_misc:gb_trees_foreach(fun rabbit_misc:confirm_to_sender/2, CMs),
+ State #state { msg_id_status = MS1 }.
+
+%% Map process_instruction/2 results to gen_server2 returns.
+handle_process_result({ok, State}) -> noreply(State);
+handle_process_result({stop, State}) -> {stop, normal, State}.
+
+-ifdef(use_specs).
+-spec(promote_me/2 :: ({pid(), term()}, #state{}) -> no_return()).
+-endif.
+%% Turn this slave into the master: start a coordinator, reply
+%% {promote, CPid} to the caller, and hand our backing queue state
+%% over to rabbit_amqqueue_process. The long comment below explains
+%% which in-flight messages, acks and confirms carry over, and how.
+promote_me(From, #state { q = Q = #amqqueue { name = QName },
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = RateTRef,
+ sender_queues = SQ,
+ msg_id_ack = MA,
+ msg_id_status = MS,
+ known_senders = KS }) ->
+ rabbit_mirror_queue_misc:log_info(QName, "Promoting slave ~s to master~n",
+ [rabbit_misc:pid_to_string(self())]),
+ Q1 = Q #amqqueue { pid = self() },
+ {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
+ Q1, GM, rabbit_mirror_queue_master:sender_death_fun(),
+ rabbit_mirror_queue_master:depth_fun()),
+ true = unlink(GM),
+ gen_server2:reply(From, {promote, CPid}),
+
+ %% Everything that we're monitoring, we need to ensure our new
+ %% coordinator is monitoring.
+ MPids = pmon:monitored(KS),
+ ok = rabbit_mirror_queue_coordinator:ensure_monitoring(CPid, MPids),
+
+ %% We find all the messages that we've received from channels but
+ %% not from gm, and pass them to the
+ %% queue_process:init_with_backing_queue_state to be enqueued.
+ %%
+ %% We also have to requeue messages which are pending acks: the
+ %% consumers from the master queue have been lost and so these
+ %% messages need requeuing. They might also be pending
+ %% confirmation, and indeed they might also be pending arrival of
+ %% the publication from the channel itself, if we received both
+ %% the publication and the fetch via gm first! Requeuing doesn't
+ %% affect confirmations: if the message was previously pending a
+ %% confirmation then it still will be, under the same msg_id. So
+ %% as a master, we need to be prepared to filter out the
+ %% publication of said messages from the channel (is_duplicate
+ %% (thus such requeued messages must remain in the msg_id_status
+ %% (MS) which becomes seen_status (SS) in the master)).
+ %%
+ %% Then there are messages we already have in the queue, which are
+ %% not currently pending acknowledgement:
+ %% 1. Messages we've only received via gm:
+ %% Filter out subsequent publication from channel through
+ %% validate_message. Might have to issue confirms then or
+ %% later, thus queue_process state will have to know that
+ %% there's a pending confirm.
+ %% 2. Messages received via both gm and channel:
+ %% Queue will have to deal with issuing confirms if necessary.
+ %%
+ %% MS contains the following three entry types:
+ %%
+ %% a) published:
+ %% published via gm only; pending arrival of publication from
+ %% channel, maybe pending confirm.
+ %%
+ %% b) {published, ChPid, MsgSeqNo}:
+ %% published via gm and channel; pending confirm.
+ %%
+ %% c) confirmed:
+ %% published via gm only, and confirmed; pending publication
+ %% from channel.
+ %%
+ %% d) discarded:
+ %% seen via gm only as discarded. Pending publication from
+ %% channel
+ %%
+ %% The forms a, c and d only, need to go to the master state
+ %% seen_status (SS).
+ %%
+ %% The form b only, needs to go through to the queue_process
+ %% state to form the msg_id_to_channel mapping (MTC).
+ %%
+ %% No messages that are enqueued from SQ at this point will have
+ %% entries in MS.
+ %%
+ %% Messages that are extracted from MA may have entries in MS, and
+ %% those messages are then requeued. However, as discussed above,
+ %% this does not affect MS, nor which bits go through to SS in
+ %% Master, or MTC in queue_process.
+
+ St = [published, confirmed, discarded],
+ SS = dict:filter(fun (_MsgId, Status) -> lists:member(Status, St) end, MS),
+ AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)],
+
+ MasterState = rabbit_mirror_queue_master:promote_backing_queue_state(
+ QName, CPid, BQ, BQS, GM, AckTags, SS, MPids),
+
+ MTC = dict:fold(fun (MsgId, {published, ChPid, MsgSeqNo}, MTC0) ->
+ gb_trees:insert(MsgId, {ChPid, MsgSeqNo}, MTC0);
+ (_Msgid, _Status, MTC0) ->
+ MTC0
+ end, gb_trees:empty(), MS),
+ Deliveries = [Delivery#delivery{mandatory = false} || %% [0]
+ {_ChPid, {PubQ, _PendCh, _ChState}} <- dict:to_list(SQ),
+ Delivery <- queue:to_list(PubQ)],
+ AwaitGmDown = [ChPid || {ChPid, {_, _, down_from_ch}} <- dict:to_list(SQ)],
+ KS1 = lists:foldl(fun (ChPid0, KS0) ->
+ pmon:demonitor(ChPid0, KS0)
+ end, KS, AwaitGmDown),
+ rabbit_misc:store_proc_name(rabbit_amqqueue_process, QName),
+ rabbit_amqqueue_process:init_with_backing_queue_state(
+ Q1, rabbit_mirror_queue_master, MasterState, RateTRef, Deliveries, KS1,
+ MTC).
+
+%% [0] We reset mandatory to false here because we will have sent the
+%% mandatory_received already as soon as we got the message
+
+%% Standard {noreply, ...} return, with pending confirms drained and
+%% the rate timer kept alive.
+noreply(State) ->
+ {NewState, Timeout} = next_state(State),
+ {noreply, ensure_rate_timer(NewState), Timeout}.
+
+%% As noreply/1, but for {reply, ...} returns.
+reply(Reply, State) ->
+ {NewState, Timeout} = next_state(State),
+ {reply, Reply, ensure_rate_timer(NewState), Timeout}.
+
+%% Drain and issue confirms from the backing queue, then choose the
+%% next gen_server2 timeout from the BQ's needs_timeout answer.
+next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) ->
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ State1 = confirm_messages(MsgIds,
+ State #state { backing_queue_state = BQS1 }),
+ case BQ:needs_timeout(BQS1) of
+ false -> {stop_sync_timer(State1), hibernate };
+ idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
+ timed -> {ensure_sync_timer(State1), 0 }
+ end.
+
+%% Run the backing queue's periodic timeout callback through the common
+%% run_backing_queue/3 plumbing.
+backing_queue_timeout(State = #state { backing_queue = BQ }) ->
+    run_backing_queue(BQ, fun (Mod, BQS) -> Mod:timeout(BQS) end, State).
+
+%% Arm (idempotently) the sync timer to deliver 'sync_timeout' after
+%% ?SYNC_INTERVAL.
+ensure_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.sync_timer_ref,
+ ?SYNC_INTERVAL, sync_timeout).
+
+stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #state.sync_timer_ref).
+
+%% Arm (idempotently) the ram-duration timer to deliver
+%% 'update_ram_duration' after ?RAM_DURATION_UPDATE_INTERVAL.
+ensure_rate_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.rate_timer_ref,
+ ?RAM_DURATION_UPDATE_INTERVAL,
+ update_ram_duration).
+
+stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #state.rate_timer_ref).
+
+%% Make sure we hold a monitor on the given channel pid; pmon:monitor/2
+%% is idempotent, so repeated calls are harmless.
+ensure_monitoring(ChPid, State = #state { known_senders = KS }) ->
+ State #state { known_senders = pmon:monitor(ChPid, KS) }.
+
+%% Called when we see a local DOWN for a channel. The channel is
+%% monitored iff we have received a delivery from it but not yet heard
+%% about its death from the master, in which case we must point the
+%% death out to the master (see essay).
+local_sender_death(ChPid, #state { known_senders = KS }) ->
+    case pmon:is_monitored(ChPid, KS) of
+        true  -> ok = confirm_sender_death(ChPid);
+        false -> ok
+    end.
+
+%% Schedule (after ?DEATH_TIMEOUT) a check that the rest of the mirror
+%% group learns about a locally-detected channel death; re-broadcasts
+%% ensure_monitoring over GM and reschedules itself until the
+%% sender_death arrives (or we have been promoted to master).
+confirm_sender_death(Pid) ->
+ %% We have to deal with the possibility that we'll be promoted to
+ %% master before this thing gets run. Consequently we set the
+ %% module to rabbit_mirror_queue_master so that if we do become a
+ %% rabbit_amqqueue_process before then, sane things will happen.
+ Fun =
+ fun (?MODULE, State = #state { known_senders = KS,
+ gm = GM }) ->
+ %% We're running still as a slave
+ %%
+ %% See comment in local_sender_death/2; we might have
+ %% received a sender_death in the meanwhile so check
+ %% again.
+ ok = case pmon:is_monitored(Pid, KS) of
+ false -> ok;
+ true -> gm:broadcast(GM, {ensure_monitoring, [Pid]}),
+ confirm_sender_death(Pid)
+ end,
+ State;
+ (rabbit_mirror_queue_master, State) ->
+ %% We've become a master. State is now opaque to
+ %% us. When we became master, if Pid was still known
+ %% to us then we'd have set up monitoring of it then,
+ %% so this is now a noop.
+ State
+ end,
+ %% Note that we do not remove our knowledge of this ChPid until we
+ %% get the sender_death from GM as well as a DOWN notification.
+ {ok, _TRef} = timer:apply_after(
+ ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue,
+ [self(), rabbit_mirror_queue_master, Fun]),
+ ok.
+
+%% Decide whether a channel may be forgotten: true only once we have
+%% seen it go down from one source (channel DOWN or gm sender_death)
+%% while having already recorded the other. Deliberately partial: there
+%% is no clause for equal non-gm states, which never occur (see [1]).
+forget_sender(_, running) -> false;
+forget_sender(down_from_gm, down_from_gm) -> false; %% [1]
+forget_sender(Down1, Down2) when Down1 =/= Down2 -> true.
+
+%% [1] If another slave goes through confirm_sender_death/1 before we
+%% do we can get two GM sender_death messages in a row for the same
+%% channel - don't treat that as anything special.
+
+%% Record and process lifetime events from channels. Forget all about a channel
+%% only when down notifications are received from both the channel and from gm.
+%% ChState is the event just observed (down_from_ch | down_from_gm);
+%% the stored ChStateRecord is the channel's previous lifecycle state.
+%% Forgetting erases the sender queue, any pending msg-id statuses, the
+%% monitor and the credit_flow state for that channel.
+maybe_forget_sender(ChPid, ChState, State = #state { sender_queues = SQ,
+ msg_id_status = MS,
+ known_senders = KS }) ->
+ case dict:find(ChPid, SQ) of
+ error ->
+ State;
+ {ok, {MQ, PendCh, ChStateRecord}} ->
+ case forget_sender(ChState, ChStateRecord) of
+ true ->
+ credit_flow:peer_down(ChPid),
+ State #state { sender_queues = dict:erase(ChPid, SQ),
+ msg_id_status = lists:foldl(
+ fun dict:erase/2,
+ MS, sets:to_list(PendCh)),
+ known_senders = pmon:demonitor(ChPid, KS) };
+ false ->
+ SQ1 = dict:store(ChPid, {MQ, PendCh, ChState}, SQ),
+ State #state { sender_queues = SQ1 }
+ end
+ end.
+
+%% Handle a delivery arriving directly from a channel. If the master's
+%% GM instruction for this MsgId has already been processed (id present
+%% in MS) we can send/record the confirm now; otherwise we queue the
+%% delivery on the per-channel sender queue until that instruction
+%% arrives (see publish_or_discard/4 for the other side of the race).
+maybe_enqueue_message(
+ Delivery = #delivery { message = #basic_message { id = MsgId },
+ sender = ChPid },
+ State = #state { sender_queues = SQ, msg_id_status = MS }) ->
+ send_mandatory(Delivery), %% must do this before confirms
+ State1 = ensure_monitoring(ChPid, State),
+ %% We will never see {published, ChPid, MsgSeqNo} here.
+ case dict:find(MsgId, MS) of
+ error ->
+ {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ),
+ MQ1 = queue:in(Delivery, MQ),
+ SQ1 = dict:store(ChPid, {MQ1, PendingCh, ChState}, SQ),
+ State1 #state { sender_queues = SQ1 };
+ {ok, Status} ->
+ MS1 = send_or_record_confirm(
+ Status, Delivery, dict:erase(MsgId, MS), State1),
+ SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ),
+ State1 #state { msg_id_status = MS1,
+ sender_queues = SQ1 }
+ end.
+
+%% Look up the per-channel sender-queue entry {pending deliveries,
+%% pending msg-id set, channel state}, defaulting to a fresh empty entry
+%% for a channel we have not seen before.
+get_sender_queue(ChPid, SQ) ->
+    case dict:find(ChPid, SQ) of
+        {ok, Entry} -> Entry;
+        error       -> {queue:new(), sets:new(), running}
+    end.
+
+%% Drop MsgId from the pending-confirm set of ChPid's sender-queue
+%% entry; a no-op for channels we do not know about.
+remove_from_pending_ch(MsgId, ChPid, SQ) ->
+    case dict:find(ChPid, SQ) of
+        {ok, {MQ, PendingCh, ChState}} ->
+            PendingCh1 = sets:del_element(MsgId, PendingCh),
+            dict:store(ChPid, {MQ, PendingCh1, ChState}, SQ);
+        error ->
+            SQ
+    end.
+
+%% Apply a publish/discard GM instruction. Three cases: (a) the channel
+%% queue is empty - the instruction beat the channel's own delivery, so
+%% record the status and remember MsgId as pending; (b) the head of the
+%% channel queue is this very message - we saw the delivery first, so
+%% confirms can be dealt with now; (c) the head is some other message -
+%% the instruction predates our membership of slave_pids, and the
+%% delivery will never reach us directly.
+publish_or_discard(Status, ChPid, MsgId,
+ State = #state { sender_queues = SQ, msg_id_status = MS }) ->
+ %% We really are going to do the publish/discard right now, even
+ %% though we may not have seen it directly from the channel. But
+ %% we cannot issue confirms until the latter has happened. So we
+ %% need to keep track of the MsgId and its confirmation status in
+ %% the meantime.
+ State1 = ensure_monitoring(ChPid, State),
+ {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ),
+ {MQ1, PendingCh1, MS1} =
+ case queue:out(MQ) of
+ {empty, _MQ2} ->
+ {MQ, sets:add_element(MsgId, PendingCh),
+ dict:store(MsgId, Status, MS)};
+ {{value, Delivery = #delivery {
+ message = #basic_message { id = MsgId } }}, MQ2} ->
+ {MQ2, PendingCh,
+ %% We received the msg from the channel first. Thus
+ %% we need to deal with confirms here.
+ send_or_record_confirm(Status, Delivery, MS, State1)};
+ {{value, #delivery {}}, _MQ2} ->
+ %% The instruction was sent to us before we were
+ %% within the slave_pids within the #amqqueue{}
+ %% record. We'll never receive the message directly
+ %% from the channel. And the channel will not be
+ %% expecting any confirms from us.
+ {MQ, PendingCh, MS}
+ end,
+ SQ1 = dict:store(ChPid, {MQ1, PendingCh1, ChState}, SQ),
+ State1 #state { sender_queues = SQ1, msg_id_status = MS1 }.
+
+
+%% Apply one GM instruction from the master against our backing queue.
+%% Every clause returns {ok, State1} except delete_and_terminate, which
+%% returns {stop, State1}. (The stray blank line that previously split
+%% the clause sequence between 'depth' and 'delete_and_terminate' has
+%% been removed; all clauses belong to this one function.)
+process_instruction({publish, ChPid, MsgProps,
+                     Msg = #basic_message { id = MsgId }}, State) ->
+    State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+        publish_or_discard(published, ChPid, MsgId, State),
+    BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, BQS),
+    {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({publish_delivered, ChPid, MsgProps,
+                     Msg = #basic_message { id = MsgId }}, State) ->
+    State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+        publish_or_discard(published, ChPid, MsgId, State),
+    true = BQ:is_empty(BQS), %% ASSERTION: delivered straight through
+    {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, BQS),
+    {ok, maybe_store_ack(true, MsgId, AckTag,
+                         State1 #state { backing_queue_state = BQS1 })};
+process_instruction({discard, ChPid, MsgId}, State) ->
+    State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+        publish_or_discard(discarded, ChPid, MsgId, State),
+    BQS1 = BQ:discard(MsgId, ChPid, BQS),
+    {ok, State1 #state { backing_queue_state = BQS1 }};
+%% Drop from the head until our length matches the master's post-drop
+%% Length; when not acking, fold any shortfall into the sync delta.
+process_instruction({drop, Length, Dropped, AckRequired},
+                    State = #state { backing_queue = BQ,
+                                     backing_queue_state = BQS }) ->
+    QLen = BQ:len(BQS),
+    ToDrop = case QLen - Length of
+                 N when N > 0 -> N;
+                 _            -> 0
+             end,
+    State1 = lists:foldl(
+               fun (const, StateN = #state{backing_queue_state = BQSN}) ->
+                       {{MsgId, AckTag}, BQSN1} = BQ:drop(AckRequired, BQSN),
+                       maybe_store_ack(
+                         AckRequired, MsgId, AckTag,
+                         StateN #state { backing_queue_state = BQSN1 })
+               end, State, lists:duplicate(ToDrop, const)),
+    {ok, case AckRequired of
+             true  -> State1;
+             false -> update_delta(ToDrop - Dropped, State1)
+         end};
+process_instruction({ack, MsgIds},
+                    State = #state { backing_queue = BQ,
+                                     backing_queue_state = BQS,
+                                     msg_id_ack = MA }) ->
+    {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
+    {MsgIds1, BQS1} = BQ:ack(AckTags, BQS),
+    [] = MsgIds1 -- MsgIds, %% ASSERTION
+    {ok, update_delta(length(MsgIds1) - length(MsgIds),
+                      State #state { msg_id_ack = MA1,
+                                     backing_queue_state = BQS1 })};
+process_instruction({requeue, MsgIds},
+                    State = #state { backing_queue = BQ,
+                                     backing_queue_state = BQS,
+                                     msg_id_ack = MA }) ->
+    {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
+    {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+    {ok, State #state { msg_id_ack = MA1,
+                        backing_queue_state = BQS1 }};
+process_instruction({sender_death, ChPid},
+                    State = #state { known_senders = KS }) ->
+    %% The channel will be monitored iff we have received a message
+    %% from it. In this case we just want to avoid doing work if we
+    %% never got any messages.
+    {ok, case pmon:is_monitored(ChPid, KS) of
+             false -> State;
+             true  -> maybe_forget_sender(ChPid, down_from_gm, State)
+         end};
+process_instruction({depth, Depth},
+                    State = #state { backing_queue = BQ,
+                                     backing_queue_state = BQS }) ->
+    {ok, set_delta(Depth - BQ:depth(BQS), State)};
+process_instruction({delete_and_terminate, Reason},
+                    State = #state { backing_queue = BQ,
+                                     backing_queue_state = BQS }) ->
+    BQ:delete_and_terminate(Reason, BQS),
+    {stop, State #state { backing_queue_state = undefined }}.
+
+%% Translate msg ids into the acktags we stored for them, preserving the
+%% order of MsgIds and silently skipping unknown ids. Returns the tags
+%% plus MA with the translated entries erased. NB: lookups are against
+%% the original MA, as in the original implementation.
+msg_ids_to_acktags(MsgIds, MA) ->
+    Translate = fun (MsgId, {Tags, MAAcc}) ->
+                        case dict:find(MsgId, MA) of
+                            {ok, AckTag} -> {[AckTag | Tags],
+                                             dict:erase(MsgId, MAAcc)};
+                            error        -> {Tags, MAAcc}
+                        end
+                end,
+    {Tags1, MA1} = lists:foldl(Translate, {[], MA}, MsgIds),
+    {lists:reverse(Tags1), MA1}.
+
+%% Remember the acktag for MsgId so a later ack/requeue instruction can
+%% be translated back; a no-op when no ack will ever be needed.
+maybe_store_ack(false, _MsgId, _AckTag, State) ->
+ State;
+maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA }) ->
+ State #state { msg_id_ack = dict:store(MsgId, AckTag, MA) }.
+
+%% Set the depth delta (how many messages we are behind the master).
+%% Reaching zero while previously unknown marks this slave as
+%% synchronised in mnesia; a known delta is adjusted via update_delta/2
+%% so its invariants apply.
+set_delta(0, State = #state { depth_delta = undefined }) ->
+ ok = record_synchronised(State#state.q),
+ State #state { depth_delta = 0 };
+set_delta(NewDelta, State = #state { depth_delta = undefined }) ->
+ true = NewDelta > 0, %% assertion
+ State #state { depth_delta = NewDelta };
+set_delta(NewDelta, State = #state { depth_delta = Delta }) ->
+ update_delta(NewDelta - Delta, State).
+
+%% Apply a (non-positive) change to the depth delta. Ignored while the
+%% delta is still unknown; once synced (0) the only legal change is 0.
+%% Otherwise route back through set_delta/2 so hitting zero records us
+%% as synchronised.
+update_delta(_DeltaChange, State = #state { depth_delta = undefined }) ->
+ State;
+update_delta( DeltaChange, State = #state { depth_delta = 0 }) ->
+ 0 = DeltaChange, %% assertion: we cannot become unsync'ed
+ State;
+update_delta( DeltaChange, State = #state { depth_delta = Delta }) ->
+ true = DeltaChange =< 0, %% assertion: we cannot become 'less' sync'ed
+ set_delta(Delta + DeltaChange, State #state { depth_delta = undefined }).
+
+%% Report our current ram duration to the memory monitor and apply the
+%% target duration it hands back to the backing queue.
+update_ram_duration(BQ, BQS) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQ:set_ram_duration_target(DesiredDuration, BQS1).
+
+%% Add ourselves to the queue's sync_slave_pids in mnesia. If the queue
+%% record has gone the transaction is a no-op; otherwise the updated
+%% record is passed on so a master kept alive only for syncing can be
+%% dropped.
+record_synchronised(#amqqueue { name = QName }) ->
+ Self = self(),
+ case rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read({rabbit_queue, QName}) of
+ [] ->
+ ok;
+ [Q1 = #amqqueue { sync_slave_pids = SSPids }] ->
+ Q2 = Q1#amqqueue{sync_slave_pids = [Self | SSPids]},
+ rabbit_mirror_queue_misc:store_updated_slaves(Q2),
+ {ok, Q2}
+ end
+ end) of
+ ok -> ok;
+ {ok, Q} -> rabbit_mirror_queue_misc:maybe_drop_master_after_sync(Q)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_slave_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_child/2]).
+
+-export([init/1]).
+
+%% Our own application's header: use -include, consistently with the
+%% sibling modules (-include_lib is for other applications' headers).
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%% Start the locally-registered slave supervisor.
+start_link() -> supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
+
+%% Start a mirror-queue slave under this supervisor on the given node.
+start_child(Node, Args) -> supervisor2:start_child({?SERVER, Node}, Args).
+
+%% simple_one_for_one: on-demand, temporary rabbit_mirror_queue_slave
+%% workers; at most 10 restarts within 10 seconds.
+init([]) ->
+    {ok, {{simple_one_for_one, 10, 10},
+          [{rabbit_mirror_queue_slave,
+            {rabbit_mirror_queue_slave, start_link, []},
+            temporary, ?MAX_WAIT, worker, [rabbit_mirror_queue_slave]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2012 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_sync).
+
+-include("rabbit.hrl").
+
+-export([master_prepare/4, master_go/7, slave/7]).
+
+-define(SYNC_PROGRESS_INTERVAL, 1000000).
+
+%% There are three processes around, the master, the syncer and the
+%% slave(s). The syncer is an intermediary, linked to the master in
+%% order to make sure we do not mess with the master's credit flow or
+%% set of monitors.
+%%
+%% Interactions
+%% ------------
+%%
+%% '*' indicates repeating messages. All are standard Erlang messages
+%% except sync_start which is sent over GM to flush out any other
+%% messages that we might have sent that way already. (credit) is the
+%% usual credit_flow bump message every so often.
+%%
+%% Master Syncer Slave(s)
+%% sync_mirrors -> || ||
+%% (from channel) || -- (spawns) --> || ||
+%% || --------- sync_start (over GM) -------> ||
+%% || || <--- sync_ready ---- ||
+%% || || (or) ||
+%% || || <--- sync_deny ----- ||
+%% || <--- ready ---- || ||
+%% || <--- next* ---- || || }
+%% || ---- msg* ----> || || } loop
+%% || || ---- sync_msg* ----> || }
+%% || || <--- (credit)* ----- || }
+%% || <--- next ---- || ||
+%% || ---- done ----> || ||
+%% || || -- sync_complete --> ||
+%% || (Dies) ||
+
+-ifdef(use_specs).
+
+-type(log_fun() :: fun ((string(), [any()]) -> 'ok')).
+-type(bq() :: atom()).
+-type(bqs() :: any()).
+-type(ack() :: any()).
+-type(slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
+ bqs()}).
+
+-spec(master_prepare/4 :: (reference(), rabbit_amqqueue:name(),
+ log_fun(), [pid()]) -> pid()).
+-spec(master_go/7 :: (pid(), reference(), log_fun(),
+ rabbit_mirror_queue_master:stats_fun(),
+ rabbit_mirror_queue_master:stats_fun(),
+ bq(), bqs()) ->
+ {'already_synced', bqs()} | {'ok', bqs()} |
+ {'shutdown', any(), bqs()} |
+ {'sync_died', any(), bqs()}).
+-spec(slave/7 :: (non_neg_integer(), reference(), timer:tref(), pid(),
+ bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) ->
+ 'denied' |
+ {'ok' | 'failed', slave_sync_state()} |
+ {'stop', any(), slave_sync_state()}).
+
+-endif.
+
+%% ---------------------------------------------------------------------------
+%% Master
+
+%% Spawn the syncer process, linked to the calling master, and return
+%% its pid. ?store_proc_name tags the new process with the queue name
+%% (for diagnostics).
+master_prepare(Ref, QName, Log, SPids) ->
+ MPid = self(),
+ spawn_link(fun () ->
+ ?store_proc_name(QName),
+ syncer(Ref, Log, MPid, SPids)
+ end).
+
+%% Wait for the syncer to report readiness. A 'normal' EXIT before
+%% 'ready' means every slave declined (all already synced); any other
+%% EXIT is a sync failure. Otherwise start streaming the queue contents.
+master_go(Syncer, Ref, Log, HandleInfo, EmitStats, BQ, BQS) ->
+ Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()},
+ receive
+ {'EXIT', Syncer, normal} -> {already_synced, BQS};
+ {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS};
+ {ready, Syncer} -> EmitStats({syncing, 0}),
+ master_go0(Args, BQ, BQS)
+ end.
+
+%% Fold over the entire backing queue, sending each message to the
+%% syncer; translate the fold's terminal accumulator into the overall
+%% master_go result. The accumulator carries {messages sent, time of
+%% last progress report}.
+master_go0(Args, BQ, BQS) ->
+ case BQ:fold(fun (Msg, MsgProps, Unacked, Acc) ->
+ master_send(Msg, MsgProps, Unacked, Args, Acc)
+ end, {0, erlang:now()}, BQS) of
+ {{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1};
+ {{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1};
+ {_, BQS1} -> master_done(Args, BQS1)
+ end.
+
+%% Send one message to the syncer, paced by its {next, Ref} requests.
+%% Also: emit progress stats roughly every ?SYNC_PROGRESS_INTERVAL
+%% microseconds, service set_maximum_since_use casts, honour
+%% cancel_sync_mirrors calls, and abort on EXIT of parent or syncer.
+master_send(Msg, MsgProps, Unacked,
+ {Syncer, Ref, Log, HandleInfo, EmitStats, Parent}, {I, Last}) ->
+ T = case timer:now_diff(erlang:now(), Last) > ?SYNC_PROGRESS_INTERVAL of
+ true -> EmitStats({syncing, I}),
+ Log("~p messages", [I]),
+ erlang:now();
+ false -> Last
+ end,
+ HandleInfo({syncing, I}),
+ receive
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age)
+ after 0 ->
+ ok
+ end,
+ receive
+ {'$gen_call', From,
+ cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}),
+ gen_server2:reply(From, ok),
+ {stop, cancelled};
+ {next, Ref} -> Syncer ! {msg, Ref, Msg, MsgProps, Unacked},
+ {cont, {I + 1, T}};
+ {'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}};
+ {'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}}
+ end.
+
+%% The fold is finished: wait for the syncer's final {next, Ref} credit
+%% request and answer it with 'done' (or report parent/syncer death).
+master_done({Syncer, Ref, _Log, _HandleInfo, _EmitStats, Parent}, BQS) ->
+ receive
+ {next, Ref} -> stop_syncer(Syncer, {done, Ref}),
+ {ok, BQS};
+ {'EXIT', Parent, Reason} -> {shutdown, Reason, BQS};
+ {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS}
+ end.
+
+%% Detach from the syncer: unlink, hand it its stop message, then flush
+%% any EXIT from it that was already delivered before the unlink.
+stop_syncer(Syncer, StopMsg) ->
+    unlink(Syncer),
+    Syncer ! StopMsg,
+    receive
+        {'EXIT', Syncer, _Reason} -> ok
+    after 0 ->
+        ok
+    end.
+
+%% Master
+%% ---------------------------------------------------------------------------
+%% Syncer
+
+%% Syncer entry point. Collect sync_ready/sync_deny/DOWN from each slave
+%% in SPids order (the comprehension blocks per-slave, preserving order)
+%% before telling the master we are ready. Exiting 'normal' when no
+%% slave accepted signals already_synced to the master.
+syncer(Ref, Log, MPid, SPids) ->
+ [erlang:monitor(process, SPid) || SPid <- SPids],
+ %% We wait for a reply from the slaves so that we know they are in
+ %% a receive block and will thus receive messages we send to them
+ %% *without* those messages ending up in their gen_server2 pqueue.
+ case [SPid || SPid <- SPids,
+ receive
+ {sync_ready, Ref, SPid} -> true;
+ {sync_deny, Ref, SPid} -> false;
+ {'DOWN', _, process, SPid, _} -> false
+ end] of
+ [] -> Log("all slaves already synced", []);
+ SPids1 -> MPid ! {ready, self()},
+ Log("mirrors ~p to sync", [[node(SPid) || SPid <- SPids1]]),
+ syncer_loop(Ref, MPid, SPids1)
+ end.
+
+%% Pull messages from the master one at a time ({next, Ref} acts as our
+%% credit request) and fan each out to all still-live slaves under
+%% credit_flow control.
+syncer_loop(Ref, MPid, SPids) ->
+ MPid ! {next, Ref},
+ receive
+ {msg, Ref, Msg, MsgProps, Unacked} ->
+ SPids1 = wait_for_credit(SPids),
+ [begin
+ credit_flow:send(SPid),
+ SPid ! {sync_msg, Ref, Msg, MsgProps, Unacked}
+ end || SPid <- SPids1],
+ syncer_loop(Ref, MPid, SPids1);
+ {cancel, Ref} ->
+ %% We don't tell the slaves we will die - so when we do
+ %% they interpret that as a failure, which is what we
+ %% want.
+ ok;
+ {done, Ref} ->
+ [SPid ! {sync_complete, Ref} || SPid <- SPids]
+ end.
+
+%% Block while credit_flow says we are blocked, consuming credit bumps;
+%% any slave that dies meanwhile is removed from the recipient list.
+wait_for_credit(SPids) ->
+ case credit_flow:blocked() of
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ wait_for_credit(SPids);
+ {'DOWN', _, process, SPid, _} ->
+ credit_flow:peer_down(SPid),
+ wait_for_credit(lists:delete(SPid, SPids))
+ end;
+ false -> SPids
+ end.
+
+%% Syncer
+%% ---------------------------------------------------------------------------
+%% Slave
+
+%% Slave-side entry point. A depth delta of 0 means we are already in
+%% sync: decline (sync_deny). Otherwise purge our backing queue
+%% (messages and pending acks) and enter the sync receive loop.
+slave(0, Ref, _TRef, Syncer, _BQ, _BQS, _UpdateRamDuration) ->
+ Syncer ! {sync_deny, Ref, self()},
+ denied;
+
+slave(_DD, Ref, TRef, Syncer, BQ, BQS, UpdateRamDuration) ->
+ MRef = erlang:monitor(process, Syncer),
+ Syncer ! {sync_ready, Ref, self()},
+ {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
+ slave_sync_loop({Ref, MRef, Syncer, BQ, UpdateRamDuration,
+ rabbit_misc:get_parent()}, {[], TRef, BQS1}).
+
+%% Slave receive loop during sync: publish each sync_msg into the BQ
+%% (re-recording acktags for unacked messages in MA), while still
+%% servicing credit bumps, file-handle and ram-duration casts. Death of
+%% the syncer forces a purge (see inline comment) and a 'failed' result.
+slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
+ State = {MA, TRef, BQS}) ->
+ receive
+ {'DOWN', MRef, process, Syncer, _Reason} ->
+ %% If the master dies half way we are not in the usual
+ %% half-synced state (with messages nearer the tail of the
+ %% queue); instead we have ones nearer the head. If we then
+ %% sync with a newly promoted master, or even just receive
+ %% messages from it, we have a hole in the middle. So the
+ %% only thing to do here is purge.
+ {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
+ credit_flow:peer_down(Syncer),
+ {failed, {[], TRef, BQS1}};
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ slave_sync_loop(Args, State);
+ {sync_complete, Ref} ->
+ erlang:demonitor(MRef, [flush]),
+ credit_flow:peer_down(Syncer),
+ {ok, State};
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ slave_sync_loop(Args, State);
+ {'$gen_cast', {set_ram_duration_target, Duration}} ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ slave_sync_loop(Args, {MA, TRef, BQS1});
+ update_ram_duration ->
+ {TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
+ slave_sync_loop(Args, {MA, TRef1, BQS1});
+ {sync_msg, Ref, Msg, Props, Unacked} ->
+ credit_flow:ack(Syncer),
+ %% Confirms were already handled on the original publish.
+ Props1 = Props#message_properties{needs_confirming = false},
+ {MA1, BQS1} =
+ case Unacked of
+ false -> {MA, BQ:publish(Msg, Props1, true, none, BQS)};
+ true -> {AckTag, BQS2} = BQ:publish_delivered(
+ Msg, Props1, none, BQS),
+ {[{Msg#basic_message.id, AckTag} | MA], BQS2}
+ end,
+ slave_sync_loop(Args, {MA1, TRef, BQS1});
+ {'EXIT', Parent, Reason} ->
+ {stop, Reason, State};
+ %% If the master throws an exception
+ {'$gen_cast', {gm, {delete_and_terminate, Reason}}} ->
+ BQ:delete_and_terminate(Reason, BQS),
+ {stop, Reason, {[], TRef, undefined}}
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_misc).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([method_record_type/1, polite_pause/0, polite_pause/1]).
+-export([die/1, frame_error/2, amqp_error/4, quit/1,
+ protocol_error/3, protocol_error/4, protocol_error/1]).
+-export([not_found/1, absent/1]).
+-export([type_class/1, assert_args_equivalence/4]).
+-export([dirty_read/1]).
+-export([table_lookup/2, set_table_value/4]).
+-export([r/3, r/2, r_arg/4, rs/1]).
+-export([enable_cover/0, report_cover/0]).
+-export([enable_cover/1, report_cover/1]).
+-export([start_cover/1]).
+-export([confirm_to_sender/2]).
+-export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1,
+ filter_exit_map/2]).
+-export([with_user/2, with_user_and_vhost/3]).
+-export([execute_mnesia_transaction/1]).
+-export([execute_mnesia_transaction/2]).
+-export([execute_mnesia_tx_with_tail/1]).
+-export([ensure_ok/2]).
+-export([tcp_name/3, format_inet_error/1]).
+-export([upmap/2, map_in_order/2]).
+-export([table_filter/3]).
+-export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
+-export([format/2, format_many/1, format_stderr/2]).
+-export([with_local_io/1, local_info_msg/2]).
+-export([unfold/2, ceil/1, queue_fold/3]).
+-export([sort_field_table/1]).
+-export([pid_to_string/1, string_to_pid/1]).
+-export([version_compare/2, version_compare/3]).
+-export([version_minor_equivalent/2]).
+-export([dict_cons/3, orddict_cons/3, gb_trees_cons/3]).
+-export([gb_trees_fold/3, gb_trees_foreach/2]).
+-export([parse_arguments/3]).
+-export([all_module_attributes/1, build_acyclic_graph/3]).
+-export([now_ms/0]).
+-export([const/1]).
+-export([ntoa/1, ntoab/1]).
+-export([is_process_alive/1]).
+-export([pget/2, pget/3, pget_or_die/2, pset/3]).
+-export([format_message_queue/2]).
+-export([append_rpc_all_nodes/4]).
+-export([os_cmd/1]).
+-export([gb_sets_difference/2]).
+-export([version/0, which_applications/0]).
+-export([sequence_error/1]).
+-export([json_encode/1, json_decode/1, json_to_term/1, term_to_json/1]).
+-export([check_expiry/1]).
+-export([base64url/1]).
+-export([interval_operation/4]).
+-export([ensure_timer/4, stop_timer/2]).
+-export([get_parent/0]).
+-export([store_proc_name/1, store_proc_name/2]).
+-export([moving_average/4]).
+
+%% Horrible macro to use in guards
+-define(IS_BENIGN_EXIT(R),
+ R =:= noproc; R =:= noconnection; R =:= nodedown; R =:= normal;
+ R =:= shutdown).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([resource_name/0, thunk/1]).
+
+-type(ok_or_error() :: rabbit_types:ok_or_error(any())).
+-type(thunk(T) :: fun(() -> T)).
+-type(resource_name() :: binary()).
+-type(optdef() :: flag | {option, string()}).
+-type(channel_or_connection_exit()
+ :: rabbit_types:channel_exit() | rabbit_types:connection_exit()).
+-type(digraph_label() :: term()).
+-type(graph_vertex_fun() ::
+ fun ((atom(), [term()]) -> [{digraph:vertex(), digraph_label()}])).
+-type(graph_edge_fun() ::
+ fun ((atom(), [term()]) -> [{digraph:vertex(), digraph:vertex()}])).
+
+-spec(method_record_type/1 :: (rabbit_framing:amqp_method_record())
+ -> rabbit_framing:amqp_method_name()).
+-spec(polite_pause/0 :: () -> 'done').
+-spec(polite_pause/1 :: (non_neg_integer()) -> 'done').
+-spec(die/1 ::
+ (rabbit_framing:amqp_exception()) -> channel_or_connection_exit()).
+
+-spec(quit/1 :: (integer()) -> no_return()).
+
+-spec(frame_error/2 :: (rabbit_framing:amqp_method_name(), binary())
+ -> rabbit_types:connection_exit()).
+-spec(amqp_error/4 ::
+ (rabbit_framing:amqp_exception(), string(), [any()],
+ rabbit_framing:amqp_method_name())
+ -> rabbit_types:amqp_error()).
+-spec(protocol_error/3 :: (rabbit_framing:amqp_exception(), string(), [any()])
+ -> channel_or_connection_exit()).
+-spec(protocol_error/4 ::
+ (rabbit_framing:amqp_exception(), string(), [any()],
+ rabbit_framing:amqp_method_name()) -> channel_or_connection_exit()).
+-spec(protocol_error/1 ::
+ (rabbit_types:amqp_error()) -> channel_or_connection_exit()).
+-spec(not_found/1 :: (rabbit_types:r(atom())) -> rabbit_types:channel_exit()).
+-spec(absent/1 :: (rabbit_types:amqqueue()) -> rabbit_types:channel_exit()).
+-spec(type_class/1 :: (rabbit_framing:amqp_field_type()) -> atom()).
+-spec(assert_args_equivalence/4 :: (rabbit_framing:amqp_table(),
+ rabbit_framing:amqp_table(),
+ rabbit_types:r(any()), [binary()]) ->
+ 'ok' | rabbit_types:connection_exit()).
+-spec(dirty_read/1 ::
+ ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')).
+-spec(table_lookup/2 ::
+ (rabbit_framing:amqp_table(), binary())
+ -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}).
+-spec(set_table_value/4 ::
+ (rabbit_framing:amqp_table(), binary(),
+ rabbit_framing:amqp_field_type(), rabbit_framing:amqp_value())
+ -> rabbit_framing:amqp_table()).
+-spec(r/2 :: (rabbit_types:vhost(), K)
+ -> rabbit_types:r3(rabbit_types:vhost(), K, '_')
+ when is_subtype(K, atom())).
+-spec(r/3 ::
+ (rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name())
+ -> rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
+ when is_subtype(K, atom())).
+-spec(r_arg/4 ::
+ (rabbit_types:vhost() | rabbit_types:r(atom()), K,
+ rabbit_framing:amqp_table(), binary()) ->
+ undefined |
+ rabbit_types:error(
+ {invalid_type, rabbit_framing:amqp_field_type()}) |
+ rabbit_types:r(K) when is_subtype(K, atom())).
+-spec(rs/1 :: (rabbit_types:r(atom())) -> string()).
+-spec(enable_cover/0 :: () -> ok_or_error()).
+-spec(start_cover/1 :: ([{string(), string()} | string()]) -> 'ok').
+-spec(report_cover/0 :: () -> 'ok').
+-spec(enable_cover/1 :: ([file:filename() | atom()]) -> ok_or_error()).
+-spec(report_cover/1 :: ([file:filename() | atom()]) -> 'ok').
+-spec(throw_on_error/2 ::
+ (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A).
+-spec(with_exit_handler/2 :: (thunk(A), thunk(A)) -> A).
+-spec(is_abnormal_exit/1 :: (any()) -> boolean()).
+-spec(filter_exit_map/2 :: (fun ((A) -> B), [A]) -> [B]).
+-spec(with_user/2 :: (rabbit_types:username(), thunk(A)) -> A).
+-spec(with_user_and_vhost/3 ::
+ (rabbit_types:username(), rabbit_types:vhost(), thunk(A))
+ -> A).
+-spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A).
+-spec(execute_mnesia_transaction/2 ::
+ (thunk(A), fun ((A, boolean()) -> B)) -> B).
+-spec(execute_mnesia_tx_with_tail/1 ::
+ (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))).
+-spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok').
+-spec(tcp_name/3 ::
+ (atom(), inet:ip_address(), rabbit_networking:ip_port())
+ -> atom()).
+-spec(format_inet_error/1 :: (atom()) -> string()).
+-spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]).
+-spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]).
+-spec(table_filter/3:: (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'),
+ atom()) -> [A]).
+-spec(dirty_read_all/1 :: (atom()) -> [any()]).
+-spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom())
+ -> 'ok' | 'aborted').
+-spec(dirty_dump_log/1 :: (file:filename()) -> ok_or_error()).
+-spec(format/2 :: (string(), [any()]) -> string()).
+-spec(format_many/1 :: ([{string(), [any()]}]) -> string()).
+-spec(format_stderr/2 :: (string(), [any()]) -> 'ok').
+-spec(with_local_io/1 :: (fun (() -> A)) -> A).
+-spec(local_info_msg/2 :: (string(), [any()]) -> 'ok').
+-spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}).
+-spec(ceil/1 :: (number()) -> integer()).
+-spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue()) -> B).
+-spec(sort_field_table/1 ::
+ (rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table()).
+-spec(pid_to_string/1 :: (pid()) -> string()).
+-spec(string_to_pid/1 :: (string()) -> pid()).
+-spec(version_compare/2 :: (string(), string()) -> 'lt' | 'eq' | 'gt').
+-spec(version_compare/3 ::
+ (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt'))
+ -> boolean()).
+-spec(version_minor_equivalent/2 :: (string(), string()) -> boolean()).
+-spec(dict_cons/3 :: (any(), any(), dict()) -> dict()).
+-spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()).
+-spec(gb_trees_cons/3 :: (any(), any(), gb_tree()) -> gb_tree()).
+-spec(gb_trees_fold/3 :: (fun ((any(), any(), A) -> A), A, gb_tree()) -> A).
+-spec(gb_trees_foreach/2 ::
+ (fun ((any(), any()) -> any()), gb_tree()) -> 'ok').
+-spec(parse_arguments/3 ::
+ ([{atom(), [{string(), optdef()}]} | atom()],
+ [{string(), optdef()}],
+ [string()])
+ -> {'ok', {atom(), [{string(), string()}], [string()]}} |
+ 'no_command').
+-spec(all_module_attributes/1 :: (atom()) -> [{atom(), [term()]}]).
+-spec(build_acyclic_graph/3 ::
+ (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}])
+ -> rabbit_types:ok_or_error2(digraph(),
+ {'vertex', 'duplicate', digraph:vertex()} |
+ {'edge', ({bad_vertex, digraph:vertex()} |
+ {bad_edge, [digraph:vertex()]}),
+ digraph:vertex(), digraph:vertex()})).
+-spec(now_ms/0 :: () -> non_neg_integer()).
+-spec(const/1 :: (A) -> thunk(A)).
+-spec(ntoa/1 :: (inet:ip_address()) -> string()).
+-spec(ntoab/1 :: (inet:ip_address()) -> string()).
+-spec(is_process_alive/1 :: (pid()) -> boolean()).
+-spec(pget/2 :: (term(), [term()]) -> term()).
+-spec(pget/3 :: (term(), [term()], term()) -> term()).
+-spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()).
+-spec(pset/3 :: (term(), term(), [term()]) -> term()).
+-spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()).
+-spec(append_rpc_all_nodes/4 :: ([node()], atom(), atom(), [any()]) -> [any()]).
+-spec(os_cmd/1 :: (string()) -> string()).
+-spec(gb_sets_difference/2 :: (gb_set(), gb_set()) -> gb_set()).
+-spec(version/0 :: () -> string()).
+-spec(which_applications/0 :: () -> [{atom(), string(), string()}]).
+-spec(sequence_error/1 :: ([({'error', any()} | any())])
+ -> {'error', any()} | any()).
+-spec(json_encode/1 :: (any()) -> {'ok', string()} | {'error', any()}).
+-spec(json_decode/1 :: (string()) -> {'ok', any()} | 'error').
+-spec(json_to_term/1 :: (any()) -> any()).
+-spec(term_to_json/1 :: (any()) -> any()).
+-spec(check_expiry/1 :: (integer()) -> rabbit_types:ok_or_error(any())).
+-spec(base64url/1 :: (binary()) -> string()).
+-spec(interval_operation/4 ::
+ ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer())
+ -> {any(), non_neg_integer()}).
+-spec(ensure_timer/4 :: (A, non_neg_integer(), non_neg_integer(), any()) -> A).
+-spec(stop_timer/2 :: (A, non_neg_integer()) -> A).
+-spec(get_parent/0 :: () -> pid()).
+-spec(store_proc_name/2 :: (atom(), rabbit_types:proc_name()) -> ok).
+-spec(store_proc_name/1 :: (rabbit_types:proc_type_and_name()) -> ok).
+-spec(moving_average/4 :: (float(), float(), float(), float() | 'undefined')
+ -> float()).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% method_record_type/1: an AMQP method record's type is its record tag
+%% (the first element of the underlying tuple).
+method_record_type(Record) ->
+ element(1, Record).
+
+%% polite_pause/0: sleep for a default 3 seconds.
+polite_pause() ->
+ polite_pause(3000).
+
+%% polite_pause/1: sleep for N milliseconds via an empty receive with a
+%% timeout; always returns 'done'.
+polite_pause(N) ->
+ receive
+ after N -> done
+ end.
+
+%% die/1: raise a protocol error named Error, using the error term
+%% itself as the explanation.
+die(Error) ->
+ protocol_error(Error, "~w", [Error]).
+
+%% frame_error/2: protocol error for a frame whose binary fields could
+%% not be decoded for MethodName.
+frame_error(MethodName, BinaryFields) ->
+ protocol_error(frame_error, "cannot decode ~w", [BinaryFields], MethodName).
+
+%% amqp_error/4: build (without raising) an #amqp_error{} record with a
+%% formatted explanation string.
+amqp_error(Name, ExplanationFormat, Params, Method) ->
+ Explanation = format(ExplanationFormat, Params),
+ #amqp_error{name = Name, explanation = Explanation, method = Method}.
+
+%% protocol_error/3: as protocol_error/4 with no offending method.
+protocol_error(Name, ExplanationFormat, Params) ->
+ protocol_error(Name, ExplanationFormat, Params, none).
+
+%% protocol_error/4: build an #amqp_error{} and terminate with it.
+protocol_error(Name, ExplanationFormat, Params, Method) ->
+ protocol_error(amqp_error(Name, ExplanationFormat, Params, Method)).
+
+%% protocol_error/1: exit the calling process with the #amqp_error{};
+%% callers up the chain translate this into an AMQP channel/connection
+%% error.
+protocol_error(#amqp_error{} = Error) ->
+ exit(Error).
+
+%% not_found/1: protocol error reporting that resource R does not exist.
+not_found(R) -> protocol_error(not_found, "no ~s", [rs(R)]).
+
+%% absent/1: protocol error for a durable queue whose home node is
+%% currently unreachable.
+absent(#amqqueue{name = QueueName, pid = QPid, durable = true}) ->
+ %% The assertion of durability is mainly there because we mention
+ %% durability in the error message. That way we will hopefully
+ %% notice if at some future point our logic changes s.t. we get
+ %% here with non-durable queues.
+ protocol_error(not_found,
+ "home node '~s' of durable ~s is down or inaccessible",
+ [node(QPid), rs(QueueName)]).
+
+%% type_class/1: collapse AMQP field types into comparison classes so
+%% that e.g. a 'long' 5 and a 'short' 5 compare as equivalent in
+%% assert_args_equivalence1/4; types with no class map to themselves.
+type_class(byte) -> int;
+type_class(short) -> int;
+type_class(signedint) -> int;
+type_class(long) -> int;
+type_class(decimal) -> int;
+type_class(float) -> float;
+type_class(double) -> float;
+type_class(Other) -> Other.
+
+%% assert_args_equivalence/4: check that, for each of Keys, the AMQP
+%% argument tables Orig and New agree (up to type class); raises a
+%% precondition_failed protocol error on the first mismatch.
+assert_args_equivalence(Orig, New, Name, Keys) ->
+ [assert_args_equivalence1(Orig, New, Name, Key) || Key <- Keys],
+ ok.
+
+%% Compare a single key across both tables. Values are equivalent when
+%% identical, or when their types share a class and the values compare
+%% equal (== so e.g. integer forms coerce).
+assert_args_equivalence1(Orig, New, Name, Key) ->
+ {Orig1, New1} = {table_lookup(Orig, Key), table_lookup(New, Key)},
+ FailureFun = fun () ->
+ protocol_error(precondition_failed, "inequivalent arg '~s'"
+ "for ~s: received ~s but current is ~s",
+ [Key, rs(Name), val(New1), val(Orig1)])
+ end,
+ case {Orig1, New1} of
+ {Same, Same} ->
+ ok;
+ {{OrigType, OrigVal}, {NewType, NewVal}} ->
+ case type_class(OrigType) == type_class(NewType) andalso
+ OrigVal == NewVal of
+ true -> ok;
+ false -> FailureFun()
+ end;
+ {_, _} ->
+ FailureFun()
+ end.
+
+%% val/1: render a table_lookup/2 result for an error message;
+%% 'undefined' (key absent) prints as "none".
+val(undefined) ->
+ "none";
+val({Type, Value}) ->
+ ValFmt = case is_binary(Value) of
+ true -> "~s";
+ false -> "~w"
+ end,
+ format("the value '" ++ ValFmt ++ "' of type '~s'", [Value, Type]).
+
+%% Normally we'd call mnesia:dirty_read/1 here, but that is quite
+%% expensive due to general mnesia overheads (figuring out table types
+%% and locations, etc). We get away with bypassing these because we
+%% know that the tables we are looking at here
+%% - are not the schema table
+%% - have a local ram copy
+%% - do not have any indices
+%%
+%% Returns {ok, Record} or {error, not_found}.
+dirty_read({Table, Key}) ->
+ case ets:lookup(Table, Key) of
+ [Result] -> {ok, Result};
+ [] -> {error, not_found}
+ end.
+
+%% table_lookup/2: look Key up in an AMQP field table (a list of
+%% {Name, Type, Value} triples); returns {Type, Value} or 'undefined'.
+%% Uses lists:keyfind/3 instead of the long-deprecated
+%% lists:keysearch/3 (which survives only for backward compatibility
+%% and merely wraps keyfind, tagging the result with 'value').
+table_lookup(Table, Key) ->
+    case lists:keyfind(Key, 1, Table) of
+        {_, TypeBin, ValueBin} -> {TypeBin, ValueBin};
+        false                  -> undefined
+    end.
+
+%% set_table_value/4: insert or replace {Key, Type, Value} in an AMQP
+%% field table, keeping the table sorted as the spec requires.
+set_table_value(Table, Key, Type, Value) ->
+ sort_field_table(
+ lists:keystore(Key, 1, Table, {Key, Type, Value})).
+
+%% r/3: build a #resource{}; the first clause reuses the virtual host of
+%% an existing resource.
+r(#resource{virtual_host = VHostPath}, Kind, Name) ->
+ #resource{virtual_host = VHostPath, kind = Kind, name = Name};
+r(VHostPath, Kind, Name) ->
+ #resource{virtual_host = VHostPath, kind = Kind, name = Name}.
+
+%% r/2: wildcard-named #resource{} ('_' name), for mnesia match specs.
+r(VHostPath, Kind) ->
+ #resource{virtual_host = VHostPath, kind = Kind, name = '_'}.
+
+%% r_arg/4: build a resource whose name comes from a longstr entry in an
+%% AMQP table; 'undefined' if the key is absent, {error, {invalid_type,
+%% T}} if it is present with a non-longstr type.
+r_arg(#resource{virtual_host = VHostPath}, Kind, Table, Key) ->
+ r_arg(VHostPath, Kind, Table, Key);
+r_arg(VHostPath, Kind, Table, Key) ->
+ case table_lookup(Table, Key) of
+ {longstr, NameBin} -> r(VHostPath, Kind, NameBin);
+ undefined -> undefined;
+ {Type, _} -> {error, {invalid_type, Type}}
+ end.
+
+%% rs/1: human-readable rendering of a #resource{} for error messages.
+rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) ->
+ format("~s '~s' in vhost '~s'", [Kind, Name, VHostPath]).
+
+%% enable_cover/0,1: cover-compile the beam files under each Dir/ebin;
+%% stops at (and returns) the first {error, _}.
+enable_cover() -> enable_cover(["."]).
+
+enable_cover(Dirs) ->
+ lists:foldl(fun (Dir, ok) ->
+ case cover:compile_beam_directory(
+ filename:join(lists:concat([Dir]),"ebin")) of
+ {error, _} = Err -> Err;
+ _ -> ok
+ end;
+ (_Dir, Err) ->
+ Err
+ end, ok, Dirs).
+
+%% start_cover/1: start cover on the rabbit nodes named by NodesS
+%% (short names; rabbit_nodes:make/1 turns them into node atoms).
+start_cover(NodesS) ->
+ {ok, _} = cover:start([rabbit_nodes:make(N) || N <- NodesS]),
+ ok.
+
+%% report_cover/0,1: write per-module HTML coverage reports plus a
+%% summary.txt under each Dir/cover.
+report_cover() -> report_cover(["."]).
+
+report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok.
+
+report_cover1(Root) ->
+ Dir = filename:join(Root, "cover"),
+ %% ensure_dir only creates parents, hence the dummy "junk" leaf
+ ok = filelib:ensure_dir(filename:join(Dir, "junk")),
+ lists:foreach(fun (F) -> file:delete(F) end,
+ filelib:wildcard(filename:join(Dir, "*.html"))),
+ {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]),
+ {CT, NCT} =
+ lists:foldl(
+ fun (M,{CovTot, NotCovTot}) ->
+ {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module),
+ ok = report_coverage_percentage(SummaryFile,
+ Cov, NotCov, M),
+ {ok,_} = cover:analyze_to_file(
+ M,
+ filename:join(Dir, atom_to_list(M) ++ ".html"),
+ [html]),
+ {CovTot+Cov, NotCovTot+NotCov}
+ end,
+ {0, 0},
+ lists:sort(cover:modules())),
+ ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'),
+ ok = file:close(SummaryFile),
+ ok.
+
+%% Write one "percent module" line; an empty module counts as 100%.
+report_coverage_percentage(File, Cov, NotCov, Mod) ->
+ io:fwrite(File, "~6.2f ~p~n",
+ [if
+ Cov+NotCov > 0 -> 100.0*Cov/(Cov+NotCov);
+ true -> 100.0
+ end,
+ Mod]).
+
+%% confirm_to_sender/1: asynchronously deliver publisher confirms for
+%% MsgSeqNos to the channel process Pid.
+confirm_to_sender(Pid, MsgSeqNos) ->
+ gen_server2:cast(Pid, {confirm, MsgSeqNos, self()}).
+
+%% @doc Halts the emulator returning the given status code to the os.
+%% On Windows this function will block indefinitely so as to give the io
+%% subsystem time to flush stdout completely.
+quit(Status) ->
+ case os:type() of
+ {unix, _} -> halt(Status);
+ {win32, _} -> init:stop(Status),
+ receive
+ after infinity -> ok
+ end
+ end.
+
+%% throw_on_error/2: run Thunk; unwrap {ok, Res}, pass through any
+%% other result, and convert {error, Reason} into throw({E, Reason}).
+throw_on_error(E, Thunk) ->
+ case Thunk() of
+ {error, Reason} -> throw({E, Reason});
+ {ok, Res} -> Res;
+ Res -> Res
+ end.
+
+%% with_exit_handler/2: run Thunk, invoking Handler instead if it exits
+%% with a "benign" reason (?IS_BENIGN_EXIT, e.g. peer shutdown), either
+%% bare or wrapped as from a gen_server call.
+with_exit_handler(Handler, Thunk) ->
+ try
+ Thunk()
+ catch
+ exit:{R, _} when ?IS_BENIGN_EXIT(R) -> Handler();
+ exit:{{R, _}, _} when ?IS_BENIGN_EXIT(R) -> Handler()
+ end.
+
+%% is_abnormal_exit/1: true unless the exit reason (possibly wrapped in
+%% a tuple) is one of the benign reasons.
+is_abnormal_exit(R) when ?IS_BENIGN_EXIT(R) -> false;
+is_abnormal_exit({R, _}) when ?IS_BENIGN_EXIT(R) -> false;
+is_abnormal_exit(_) -> true.
+
+%% filter_exit_map/2: map F over L, silently dropping elements for which
+%% F exits benignly; a unique ref marks (and is then filtered from) the
+%% failed slots.
+filter_exit_map(F, L) ->
+ Ref = make_ref(),
+ lists:filter(fun (R) -> R =/= Ref end,
+ [with_exit_handler(
+ fun () -> Ref end,
+ fun () -> F(I) end) || I <- L]).
+
+
+%% with_user/2: wrap Thunk in a fun that, inside an mnesia transaction,
+%% aborts with {no_such_user, Username} unless the user exists.
+with_user(Username, Thunk) ->
+ fun () ->
+ case mnesia:read({rabbit_user, Username}) of
+ [] ->
+ mnesia:abort({no_such_user, Username});
+ [_U] ->
+ Thunk()
+ end
+ end.
+
+%% with_user_and_vhost/3: guard Thunk behind both user and vhost
+%% existence checks.
+with_user_and_vhost(Username, VHostPath, Thunk) ->
+ with_user(Username, rabbit_vhost:with(VHostPath, Thunk)).
+
+%% execute_mnesia_transaction/1: run TxFun in a sync mnesia transaction
+%% on a worker_pool worker; {aborted, R} is re-raised as throw({error,
+%% R}). If the transaction wrote to the disk log we additionally wait
+%% for mnesia_sync before returning.
+execute_mnesia_transaction(TxFun) ->
+ %% Making this a sync_transaction allows us to use dirty_read
+ %% elsewhere and get a consistent result even when that read
+ %% executes on a different node.
+ case worker_pool:submit(
+ fun () ->
+ case mnesia:is_transaction() of
+ false -> DiskLogBefore = mnesia_dumper:get_log_writes(),
+ Res = mnesia:sync_transaction(TxFun),
+ DiskLogAfter = mnesia_dumper:get_log_writes(),
+ case DiskLogAfter == DiskLogBefore of
+ true -> Res;
+ false -> {sync, Res}
+ end;
+ true -> mnesia:sync_transaction(TxFun)
+ end
+ end) of
+ {sync, {atomic, Result}} -> mnesia_sync:sync(), Result;
+ {sync, {aborted, Reason}} -> throw({error, Reason});
+ {atomic, Result} -> Result;
+ {aborted, Reason} -> throw({error, Reason})
+ end.
+
+%% Like execute_mnesia_transaction/1 with additional Pre- and Post-
+%% commit function
+%% (PrePostCommitFun(Result, true) runs inside the tx just before
+%% commit; PrePostCommitFun(Result, false) runs after it).
+execute_mnesia_transaction(TxFun, PrePostCommitFun) ->
+ case mnesia:is_transaction() of
+ true -> throw(unexpected_transaction);
+ false -> ok
+ end,
+ PrePostCommitFun(execute_mnesia_transaction(
+ fun () ->
+ Result = TxFun(),
+ PrePostCommitFun(Result, true),
+ Result
+ end), false).
+
+%% Like execute_mnesia_transaction/2, but TxFun is expected to return a
+%% TailFun which gets called (only) immediately after the tx commit
+execute_mnesia_tx_with_tail(TxFun) ->
+ case mnesia:is_transaction() of
+ true -> execute_mnesia_transaction(TxFun);
+ false -> TailFun = execute_mnesia_transaction(TxFun),
+ TailFun()
+ end.
+
+%% ensure_ok/2: pass 'ok' through; convert {error, R} into
+%% throw({error, {ErrorTag, R}}).
+ensure_ok(ok, _) -> ok;
+ensure_ok({error, Reason}, ErrorTag) -> throw({error, {ErrorTag, Reason}}).
+
+%% tcp_name/3: registered-name atom of the form 'Prefix_IP:Port' for a
+%% TCP listener process.
+tcp_name(Prefix, IPAddress, Port)
+ when is_atom(Prefix) andalso is_number(Port) ->
+ list_to_atom(
+ format("~w_~s:~w", [Prefix, inet_parse:ntoa(IPAddress), Port])).
+
+%% format_inet_error/1: render an inet error as "reason (description)".
+format_inet_error(E) -> format("~w (~s)", [E, format_inet_error0(E)]).
+
+%% Friendly descriptions for reasons inet:format_error/1 does not cover.
+format_inet_error0(address) -> "cannot connect to host/port";
+format_inet_error0(timeout) -> "timed out";
+format_inet_error0(Error) -> inet:format_error(Error).
+
+%% This is a modified version of Luke Gorrie's pmap -
+%% http://lukego.livejournal.com/6753.html - that doesn't care about
+%% the order in which results are received.
+%%
+%% WARNING: This is is deliberately lightweight rather than robust -- if F
+%% throws, upmap will hang forever, so make sure F doesn't throw!
+%%
+%% Note: results therefore arrive in completion order, not list order.
+upmap(F, L) ->
+ Parent = self(),
+ Ref = make_ref(),
+ [receive {Ref, Result} -> Result end
+ || _ <- [spawn(fun () -> Parent ! {Ref, F(X)} end) || X <- L]].
+
+%% map_in_order/2: map F over L strictly left-to-right (foldl +
+%% reverse), guaranteeing evaluation order unlike lists:map/2's
+%% unspecified order.
+map_in_order(F, L) ->
+ lists:reverse(
+ lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)).
+
+%% Apply a pre-post-commit function to all entries in a table that
+%% satisfy a predicate, and return those entries.
+%%
+%% We ignore entries that have been modified or removed.
+%% (Each entry is re-checked for existence inside its own transaction.)
+table_filter(Pred, PrePostCommitFun, TableName) ->
+ lists:foldl(
+ fun (E, Acc) ->
+ case execute_mnesia_transaction(
+ fun () -> mnesia:match_object(TableName, E, read) =/= []
+ andalso Pred(E) end,
+ fun (false, _Tx) -> false;
+ (true, Tx) -> PrePostCommitFun(E, Tx), true
+ end) of
+ false -> Acc;
+ true -> [E | Acc]
+ end
+ end, [], dirty_read_all(TableName)).
+
+%% dirty_read_all/1: all records of TableName via a dirty select.
+dirty_read_all(TableName) ->
+ mnesia:dirty_select(TableName, [{'$1',[],['$1']}]).
+
+%% dirty_foreach_key/2: apply F to every key of TableName using dirty
+%% key iteration; returns 'ok', or 'aborted' if iteration fails (e.g.
+%% the table vanished mid-walk).
+dirty_foreach_key(F, TableName) ->
+ dirty_foreach_key1(F, TableName, mnesia:dirty_first(TableName)).
+
+dirty_foreach_key1(_F, _TableName, '$end_of_table') ->
+ ok;
+dirty_foreach_key1(F, TableName, K) ->
+ %% fetch the next key before applying F, in case F deletes K
+ case catch mnesia:dirty_next(TableName, K) of
+ {'EXIT', _} ->
+ aborted;
+ NextKey ->
+ F(K),
+ dirty_foreach_key1(F, TableName, NextKey)
+ end.
+
+%% dirty_dump_log/1: debugging aid - print every chunk of a disk_log
+%% file to stdout.
+dirty_dump_log(FileName) ->
+ {ok, LH} = disk_log:open([{name, dirty_dump_log},
+ {mode, read_only},
+ {file, FileName}]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, start)),
+ disk_log:close(LH).
+
+dirty_dump_log1(_LH, eof) ->
+ io:format("Done.~n");
+dirty_dump_log1(LH, {K, Terms}) ->
+ io:format("Chunk: ~p~n", [Terms]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, K));
+dirty_dump_log1(LH, {K, Terms, BadBytes}) ->
+ io:format("Bad Chunk, ~p: ~p~n", [BadBytes, Terms]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, K)).
+
+%% format/2: io_lib:format flattened to a plain string.
+format(Fmt, Args) -> lists:flatten(io_lib:format(Fmt, Args)).
+
+%% format_many/2: format a list of {Fmt, Args} pairs, one per line.
+format_many(List) ->
+ lists:flatten([io_lib:format(F ++ "~n", A) || {F, A} <- List]).
+
+%% format_stderr/2: best-effort write to stderr (via a raw fd port on
+%% unix; falls back to stdout on Windows - see below).
+format_stderr(Fmt, Args) ->
+ case os:type() of
+ {unix, _} ->
+ Port = open_port({fd, 0, 2}, [out]),
+ port_command(Port, io_lib:format(Fmt, Args)),
+ port_close(Port);
+ {win32, _} ->
+ %% stderr on Windows is buffered and I can't figure out a
+ %% way to trigger a fflush(stderr) in Erlang. So rather
+ %% than risk losing output we write to stdout instead,
+ %% which appears to be unbuffered.
+ io:format(Fmt, Args)
+ end,
+ ok.
+
+%% Execute Fun using the IO system of the local node (i.e. the node on
+%% which the code is executing). The group leader is restored even if
+%% Fun throws.
+with_local_io(Fun) ->
+ GL = group_leader(),
+ group_leader(whereis(user), self()),
+ try
+ Fun()
+ after
+ group_leader(GL, self())
+ end.
+
+%% Log an info message on the local node using the standard logger.
+%% Use this if rabbit isn't running and the call didn't originate on
+%% the local node (e.g. rabbitmqctl calls).
+local_info_msg(Format, Args) ->
+ with_local_io(fun () -> error_logger:info_msg(Format, Args) end).
+
+%% unfold/2,3: repeatedly apply Fun to a seed; each {true, E, NewSeed}
+%% prepends E to the accumulator, and 'false' stops, returning
+%% {ElementsNewestFirst, FinalSeed}.
+unfold(Fun, Init) ->
+ unfold(Fun, [], Init).
+
+unfold(Fun, Acc, Init) ->
+ case Fun(Init) of
+ {true, E, I} -> unfold(Fun, [E|Acc], I);
+ false -> {Acc, Init}
+ end.
+
+%% ceil/1: smallest integer >= N (works for negative N too, since
+%% trunc/1 rounds towards zero).
+ceil(N) ->
+ T = trunc(N),
+ case N == T of
+ true -> T;
+ false -> 1 + T
+ end.
+
+%% queue_fold/3: left fold over a queue in front-to-back order.
+queue_fold(Fun, Init, Q) ->
+ case queue:out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
+ end.
+
+%% Sorts a list of AMQP table fields as per the AMQP spec
+sort_field_table(Arguments) ->
+ lists:keysort(1, Arguments).
+
+%% This provides a string representation of a pid that is the same
+%% regardless of what node we are running on. The representation also
+%% permits easy identification of the pid's node.
+%% NOTE(review): the match below assumes the external term format's
+%% PID_EXT (tag 103) with an 8-bit creation; newer OTP releases emit
+%% NEW_PID_EXT with a 32-bit creation - confirm against the targeted
+%% OTP version.
+pid_to_string(Pid) when is_pid(Pid) ->
+ %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
+ %% 8.7)
+ <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,Cre:8>>
+ = term_to_binary(Pid),
+ Node = binary_to_term(<<131,100,NodeLen:16,NodeBin:NodeLen/binary>>),
+ format("<~s.~B.~B.~B>", [Node, Cre, Id, Ser]).
+
+%% inverse of above
+%% NOTE(review): list_to_atom/1 on the node-name component means this
+%% must never be fed untrusted input (atoms are not GC'd).
+string_to_pid(Str) ->
+ Err = {error, {invalid_pid_syntax, Str}},
+ %% The \ before the trailing $ is only there to keep emacs
+ %% font-lock from getting confused.
+ case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$",
+ [{capture,all_but_first,list}]) of
+ {match, [NodeStr, CreStr, IdStr, SerStr]} ->
+ <<131,NodeEnc/binary>> = term_to_binary(list_to_atom(NodeStr)),
+ [Cre, Id, Ser] = lists:map(fun list_to_integer/1,
+ [CreStr, IdStr, SerStr]),
+ binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,Cre:8>>);
+ nomatch ->
+ throw(Err)
+ end.
+
+%% version_compare/3: compare dotted version strings against the
+%% relation lte/gte/lt/gt/eq; lte and gte are derived from the /2
+%% three-way result, anything else is an exact match on that result.
+version_compare(A, B, lte) ->
+ case version_compare(A, B) of
+ eq -> true;
+ lt -> true;
+ gt -> false
+ end;
+version_compare(A, B, gte) ->
+ case version_compare(A, B) of
+ eq -> true;
+ gt -> true;
+ lt -> false
+ end;
+version_compare(A, B, Result) ->
+ Result =:= version_compare(A, B).
+
+%% version_compare/2: three-way comparison (lt/eq/gt) of dotted numeric
+%% version strings. Clause order is significant: trailing ".0"
+%% components are treated as absent (so "2.3" == "2.3.0"), while any
+%% other extra component makes the longer version greater.
+version_compare(A, A) ->
+ eq;
+version_compare([], [$0 | B]) ->
+ version_compare([], dropdot(B));
+version_compare([], _) ->
+ lt; %% 2.3 < 2.3.1
+version_compare([$0 | A], []) ->
+ version_compare(dropdot(A), []);
+version_compare(_, []) ->
+ gt; %% 2.3.1 > 2.3
+version_compare(A, B) ->
+ {AStr, ATl} = lists:splitwith(fun (X) -> X =/= $. end, A),
+ {BStr, BTl} = lists:splitwith(fun (X) -> X =/= $. end, B),
+ ANum = list_to_integer(AStr),
+ BNum = list_to_integer(BStr),
+ if ANum =:= BNum -> version_compare(dropdot(ATl), dropdot(BTl));
+ ANum < BNum -> lt;
+ ANum > BNum -> gt
+ end.
+
+%% a.b.c and a.b.d match, but a.b.c and a.d.e don't. If
+%% versions do not match that pattern, just compare them.
+version_minor_equivalent(A, B) ->
+ {ok, RE} = re:compile("^(\\d+\\.\\d+)(\\.\\d+)\$"),
+ Opts = [{capture, all_but_first, list}],
+ case {re:run(A, RE, Opts), re:run(B, RE, Opts)} of
+ {{match, [A1|_]}, {match, [B1|_]}} -> A1 =:= B1;
+ _ -> A =:= B
+ end.
+
+%% dropdot/1: strip leading dots from a version-string remainder.
+dropdot(A) -> lists:dropwhile(fun (X) -> X =:= $. end, A).
+
+%% dict_cons/3: prepend Value to the list stored under Key in a dict,
+%% creating [Value] if the key is new.
+dict_cons(Key, Value, Dict) ->
+ dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
+
+%% orddict_cons/3: as dict_cons/3 but for orddicts.
+orddict_cons(Key, Value, Dict) ->
+ orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
+
+%% gb_trees_cons/3: as dict_cons/3 but for gb_trees.
+gb_trees_cons(Key, Value, Tree) ->
+ case gb_trees:lookup(Key, Tree) of
+ {value, Values} -> gb_trees:update(Key, [Value | Values], Tree);
+ none -> gb_trees:insert(Key, [Value], Tree)
+ end.
+
+%% gb_trees_fold/3: fold Fun(Key, Val, Acc) over a gb_tree in key order.
+gb_trees_fold(Fun, Acc, Tree) ->
+ gb_trees_fold1(Fun, Acc, gb_trees:next(gb_trees:iterator(Tree))).
+
+gb_trees_fold1(_Fun, Acc, none) ->
+ Acc;
+gb_trees_fold1(Fun, Acc, {Key, Val, It}) ->
+ gb_trees_fold1(Fun, Fun(Key, Val, Acc), gb_trees:next(It)).
+
+%% gb_trees_foreach/2: apply Fun(Key, Val) to every entry, for side
+%% effects only; returns 'ok'.
+gb_trees_foreach(Fun, Tree) ->
+ gb_trees_fold(fun (Key, Val, Acc) -> Fun(Key, Val), Acc end, ok, Tree).
+
+%% Takes:
+%% * A list of [{atom(), [{string(), optdef()}]} | atom()], where the atom()s
+%% are the accepted commands and the optional [string()] is the list of
+%% accepted options for that command
+%% * A list [{string(), optdef()}] of options valid for all commands
+%% * The list of arguments given by the user
+%%
+%% Returns either {ok, {atom(), [{string(), string()}], [string()]} which are
+%% respectively the command, the key-value pairs of the options and the leftover
+%% arguments; or no_command if no command could be parsed.
+parse_arguments(Commands, GlobalDefs, As) ->
+ lists:foldl(maybe_process_opts(GlobalDefs, As), no_command, Commands).
+
+%% Fold fun for parse_arguments/3: tries each candidate command in turn
+%% until one parses; a bare atom command gets an empty option list.
+maybe_process_opts(GDefs, As) ->
+ fun({C, Os}, no_command) ->
+ process_opts(atom_to_list(C), dict:from_list(GDefs ++ Os), As);
+ (C, no_command) ->
+ (maybe_process_opts(GDefs, As))({C, []}, no_command);
+ (_, {ok, Res}) ->
+ {ok, Res}
+ end.
+
+%% Seed the option dict with defaults (flags start false, options start
+%% at their declared default) and start the scan.
+process_opts(C, Defs, As0) ->
+ KVs0 = dict:map(fun (_, flag) -> false;
+ (_, {option, V}) -> V
+ end, Defs),
+ process_opts(Defs, C, As0, not_found, KVs0, []).
+
+%% Consume flags/options until you find the correct command. If there are no
+%% arguments or the first argument is not the command we're expecting, fail.
+%% Arguments to this are: definitions, cmd we're looking for, args we
+%% haven't parsed, whether we have found the cmd, options we've found,
+%% plain args we've found.
+process_opts(_Defs, C, [], found, KVs, Outs) ->
+ {ok, {list_to_atom(C), dict:to_list(KVs), lists:reverse(Outs)}};
+process_opts(_Defs, _C, [], not_found, _, _) ->
+ no_command;
+process_opts(Defs, C, [A | As], Found, KVs, Outs) ->
+ OptType = case dict:find(A, Defs) of
+ error -> none;
+ {ok, flag} -> flag;
+ {ok, {option, _}} -> option
+ end,
+ case {OptType, C, Found} of
+ {flag, _, _} -> process_opts(
+ Defs, C, As, Found, dict:store(A, true, KVs),
+ Outs);
+ {option, _, _} -> case As of
+ [] -> no_command;
+ [V | As1] -> process_opts(
+ Defs, C, As1, Found,
+ dict:store(A, V, KVs), Outs)
+ end;
+ {none, A, _} -> process_opts(Defs, C, As, found, KVs, Outs);
+ {none, _, found} -> process_opts(Defs, C, As, found, KVs, [A | Outs]);
+ {none, _, _} -> no_command
+ end.
+
+%% now_ms/0: wall-clock milliseconds since the Unix epoch.
+%% os:timestamp/0 replaces the deprecated erlang:now/0: it returns the
+%% same {MegaSecs, Secs, MicroSecs} shape but without now/0's global
+%% lock and uniqueness guarantee, which this caller does not need.
+now_ms() ->
+    timer:now_diff(os:timestamp(), {0,0,0}) div 1000.
+
+%% module_attributes/1: return Module's attribute list (loading the
+%% module as a side effect). A missing module produces a warning and
+%% []; any other failure is re-raised as an exit.
+module_attributes(Module) ->
+    case catch Module:module_info(attributes) of
+        {'EXIT', {undef, [{Module, module_info, _} | _]}} ->
+            %% pre-R15 stack frames: {M, F, Args}
+            module_not_found(Module);
+        {'EXIT', {undef, [{Module, module_info, _, _} | _]}} ->
+            %% R15+ stack frames carry a fourth (location) element, so
+            %% the 3-tuple clause above never matches on modern OTP and
+            %% a missing module used to escape as an exit.
+            module_not_found(Module);
+        {'EXIT', Reason} ->
+            exit(Reason);
+        V ->
+            V
+    end.
+
+%% Log the missing-module warning and return the empty attribute list.
+module_not_found(Module) ->
+    io:format("WARNING: module ~p not found, so not scanned for boot steps.~n",
+              [Module]),
+    [].
+
+%% all_module_attributes/1: scan every module of every loaded
+%% application and collect {Module, AttributeValues} pairs for modules
+%% declaring the attribute Name.
+all_module_attributes(Name) ->
+ Modules =
+ lists:usort(
+ lists:append(
+ [Modules || {App, _, _} <- application:loaded_applications(),
+ {ok, Modules} <- [application:get_key(App, modules)]])),
+ lists:foldl(
+ fun (Module, Acc) ->
+ case lists:append([Atts || {N, Atts} <- module_attributes(Module),
+ N =:= Name]) of
+ [] -> Acc;
+ Atts -> [{Module, Atts} | Acc]
+ end
+ end, [], Modules).
+
+
+%% build_acyclic_graph/3: construct an acyclic digraph from
+%% {Module, Attributes} pairs; VertexFun/EdgeFun map each pair to
+%% vertices/edges. Returns {ok, Digraph}, or {error, Reason} (the
+%% digraph is deleted) on a duplicate vertex or a bad/cyclic edge.
+build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
+ G = digraph:new([acyclic]),
+ try
+ [case digraph:vertex(G, Vertex) of
+ false -> digraph:add_vertex(G, Vertex, Label);
+ _ -> ok = throw({graph_error, {vertex, duplicate, Vertex}})
+ end || {Module, Atts} <- Graph,
+ {Vertex, Label} <- VertexFun(Module, Atts)],
+ [case digraph:add_edge(G, From, To) of
+ {error, E} -> throw({graph_error, {edge, E, From, To}});
+ _ -> ok
+ end || {Module, Atts} <- Graph,
+ {From, To} <- EdgeFun(Module, Atts)],
+ {ok, G}
+ catch {graph_error, Reason} ->
+ true = digraph:delete(G),
+ {error, Reason}
+ end.
+
+%% const/1: constant thunk - a nullary fun that always returns X.
+const(X) -> fun () -> X end.
+
+%% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see
+%% when IPv6 is enabled but not used (i.e. 99% of the time).
+ntoa({0,0,0,0,0,16#ffff,AB,CD}) ->
+ inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256});
+ntoa(IP) ->
+ inet_parse:ntoa(IP).
+
+%% ntoab/1: as ntoa/1 but brackets IPv6 addresses ("[::1]") so the
+%% result can be suffixed with ":Port".
+ntoab(IP) ->
+ Str = ntoa(IP),
+ case string:str(Str, ":") of
+ 0 -> Str;
+ _ -> "[" ++ Str ++ "]"
+ end.
+
+%% We try to avoid reconnecting to down nodes here; this is used in a
+%% loop in rabbit_amqqueue:on_node_down/1 and any delays we incur
+%% would be bad news.
+%% Returns false (rather than an rpc error) if the node is unknown.
+is_process_alive(Pid) ->
+ Node = node(Pid),
+ lists:member(Node, [node() | nodes()]) andalso
+ rpc:call(Node, erlang, is_process_alive, [Pid]) =:= true.
+
+%% pget/2,3: proplist lookup, without and with a default.
+pget(K, P) -> proplists:get_value(K, P).
+pget(K, P, D) -> proplists:get_value(K, P, D).
+
+%% pget_or_die/2: proplist lookup that exits with
+%% {error, key_missing, K} when the key is absent.
+pget_or_die(K, P) ->
+ case proplists:get_value(K, P) of
+ undefined -> exit({error, key_missing, K});
+ V -> V
+ end.
+
+%% pset/3: set Key to Value in a proplist, replacing any existing entry
+%% (new entry is prepended).
+pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].
+
+%% format_message_queue/2: sys/format_status helper for priority_queue
+%% mailboxes: short queues are listed verbatim, long ones (>100) are
+%% summarised as counts per {Priority, MessageShape}.
+format_message_queue(_Opt, MQ) ->
+ Len = priority_queue:len(MQ),
+ {Len,
+ case Len > 100 of
+ false -> priority_queue:to_list(MQ);
+ true -> {summary,
+ orddict:to_list(
+ lists:foldl(
+ fun ({P, V}, Counts) ->
+ orddict:update_counter(
+ {P, format_message_queue_entry(V)}, 1, Counts)
+ end, orddict:new(), priority_queue:to_list(MQ)))}
+ end}.
+
+%% Collapse a message to its "shape": atoms survive, tuples are mapped
+%% recursively, everything else becomes '_'.
+format_message_queue_entry(V) when is_atom(V) ->
+ V;
+format_message_queue_entry(V) when is_tuple(V) ->
+ list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]);
+format_message_queue_entry(_V) ->
+ '_'.
+
+%% append_rpc_all_nodes/4: multicall M:F(A) on Nodes and append the
+%% successful results, silently dropping badrpc failures.
+append_rpc_all_nodes(Nodes, M, F, A) ->
+ {ResL, _} = rpc:multicall(Nodes, M, F, A),
+ lists:append([case Res of
+ {badrpc, _} -> [];
+ _ -> Res
+ end || Res <- ResL]).
+
+%% os_cmd/1: run a shell command, throwing {command_not_found, Exec} on
+%% unix when the executable is missing rather than returning sh's
+%% "not found" output. NOTE(review): os:cmd/1 goes through a shell, so
+%% Command must never contain untrusted input.
+os_cmd(Command) ->
+ case os:type() of
+ {win32, _} ->
+ %% Clink workaround; see
+ %% http://code.google.com/p/clink/issues/detail?id=141
+ os:cmd(" " ++ Command);
+ _ ->
+ %% Don't just return "/bin/sh: <cmd>: not found" if not found
+ Exec = hd(string:tokens(Command, " ")),
+ case os:find_executable(Exec) of
+ false -> throw({command_not_found, Exec});
+ _ -> os:cmd(Command)
+ end
+ end.
+
+%% gb_sets_difference/2: S1 \ S2 for gb_sets.
+gb_sets_difference(S1, S2) ->
+ gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).
+
+%% version/0: the running rabbit application's version string.
+version() ->
+ {ok, VSN} = application:get_key(rabbit, vsn),
+ VSN.
+
+%% application:which_applications(infinity) is dangerous, since it can
+%% cause deadlocks on shutdown. So we have to use a timeout variant,
+%% but w/o creating spurious timeout errors.
+which_applications() ->
+ try
+ application:which_applications()
+ catch
+ exit:{timeout, _} -> []
+ end.
+
+%% sequence_error/1: first {error, _} in the list, or its last element
+%% if there is none.
+sequence_error([T]) -> T;
+sequence_error([{error, _} = Error | _]) -> Error;
+sequence_error([_ | Rest]) -> sequence_error(Rest).
+
+%% json_encode/1: mochijson2 encode wrapped as {ok, _} | {error, _}.
+json_encode(Term) ->
+ try
+ {ok, mochijson2:encode(Term)}
+ catch
+ exit:{json_encode, E} ->
+ {error, E}
+ end.
+
+%% json_decode/1: mochijson2 decode wrapped as {ok, _} | 'error'.
+json_decode(Term) ->
+ try
+ {ok, mochijson2:decode(Term)}
+ catch
+ %% Sadly `mochijson2:decode/1' does not offer a nice way to catch
+ %% decoding errors...
+ error:_ -> error
+ end.
+
+%% json_to_term/1: convert mochijson2's {struct, Props} representation
+%% into plain proplists/lists; scalars pass through unchanged.
+json_to_term({struct, L}) ->
+ [{K, json_to_term(V)} || {K, V} <- L];
+json_to_term(L) when is_list(L) ->
+ [json_to_term(I) || I <- L];
+json_to_term(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
+ V =:= true orelse V =:= false ->
+ V.
+
+%% This has the flaw that empty lists will never be JSON objects, so use with
+%% care.
+%% (Inverse of json_to_term/1: proplists become {struct, _} objects.)
+term_to_json([{_, _}|_] = L) ->
+ {struct, [{K, term_to_json(V)} || {K, V} <- L]};
+term_to_json(L) when is_list(L) ->
+ [term_to_json(I) || I <- L];
+term_to_json(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
+ V =:= true orelse V =:= false ->
+ V.
+
+%% check_expiry/1: validate a message/queue TTL: must be within
+%% [0, ?MAX_EXPIRY_TIMER].
+check_expiry(N) when N > ?MAX_EXPIRY_TIMER -> {error, {value_too_big, N}};
+check_expiry(N) when N < 0 -> {error, {value_negative, N}};
+check_expiry(_N) -> ok.
+
+%% base64url/1: base64-encode In using the URL-safe alphabet
+%% ('+'->'-', '/'->'_') with padding stripped.
+base64url(In) ->
+ lists:reverse(lists:foldl(fun ($\+, Acc) -> [$\- | Acc];
+ ($\/, Acc) -> [$\_ | Acc];
+ ($\=, Acc) -> Acc;
+ (Chr, Acc) -> [Chr | Acc]
+ end, [], base64:encode_to_string(In))).
+
+%% Ideally, you'd want Fun to run every IdealInterval. but you don't
+%% want it to take more than MaxRatio of IdealInterval. So if it takes
+%% more then you want to run it less often. So we time how long it
+%% takes to run, and then suggest how long you should wait before
+%% running it again. Times are in millis.
+%% (No {false, true} clause: LastInterval never drops below
+%% IdealInterval, so that combination cannot arise.)
+interval_operation({M, F, A}, MaxRatio, IdealInterval, LastInterval) ->
+ {Micros, Res} = timer:tc(M, F, A),
+ {Res, case {Micros > 1000 * (MaxRatio * IdealInterval),
+ Micros > 1000 * (MaxRatio * LastInterval)} of
+ {true, true} -> round(LastInterval * 1.5);
+ {true, false} -> LastInterval;
+ {false, false} -> lists:max([IdealInterval,
+ round(LastInterval / 1.5)])
+ end}.
+
+%% ensure_timer/4: if the timer slot Idx of the State tuple is
+%% 'undefined', start a send_after timer for Msg and store its ref
+%% there; otherwise leave State untouched.
+ensure_timer(State, Idx, After, Msg) ->
+ case element(Idx, State) of
+ undefined -> TRef = erlang:send_after(After, self(), Msg),
+ setelement(Idx, State, TRef);
+ _ -> State
+ end.
+
+%% stop_timer/2: cancel the timer in slot Idx and clear the slot; if
+%% cancel_timer returns false the message already fired, so the slot is
+%% left as-is for the handler to deal with.
+stop_timer(State, Idx) ->
+ case element(Idx, State) of
+ undefined -> State;
+ TRef -> case erlang:cancel_timer(TRef) of
+ false -> State;
+ _ -> setelement(Idx, State, undefined)
+ end
+ end.
+
+%% store_proc_name/1,2: record {Type, Name} in the process dictionary
+%% for diagnostics. NOTE(review): the spec says this returns 'ok', but
+%% put/2 actually returns the previous value - confirm callers ignore
+%% the result.
+store_proc_name(Type, ProcName) -> store_proc_name({Type, ProcName}).
+store_proc_name(TypeProcName) -> put(process_name, TypeProcName).
+
+%% moving_average/4: exponentially-weighted moving average of Next into
+%% Current with the given sampling interval and half-life; the first
+%% sample (Current = 'undefined') is taken as-is.
+moving_average(_Time, _HalfLife, Next, undefined) ->
+ Next;
+%% We want the Weight to decrease as Time goes up (since Weight is the
+%% weight for the current sample, not the new one), so that the moving
+%% average decays at the same speed regardless of how long the time is
+%% between samplings. So we want Weight = math:exp(Something), where
+%% Something turns out to be negative.
+%%
+%% We want to determine Something here in terms of the Time taken
+%% since the last measurement, and a HalfLife. So we want Weight =
+%% math:exp(Time * Constant / HalfLife). What should Constant be? We
+%% want Weight to be 0.5 when Time = HalfLife.
+%%
+%% Plug those numbers in and you get 0.5 = math:exp(Constant). Take
+%% the log of each side and you get math:log(0.5) = Constant.
+moving_average(Time, HalfLife, Next, Current) ->
+ Weight = math:exp(Time * math:log(0.5) / HalfLife),
+ Next * (1 - Weight) + Current * Weight.
+
+%% -------------------------------------------------------------------------
+%% Begin copypasta from gen_server2.erl
+%% (kept byte-for-byte identical to the original on purpose, so the two
+%% copies can be diffed; do not "improve" in place)
+
+%% get_parent/0: the proc_lib parent of this process, resolving a
+%% registered name to a pid; exits if not started via proc_lib.
+get_parent() ->
+ case get('$ancestors') of
+ [Parent | _] when is_pid (Parent) -> Parent;
+ [Parent | _] when is_atom(Parent) -> name_to_pid(Parent);
+ _ -> exit(process_was_not_started_by_proc_lib)
+ end.
+
+%% name_to_pid/1: locally then globally registered name to pid; exits
+%% if unregistered. (NOTE: 'could_not_find_registerd_name' [sic] is the
+%% historical atom; renaming it would change runtime behavior.)
+name_to_pid(Name) ->
+ case whereis(Name) of
+ undefined -> case whereis_name(Name) of
+ undefined -> exit(could_not_find_registerd_name);
+ Pid -> Pid
+ end;
+ Pid -> Pid
+ end.
+
+%% whereis_name/1: global-registry lookup reading the global_names ets
+%% table directly; local pids are additionally liveness-checked.
+whereis_name(Name) ->
+ case ets:lookup(global_names, Name) of
+ [{_Name, Pid, _Method, _RPid, _Ref}] ->
+ if node(Pid) == node() -> case erlang:is_process_alive(Pid) of
+ true -> Pid;
+ false -> undefined
+ end;
+ true -> Pid
+ end;
+ [] -> undefined
+ end.
+
+%% End copypasta from gen_server2.erl
+%% -------------------------------------------------------------------------
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_mnesia).
+
+-export([init/0,
+ join_cluster/2,
+ reset/0,
+ force_reset/0,
+ update_cluster_nodes/1,
+ change_cluster_node_type/1,
+ forget_cluster_node/2,
+
+ status/0,
+ is_clustered/0,
+ cluster_nodes/1,
+ node_type/0,
+ dir/0,
+ cluster_status_from_mnesia/0,
+
+ init_db_unchecked/2,
+ copy_db/1,
+ check_cluster_consistency/0,
+ ensure_mnesia_dir/0,
+
+ on_node_up/1,
+ on_node_down/1
+ ]).
+
+%% Used internally in rpc calls
+-export([node_info/0, remove_node_if_mnesia_running/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([node_type/0, cluster_status/0]).
+
+-type(node_type() :: disc | ram).
+-type(cluster_status() :: {[node()], [node()], [node()]}).
+
+%% Main interface
+-spec(init/0 :: () -> 'ok').
+-spec(join_cluster/2 :: (node(), node_type())
+ -> 'ok' | {'ok', 'already_member'}).
+-spec(reset/0 :: () -> 'ok').
+-spec(force_reset/0 :: () -> 'ok').
+-spec(update_cluster_nodes/1 :: (node()) -> 'ok').
+-spec(change_cluster_node_type/1 :: (node_type()) -> 'ok').
+-spec(forget_cluster_node/2 :: (node(), boolean()) -> 'ok').
+
+%% Various queries to get the status of the db
+-spec(status/0 :: () -> [{'nodes', [{node_type(), [node()]}]} |
+ {'running_nodes', [node()]} |
+ {'partitions', [{node(), [node()]}]}]).
+-spec(is_clustered/0 :: () -> boolean()).
+-spec(cluster_nodes/1 :: ('all' | 'disc' | 'ram' | 'running') -> [node()]).
+-spec(node_type/0 :: () -> node_type()).
+-spec(dir/0 :: () -> file:filename()).
+-spec(cluster_status_from_mnesia/0 :: () -> rabbit_types:ok_or_error2(
+ cluster_status(), any())).
+
+%% Operations on the db and utils, mainly used in `rabbit_upgrade' and `rabbit'
+-spec(init_db_unchecked/2 :: ([node()], node_type()) -> 'ok').
+-spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())).
+-spec(check_cluster_consistency/0 :: () -> 'ok').
+-spec(ensure_mnesia_dir/0 :: () -> 'ok').
+
+%% Hooks used in `rabbit_node_monitor'
+-spec(on_node_up/1 :: (node()) -> 'ok').
+-spec(on_node_down/1 :: (node()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Main interface
+%%----------------------------------------------------------------------------
+
+%% init/0: bring up this node's mnesia database: a virgin node clusters
+%% (or starts standalone) per configuration, an existing node re-joins
+%% its recorded cluster (ram nodes re-check consistency).
+init() ->
+ ensure_mnesia_running(),
+ ensure_mnesia_dir(),
+ case is_virgin_node() of
+ true -> init_from_config();
+ false -> NodeType = node_type(),
+ init_db_and_upgrade(cluster_nodes(all), NodeType,
+ NodeType =:= ram)
+ end,
+ %% We intuitively expect the global name server to be synced when
+ %% Mnesia is up. In fact that's not guaranteed to be the case -
+ %% let's make it so.
+ ok = global:sync(),
+ ok.
+
+%% init_from_config/0: first boot - read {Nodes, NodeType} from the
+%% cluster_nodes app env (converting the legacy bare-list form), try to
+%% cluster with one of the configured nodes, and fall back to a
+%% standalone disc node if none can be reached.
+init_from_config() ->
+ {TryNodes, NodeType} =
+ case application:get_env(rabbit, cluster_nodes) of
+ {ok, Nodes} when is_list(Nodes) ->
+ Config = {Nodes -- [node()], case lists:member(node(), Nodes) of
+ true -> disc;
+ false -> ram
+ end},
+ error_logger:warning_msg(
+ "Converting legacy 'cluster_nodes' configuration~n ~w~n"
+ "to~n ~w.~n~n"
+ "Please update the configuration to the new format "
+ "{Nodes, NodeType}, where Nodes contains the nodes that the "
+ "node will try to cluster with, and NodeType is either "
+ "'disc' or 'ram'~n", [Nodes, Config]),
+ Config;
+ {ok, Config} ->
+ Config
+ end,
+ case find_good_node(nodes_excl_me(TryNodes)) of
+ {ok, Node} ->
+ rabbit_log:info("Node '~p' selected for clustering from "
+ "configuration~n", [Node]),
+ {ok, {_, DiscNodes, _}} = discover_cluster0(Node),
+ init_db_and_upgrade(DiscNodes, NodeType, true),
+ rabbit_node_monitor:notify_joined_cluster();
+ none ->
+ rabbit_log:warning("Could not find any suitable node amongst the "
+ "ones provided in the configuration: ~p~n",
+ [TryNodes]),
+ init_db_and_upgrade([node()], disc, false)
+ end.
+
+%% Make the node join a cluster. The node will be reset automatically
+%% before we actually cluster it. The nodes provided will be used to
+%% find out about the nodes in the cluster.
+%%
+%% This function will fail if:
+%%
+%% * The node is currently the only disc node of its cluster
+%% * We can't connect to any of the nodes provided
+%% * The node is currently already clustered with the cluster of the nodes
+%% provided
+%%
+%% Note that we make no attempt to verify that the nodes provided are
+%% all in the same cluster, we simply pick the first online node and
+%% we cluster to its cluster.
+join_cluster(DiscoveryNode, NodeType) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ case is_only_clustered_disc_node() of
+ true -> e(clustering_only_disc_node);
+ false -> ok
+ end,
+ {ClusterNodes, _, _} = discover_cluster([DiscoveryNode]),
+ case me_in_nodes(ClusterNodes) of
+ false ->
+ %% reset the node. this simplifies things and it will be needed in
+ %% this case - we're joining a new cluster with new nodes which
+ %% are not in sync with the current node. It also lifts the burden
+ %% of resetting the node from the user.
+ reset_gracefully(),
+
+ %% Join the cluster
+ rabbit_misc:local_info_msg("Clustering with ~p as ~p node~n",
+ [ClusterNodes, NodeType]),
+ ok = init_db_with_mnesia(ClusterNodes, NodeType, true, true),
+ rabbit_node_monitor:notify_joined_cluster(),
+ ok;
+ true ->
+ rabbit_misc:local_info_msg("Already member of cluster: ~p~n",
+ [ClusterNodes]),
+ {ok, already_member}
+ end.
+
%% Return the node to its virgin state, where it is not a member of
%% any cluster, has no cluster configuration, no local database, and
%% no persisted messages.
reset() ->
    ensure_mnesia_not_running(),
    rabbit_misc:local_info_msg("Resetting Rabbit~n", []),
    reset_gracefully().

%% Like reset/0, but skips the graceful cluster teardown and just
%% wipes local state; for when the cluster cannot be contacted.
force_reset() ->
    ensure_mnesia_not_running(),
    rabbit_misc:local_info_msg("Resetting Rabbit forcefully~n", []),
    wipe().
+
%% Leave the cluster politely (notifying the remaining nodes), then
%% delete the local schema and wipe all local state. Throws if this is
%% the cluster's only disc node, since resetting it would strand the
%% ram nodes.
reset_gracefully() ->
    AllNodes = cluster_nodes(all),
    %% Reconnecting so that we will get an up-to-date list of nodes.
    %% We don't need to check for consistency because we are
    %% resetting. Force=true here so that reset still works when
    %% clustered with a node which is down.
    init_db_with_mnesia(AllNodes, node_type(), false, false),
    case is_only_clustered_disc_node() of
        true  -> e(resetting_only_disc_node);
        false -> ok
    end,
    leave_cluster(),
    rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema),
    wipe().
+
%% Destroy all local state: disconnect from every cluster node, delete
%% everything under the mnesia directory, and reset the persisted
%% cluster status files.
wipe() ->
    %% We need to make sure that we don't end up in a distributed
    %% Erlang system with nodes while not being in an Mnesia cluster
    %% with them. We don't handle that well.
    [erlang:disconnect_node(N) || N <- cluster_nodes(all)],
    %% remove persisted messages and any other garbage we find
    ok = rabbit_file:recursive_delete(filelib:wildcard(dir() ++ "/*")),
    ok = rabbit_node_monitor:reset_cluster_status(),
    ok.
+
%% Convert this node between disc and ram membership by resetting it
%% and re-joining its current cluster as the requested Type. Requires
%% the node to be clustered and at least one other cluster node to be
%% online.
change_cluster_node_type(Type) ->
    ensure_mnesia_not_running(),
    ensure_mnesia_dir(),
    case is_clustered() of
        false -> e(not_clustered);
        true  -> ok
    end,
    {_, _, RunningNodes} = discover_cluster(cluster_nodes(all)),
    %% We might still be marked as running by a remote node since the
    %% information of us going down might not have propagated yet.
    Node = case RunningNodes -- [node()] of
               []        -> e(no_online_cluster_nodes);
               [Node0|_] -> Node0
           end,
    ok = reset(),
    ok = join_cluster(Node, Type).
+
%% Refresh this node's view of cluster membership from DiscoveryNode,
%% for when the cluster changed while this node was offline. Fails if
%% the remote cluster does not consider us a member.
update_cluster_nodes(DiscoveryNode) ->
    ensure_mnesia_not_running(),
    ensure_mnesia_dir(),
    Status = {AllNodes, _, _} = discover_cluster([DiscoveryNode]),
    case me_in_nodes(AllNodes) of
        true ->
            %% As in `check_consistency/0', we can safely delete the
            %% schema here, since it'll be replicated from the other
            %% nodes
            mnesia:delete_schema([node()]),
            rabbit_node_monitor:write_cluster_status(Status),
            rabbit_misc:local_info_msg("Updating cluster nodes from ~p~n",
                                       [DiscoveryNode]),
            init_db_with_mnesia(AllNodes, node_type(), true, true);
        false ->
            e(inconsistent_cluster)
    end,
    ok.
+
%% We proceed like this: try to remove the node locally. If the node
%% is offline, we remove the node if:
%%   * This node is a disc node
%%   * All other nodes are offline
%%   * This node was, to the best of our knowledge (see comment below)
%%     the last or second-to-last node, after the one we're removing,
%%     to go down
forget_cluster_node(Node, RemoveWhenOffline) ->
    case lists:member(Node, cluster_nodes(all)) of
        true  -> ok;
        false -> e(not_a_cluster_node)
    end,
    %% Dispatch on {--offline flag given, mnesia currently running}.
    case {RemoveWhenOffline, is_running()} of
        {true,  false} -> remove_node_offline_node(Node);
        {true,   true} -> e(online_node_offline_flag);
        {false, false} -> e(offline_node_no_offline_flag);
        {false,  true} -> rabbit_misc:local_info_msg(
                            "Removing node ~p from cluster~n", [Node]),
                          case remove_node_if_mnesia_running(Node) of
                              ok               -> ok;
                              {error, _} = Err -> throw(Err)
                          end
    end.
+
%% Remove Node from the cluster while this node is itself offline.
%% Only permitted when this node is a disc node and no other cluster
%% node is running; mnesia is started temporarily to do the removal.
remove_node_offline_node(Node) ->
    %% Here `mnesia:system_info(running_db_nodes)' will RPC, but that's what we
    %% want - we need to know the running nodes *now*.  If the current node is a
    %% RAM node it will return bogus results, but we don't care since we only do
    %% this operation from disc nodes.
    case {mnesia:system_info(running_db_nodes) -- [Node], node_type()} of
        {[], disc} ->
            start_mnesia(),
            try
                %% What we want to do here is replace the last node to
                %% go down with the current node.  The way we do this
                %% is by force loading the table, and making sure that
                %% they are loaded.
                rabbit_table:force_load(),
                rabbit_table:wait_for_replicated(),
                forget_cluster_node(Node, false),
                force_load_next_boot()
            after
                %% Always stop mnesia again, even if removal failed.
                stop_mnesia()
            end;
        {_, _} ->
            e(removing_node_from_offline_node)
    end.
+
+
+%%----------------------------------------------------------------------------
+%% Queries
+%%----------------------------------------------------------------------------
+
%% Build the status proplist reported by the CLI: the per-type node
%% lists (empty types omitted) plus, while mnesia is running, the
%% running nodes, cluster name and observed partitions.
status() ->
    TypedNodes = [{Type, Members} ||
                     {Type, Members} <- [{disc, cluster_nodes(disc)},
                                         {ram,  cluster_nodes(ram)}],
                     Members =/= []],
    Dynamic =
        case is_running() of
            false -> [];
            true  -> Running = cluster_nodes(running),
                     [{running_nodes, Running},
                      {cluster_name,  rabbit_nodes:cluster_name()},
                      {partitions,    mnesia_partitions(Running)}]
        end,
    [{nodes, TypedNodes} | Dynamic].
+
%% Collect partition reports from the given nodes, keeping only the
%% non-empty ones (and silently dropping malformed replies, as the
%% original comprehension pattern did).
mnesia_partitions(Nodes) ->
    Replies = rabbit_node_monitor:partitions(Nodes),
    lists:filter(fun ({_Node, Partitions}) -> Partitions =/= [];
                     (_)                   -> false
                 end, Replies).
+
+is_running() -> mnesia:system_info(is_running) =:= yes.
+
%% True iff this node is part of a cluster with at least one other
%% node, i.e. the membership list is neither empty nor just ourselves.
is_clustered() ->
    case cluster_nodes(all) of
        []        -> false;
        [Node]    -> Node =/= node();
        [_, _ | _] -> true
    end.

%% Convenience alias: fetch one of the node lists from the status.
cluster_nodes(WhichNodes) -> cluster_status(WhichNodes).
+
%% This function is the actual source of information, since it gets
%% the data from mnesia. Obviously it'll work only when mnesia is
%% running. Returns {ok, {AllNodes, DiscNodes, RunningNodes}} or
%% {error, mnesia_not_running | tables_not_present}.
cluster_status_from_mnesia() ->
    case is_running() of
        false ->
            {error, mnesia_not_running};
        true ->
            %% If the tables are not present, it means that
            %% `init_db/3' hasn't been run yet. In other words, either
            %% we are a virgin node or a restarted RAM node. In both
            %% cases we're not interested in what mnesia has to say.
            NodeType = case mnesia:system_info(use_dir) of
                           true  -> disc;
                           false -> ram
                       end,
            case rabbit_table:is_present() of
                true  -> AllNodes = mnesia:system_info(db_nodes),
                         DiscCopies = mnesia:table_info(schema, disc_copies),
                         %% A disc node must count itself among the
                         %% disc nodes even before its schema copy is
                         %% recorded.
                         DiscNodes = case NodeType of
                                         disc -> nodes_incl_me(DiscCopies);
                                         ram  -> DiscCopies
                                     end,
                         %% `mnesia:system_info(running_db_nodes)' is safe since
                         %% we know that mnesia is running
                         RunningNodes = mnesia:system_info(running_db_nodes),
                         {ok, {AllNodes, DiscNodes, RunningNodes}};
                false -> {error, tables_not_present}
            end
    end.
+
%% Answer membership queries ('status' for the whole triple, or one of
%% all/disc/ram/running for a node list). Live mnesia data is
%% preferred; when mnesia is down we fall back to the persisted
%% cluster status file.
cluster_status(WhichNodes) ->
    Nodes = {AllNodes, DiscNodes, RunningNodes} =
        case cluster_status_from_mnesia() of
            {ok, LiveStatus} ->
                LiveStatus;
            {error, _Reason} ->
                {All, Disc, Running} =
                    rabbit_node_monitor:read_cluster_status(),
                %% The status file records the view from when this
                %% node was last online, but we know for sure that it
                %% is offline right now, so drop it from the running
                %% list.
                {All, Disc, nodes_excl_me(Running)}
        end,
    case WhichNodes of
        status  -> Nodes;
        all     -> AllNodes;
        disc    -> DiscNodes;
        ram     -> AllNodes -- DiscNodes;
        running -> RunningNodes
    end.
+
%% Triple used by peers to vet us: OTP release, RabbitMQ version, and
%% our mnesia-derived cluster status (or an error if mnesia is down).
node_info() ->
    {erlang:system_info(otp_release), rabbit_misc:version(),
     cluster_status_from_mnesia()}.

%% Our node type according to the persisted cluster status file. A
%% node with no recorded disc nodes is standalone and therefore disc.
node_type() ->
    {_AllNodes, DiscNodes, _RunningNodes} =
        rabbit_node_monitor:read_cluster_status(),
    case DiscNodes =:= [] orelse me_in_nodes(DiscNodes) of
        true  -> disc;
        false -> ram
    end.

%% The mnesia directory for this node.
dir() -> mnesia:system_info(directory).
+
+%%----------------------------------------------------------------------------
+%% Operations on the db
+%%----------------------------------------------------------------------------
+
%% Adds the provided nodes to the mnesia cluster, creating a new
%% schema if there is the need to and catching up if there are other
%% nodes in the cluster already. It also updates the cluster status
%% file.
init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
    %% Nodes is the subset of ClusterNodes mnesia actually connected
    %% to (excluding ourselves).
    Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
    %% Note that we use `system_info' here and not the cluster status
    %% since when we start rabbit for the first time the cluster
    %% status will say we are a disc node but the tables won't be
    %% present yet.
    WasDiscNode = mnesia:system_info(use_dir),
    case {Nodes, WasDiscNode, NodeType} of
        {[], _, ram} ->
            %% Standalone ram node, we don't want that
            throw({error, cannot_create_standalone_ram_node});
        {[], false, disc} ->
            %% RAM -> disc, starting from scratch
            ok = create_schema();
        {[], true, disc} ->
            %% First disc node up
            maybe_force_load(),
            ok;
        {[_ | _], _, _} ->
            %% Subsequent node in cluster, catch up
            maybe_force_load(),
            ok = rabbit_table:wait_for_replicated(),
            ok = rabbit_table:create_local_copy(NodeType)
    end,
    ensure_schema_integrity(),
    rabbit_node_monitor:update_cluster_status(),
    ok.
+
%% init_db/3 without verifying that the other nodes are reachable.
init_db_unchecked(ClusterNodes, NodeType) ->
    init_db(ClusterNodes, NodeType, false).

%% init_db/3 followed by any pending local schema upgrades; also
%% repairs the schema if the recorded version is unavailable.
init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes) ->
    ok = init_db(ClusterNodes, NodeType, CheckOtherNodes),
    ok = case rabbit_upgrade:maybe_upgrade_local() of
             ok                    -> ok;
             starting_from_scratch -> rabbit_version:record_desired();
             version_not_available -> schema_ok_or_move()
         end,
    %% `maybe_upgrade_local' restarts mnesia, so ram nodes will forget
    %% about the cluster
    case NodeType of
        ram  -> start_mnesia(),
                change_extra_db_nodes(ClusterNodes, false);
        disc -> ok
    end,
    %% ...and all nodes will need to wait for tables
    rabbit_table:wait_for_replicated(),
    ok.
+
%% Run init_db_and_upgrade/3 inside a temporary mnesia session:
%% mnesia is started (optionally after a cluster consistency check)
%% and always stopped again afterwards, even on failure.
init_db_with_mnesia(ClusterNodes, NodeType,
                    CheckOtherNodes, CheckConsistency) ->
    start_mnesia(CheckConsistency),
    try
        init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes)
    after
        stop_mnesia()
    end.
+
%% Create the mnesia directory (and missing parents) if absent;
%% throws if it cannot be created.
ensure_mnesia_dir() ->
    %% filelib:ensure_dir/1 only creates up to the final slash, hence
    %% the trailing "/" so the directory itself is created.
    MnesiaDir = dir() ++ "/",
    case filelib:ensure_dir(MnesiaDir) of
        ok ->
            ok;
        {error, Reason} ->
            throw({error, {cannot_create_mnesia_dir, MnesiaDir, Reason}})
    end.
+
%% Block until mnesia is running: polls (via wait_for/1's one-second
%% sleep) while it is 'starting', and throws if it is stopped or
%% stopping.
ensure_mnesia_running() ->
    case mnesia:system_info(is_running) of
        yes ->
            ok;
        starting ->
            wait_for(mnesia_running),
            ensure_mnesia_running();
        Reason when Reason =:= no; Reason =:= stopping ->
            throw({error, mnesia_not_running})
    end.

%% Mirror image of ensure_mnesia_running/0: block while mnesia is
%% 'stopping', and throw if it is up or coming up.
ensure_mnesia_not_running() ->
    case mnesia:system_info(is_running) of
        no ->
            ok;
        stopping ->
            wait_for(mnesia_not_running),
            ensure_mnesia_not_running();
        Reason when Reason =:= yes; Reason =:= starting ->
            throw({error, mnesia_unexpectedly_running})
    end.
+
%% Throw unless the mnesia schema passes rabbit_table's integrity
%% check.
ensure_schema_integrity() ->
    Result = rabbit_table:check_schema_integrity(),
    case Result of
        ok ->
            ok;
        {error, Reason} ->
            throw({error, {schema_integrity_check_failed, Reason}})
    end.
+
%% Copy the whole mnesia directory to Destination (mnesia must be
%% stopped first); returns rabbit_file's result.
copy_db(Destination) ->
    ok = ensure_mnesia_not_running(),
    rabbit_file:recursive_copy(dir(), Destination).
+
%% Path of the marker file requesting a force-load on next start.
force_load_filename() ->
    filename:join(dir(), "force_load").

%% Drop the (empty) marker file so the next boot force-loads tables.
force_load_next_boot() ->
    rabbit_file:write_file(force_load_filename(), <<"">>).

%% If the marker file is present, force-load the tables now and
%% consume the marker.
maybe_force_load() ->
    case rabbit_file:is_file(force_load_filename()) of
        true  -> rabbit_table:force_load(),
                 rabbit_file:delete(force_load_filename());
        false -> ok
    end.
+
%% This does not guarantee us much, but it avoids some situations that
%% will definitely end up badly. Queries every other known cluster
%% node until one answers consistently; finding none online is fine,
%% but an inconsistent answer is fatal.
check_cluster_consistency() ->
    %% We want to find 0 or 1 consistent nodes.
    %% The fold stops probing (keeps the {ok, _}) as soon as any node
    %% has answered consistently.
    case lists:foldl(
           fun (Node,  {error, _})    -> check_cluster_consistency(Node);
               (_Node, {ok, Status})  -> {ok, Status}
           end, {error, not_found}, nodes_excl_me(cluster_nodes(all)))
    of
        {ok, Status = {RemoteAllNodes, _, _}} ->
            case ordsets:is_subset(ordsets:from_list(cluster_nodes(all)),
                                   ordsets:from_list(RemoteAllNodes)) of
                true  ->
                    ok;
                false ->
                    %% We delete the schema here since we think we are
                    %% clustered with nodes that are no longer in the
                    %% cluster and there is no other way to remove
                    %% them from our schema. On the other hand, we are
                    %% sure that there is another online node that we
                    %% can use to sync the tables with. There is a
                    %% race here: if between this check and the
                    %% `init_db' invocation the cluster gets
                    %% disbanded, we're left with a node with no
                    %% mnesia data that will try to connect to offline
                    %% nodes.
                    mnesia:delete_schema([node()])
            end,
            rabbit_node_monitor:write_cluster_status(Status);
        {error, not_found} ->
            ok;
        {error, _} = E ->
            throw(E)
    end.
+
%% Check consistency against a single node: unreachable nodes and
%% nodes whose own status is in error count as 'not found' rather
%% than inconsistent.
check_cluster_consistency(Node) ->
    case rpc:call(Node, rabbit_mnesia, node_info, []) of
        {badrpc, _Reason} ->
            {error, not_found};
        {_OTP, _Rabbit, {error, _}} ->
            {error, not_found};
        {OTP, Rabbit, {ok, Status}} ->
            case check_consistency(OTP, Rabbit, Node, Status) of
                {error, _} = E -> E;
                {ok, Res}      -> {ok, Res}
            end;
        {_OTP, Rabbit, _Hash, _Status} ->
            %% delegate hash checking implies version mismatch
            version_error("Rabbit", rabbit_misc:version(), Rabbit)
    end.
+
+%%--------------------------------------------------------------------
+%% Hooks for `rabbit_node_monitor'
+%%--------------------------------------------------------------------
+
%% rabbit_node_monitor hook: log when Node coming up makes it the
%% first (i.e. only) running disc node again.
on_node_up(Node) ->
    case running_disc_nodes() of
        [Node] -> rabbit_log:info("cluster contains disc nodes again~n");
        _      -> ok
    end.

%% rabbit_node_monitor hook: log when the last running disc node has
%% gone down.
on_node_down(_Node) ->
    case running_disc_nodes() of
        [] -> rabbit_log:info("only running disc node went down~n");
        _  -> ok
    end.
+
%% The disc nodes that are currently running: intersection of the
%% disc and running node lists from the cluster status.
running_disc_nodes() ->
    {_AllNodes, DiscNodes, RunningNodes} = cluster_status(status),
    DiscSet    = ordsets:from_list(DiscNodes),
    RunningSet = ordsets:from_list(RunningNodes),
    ordsets:to_list(ordsets:intersection(DiscSet, RunningSet)).
+
+%%--------------------------------------------------------------------
+%% Internal helpers
+%%--------------------------------------------------------------------
+
%% Ask each candidate node in turn for its cluster status, returning
%% the first successful {AllNodes, DiscNodes, RunningNodes} triple;
%% throws the last failure if none answers.
discover_cluster(Nodes) ->
    case lists:foldl(fun (_,    {ok, Res}) -> {ok, Res};
                         (Node, _)         -> discover_cluster0(Node)
                     end, {error, no_nodes_provided}, Nodes) of
        {ok, Res}        -> Res;
        {error, E}       -> throw({error, E});
        {badrpc, Reason} -> throw({badrpc_multi, Reason, Nodes})
    end.

%% Query one node's cluster status over RPC; clustering with oneself
%% is rejected outright.
discover_cluster0(Node) when Node == node() ->
    {error, cannot_cluster_node_with_itself};
discover_cluster0(Node) ->
    rpc:call(Node, rabbit_mnesia, cluster_status_from_mnesia, []).
+
%% If the schema fails its integrity check, move the damaged database
%% aside (see move_db/0) and recreate a fresh schema; otherwise do
%% nothing.
schema_ok_or_move() ->
    case rabbit_table:check_schema_integrity() of
        ok ->
            ok;
        {error, Reason} ->
            %% NB: we cannot use rabbit_log here since it may not have been
            %% started yet
            error_logger:warning_msg("schema integrity check failed: ~p~n"
                                     "moving database to backup location "
                                     "and recreating schema from scratch~n",
                                     [Reason]),
            ok = move_db(),
            ok = create_schema()
    end.
+
%% We only care about disc nodes since ram nodes are supposed to catch
%% up only. Creates a fresh local schema plus RabbitMQ's tables, and
%% records the current schema version (mnesia must be restarted
%% around schema creation).
create_schema() ->
    stop_mnesia(),
    rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
    start_mnesia(),
    ok = rabbit_table:create(),
    ensure_schema_integrity(),
    ok = rabbit_version:record_desired().
+
%% Move the whole mnesia directory aside to a timestamped backup
%% directory (<dir>_YYYYMMDDHHMMSS), recreate an empty directory and
%% restart mnesia. Throws if the rename fails.
move_db() ->
    stop_mnesia(),
    MnesiaDir = filename:dirname(dir() ++ "/"),
    {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(),
    BackupDir = rabbit_misc:format(
                  "~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w",
                  [MnesiaDir, Year, Month, Day, Hour, Minute, Second]),
    case file:rename(MnesiaDir, BackupDir) of
        ok ->
            %% NB: we cannot use rabbit_log here since it may not have
            %% been started yet
            error_logger:warning_msg("moved database from ~s to ~s~n",
                                     [MnesiaDir, BackupDir]),
            ok;
        {error, Reason} -> throw({error, {cannot_backup_mnesia,
                                          MnesiaDir, BackupDir, Reason}})
    end,
    ensure_mnesia_dir(),
    start_mnesia(),
    ok.
+
%% Remove Node from the cluster via mnesia (which must be running
%% locally); notifies the remaining nodes and cleans up the removed
%% node's durable queues.
remove_node_if_mnesia_running(Node) ->
    case is_running() of
        false ->
            {error, mnesia_not_running};
        true ->
            %% Deleting the schema copy of the node will result in
            %% the node being removed from the cluster, with that
            %% change being propagated to all nodes
            case mnesia:del_table_copy(schema, Node) of
                {atomic, ok} ->
                    rabbit_amqqueue:forget_all_durable(Node),
                    rabbit_node_monitor:notify_left_cluster(Node),
                    ok;
                {aborted, Reason} ->
                    {error, {failed_to_remove_node, Node, Reason}}
            end
    end.
+
%% Ask the other cluster nodes to remove us: succeeds as soon as any
%% one of them does it (standalone nodes trivially succeed); throws
%% if every other node is unreachable or has mnesia down.
leave_cluster() ->
    case nodes_excl_me(cluster_nodes(all)) of
        []       -> ok;
        AllNodes -> case lists:any(fun leave_cluster/1, AllNodes) of
                        true  -> ok;
                        false -> e(no_running_cluster_nodes)
                    end
    end.

%% Ask one node to remove us; 'false' means "try another node"
%% (mnesia down or node unreachable), any other error is fatal.
leave_cluster(Node) ->
    case rpc:call(Node,
                  rabbit_mnesia, remove_node_if_mnesia_running, [node()]) of
        ok                          -> true;
        {error, mnesia_not_running} -> false;
        {error, Reason}             -> throw({error, Reason});
        {badrpc, nodedown}          -> false
    end.
+
%% Log that we are waiting on Condition, then sleep for one second
%% before the caller retries (used by the ensure_mnesia_* polling
%% loops).
wait_for(Condition) ->
    error_logger:info_msg("Waiting for ~p...~n", [Condition]),
    timer:sleep(1000).
+
%% Start mnesia and wait until it is fully up, optionally verifying
%% cluster consistency with the other nodes first.
start_mnesia(CheckConsistency) ->
    case CheckConsistency of
        true  -> check_cluster_consistency();
        false -> ok
    end,
    rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
    ensure_mnesia_running().

%% Default variant: consistency check enabled.
start_mnesia() ->
    start_mnesia(true).
+
%% Stop mnesia and wait until it is fully down; the match asserts
%% that mnesia:stop/0 actually reported 'stopped'.
stop_mnesia() ->
    stopped = mnesia:stop(),
    ensure_mnesia_not_running().
+
%% Point mnesia at the other cluster nodes and return the subset it
%% managed to connect to. With CheckOtherNodes set, connecting to
%% none of a non-empty candidate list is an error.
change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) ->
    ClusterNodes = nodes_excl_me(ClusterNodes0),
    case {mnesia:change_config(extra_db_nodes, ClusterNodes), ClusterNodes} of
        {{ok, []}, [_|_]} when CheckOtherNodes ->
            throw({error, {failed_to_cluster_with, ClusterNodes,
                           "Mnesia could not connect to any nodes."}});
        {{ok, Nodes}, _} ->
            Nodes
    end.
+
%% Verify that a remote node's OTP and RabbitMQ versions are
%% compatible with ours; the first failing check's error is returned.
check_consistency(OTP, Rabbit) ->
    rabbit_misc:sequence_error(
      [check_otp_consistency(OTP),
       check_rabbit_consistency(Rabbit)]).

%% As check_consistency/2, additionally verifying that the remote
%% node's cluster status includes us.
check_consistency(OTP, Rabbit, Node, Status) ->
    rabbit_misc:sequence_error(
      [check_otp_consistency(OTP),
       check_rabbit_consistency(Rabbit),
       check_nodes_consistency(Node, Status)]).
+
%% We are consistent with Node iff its view of the cluster includes
%% this node; otherwise report the disagreement.
check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) ->
    case me_in_nodes(RemoteAllNodes) of
        true ->
            {ok, RemoteStatus};
        false ->
            {error, {inconsistent_cluster,
                     rabbit_misc:format("Node ~p thinks it's clustered "
                                        "with node ~p, but ~p disagrees",
                                        [node(), Node, Node])}}
    end.
+
%% Compare local and remote version strings with exact equality.
check_version_consistency(This, Remote, Name) ->
    check_version_consistency(This, Remote, Name, fun erlang:'=:='/2).

%% Compare using a caller-supplied equivalence predicate; returns ok
%% or the {error, {inconsistent_cluster, _}} from version_error/3.
check_version_consistency(This, Remote, Name, Comp) ->
    case Comp(This, Remote) of
        false -> version_error(Name, This, Remote);
        true  -> ok
    end.
+
%% Build the standard "version mismatch" error tuple for component
%% Name (e.g. "OTP" or "Rabbit").
version_error(Name, This, Remote) ->
    {error, {inconsistent_cluster,
             rabbit_misc:format("~s version mismatch: local node is ~s, "
                                "remote node ~s", [Name, This, Remote])}}.
+
%% OTP releases must match exactly.
check_otp_consistency(Remote) ->
    check_version_consistency(erlang:system_info(otp_release), Remote, "OTP").

%% RabbitMQ versions need only be minor-equivalent, per
%% rabbit_misc:version_minor_equivalent/2.
check_rabbit_consistency(Remote) ->
    check_version_consistency(
      rabbit_misc:version(), Remote, "Rabbit",
      fun rabbit_misc:version_minor_equivalent/2).
+
%% This is fairly tricky. We want to know if the node is in the state
%% that a `reset' would leave it in. We cannot simply check if the
%% mnesia tables aren't there because restarted RAM nodes won't have
%% tables while still being non-virgin. What we do instead is to
%% check if the mnesia directory is non-existent or empty, with the
%% exception of the cluster status files, which will be there thanks to
%% `rabbit_node_monitor:prepare_cluster_status_file/0'.
is_virgin_node() ->
    case rabbit_file:list_dir(dir()) of
        {error, enoent} ->
            true;
        {ok, []} ->
            true;
        {ok, [File1, File2]} ->
            %% Exactly two files: virgin iff they are precisely the
            %% two cluster status files.
            lists:usort([dir() ++ "/" ++ File1, dir() ++ "/" ++ File2]) =:=
                lists:usort([rabbit_node_monitor:cluster_status_filename(),
                             rabbit_node_monitor:running_nodes_filename()]);
        {ok, _} ->
            false
    end.
+
%% Walk the candidate list and return {ok, Node} for the first node
%% that answers node_info over RPC with compatible OTP and RabbitMQ
%% versions; 'none' if no candidate qualifies.
find_good_node([]) ->
    none;
find_good_node([Node | Nodes]) ->
    case rpc:call(Node, rabbit_mnesia, node_info, []) of
        {badrpc, _Reason} -> find_good_node(Nodes);
        %% old delegate hash check
        {_OTP, _Rabbit, _Hash, _} -> find_good_node(Nodes);
        {OTP, Rabbit, _} -> case check_consistency(OTP, Rabbit) of
                                {error, _} -> find_good_node(Nodes);
                                ok         -> {ok, Node}
                            end
    end.
+
%% True iff this node is clustered and is its cluster's sole disc
%% node — the situation in which it must be neither reset nor moved.
is_only_clustered_disc_node() ->
    case node_type() of
        disc -> is_clustered() andalso cluster_nodes(disc) =:= [node()];
        ram  -> false
    end.
+
%% True iff the local node appears in the given list.
me_in_nodes(Nodes) -> lists:any(fun (N) -> N =:= node() end, Nodes).

%% Sorted, de-duplicated list guaranteed to contain the local node.
nodes_incl_me(Nodes) -> lists:usort([node() | Nodes]).

%% The list with the first occurrence of the local node removed.
nodes_excl_me(Nodes) -> lists:delete(node(), Nodes).
+
%% Abort with a tagged clustering error plus its human-readable text.
e(Tag) -> throw({error, {Tag, error_description(Tag)}}).

%% Human-readable description for each clustering error tag.
error_description(Tag) ->
    case Tag of
        clustering_only_disc_node ->
            "You cannot cluster a node if it is the only disc node in its existing "
            " cluster. If new nodes joined while this node was offline, use "
            "'update_cluster_nodes' to add them manually.";
        resetting_only_disc_node ->
            "You cannot reset a node when it is the only disc node in a cluster. "
            "Please convert another node of the cluster to a disc node first.";
        not_clustered ->
            "Non-clustered nodes can only be disc nodes.";
        no_online_cluster_nodes ->
            "Could not find any online cluster nodes. If the cluster has changed, "
            "you can use the 'update_cluster_nodes' command.";
        inconsistent_cluster ->
            "The nodes provided do not have this node as part of the cluster.";
        not_a_cluster_node ->
            "The node selected is not in the cluster.";
        online_node_offline_flag ->
            "You set the --offline flag, which is used to remove nodes remotely from "
            "offline nodes, but this node is online.";
        offline_node_no_offline_flag ->
            "You are trying to remove a node from an offline node. That is dangerous, "
            "but can be done with the --offline flag. Please consult the manual "
            "for rabbitmqctl for more information.";
        removing_node_from_offline_node ->
            "To remove a node remotely from an offline node, the node you are removing "
            "from must be a disc node and all the other nodes must be offline.";
        no_running_cluster_nodes ->
            "You cannot leave a cluster if no online nodes are present."
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_msg_file).
+
+-export([append/3, read/2, scan/4]).
+
+%%----------------------------------------------------------------------------
+
+-include("rabbit_msg_store.hrl").
+
+-define(INTEGER_SIZE_BYTES, 8).
+-define(INTEGER_SIZE_BITS, (8 * ?INTEGER_SIZE_BYTES)).
+-define(WRITE_OK_SIZE_BITS, 8).
+-define(WRITE_OK_MARKER, 255).
+-define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)).
+-define(MSG_ID_SIZE_BYTES, 16).
+-define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)).
+-define(SCAN_BLOCK_SIZE, 4194304). %% 4MB
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(io_device() :: any()).
+-type(position() :: non_neg_integer()).
+-type(msg_size() :: non_neg_integer()).
+-type(file_size() :: non_neg_integer()).
+-type(message_accumulator(A) ::
+ fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) ->
+ A)).
+
+-spec(append/3 :: (io_device(), rabbit_types:msg_id(), msg()) ->
+ rabbit_types:ok_or_error2(msg_size(), any())).
+-spec(read/2 :: (io_device(), msg_size()) ->
+ rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()},
+ any())).
+-spec(scan/4 :: (io_device(), file_size(), message_accumulator(A), A) ->
+ {'ok', A, position()}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% Append one message entry to FileHdl in the on-disk layout
%%   <<Size:?INTEGER_SIZE_BITS, MsgId:16/binary, Body/binary,
%%     ?WRITE_OK_MARKER:8>>
%% where Size covers MsgId plus the term_to_binary-encoded body, and
%% the trailing marker byte flags a completely-written entry.
%% Returns {ok, TotalBytesWritten} (Size plus the length prefix and
%% marker, i.e. ?FILE_PACKING_ADJUSTMENT) or the error from
%% file_handle_cache:append/2.
append(FileHdl, MsgId, MsgBody)
  when is_binary(MsgId) andalso size(MsgId) =:= ?MSG_ID_SIZE_BYTES ->
    MsgBodyBin  = term_to_binary(MsgBody),
    MsgBodyBinSize = size(MsgBodyBin),
    Size = MsgBodyBinSize + ?MSG_ID_SIZE_BYTES,
    case file_handle_cache:append(FileHdl,
                                  <<Size:?INTEGER_SIZE_BITS,
                                    MsgId:?MSG_ID_SIZE_BYTES/binary,
                                    MsgBodyBin:MsgBodyBinSize/binary,
                                    ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
        ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT};
        KO -> KO
    end.
+
%% Read one message entry of on-disk size TotalSize from the current
%% position of FileHdl, verifying the length prefix and write-ok
%% marker via the binary pattern. Returns {ok, {MsgId, Msg}} with the
%% body decoded by binary_to_term/1. NB: the catch-all clause passes
%% through not only read errors but also a successful read whose
%% bytes do not match the expected layout — such a result is returned
%% unchanged as {ok, RawBinary}.
read(FileHdl, TotalSize) ->
    Size = TotalSize - ?FILE_PACKING_ADJUSTMENT,
    BodyBinSize = Size - ?MSG_ID_SIZE_BYTES,
    case file_handle_cache:read(FileHdl, TotalSize) of
        {ok, <<Size:?INTEGER_SIZE_BITS,
               MsgId:?MSG_ID_SIZE_BYTES/binary,
               MsgBodyBin:BodyBinSize/binary,
               ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
            {ok, {MsgId, binary_to_term(MsgBodyBin)}};
        KO -> KO
    end.
+
%% Fold Fun over every well-formed message entry in the file, reading
%% it in ?SCAN_BLOCK_SIZE chunks. Returns {ok, FinalAcc, ScanOffset}
%% where ScanOffset is the position just past the last complete entry
%% parsed.
scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 ->
    scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc).

%% Chunked scan loop. Data carries the unconsumed tail of the
%% previous chunk (a partial entry straddling a block boundary);
%% ReadOffset is how much of the file has been read, ScanOffset how
%% much has been successfully parsed. The first clause matches when
%% ReadOffset has reached FileSize. A failed read simply ends the
%% scan with whatever was accumulated so far.
scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) ->
    {ok, Acc, ScanOffset};
scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) ->
    Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]),
    case file_handle_cache:read(FileHdl, Read) of
        {ok, Data1} ->
            {Data2, Acc1, ScanOffset1} =
                scanner(<<Data/binary, Data1/binary>>, ScanOffset, Fun, Acc),
            ReadOffset1 = ReadOffset + size(Data1),
            scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1);
        _KO ->
            {ok, Acc, ScanOffset}
    end.
+
%% Parse as many complete message entries as possible off the front
%% of the binary, folding each valid one into Acc via Fun. Returns
%% {UnparsedTail, Acc, Offset}. A zero length prefix is taken as the
%% start of blank/preallocated space, so scanning stops there.
%% Entries whose trailing byte is not ?WRITE_OK_MARKER (interrupted
%% writes) are skipped without invoking Fun, but the offset still
%% advances past them. The final clause catches a partial entry at
%% the end of the buffer and hands it back to the caller.
scanner(<<>>, Offset, _Fun, Acc) ->
    {<<>>, Acc, Offset};
scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) ->
    {<<>>, Acc, Offset}; %% Nothing to do other than stop.
scanner(<<Size:?INTEGER_SIZE_BITS, MsgIdAndMsg:Size/binary,
          WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) ->
    TotalSize = Size + ?FILE_PACKING_ADJUSTMENT,
    case WriteMarker of
        ?WRITE_OK_MARKER ->
            %% Here we take option 5 from
            %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in
            %% which we read the MsgId as a number, and then convert it
            %% back to a binary in order to work around bugs in
            %% Erlang's GC.
            <<MsgIdNum:?MSG_ID_SIZE_BITS, Msg/binary>> =
                <<MsgIdAndMsg:Size/binary>>,
            <<MsgId:?MSG_ID_SIZE_BYTES/binary>> =
                <<MsgIdNum:?MSG_ID_SIZE_BITS>>,
            scanner(Rest, Offset + TotalSize, Fun,
                    Fun({MsgId, TotalSize, Offset, Msg}, Acc));
        _ ->
            scanner(Rest, Offset + TotalSize, Fun, Acc)
    end;
scanner(Data, Offset, _Fun, Acc) ->
    {Data, Acc, Offset}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_msg_store).
+
+-behaviour(gen_server2).
+
+-export([start_link/4, successfully_recovered_state/1,
+ client_init/4, client_terminate/1, client_delete_and_terminate/1,
+ client_ref/1, close_all_indicated/1,
+ write/3, write_flow/3, read/2, contains/2, remove/2]).
+
+-export([set_maximum_since_use/2, has_readers/2, combine_files/3,
+ delete_file/2]). %% internal
+
+-export([transform_dir/3, force_recovery/2]). %% upgrade
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_call/4, prioritise_cast/3,
+ prioritise_info/3, format_message_queue/2]).
+
+%%----------------------------------------------------------------------------
+
+-include("rabbit_msg_store.hrl").
+
+-define(SYNC_INTERVAL, 25). %% milliseconds
+-define(CLEAN_FILENAME, "clean.dot").
+-define(FILE_SUMMARY_FILENAME, "file_summary.ets").
+-define(TRANSFORM_TMP, "transform_tmp").
+
+-define(BINARY_MODE, [raw, binary]).
+-define(READ_MODE, [read]).
+-define(READ_AHEAD_MODE, [read_ahead | ?READ_MODE]).
+-define(WRITE_MODE, [write]).
+
+-define(FILE_EXTENSION, ".rdq").
+-define(FILE_EXTENSION_TMP, ".rdt").
+
+-define(HANDLE_CACHE_BUFFER_SIZE, 1048576). %% 1MB
+
+ %% i.e. two pairs, so GC does not go idle when busy
+-define(MAXIMUM_SIMULTANEOUS_GC_FILES, 4).
+
+%%----------------------------------------------------------------------------
+
-record(msstate,
        { dir,                    %% store directory
          index_module,           %% the module for index ops
          index_state,            %% where are messages?
          current_file,           %% current file name as number
          current_file_handle,    %% current file handle since the last fsync?
          file_handle_cache,      %% file handle cache
          sync_timer_ref,         %% TRef for our interval timer
          sum_valid_data,         %% sum of valid data in all files
          sum_file_size,          %% sum of file sizes
          pending_gc_completion,  %% things to do once GC completes
          gc_pid,                 %% pid of our GC
          file_handles_ets,       %% tid of the shared file handles table
          file_summary_ets,       %% tid of the file summary table
          cur_file_cache_ets,     %% tid of current file cache table
          flying_ets,             %% tid of writes/removes in flight
          dying_clients,          %% set of dying clients
          clients,                %% map of references of all registered clients
                                  %% to callbacks
          successfully_recovered, %% boolean: did we recover state?
          file_size_limit,        %% how big are our files allowed to get?
          cref_to_msg_ids         %% client ref to synced messages mapping
        }).

%% Per-client handle: the subset of server state a client needs in
%% order to operate on the store directly. Same-named fields carry
%% the same values as in #msstate above; `server' is the store server
%% (pid or registered name, per the server() type below) and
%% `client_ref' is this client's unique reference.
-record(client_msstate,
        { server,
          client_ref,
          file_handle_cache,
          index_state,
          index_module,
          dir,
          gc_pid,
          file_handles_ets,
          file_summary_ets,
          cur_file_cache_ets,
          flying_ets
        }).

%% One row per store file in the file summary ets table:
%% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers}.
-record(file_summary,
        {file, valid_total_size, left, right, file_size, locked, readers}).

%% State handed to the garbage collector; field types are given by
%% the gc_state() type below.
-record(gc_state,
        { dir,
          index_module,
          index_state,
          file_summary_ets,
          file_handles_ets,
          msg_store
        }).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([gc_state/0, file_num/0]).
+
+-type(gc_state() :: #gc_state { dir :: file:filename(),
+ index_module :: atom(),
+ index_state :: any(),
+ file_summary_ets :: ets:tid(),
+ file_handles_ets :: ets:tid(),
+ msg_store :: server()
+ }).
+
+-type(server() :: pid() | atom()).
+-type(client_ref() :: binary()).
+-type(file_num() :: non_neg_integer()).
+-type(client_msstate() :: #client_msstate {
+ server :: server(),
+ client_ref :: client_ref(),
+ file_handle_cache :: dict(),
+ index_state :: any(),
+ index_module :: atom(),
+ dir :: file:filename(),
+ gc_pid :: pid(),
+ file_handles_ets :: ets:tid(),
+ file_summary_ets :: ets:tid(),
+ cur_file_cache_ets :: ets:tid(),
+ flying_ets :: ets:tid()}).
+-type(msg_ref_delta_gen(A) ::
+ fun ((A) -> 'finished' |
+ {rabbit_types:msg_id(), non_neg_integer(), A})).
+-type(maybe_msg_id_fun() ::
+ 'undefined' | fun ((gb_set(), 'written' | 'ignored') -> any())).
+-type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')).
+-type(deletion_thunk() :: fun (() -> boolean())).
+
+-spec(start_link/4 ::
+ (atom(), file:filename(), [binary()] | 'undefined',
+ {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()).
+-spec(successfully_recovered_state/1 :: (server()) -> boolean()).
+-spec(client_init/4 :: (server(), client_ref(), maybe_msg_id_fun(),
+ maybe_close_fds_fun()) -> client_msstate()).
+-spec(client_terminate/1 :: (client_msstate()) -> 'ok').
+-spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok').
+-spec(client_ref/1 :: (client_msstate()) -> client_ref()).
+-spec(close_all_indicated/1 ::
+ (client_msstate()) -> rabbit_types:ok(client_msstate())).
+-spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
+-spec(write_flow/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
+-spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) ->
+ {rabbit_types:ok(msg()) | 'not_found', client_msstate()}).
+-spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()).
+-spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok').
+
+-spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok').
+-spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()).
+-spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) ->
+ deletion_thunk()).
+-spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()).
+-spec(force_recovery/2 :: (file:filename(), server()) -> 'ok').
+-spec(transform_dir/3 :: (file:filename(), server(),
+ fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% We run GC whenever (garbage / sum_file_size) > ?GARBAGE_FRACTION
+%% It is not recommended to set this to < 0.5
+-define(GARBAGE_FRACTION, 0.5).
+
+%% The components:
+%%
+%% Index: this is a mapping from MsgId to #msg_location{}:
+%% {MsgId, RefCount, File, Offset, TotalSize}
+%% By default, it's in ets, but it's also pluggable.
+%% FileSummary: this is an ets table which maps File to #file_summary{}:
+%% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers}
+%%
+%% The basic idea is that messages are appended to the current file up
+%% until that file becomes too big (> file_size_limit). At that point,
+%% the file is closed and a new file is created on the _right_ of the
+%% old file which is used for new messages. Files are named
+%% numerically ascending, thus the file with the lowest name is the
+%% eldest file.
+%%
+%% We need to keep track of which messages are in which files (this is
+%% the Index); how much useful data is in each file and which files
+%% are on the left and right of each other. This is the purpose of the
+%% FileSummary ets table.
+%%
+%% As messages are removed from files, holes appear in these
+%% files. The field ValidTotalSize contains the total amount of useful
+%% data left in the file. This is needed for garbage collection.
+%%
+%% When we discover that a file is now empty, we delete it. When we
+%% discover that it can be combined with the useful data in either its
+%% left or right neighbour, and overall, across all the files, we have
+%% ((the amount of garbage) / (the sum of all file sizes)) >
+%% ?GARBAGE_FRACTION, we start a garbage collection run concurrently,
+%% which will compact the two files together. This keeps disk
+%% utilisation high and aids performance. We deliberately do this
+%% lazily in order to prevent doing GC on files which are soon to be
+%% emptied (and hence deleted).
+%%
+%% Given the compaction between two files, the left file (i.e. elder
+%% file) is considered the ultimate destination for the good data in
+%% the right file. If necessary, the good data in the left file which
+%% is fragmented throughout the file is written out to a temporary
+%% file, then read back in to form a contiguous chunk of good data at
+%% the start of the left file. Thus the left file is garbage collected
+%% and compacted. Then the good data from the right file is copied
+%% onto the end of the left file. Index and FileSummary tables are
+%% updated.
+%%
+%% On non-clean startup, we scan the files we discover, dealing with
+%% the possibilities of a crash having occurred during a compaction
+%% (this consists of tidyup - the compaction is deliberately designed
+%% such that data is duplicated on disk rather than risking it being
+%% lost), and rebuild the FileSummary ets table and Index.
+%%
+%% So, with this design, messages move to the left. Eventually, they
+%% should end up in a contiguous block on the left and are then never
+%% rewritten. But this isn't quite the case. If in a file there is one
+%% message that is being ignored, for some reason, and messages in the
+%% file to the right and in the current block are being read all the
+%% time then it will repeatedly be the case that the good data from
+%% both files can be combined and will be written out to a new
+%% file. Whenever this happens, our shunned message will be rewritten.
+%%
+%% So, provided that we combine messages in the right order,
+%% (i.e. left file, bottom to top, right file, bottom to top),
+%% eventually our shunned message will end up at the bottom of the
+%% left file. The compaction/combining algorithm is smart enough to
+%% read in good data from the left file that is scattered throughout
+%% (i.e. C and D in the below diagram), then truncate the file to just
+%% above B (i.e. truncate to the limit of the good contiguous region
+%% at the start of the file), then write C and D on top and then write
+%% E, F and G from the right file on top. Thus contiguous blocks of
+%% good data at the bottom of files are not rewritten.
+%%
+%% +-------+ +-------+ +-------+
+%% | X | | G | | G |
+%% +-------+ +-------+ +-------+
+%% | D | | X | | F |
+%% +-------+ +-------+ +-------+
+%% | X | | X | | E |
+%% +-------+ +-------+ +-------+
+%% | C | | F | ===> | D |
+%% +-------+ +-------+ +-------+
+%% | X | | X | | C |
+%% +-------+ +-------+ +-------+
+%% | B | | X | | B |
+%% +-------+ +-------+ +-------+
+%% | A | | E | | A |
+%% +-------+ +-------+ +-------+
+%% left right left
+%%
+%% From this reasoning, we do have a bound on the number of times the
+%% message is rewritten. From when it is inserted, there can be no
+%% files inserted between it and the head of the queue, and the worst
+%% case is that every time it is rewritten, it moves one position lower
+%% in the file (for it to stay at the same position requires that
+%% there are no holes beneath it, which means truncate would be used
+%% and so it would not be rewritten at all). Thus this seems to
+%% suggest the limit is the number of messages ahead of it in the
+%% queue, though it's likely that that's pessimistic, given the
+%% requirements for compaction/combination of files.
+%%
+%% The other property that we have is the bound on the lowest
+%% utilisation, which should be 50% - worst case is that all files are
+%% fractionally over half full and can't be combined (equivalent is
+%% alternating full files and files with only one tiny message in
+%% them).
+%%
+%% Messages are reference-counted. When a message with the same msg id
+%% is written several times we only store it once, and only remove it
+%% from the store when it has been removed the same number of times.
+%%
+%% The reference counts do not persist. Therefore the initialisation
+%% function must be provided with a generator that produces ref count
+%% deltas for all recovered messages. This is only used on startup
+%% when the shutdown was non-clean.
+%%
+%% Read messages with a reference count greater than one are entered
+%% into a message cache. The purpose of the cache is not especially
+%% performance, though it can help there too, but prevention of memory
+%% explosion. It ensures that as messages with a high reference count
+%% are read from several processes they are read back as the same
+%% binary object rather than multiples of identical binary
+%% objects.
+%%
+%% Reads can be performed directly by clients without calling to the
+%% server. This is safe because multiple file handles can be used to
+%% read files. However, locking is used by the concurrent GC to make
+%% sure that reads are not attempted from files which are in the
+%% process of being garbage collected.
+%%
+%% When a message is removed, its reference count is decremented. Even
+%% if the reference count becomes 0, its entry is not removed. This is
+%% because in the event of the same message being sent to several
+%% different queues, there is the possibility of one queue writing and
+%% removing the message before other queues write it at all. Thus
+%% accommodating 0-reference counts allows us to avoid unnecessary
+%% writes here. Of course, there are complications: the file to which
+%% the message has already been written could be locked pending
+%% deletion or GC, which means we have to rewrite the message as the
+%% original copy will now be lost.
+%%
+%% The server automatically defers reads, removes and contains calls
+%% that occur which refer to files which are currently being
+%% GC'd. Contains calls are only deferred in order to ensure they do
+%% not overtake removes.
+%%
+%% The current file to which messages are being written has a
+%% write-back cache. This is written to immediately by clients and can
+%% be read from by clients too. This means that there are only ever
+%% writes made to the current file, thus eliminating delays due to
+%% flushing write buffers in order to be able to safely read from the
+%% current file. The one exception to this is that on start up, the
+%% cache is not populated with msgs found in the current file, and
+%% thus in this case only, reads may have to come from the file
+%% itself. The effect of this is that even if the msg_store process is
+%% heavily overloaded, clients can still write and read messages with
+%% very low latency and not block at all.
+%%
+%% Clients of the msg_store are required to register before using the
+%% msg_store. This provides them with the necessary client-side state
+%% to allow them to directly access the various caches and files. When
+%% they terminate, they should deregister. They can do this by calling
+%% either client_terminate/1 or client_delete_and_terminate/1. The
+%% differences are: (a) client_terminate is synchronous. As a result,
+%% if the msg_store is badly overloaded and has lots of in-flight
+%% writes and removes to process, this will take some time to
+%% return. However, once it does return, you can be sure that all the
+%% actions you've issued to the msg_store have been processed. (b) Not
+%% only is client_delete_and_terminate/1 asynchronous, but it also
+%% permits writes and subsequent removes from the current
+%% (terminating) client which are still in flight to be safely
+%% ignored. Thus from the point of view of the msg_store itself, and
+%% all from the same client:
+%%
+%% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
+%% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
+%%
+%% The client obviously sent T after all the other messages (up to
+%% W4), but because the msg_store prioritises messages, the T can be
+%% promoted and thus received early.
+%%
+%% Thus at the point of the msg_store receiving T, we have messages 1
+%% and 2 with a refcount of 1. After T, W3 will be ignored because
+%% it's an unknown message, as will R3, and W4. W2, R1 and R2 won't be
+%% ignored because the messages that they refer to were already known
+%% to the msg_store prior to T. However, it can be a little more
+%% complex: after the first R2, the refcount of msg 2 is 0. At that
+%% point, if a GC occurs or file deletion, msg 2 could vanish, which
+%% would then mean that the subsequent W2 and R2 are then ignored.
+%%
+%% The use case then for client_delete_and_terminate/1 is if the
+%% client wishes to remove everything it's written to the msg_store:
+%% it issues removes for all messages it's written and not removed,
+%% and then calls client_delete_and_terminate/1. At that point, any
+%% in-flight writes (and subsequent removes) can be ignored, but
+%% removes and writes for messages the msg_store already knows about
+%% will continue to be processed normally (which will normally just
+%% involve modifying the reference count, which is fast). Thus we save
+%% disk bandwidth for writes which are going to be immediately removed
+%% again by the terminating client.
+%%
+%% We use a separate set to keep track of the dying clients in order
+%% to keep that set, which is inspected on every write and remove, as
+%% small as possible. Inspecting the set of all clients would degrade
+%% performance with many healthy clients and few, if any, dying
+%% clients, which is the typical case.
+%%
+%% When the msg_store has a backlog (i.e. it has unprocessed messages
+%% in its mailbox / gen_server priority queue), a further optimisation
+%% opportunity arises: we can eliminate pairs of 'write' and 'remove'
+%% from the same client for the same message. A typical occurrence of
+%% these is when an empty durable queue delivers persistent messages
+%% to ack'ing consumers. The queue will asynchronously ask the
+%% msg_store to 'write' such messages, and when they are acknowledged
+%% it will issue a 'remove'. That 'remove' may be issued before the
+%% msg_store has processed the 'write'. There is then no point going
+%% ahead with the processing of that 'write'.
+%%
+%% To detect this situation a 'flying_ets' table is shared between the
+%% clients and the server. The table is keyed on the combination of
+%% client (reference) and msg id, and the value represents an
+%% integration of all the writes and removes currently "in flight" for
+%% that message between the client and server - '+1' means all the
+%% writes/removes add up to a single 'write', '-1' to a 'remove', and
+%% '0' to nothing. (NB: the integration can never add up to more than
+%% one 'write' or 'remove' since clients must not write/remove a message
+%% more than once without first removing/writing it).
+%%
+%% Maintaining this table poses two challenges: 1) both the clients
+%% and the server access and update the table, which causes
+%% concurrency issues, 2) we must ensure that entries do not stay in
+%% the table forever, since that would constitute a memory leak. We
+%% address the former by carefully modelling all operations as
+%% sequences of atomic actions that produce valid results in all
+%% possible interleavings. We address the latter by deleting table
+%% entries whenever the server finds a 0-valued entry during the
+%% processing of a write/remove. 0 is essentially equivalent to "no
+%% entry". If, OTOH, the value is non-zero we know there is at least
+%% one other 'write' or 'remove' in flight, so we get an opportunity
+%% later to delete the table entry when processing these.
+%%
+%% There are two further complications. We need to ensure that 1)
+%% eliminated writes still get confirmed, and 2) the write-back cache
+%% doesn't grow unbounded. These are quite straightforward to
+%% address. See the comments in the code.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% variable_queue.
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+%% Start the msg_store as a locally registered gen_server2 process.
+%% StartupFunState is the {MsgRefDeltaGen, GenState} pair (see the
+%% -spec) used by init/1 to rebuild ref counts after a dirty shutdown.
+start_link(Server, Dir, ClientRefs, StartupFunState) ->
+    InitArgs = [Server, Dir, ClientRefs, StartupFunState],
+    gen_server2:start_link({local, Server}, ?MODULE, InitArgs,
+                           [{timeout, infinity}]).
+
+%% Synchronously ask the server whether the last shutdown was
+%% recovered cleanly (index and file summary both intact - see init/1).
+successfully_recovered_state(Server) ->
+    Request = successfully_recovered_state,
+    gen_server2:call(Server, Request, infinity).
+
+%% Register a new client with the msg_store server and build the
+%% client-side state giving it direct access to the index and the
+%% shared ets tables (so most reads/writes bypass the server).
+client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) ->
+    Request = {new_client_state, Ref, self(), MsgOnDiskFun, CloseFDsFun},
+    {IState, IModule, Dir, GCPid,
+     FileHandlesEts, FileSummaryEts, CurFileCacheEts, FlyingEts} =
+        gen_server2:call(Server, Request, infinity),
+    #client_msstate { server             = Server,
+                      client_ref         = Ref,
+                      file_handle_cache  = dict:new(),
+                      index_state        = IState,
+                      index_module       = IModule,
+                      dir                = Dir,
+                      gc_pid             = GCPid,
+                      file_handles_ets   = FileHandlesEts,
+                      file_summary_ets   = FileSummaryEts,
+                      cur_file_cache_ets = CurFileCacheEts,
+                      flying_ets         = FlyingEts }.
+
+%% Synchronous deregistration: only returns once the server has
+%% processed everything this client previously issued.
+client_terminate(CState = #client_msstate { client_ref = Ref }) ->
+    close_all_handles(CState),
+    Request = {client_terminate, Ref},
+    ok = server_call(CState, Request).
+
+%% Asynchronous deregistration: mark the client dying first, then
+%% delete it, so in-flight writes/removes from it can be safely
+%% ignored by the server (see the notes on dying clients above).
+client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) ->
+    close_all_handles(CState),
+    [ok = server_cast(CState, Msg)
+     || Msg <- [{client_dying, Ref}, {client_delete, Ref}]],
+    ok.
+
+%% Accessor: this client's unique reference (a binary - client_ref()).
+client_ref(#client_msstate { client_ref = Ref }) -> Ref.
+
+%% As write/3, but first obtain credit_flow credit against the server
+%% pid so producers are throttled when the msg_store falls behind; the
+%% server acks the credit when it processes the write.
+write_flow(MsgId, Msg, CState = #client_msstate { server = Server }) ->
+    ServerPid = whereis(Server),
+    credit_flow:send(ServerPid, ?CREDIT_DISC_BOUND),
+    client_write(MsgId, Msg, flow, CState).
+
+%% Write without credit flow (the 'noflow' variant of write_flow/3).
+write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
+
+%% Client-side read: try the current-file write-back cache first, then
+%% the index (positive ref count only); otherwise, or whenever a direct
+%% read turns out to be unsafe, defer to the server.
+read(MsgId,
+     CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+    case ets:lookup(CurFileCacheEts, MsgId) of
+        [{MsgId, Msg, _CacheRefCount}] ->
+            %% Hit in the current file's write-back cache.
+            {{ok, Msg}, CState};
+        [] ->
+            Fallback = fun () ->
+                               {server_call(CState, {read, MsgId}), CState}
+                       end,
+            case index_lookup_positive_ref_count(MsgId, CState) of
+                not_found   -> Fallback();
+                MsgLocation -> client_read1(MsgLocation, Fallback, CState)
+            end
+    end.
+
+%% Ask the server whether MsgId is known (may be deferred during GC).
+contains(MsgId, CState) -> server_call(CState, {contains, MsgId}).
+%% Record a -1 in flying_ets for each msg id (possibly cancelling an
+%% in-flight write), then ask the server to remove them. No-op for [].
+remove([], _CState) -> ok;
+remove(MsgIds, CState = #client_msstate { client_ref = CRef }) ->
+    lists:foreach(
+      fun (MsgId) -> client_update_flying(-1, MsgId, CState) end, MsgIds),
+    server_cast(CState, {remove, CRef, MsgIds}).
+
+%% file_handle_cache callback (registered in init/1): forward the age
+%% limit to the server so it can close stale file handles.
+set_maximum_since_use(Server, Age) ->
+ gen_server2:cast(Server, {set_maximum_since_use, Age}).
+
+%%----------------------------------------------------------------------------
+%% Client-side-only helpers
+%%----------------------------------------------------------------------------
+
+%% Synchronous call to this client's msg_store server (no timeout).
+server_call(#client_msstate { server = Server }, Msg) ->
+ gen_server2:call(Server, Msg, infinity).
+
+%% Asynchronous cast to this client's msg_store server.
+server_cast(#client_msstate { server = Server }, Msg) ->
+ gen_server2:cast(Server, Msg).
+
+%% Common write path. Order is significant: bump the flying counter,
+%% then insert into the current-file cache (the server's write handler
+%% looks the message up there), then cast the write to the server.
+client_write(MsgId, Msg, Flow,
+ CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts,
+ client_ref = CRef }) ->
+ ok = client_update_flying(+1, MsgId, CState),
+ ok = update_msg_cache(CurFileCacheEts, MsgId, Msg),
+ ok = server_cast(CState, {write, CRef, MsgId, Flow}).
+
+%% Stage 1 of a direct (client-side) read: look up the file summary of
+%% the file supposedly holding the message. A missing entry means the
+%% file has been GC'd away, so restart the whole read.
+client_read1(#msg_location { msg_id = MsgId, file = File } = MsgLocation, Defer,
+ CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
+ case ets:lookup(FileSummaryEts, File) of
+ [] -> %% File has been GC'd and no longer exists. Go around again.
+ read(MsgId, CState);
+ [#file_summary { locked = Locked, right = Right }] ->
+ client_read2(Locked, Right, MsgLocation, Defer, CState)
+ end.
+
+%% Stage 2: decide from the file's lock flag and whether it is the
+%% current file (right =:= undefined) whether to defer to the server
+%% or attempt a direct read (stage 3).
+client_read2(false, undefined, _MsgLocation, Defer, _CState) ->
+ %% Although we've already checked both caches and not found the
+ %% message there, the message is apparently in the
+ %% current_file. We can only arrive here if we are trying to read
+ %% a message which we have not written, which is very odd, so just
+ %% defer.
+ %%
+ %% OR, on startup, the cur_file_cache is not populated with the
+ %% contents of the current file, thus reads from the current file
+ %% will end up here and will need to be deferred.
+ Defer();
+client_read2(true, _Right, _MsgLocation, Defer, _CState) ->
+ %% Of course, in the meantime, the GC could have run and our msg
+ %% is actually in a different file, unlocked. However, deferring is
+ %% the safest and simplest thing to do.
+ Defer();
+client_read2(false, _Right,
+ MsgLocation = #msg_location { msg_id = MsgId, file = File },
+ Defer,
+ CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
+ %% It's entirely possible that everything we're doing from here on
+ %% is for the wrong file, or a non-existent file, as a GC may have
+ %% finished.
+ safe_ets_update_counter(
+ FileSummaryEts, File, {#file_summary.readers, +1},
+ fun (_) -> client_read3(MsgLocation, Defer, CState) end,
+ fun () -> read(MsgId, CState) end).
+
+%% Stage 3: our +1 on the file's reader count has been applied.
+%% Re-check the lock (the GC may have raced us between the lookup and
+%% the +1) and, if still safe, read from disk directly; Release undoes
+%% the +1 and wakes the GC if we were the last reader of a locked file.
+client_read3(#msg_location { msg_id = MsgId, file = File }, Defer,
+ CState = #client_msstate { file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ gc_pid = GCPid,
+ client_ref = Ref }) ->
+ Release =
+ fun() -> ok = case ets:update_counter(FileSummaryEts, File,
+ {#file_summary.readers, -1}) of
+ 0 -> case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { locked = true }] ->
+ rabbit_msg_store_gc:no_readers(
+ GCPid, File);
+ _ -> ok
+ end;
+ _ -> ok
+ end
+ end,
+ %% If a GC involving the file hasn't already started, it won't
+ %% start now. Need to check again to see if we've been locked in
+ %% the meantime, between lookup and update_counter (thus GC
+ %% started before our +1. In fact, it could have finished by now
+ %% too).
+ case ets:lookup(FileSummaryEts, File) of
+ [] -> %% GC has deleted our file, just go round again.
+ read(MsgId, CState);
+ [#file_summary { locked = true }] ->
+ %% If we get a badarg here, then the GC has finished and
+ %% deleted our file. Try going around again. Otherwise,
+ %% just defer.
+ %%
+ %% badarg scenario: we lookup, msg_store locks, GC starts,
+ %% GC ends, we +1 readers, msg_store ets:deletes (and
+ %% unlocks the dest)
+ try Release(),
+ Defer()
+ catch error:badarg -> read(MsgId, CState)
+ end;
+ [#file_summary { locked = false }] ->
+ %% Ok, we're definitely safe to continue - a GC involving
+ %% the file cannot start up now, and isn't running, so
+ %% nothing will tell us from now on to close the handle if
+ %% it's already open.
+ %%
+ %% Finally, we need to recheck that the msg is still at
+ %% the same place - it's possible an entire GC ran between
+ %% us doing the lookup and the +1 on the readers. (Same as
+ %% badarg scenario above, but we don't have a missing file
+ %% - we just have the /wrong/ file).
+ case index_lookup(MsgId, CState) of
+ #msg_location { file = File } = MsgLocation ->
+ %% Still the same file.
+ {ok, CState1} = close_all_indicated(CState),
+ %% We are now guaranteed that the mark_handle_open
+ %% call will either insert_new correctly, or will
+ %% fail, but find the value is open, not close.
+ mark_handle_open(FileHandlesEts, File, Ref),
+ %% Could the msg_store now mark the file to be
+ %% closed? No: marks for closing are issued only
+ %% when the msg_store has locked the file.
+ %% This will never be the current file
+ {Msg, CState2} = read_from_disk(MsgLocation, CState1),
+ Release(), %% this MUST NOT fail with badarg
+ {{ok, Msg}, CState2};
+ #msg_location {} = MsgLocation -> %% different file!
+ Release(), %% this MUST NOT fail with badarg
+ client_read1(MsgLocation, Defer, CState);
+ not_found -> %% it seems not to exist. Defer, just to be sure.
+ try Release() %% this can badarg, same as locked case, above
+ catch error:badarg -> ok
+ end,
+ Defer()
+ end
+ end.
+
+%% Fold Diff (+1 for a write, -1 for a remove) into the shared
+%% flying_ets entry for {MsgId, CRef}. After our update the counter
+%% must be either 0 (our op cancelled the opposite in-flight op) or
+%% Diff (ours is the only op in flight); anything else violates the
+%% alternating write/remove protocol described above, hence the throw.
+client_update_flying(Diff, MsgId, #client_msstate { flying_ets = FlyingEts,
+ client_ref = CRef }) ->
+ Key = {MsgId, CRef},
+ case ets:insert_new(FlyingEts, {Key, Diff}) of
+ true -> ok;
+ false -> try ets:update_counter(FlyingEts, Key, {2, Diff}) of
+ 0 -> ok;
+ Diff -> ok;
+ Err -> throw({bad_flying_ets_update, Diff, Err, Key})
+ catch error:badarg ->
+ %% this is guaranteed to succeed since the
+ %% server only removes and updates flying_ets
+ %% entries; it never inserts them
+ true = ets:insert_new(FlyingEts, {Key, Diff})
+ end,
+ ok
+ end.
+
+%% Forget all per-client bookkeeping for CRef: its pending confirms
+%% and its membership of the dying-clients set.
+clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM,
+                                      dying_clients = DyingClients }) ->
+    CTM1    = dict:erase(CRef, CTM),
+    Dying1  = sets:del_element(CRef, DyingClients),
+    State #msstate { cref_to_msg_ids = CTM1, dying_clients = Dying1 }.
+
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+%% gen_server2 init: set up the store directory, recover (or rebuild)
+%% the index and file summary, create the shared ets tables, start the
+%% GC process, build the initial #msstate and open the current file.
+init([Server, BaseDir, ClientRefs, StartupFunState]) ->
+ process_flag(trap_exit, true),
+
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+
+ Dir = filename:join(BaseDir, atom_to_list(Server)),
+
+ {ok, IndexModule} = application:get_env(msg_store_index_module),
+ rabbit_log:info("~w: using ~p to provide index~n", [Server, IndexModule]),
+
+ %% 'undefined' ClientRefs means nothing on disk can be trusted:
+ %% wipe the directory and start afresh (no recovery attempted).
+ AttemptFileSummaryRecovery =
+ case ClientRefs of
+ undefined -> ok = rabbit_file:recursive_delete([Dir]),
+ ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
+ false;
+ _ -> ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
+ recover_crashed_compactions(Dir)
+ end,
+
+ %% if we found crashed compactions we trust neither the
+ %% file_summary nor the location index. Note the file_summary is
+ %% left empty here if it can't be recovered.
+ {FileSummaryRecovered, FileSummaryEts} =
+ recover_file_summary(AttemptFileSummaryRecovery, Dir),
+
+ {CleanShutdown, IndexState, ClientRefs1} =
+ recover_index_and_client_refs(IndexModule, FileSummaryRecovered,
+ ClientRefs, Dir, Server),
+ Clients = dict:from_list(
+ [{CRef, {undefined, undefined, undefined}} ||
+ CRef <- ClientRefs1]),
+ %% CleanShutdown => msg location index and file_summary both
+ %% recovered correctly.
+ true = case {FileSummaryRecovered, CleanShutdown} of
+ {true, false} -> ets:delete_all_objects(FileSummaryEts);
+ _ -> true
+ end,
+ %% CleanShutdown <=> msg location index and file_summary both
+ %% recovered correctly.
+
+ %% Shared tables are public: clients and the GC access them
+ %% directly, bypassing this server.
+ FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles,
+ [ordered_set, public]),
+ CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]),
+ FlyingEts = ets:new(rabbit_msg_store_flying, [set, public]),
+
+ {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit),
+
+ {ok, GCPid} = rabbit_msg_store_gc:start_link(
+ #gc_state { dir = Dir,
+ index_module = IndexModule,
+ index_state = IndexState,
+ file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ msg_store = self()
+ }),
+
+ State = #msstate { dir = Dir,
+ index_module = IndexModule,
+ index_state = IndexState,
+ current_file = 0,
+ current_file_handle = undefined,
+ file_handle_cache = dict:new(),
+ sync_timer_ref = undefined,
+ sum_valid_data = 0,
+ sum_file_size = 0,
+ pending_gc_completion = orddict:new(),
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ dying_clients = sets:new(),
+ clients = Clients,
+ successfully_recovered = CleanShutdown,
+ file_size_limit = FileSizeLimit,
+ cref_to_msg_ids = dict:new()
+ },
+
+ %% If we didn't recover the msg location index then we need to
+ %% rebuild it now.
+ {Offset, State1 = #msstate { current_file = CurFile }} =
+ build_index(CleanShutdown, StartupFunState, State),
+
+ %% read is only needed so that we can seek
+ {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile),
+ [read | ?WRITE_MODE]),
+ {ok, Offset} = file_handle_cache:position(CurHdl, Offset),
+ ok = file_handle_cache:truncate(CurHdl),
+
+ {ok, maybe_compact(State1 #msstate { current_file_handle = CurHdl }),
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% gen_server2 priority for synchronous requests: registration and
+%% recovery queries jump the queue; reads rank above the default.
+prioritise_call(successfully_recovered_state,       _From, _Len, _State) -> 7;
+prioritise_call({new_client_state, _, _, _, _},     _From, _Len, _State) -> 7;
+prioritise_call({read, _MsgId},                     _From, _Len, _State) -> 2;
+prioritise_call(_Msg,                               _From, _Len, _State) -> 0.
+
+%% gen_server2 priority for casts: GC completions and handle ageing
+%% rank highest, then dying-client notifications, then everything else.
+prioritise_cast({combine_files, _Src, _Dst, _Reclaimed}, _Len, _State) -> 8;
+prioritise_cast({delete_file, _File, _Reclaimed},        _Len, _State) -> 8;
+prioritise_cast({set_maximum_since_use, _Age},           _Len, _State) -> 8;
+prioritise_cast({client_dying, _Ref},                    _Len, _State) -> 7;
+prioritise_cast(_Msg,                                    _Len, _State) -> 0.
+
+%% gen_server2 priority for info messages: sync requests first.
+prioritise_info(sync, _Len, _State) -> 8;
+prioritise_info(_Msg, _Len, _State) -> 0.
+
+%% Reply with whether the index and file summary survived shutdown.
+handle_call(successfully_recovered_state, _From, State) ->
+ reply(State #msstate.successfully_recovered, State);
+
+%% Register a client (server half of client_init/4): monitor its pid
+%% and hand back everything needed for direct, server-bypassing access.
+handle_call({new_client_state, CRef, CPid, MsgOnDiskFun, CloseFDsFun}, _From,
+ State = #msstate { dir = Dir,
+ index_state = IndexState,
+ index_module = IndexModule,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ clients = Clients,
+ gc_pid = GCPid }) ->
+ Clients1 = dict:store(CRef, {CPid, MsgOnDiskFun, CloseFDsFun}, Clients),
+ erlang:monitor(process, CPid),
+ reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts,
+ CurFileCacheEts, FlyingEts},
+ State #msstate { clients = Clients1 });
+
+%% Synchronous half of client_terminate/1.
+handle_call({client_terminate, CRef}, _From, State) ->
+ reply(ok, clear_client(CRef, State));
+
+%% Server-side read; may be deferred if the file is being GC'd.
+handle_call({read, MsgId}, From, State) ->
+ State1 = read_message(MsgId, From, State),
+ noreply(State1);
+
+%% Contains check; deferred during GC so it cannot overtake removes.
+handle_call({contains, MsgId}, From, State) ->
+ State1 = contains_message(MsgId, From, State),
+ noreply(State1).
+
+%% A client announced (via client_delete_and_terminate/1) that it is
+%% dying: remember it and write a tombstone (the empty message under
+%% the client's own ref) so later in-flight ops can be ignored.
+handle_cast({client_dying, CRef},
+            State = #msstate { dying_clients = DyingClients }) ->
+    DyingClients1 = sets:add_element(CRef, DyingClients),
+    noreply(write_message(CRef, <<>>,
+                          State #msstate { dying_clients = DyingClients1 }));
+
+%% Second half of client_delete_and_terminate/1: drop the client's
+%% registration and its tombstone; clear_client/2 also removes it from
+%% the dying set and the pending-confirm map.
+handle_cast({client_delete, CRef},
+            State = #msstate { clients = Clients }) ->
+    State1 = State #msstate { clients = dict:erase(CRef, Clients) },
+    noreply(remove_message(CRef, CRef, clear_client(CRef, State1)));
+
+%% Process a write that the client previously cached in
+%% cur_file_cache_ets (see client_write/4).
+handle_cast({write, CRef, MsgId, Flow},
+            State = #msstate { cur_file_cache_ets = CurFileCacheEts,
+                               clients = Clients }) ->
+    case Flow of
+        %% Return the credit taken by write_flow/3.
+        flow   -> {CPid, _, _} = dict:fetch(CRef, Clients),
+                  credit_flow:ack(CPid, ?CREDIT_DISC_BOUND);
+        noflow -> ok
+    end,
+    %% Decrement the cache entry's pending-write count; it must never
+    %% go negative.
+    true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}),
+    case update_flying(-1, MsgId, CRef, State) of
+        process ->
+            [{MsgId, Msg, _PWC}] = ets:lookup(CurFileCacheEts, MsgId),
+            noreply(write_message(MsgId, Msg, CRef, State));
+        ignore ->
+            %% A 'remove' has already been issued and eliminated the
+            %% 'write'; the write must nevertheless be confirmed.
+            State1 = blind_confirm(CRef, gb_sets:singleton(MsgId),
+                                   ignored, State),
+            %% If all writes get eliminated, cur_file_cache_ets could
+            %% grow unbounded. To prevent that we delete the cache
+            %% entry here, but only if the message isn't in the
+            %% current file. That way reads of the message can
+            %% continue to be done client side, from either the cache
+            %% or the non-current files. If the message *is* in the
+            %% current file then the cache entry will be removed by
+            %% the normal logic for that in write_message/4 and
+            %% maybe_roll_to_new_file/2.
+            %%
+            %% NB: index_lookup/2 returns a bare #msg_location{} or
+            %% 'not_found' (cf. client_read3/3), never a list; the
+            %% previous [#msg_location{..}] pattern here could never
+            %% match, so the current-file check was dead code and the
+            %% cache entry was always deleted.
+            case index_lookup(MsgId, State1) of
+                #msg_location { file = File }
+                  when File == State1 #msstate.current_file ->
+                    ok;
+                _ ->
+                    true = ets:match_delete(CurFileCacheEts, {MsgId, '_', 0})
+            end,
+            noreply(State1)
+    end;
+
+%% Process a batch of removes from one client, skipping any that were
+%% eliminated against in-flight writes, then confirm what was removed.
+handle_cast({remove, CRef, MsgIds}, State) ->
+    {RemovedMsgIds, State1} =
+        lists:foldl(
+          fun (MsgId, {Removed, State2}) ->
+                  case update_flying(+1, MsgId, CRef, State2) of
+                      process -> {[MsgId | Removed],
+                                  remove_message(MsgId, CRef, State2)};
+                      ignore  -> {Removed, State2}
+                  end
+          end, {[], State}, MsgIds),
+    noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(RemovedMsgIds),
+                                         ignored, State1)));
+
+%% GC completion: Source was merged into Destination; tidy up and
+%% unlock the destination so reads may resume.
+handle_cast({combine_files, Source, Destination, Reclaimed},
+            State = #msstate { sum_file_size    = SumFileSize,
+                               file_handles_ets = FileHandlesEts,
+                               file_summary_ets = FileSummaryEts,
+                               clients          = Clients }) ->
+    ok = cleanup_after_file_deletion(Source, State),
+    %% see comment in cleanup_after_file_deletion, and client_read3
+    true = mark_handle_to_close(Clients, FileHandlesEts, Destination, false),
+    true = ets:update_element(FileSummaryEts, Destination,
+                              {#file_summary.locked, false}),
+    State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
+    noreply(maybe_compact(run_pending([Source, Destination], State1)));
+
+%% GC completion: an empty file was deleted outright.
+handle_cast({delete_file, File, Reclaimed},
+            State = #msstate { sum_file_size = SumFileSize }) ->
+    ok = cleanup_after_file_deletion(File, State),
+    State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
+    noreply(maybe_compact(run_pending([File], State1)));
+
+%% Relay of the file_handle_cache callback (set_maximum_since_use/2).
+handle_cast({set_maximum_since_use, Age}, State) ->
+    ok = file_handle_cache:set_maximum_since_use(Age),
+    noreply(State).
+
+%% Both the explicit 'sync' timer message and a gen_server2 timeout
+%% trigger an internal sync of pending confirms.
+handle_info(Trigger, State) when Trigger =:= sync; Trigger =:= timeout ->
+    noreply(internal_sync(State));
+
+%% A monitored client process died: release its credit_flow credit.
+handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
+    credit_flow:peer_down(Pid),
+    noreply(State);
+
+%% trap_exit is set in init/1: stop with the linked process's reason.
+handle_info({'EXIT', _Pid, Reason}, State) ->
+    {stop, Reason, State}.
+
+%% gen_server2 terminate. Ordering is significant: stop the GC before
+%% touching anything it uses, sync and close the current file, persist
+%% the file summary and recovery terms (so the next start can recover
+%% cleanly), then delete the shared ets tables.
+terminate(_Reason, State = #msstate { index_state = IndexState,
+ index_module = IndexModule,
+ current_file_handle = CurHdl,
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ clients = Clients,
+ dir = Dir }) ->
+ %% stop the gc first, otherwise it could be working and we pull
+ %% out the ets tables from under it.
+ ok = rabbit_msg_store_gc:stop(GCPid),
+ State1 = case CurHdl of
+ undefined -> State;
+ _ -> State2 = internal_sync(State),
+ ok = file_handle_cache:close(CurHdl),
+ State2
+ end,
+ State3 = close_all_handles(State1),
+ ok = store_file_summary(FileSummaryEts, Dir),
+ [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts,
+ CurFileCacheEts, FlyingEts]],
+ IndexModule:terminate(IndexState),
+ ok = store_recovery_terms([{client_refs, dict:fetch_keys(Clients)},
+ {index_module, IndexModule}], Dir),
+ State3 #msstate { index_state = undefined,
+ current_file_handle = undefined }.
+
+%% No state transformation needed on hot code upgrade.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Pretty-print the mailbox for sys/debug output.
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%%----------------------------------------------------------------------------
+%% general helper functions
+%%----------------------------------------------------------------------------
+
+%% Wrap a gen_server2 noreply, deriving the timeout/hibernate hint
+%% from whether confirms are pending (see next_state/1).
+noreply(State) ->
+ {State1, Timeout} = next_state(State),
+ {noreply, State1, Timeout}.
+
+%% As noreply/1, but for a reply return.
+reply(Reply, State) ->
+ {State1, Timeout} = next_state(State),
+ {reply, Reply, State1, Timeout}.
+
+%% With no sync timer running: hibernate if no confirms are pending,
+%% otherwise start the timer and request an immediate timeout so
+%% pending confirms get flushed promptly.
+next_state(State = #msstate { sync_timer_ref = undefined,
+ cref_to_msg_ids = CTM }) ->
+ case dict:size(CTM) of
+ 0 -> {State, hibernate};
+ _ -> {start_sync_timer(State), 0}
+ end;
+%% With a sync timer running: stop it once nothing is pending.
+next_state(State = #msstate { cref_to_msg_ids = CTM }) ->
+ case dict:size(CTM) of
+ 0 -> {stop_sync_timer(State), hibernate};
+ _ -> {State, 0}
+ end.
+
+%% Arm the periodic sync timer (delivers a 'sync' message).
+start_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #msstate.sync_timer_ref,
+ ?SYNC_INTERVAL, sync).
+
+%% Cancel the periodic sync timer, if armed.
+stop_sync_timer(State) ->
+ rabbit_misc:stop_timer(State, #msstate.sync_timer_ref).
+
+%% Flush pending confirms: fsync the current file (only if any client
+%% actually has non-empty pending msg-id sets), then issue 'written'
+%% confirms per client. Also stops the sync timer.
+internal_sync(State = #msstate { current_file_handle = CurHdl,
+ cref_to_msg_ids = CTM }) ->
+ State1 = stop_sync_timer(State),
+ CGs = dict:fold(fun (CRef, MsgIds, NS) ->
+ case gb_sets:is_empty(MsgIds) of
+ true -> NS;
+ false -> [{CRef, MsgIds} | NS]
+ end
+ end, [], CTM),
+ ok = case CGs of
+ [] -> ok;
+ _ -> file_handle_cache:sync(CurHdl)
+ end,
+ lists:foldl(fun ({CRef, MsgIds}, StateN) ->
+ client_confirm(CRef, MsgIds, written, StateN)
+ end, State1, CGs).
+
+%% Reconcile an in-flight operation against flying_ets. Diff is +1 for
+%% writes and -1 for removes (server side adds the opposite sign the
+%% client recorded). Returns 'process' when the operation should be
+%% acted upon, 'ignore' when it has been cancelled out by a matching
+%% opposite operation. Any other counter value is a protocol violation.
+update_flying(Diff, MsgId, CRef, #msstate { flying_ets = FlyingEts }) ->
+ Key = {MsgId, CRef},
+ NDiff = -Diff,
+ case ets:lookup(FlyingEts, Key) of
+ [] -> ignore;
+ [{_, Diff}] -> ignore; %% [1]
+ [{_, NDiff}] -> ets:update_counter(FlyingEts, Key, {2, Diff}),
+ %% counter reached 0: retire the entry (delete_object
+ %% so a concurrent re-insert is not clobbered)
+ true = ets:delete_object(FlyingEts, {Key, 0}),
+ process;
+ [{_, 0}] -> true = ets:delete_object(FlyingEts, {Key, 0}),
+ ignore;
+ [{_, Err}] -> throw({bad_flying_ets_record, Diff, Err, Key})
+ end.
+%% [1] We can get here, for example, in the following scenario: There
+%% is a write followed by a remove in flight. The counter will be 0,
+%% so on processing the write the server attempts to delete the
+%% entry. If at that point the client injects another write it will
+%% either insert a new entry, containing +1, or increment the existing
+%% entry to +1, thus preventing its removal. Either way therefore when
+%% the server processes the read, the counter will be +1.
+
+%% Decide what to do for a write, given the masking verdict from
+%% should_mask_action/3 and the current index entry. Returns one of
+%% {write, State} (append a fresh copy), {ignore, File, State}
+%% (drop the write), or {confirm, File, State} (just bump ref count
+%% and confirm).
+write_action({true, not_found}, _MsgId, State) ->
+ {ignore, undefined, State};
+write_action({true, #msg_location { file = File }}, _MsgId, State) ->
+ {ignore, File, State};
+write_action({false, not_found}, _MsgId, State) ->
+ {write, State};
+%% ref_count 0: message exists on disk but is unreferenced; behaviour
+%% depends on whether its file is currently locked by GC.
+write_action({Mask, #msg_location { ref_count = 0, file = File,
+ total_size = TotalSize }},
+ MsgId, State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ case {Mask, ets:lookup(FileSummaryEts, File)} of
+ {false, [#file_summary { locked = true }]} ->
+ ok = index_delete(MsgId, State),
+ {write, State};
+ {false_if_increment, [#file_summary { locked = true }]} ->
+ %% The msg for MsgId is older than the client death
+ %% message, but as it is being GC'd currently we'll have
+ %% to write a new copy, which will then be younger, so
+ %% ignore this write.
+ {ignore, File, State};
+ {_Mask, [#file_summary {}]} ->
+ ok = index_update_ref_count(MsgId, 1, State),
+ State1 = adjust_valid_total_size(File, TotalSize, State),
+ {confirm, File, State1}
+ end;
+write_action({_Mask, #msg_location { ref_count = RefCount, file = File }},
+ MsgId, State) ->
+ ok = index_update_ref_count(MsgId, RefCount + 1, State),
+ %% We already know about it, just update counter. Only update
+ %% field otherwise bad interaction with concurrent GC
+ {confirm, File, State}.
+
+%% Server-side handling of a client write: consult write_action/3 and
+%% either append the message, drop it, or merely record/issue the
+%% confirm. The cur_file_cache entry (inserted by the client) is
+%% removed whenever the message does not live in the current file.
+write_message(MsgId, Msg, CRef,
+ State = #msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+ case write_action(should_mask_action(CRef, MsgId, State), MsgId, State) of
+ {write, State1} ->
+ write_message(MsgId, Msg,
+ record_pending_confirm(CRef, MsgId, State1));
+ {ignore, CurFile, State1 = #msstate { current_file = CurFile }} ->
+ State1;
+ {ignore, _File, State1} ->
+ true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
+ State1;
+ {confirm, CurFile, State1 = #msstate { current_file = CurFile }}->
+ record_pending_confirm(CRef, MsgId, State1);
+ {confirm, _File, State1} ->
+ true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
+ %% already durable in an older file: confirm immediately
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) ->
+ MsgOnDiskFun(gb_sets:singleton(MsgId), written),
+ CTM
+ end, CRef, State1)
+ end.
+
+%% Decrement a message's reference count, deferring to pending-GC
+%% bookkeeping if the final decrement targets a locked file, and
+%% possibly triggering file deletion when the last reference goes.
+remove_message(MsgId, CRef,
+ State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ case should_mask_action(CRef, MsgId, State) of
+ {true, _Location} ->
+ State;
+ {false_if_increment, #msg_location { ref_count = 0 }} ->
+ %% CRef has tried to both write and remove this msg whilst
+ %% it's being GC'd.
+ %%
+ %% ASSERTION: [#file_summary { locked = true }] =
+ %% ets:lookup(FileSummaryEts, File),
+ State;
+ {_Mask, #msg_location { ref_count = RefCount, file = File,
+ total_size = TotalSize }}
+ when RefCount > 0 ->
+ %% only update field, otherwise bad interaction with
+ %% concurrent GC
+ Dec = fun () -> index_update_ref_count(
+ MsgId, RefCount - 1, State) end,
+ case RefCount of
+ %% don't remove from cur_file_cache_ets here because
+ %% there may be further writes in the mailbox for the
+ %% same msg.
+ 1 -> case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { locked = true }] ->
+ add_to_pending_gc_completion(
+ {remove, MsgId, CRef}, File, State);
+ [#file_summary {}] ->
+ ok = Dec(),
+ delete_file_if_empty(
+ File, adjust_valid_total_size(
+ File, -TotalSize, State))
+ end;
+ _ -> ok = Dec(),
+ State
+ end
+ end.
+
+%% Physically append MsgId/Msg to the current file, register it in the
+%% index, bump the file-summary size counters, and roll to a new file
+%% if the size limit is reached. The current file must be unlocked and
+%% rightmost (asserted by the ets:lookup match below).
+write_message(MsgId, Msg,
+ State = #msstate { current_file_handle = CurHdl,
+ current_file = CurFile,
+ sum_valid_data = SumValid,
+ sum_file_size = SumFileSize,
+ file_summary_ets = FileSummaryEts }) ->
+ {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
+ {ok, TotalSize} = rabbit_msg_file:append(CurHdl, MsgId, Msg),
+ ok = index_insert(
+ #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile,
+ offset = CurOffset, total_size = TotalSize }, State),
+ [#file_summary { right = undefined, locked = false }] =
+ ets:lookup(FileSummaryEts, CurFile),
+ [_,_] = ets:update_counter(FileSummaryEts, CurFile,
+ [{#file_summary.valid_total_size, TotalSize},
+ {#file_summary.file_size, TotalSize}]),
+ maybe_roll_to_new_file(CurOffset + TotalSize,
+ State #msstate {
+ sum_valid_data = SumValid + TotalSize,
+ sum_file_size = SumFileSize + TotalSize }).
+
+%% Server-side read: reply not_found immediately for unknown or
+%% zero-ref messages, otherwise hand off to read_message1/3 (which
+%% replies itself, possibly after a deferred GC completion).
+read_message(MsgId, From, State) ->
+ case index_lookup_positive_ref_count(MsgId, State) of
+ not_found -> gen_server2:reply(From, not_found),
+ State;
+ MsgLocation -> read_message1(From, MsgLocation, State)
+ end.
+
+%% Serve a read: prefer the current-file write cache; fall back to
+%% disk, flushing the write buffer first if the message lies beyond
+%% the raw file offset. Reads from a GC-locked file are parked until
+%% the GC completes.
+read_message1(From, #msg_location { msg_id = MsgId, file = File,
+ offset = Offset } = MsgLoc,
+ State = #msstate { current_file = CurFile,
+ current_file_handle = CurHdl,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts }) ->
+ case File =:= CurFile of
+ true -> {Msg, State1} =
+ %% can return [] if msg in file existed on startup
+ case ets:lookup(CurFileCacheEts, MsgId) of
+ [] ->
+ {ok, RawOffSet} =
+ file_handle_cache:current_raw_offset(CurHdl),
+ ok = case Offset >= RawOffSet of
+ true -> file_handle_cache:flush(CurHdl);
+ false -> ok
+ end,
+ read_from_disk(MsgLoc, State);
+ [{MsgId, Msg1, _CacheRefCount}] ->
+ {Msg1, State}
+ end,
+ gen_server2:reply(From, {ok, Msg}),
+ State1;
+ false -> [#file_summary { locked = Locked }] =
+ ets:lookup(FileSummaryEts, File),
+ case Locked of
+ true -> add_to_pending_gc_completion({read, MsgId, From},
+ File, State);
+ false -> {Msg, State1} = read_from_disk(MsgLoc, State),
+ gen_server2:reply(From, {ok, Msg}),
+ State1
+ end
+ end.
+
+%% Read a message body from its file at the recorded offset. A read
+%% that yields the wrong msg id (or fails) deliberately crashes via
+%% badmatch, carrying a diagnostic tuple with the full context.
+read_from_disk(#msg_location { msg_id = MsgId, file = File, offset = Offset,
+ total_size = TotalSize }, State) ->
+ {Hdl, State1} = get_read_handle(File, State),
+ {ok, Offset} = file_handle_cache:position(Hdl, Offset),
+ {ok, {MsgId, Msg}} =
+ case rabbit_msg_file:read(Hdl, TotalSize) of
+ {ok, {MsgId, _}} = Obj ->
+ Obj;
+ Rest ->
+ %% intentional badmatch: abort with diagnostics
+ {error, {misread, [{old_state, State},
+ {file_num, File},
+ {offset, Offset},
+ {msg_id, MsgId},
+ {read, Rest},
+ {proc_dict, get()}
+ ]}}
+ end,
+ {Msg, State1}.
+
+%% Answer a contains query. If the message's file has pending GC
+%% completions, defer the answer until the GC finishes (otherwise we
+%% could reply true for a message about to be dealt with by GC).
+contains_message(MsgId, From,
+ State = #msstate { pending_gc_completion = Pending }) ->
+ case index_lookup_positive_ref_count(MsgId, State) of
+ not_found ->
+ gen_server2:reply(From, false),
+ State;
+ #msg_location { file = File } ->
+ case orddict:is_key(File, Pending) of
+ true -> add_to_pending_gc_completion(
+ {contains, MsgId, From}, File, State);
+ false -> gen_server2:reply(From, true),
+ State
+ end
+ end.
+
+%% Park an operation (read/contains/remove tuple) against File until
+%% the GC on that file completes (see run_pending/2).
+add_to_pending_gc_completion(
+ Op, File, State = #msstate { pending_gc_completion = Pending }) ->
+ State #msstate { pending_gc_completion =
+ rabbit_misc:orddict_cons(File, Op, Pending) }.
+
+%% Replay all operations parked on the given files, in their original
+%% arrival order (they were cons'd, hence the reverse).
+run_pending(Files, State) ->
+ lists:foldl(
+ fun (File, State1 = #msstate { pending_gc_completion = Pending }) ->
+ Pending1 = orddict:erase(File, Pending),
+ lists:foldl(
+ fun run_pending_action/2,
+ State1 #msstate { pending_gc_completion = Pending1 },
+ lists:reverse(orddict:fetch(File, Pending)))
+ end, State, Files).
+
+%% Dispatch one parked operation to its normal handler.
+run_pending_action({read, MsgId, From}, State) ->
+ read_message(MsgId, From, State);
+run_pending_action({contains, MsgId, From}, State) ->
+ contains_message(MsgId, From, State);
+run_pending_action({remove, MsgId, CRef}, State) ->
+ remove_message(MsgId, CRef, State).
+
+%% Apply UpdateOp to the counter under Key in Tab and feed the result
+%% to SuccessFun. If the key (or table) is absent, ets:update_counter
+%% raises error:badarg and FailThunk is evaluated instead; other
+%% exceptions propagate. Note SuccessFun runs inside the try, so a
+%% badarg it raises is also routed to FailThunk.
+safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) ->
+    try
+        Result = ets:update_counter(Tab, Key, UpdateOp),
+        SuccessFun(Result)
+    catch
+        error:badarg -> FailThunk()
+    end.
+
+%% Insert MsgId into the cache with ref count 1, or atomically bump
+%% its ref count if already present. The retry via FailThunk covers
+%% the race where the entry vanishes between insert_new and
+%% update_counter.
+update_msg_cache(CacheEts, MsgId, Msg) ->
+ case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of
+ true -> ok;
+ false -> safe_ets_update_counter(
+ CacheEts, MsgId, {3, +1}, fun (_) -> ok end,
+ fun () -> update_msg_cache(CacheEts, MsgId, Msg) end)
+ end.
+
+%% Add Delta (may be negative) to both a file's valid_total_size in
+%% the summary table and the store-wide sum_valid_data.
+adjust_valid_total_size(File, Delta, State = #msstate {
+ sum_valid_data = SumValid,
+ file_summary_ets = FileSummaryEts }) ->
+ [_] = ets:update_counter(FileSummaryEts, File,
+ [{#file_summary.valid_total_size, Delta}]),
+ State #msstate { sum_valid_data = SumValid + Delta }.
+
+%% Store Key -> Val in Dict, asserting (crash via badmatch) that the
+%% key is not already present.
+orddict_store(Key, Val, Dict) ->
+    error = orddict:find(Key, Dict),
+    orddict:store(Key, Val, Dict).
+
+%% Run Fun(MsgOnDiskFun, CTM) to transform the pending-confirm map for
+%% CRef -- but only if the client registered a MsgOnDiskFun; clients
+%% without one need no confirms at all.
+update_pending_confirms(Fun, CRef,
+ State = #msstate { clients = Clients,
+ cref_to_msg_ids = CTM }) ->
+ case dict:fetch(CRef, Clients) of
+ {_CPid, undefined, _CloseFDsFun} -> State;
+ {_CPid, MsgOnDiskFun, _CloseFDsFun} -> CTM1 = Fun(MsgOnDiskFun, CTM),
+ State #msstate {
+ cref_to_msg_ids = CTM1 }
+ end.
+
+%% Add MsgId to CRef's set of messages awaiting a 'written' confirm.
+record_pending_confirm(CRef, MsgId, State) ->
+ update_pending_confirms(
+ fun (_MsgOnDiskFun, CTM) ->
+ dict:update(CRef, fun (MsgIds) -> gb_sets:add(MsgId, MsgIds) end,
+ gb_sets:singleton(MsgId), CTM)
+ end, CRef, State).
+
+%% Confirm the intersection of MsgIds with CRef's pending set (calling
+%% the client's MsgOnDiskFun with ActionTaken), then shrink or erase
+%% the pending set accordingly.
+client_confirm(CRef, MsgIds, ActionTaken, State) ->
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) ->
+ case dict:find(CRef, CTM) of
+ {ok, Gs} -> MsgOnDiskFun(gb_sets:intersection(Gs, MsgIds),
+ ActionTaken),
+ MsgIds1 = rabbit_misc:gb_sets_difference(
+ Gs, MsgIds),
+ case gb_sets:is_empty(MsgIds1) of
+ true -> dict:erase(CRef, CTM);
+ false -> dict:store(CRef, MsgIds1, CTM)
+ end;
+ error -> CTM
+ end
+ end, CRef, State).
+
+%% Invoke the client's MsgOnDiskFun for MsgIds without consulting or
+%% modifying the pending-confirm map.
+blind_confirm(CRef, MsgIds, ActionTaken, State) ->
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) -> MsgOnDiskFun(MsgIds, ActionTaken), CTM end,
+ CRef, State).
+
+%% Detect whether the MsgId is older or younger than the client's death
+%% msg (if there is one). If the msg is older than the client death
+%% msg, and it has a 0 ref_count we must only alter the ref_count, not
+%% rewrite the msg - rewriting it would make it younger than the death
+%% msg and thus should be ignored. Note that this (correctly) returns
+%% false when testing to remove the death msg itself.
+%%
+%% Returns {Mask, Location} where Mask is true | false |
+%% false_if_increment. Age comparison uses {File, Offset} ordering,
+%% since file numbers and offsets grow monotonically.
+should_mask_action(CRef, MsgId,
+ State = #msstate { dying_clients = DyingClients }) ->
+ case {sets:is_element(CRef, DyingClients), index_lookup(MsgId, State)} of
+ {false, Location} ->
+ {false, Location};
+ {true, not_found} ->
+ {true, not_found};
+ {true, #msg_location { file = File, offset = Offset,
+ ref_count = RefCount } = Location} ->
+ #msg_location { file = DeathFile, offset = DeathOffset } =
+ index_lookup(CRef, State),
+ {case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of
+ {true, _} -> true;
+ {false, 0} -> false_if_increment;
+ {false, _} -> false
+ end, Location}
+ end.
+
+%%----------------------------------------------------------------------------
+%% file helper functions
+%%----------------------------------------------------------------------------
+
+%% Open a store file via file_handle_cache in binary mode with the
+%% given extra Mode flags and a write buffer.
+open_file(Dir, FileName, Mode) ->
+ file_handle_cache:open(form_filename(Dir, FileName), ?BINARY_MODE ++ Mode,
+ [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]).
+
+%% Close (and forget) the cached read handle for Key, if any. The
+%% first two clauses unwrap client/server state records; the third
+%% works on the bare handle dict.
+close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) ->
+ CState #client_msstate { file_handle_cache = close_handle(Key, FHC) };
+
+close_handle(Key, State = #msstate { file_handle_cache = FHC }) ->
+ State #msstate { file_handle_cache = close_handle(Key, FHC) };
+
+close_handle(Key, FHC) ->
+ case dict:find(Key, FHC) of
+ {ok, Hdl} -> ok = file_handle_cache:close(Hdl),
+ dict:erase(Key, FHC);
+ error -> FHC
+ end.
+
+%% Record that client Ref has File open. Always returns true.
+mark_handle_open(FileHandlesEts, File, Ref) ->
+ %% This is fine to fail (already exists). Note it could fail with
+ %% the value being close, and not have it updated to open.
+ ets:insert_new(FileHandlesEts, {{Ref, File}, open}),
+ true.
+
+%% See comment in client_read3 - only call this when the file is locked
+%%
+%% Flip every open handle entry for File to 'close'; when Invoke is
+%% true, additionally call each affected client's CloseFDsFun (if the
+%% client registered one). Always returns true.
+mark_handle_to_close(ClientRefs, FileHandlesEts, File, Invoke) ->
+ [ begin
+ case (ets:update_element(FileHandlesEts, Key, {2, close})
+ andalso Invoke) of
+ true -> case dict:fetch(Ref, ClientRefs) of
+ {_CPid, _MsgOnDiskFun, undefined} ->
+ ok;
+ {_CPid, _MsgOnDiskFun, CloseFDsFun} ->
+ ok = CloseFDsFun()
+ end;
+ false -> ok
+ end
+ end || {{Ref, _File} = Key, open} <-
+ ets:match_object(FileHandlesEts, {{'_', File}, open}) ],
+ true.
+
+%% Build a thunk that attempts the deletion below when evaluated.
+safe_file_delete_fun(File, Dir, FileHandlesEts) ->
+ fun () -> safe_file_delete(File, Dir, FileHandlesEts) end.
+
+%% Delete File only once no client has any handle entry for it.
+%% Returns true on deletion, false if some handle row still exists.
+safe_file_delete(File, Dir, FileHandlesEts) ->
+ %% do not match on any value - it's the absence of the row that
+ %% indicates the client has really closed the file.
+ case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
+ {[_|_], _Cont} -> false;
+ _ -> ok = file:delete(
+ form_filename(Dir, filenum_to_name(File))),
+ true
+ end.
+
+%% Client side: close every handle the server has marked 'close' for
+%% this client, removing the corresponding table rows.
+close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts,
+ client_ref = Ref } =
+ CState) ->
+ Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}),
+ {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) ->
+ true = ets:delete(FileHandlesEts, Key),
+ close_handle(File, CStateM)
+ end, CState, Objs)}.
+
+%% Close every cached file handle. The client variant also deletes
+%% this client's rows from the shared handle table; the server variant
+%% only closes the handles.
+close_all_handles(CState = #client_msstate { file_handles_ets = FileHandlesEts,
+ file_handle_cache = FHC,
+ client_ref = Ref }) ->
+ ok = dict:fold(fun (File, Hdl, ok) ->
+ true = ets:delete(FileHandlesEts, {Ref, File}),
+ file_handle_cache:close(Hdl)
+ end, ok, FHC),
+ CState #client_msstate { file_handle_cache = dict:new() };
+
+close_all_handles(State = #msstate { file_handle_cache = FHC }) ->
+ ok = dict:fold(fun (_Key, Hdl, ok) -> file_handle_cache:close(Hdl) end,
+ ok, FHC),
+ State #msstate { file_handle_cache = dict:new() }.
+
+%% Fetch (or open and cache) a read handle for FileNum. The /2
+%% clauses unwrap client/server state; /3 operates on the handle dict.
+get_read_handle(FileNum, CState = #client_msstate { file_handle_cache = FHC,
+ dir = Dir }) ->
+ {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
+ {Hdl, CState #client_msstate { file_handle_cache = FHC2 }};
+
+get_read_handle(FileNum, State = #msstate { file_handle_cache = FHC,
+ dir = Dir }) ->
+ {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
+ {Hdl, State #msstate { file_handle_cache = FHC2 }}.
+
+get_read_handle(FileNum, FHC, Dir) ->
+ case dict:find(FileNum, FHC) of
+ {ok, Hdl} -> {Hdl, FHC};
+ error -> {ok, Hdl} = open_file(Dir, filenum_to_name(FileNum),
+ ?READ_MODE),
+ {Hdl, dict:store(FileNum, Hdl, FHC)}
+ end.
+
+%% Grow the file to FileSizeLimit (position + truncate), then seek
+%% back to FinalPos ready for writing.
+preallocate(Hdl, FileSizeLimit, FinalPos) ->
+ {ok, FileSizeLimit} = file_handle_cache:position(Hdl, FileSizeLimit),
+ ok = file_handle_cache:truncate(Hdl),
+ {ok, FinalPos} = file_handle_cache:position(Hdl, FinalPos),
+ ok.
+
+%% Cut the file back to Lowpoint, then preallocate up to Highpoint,
+%% leaving the position at Lowpoint.
+truncate_and_extend_file(Hdl, Lowpoint, Highpoint) ->
+ {ok, Lowpoint} = file_handle_cache:position(Hdl, Lowpoint),
+ ok = file_handle_cache:truncate(Hdl),
+ ok = preallocate(Hdl, Highpoint, Lowpoint).
+
+%% Join a store directory and a file name into a path.
+form_filename(Dir, Name) ->
+    filename:join(Dir, Name).
+
+%% File number -> on-disk file name (number plus store extension).
+filenum_to_name(File) -> integer_to_list(File) ++ ?FILE_EXTENSION.
+
+%% On-disk file name (e.g. "17.rdq") -> integer file number.
+filename_to_num(FileName) ->
+    Root = filename:rootname(FileName),
+    list_to_integer(Root).
+
+%% All file names in Dir with extension Ext, sorted numerically by
+%% file number (not lexicographically).
+list_sorted_filenames(Dir, Ext) ->
+ lists:sort(fun (A, B) -> filename_to_num(A) < filename_to_num(B) end,
+ filelib:wildcard("*" ++ Ext, Dir)).
+
+%%----------------------------------------------------------------------------
+%% index
+%%----------------------------------------------------------------------------
+
+%% Like index_lookup/2 but treats unreferenced (ref_count 0) entries
+%% as absent.
+index_lookup_positive_ref_count(Key, State) ->
+ case index_lookup(Key, State) of
+ not_found -> not_found;
+ #msg_location { ref_count = 0 } -> not_found;
+ #msg_location {} = MsgLocation -> MsgLocation
+ end.
+
+%% Set (not increment) the ref_count field of Key's index entry.
+index_update_ref_count(Key, RefCount, State) ->
+ index_update_fields(Key, {#msg_location.ref_count, RefCount}, State).
+
+%% Thin wrappers delegating to the configured index module; the first
+%% clause of index_lookup also accepts the client-side state record.
+index_lookup(Key, #client_msstate { index_module = Index,
+ index_state = State }) ->
+ Index:lookup(Key, State);
+
+index_lookup(Key, #msstate { index_module = Index, index_state = State }) ->
+ Index:lookup(Key, State).
+
+index_insert(Obj, #msstate { index_module = Index, index_state = State }) ->
+ Index:insert(Obj, State).
+
+index_update(Obj, #msstate { index_module = Index, index_state = State }) ->
+ Index:update(Obj, State).
+
+index_update_fields(Key, Updates, #msstate { index_module = Index,
+ index_state = State }) ->
+ Index:update_fields(Key, Updates, State).
+
+index_delete(Key, #msstate { index_module = Index, index_state = State }) ->
+ Index:delete(Key, State).
+
+index_delete_by_file(File, #msstate { index_module = Index,
+ index_state = State }) ->
+ Index:delete_by_file(File, State).
+
+%%----------------------------------------------------------------------------
+%% shutdown and recovery
+%%----------------------------------------------------------------------------
+
+%% Attempt a fast (clean-shutdown) index recovery. Returns
+%% {Recovered :: boolean(), IndexState, ClientRefs}. Any mismatch --
+%% missing/unreadable recovery terms, differing client refs, different
+%% index module, or a recovery error -- falls back to a fresh index
+%% (with a logged warning), forcing a full rebuild.
+recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Server) ->
+ {false, IndexModule:new(Dir), []};
+recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Server) ->
+ rabbit_log:warning("~w: rebuilding indices from scratch~n", [Server]),
+ {false, IndexModule:new(Dir), []};
+recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Server) ->
+ Fresh = fun (ErrorMsg, ErrorArgs) ->
+ rabbit_log:warning("~w: " ++ ErrorMsg ++ "~n"
+ "rebuilding indices from scratch~n",
+ [Server | ErrorArgs]),
+ {false, IndexModule:new(Dir), []}
+ end,
+ case read_recovery_terms(Dir) of
+ {false, Error} ->
+ Fresh("failed to read recovery terms: ~p", [Error]);
+ {true, Terms} ->
+ RecClientRefs = proplists:get_value(client_refs, Terms, []),
+ RecIndexModule = proplists:get_value(index_module, Terms),
+ case (lists:sort(ClientRefs) =:= lists:sort(RecClientRefs)
+ andalso IndexModule =:= RecIndexModule) of
+ true -> case IndexModule:recover(Dir) of
+ {ok, IndexState1} ->
+ {true, IndexState1, ClientRefs};
+ {error, Error} ->
+ Fresh("failed to recover index: ~p", [Error])
+ end;
+ false -> Fresh("recovery terms differ from present", [])
+ end
+ end.
+
+%% Persist the clean-shutdown marker file containing recovery terms.
+store_recovery_terms(Terms, Dir) ->
+ rabbit_file:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms).
+
+%% Read and then delete the clean-shutdown marker. Deleting it means a
+%% subsequent crash cannot be mistaken for a clean shutdown. Returns
+%% {true, Terms} or {false, Error}.
+read_recovery_terms(Dir) ->
+ Path = filename:join(Dir, ?CLEAN_FILENAME),
+ case rabbit_file:read_term_file(Path) of
+ {ok, Terms} -> case file:delete(Path) of
+ ok -> {true, Terms};
+ {error, Error} -> {false, Error}
+ end;
+ {error, Error} -> {false, Error}
+ end.
+
+%% Dump the file summary ETS table to disk for fast recovery.
+store_file_summary(Tid, Dir) ->
+ ok = ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ [{extended_info, [object_count]}]),
+
+%% Load the file summary table from disk (deleting the dump on
+%% success) or create a fresh empty one. Returns {Recovered, Tid}.
+recover_file_summary(false, _Dir) ->
+ %% TODO: the only reason for this to be an *ordered*_set is so
+ %% that a) maybe_compact can start a traversal from the eldest
+ %% file, and b) build_index in fast recovery mode can easily
+ %% identify the current file. It's awkward to have both that
+ %% odering and the left/right pointers in the entries - replacing
+ %% the former with some additional bit of state would be easy, but
+ %% ditching the latter would be neater.
+ {false, ets:new(rabbit_msg_store_file_summary,
+ [ordered_set, public, {keypos, #file_summary.file}])};
+recover_file_summary(true, Dir) ->
+ Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ case ets:file2tab(Path) of
+ {ok, Tid} -> ok = file:delete(Path),
+ {true, Tid};
+ {error, _Error} -> recover_file_summary(false, Dir)
+ end.
+
+%% Seed the index with reference counts by draining the generator
+%% Gen/Seed (which yields {MsgId, Delta, NextSeed} or 'finished').
+%% Entries are created with file = undefined (location filled in later
+%% by build_index_worker) and deleted if their count reaches 0.
+count_msg_refs(Gen, Seed, State) ->
+ case Gen(Seed) of
+ finished ->
+ ok;
+ {_MsgId, 0, Next} ->
+ count_msg_refs(Gen, Next, State);
+ {MsgId, Delta, Next} ->
+ ok = case index_lookup(MsgId, State) of
+ not_found ->
+ index_insert(#msg_location { msg_id = MsgId,
+ file = undefined,
+ ref_count = Delta },
+ State);
+ #msg_location { ref_count = RefCount } = StoreEntry ->
+ NewRefCount = RefCount + Delta,
+ case NewRefCount of
+ 0 -> index_delete(MsgId, State);
+ _ -> index_update(StoreEntry #msg_location {
+ ref_count = NewRefCount },
+ State)
+ end
+ end,
+ count_msg_refs(Gen, Next, State)
+ end.
+
+%% Repair any .tmp files left by a compaction that crashed mid-way.
+%% Each tmp file must have a corresponding main file (asserted).
+%% Returns true iff there were no tmp files (i.e. nothing to repair).
+recover_crashed_compactions(Dir) ->
+ FileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION),
+ TmpFileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION_TMP),
+ lists:foreach(
+ fun (TmpFileName) ->
+ NonTmpRelatedFileName =
+ filename:rootname(TmpFileName) ++ ?FILE_EXTENSION,
+ true = lists:member(NonTmpRelatedFileName, FileNames),
+ ok = recover_crashed_compaction(
+ Dir, TmpFileName, NonTmpRelatedFileName)
+ end, TmpFileNames),
+ TmpFileNames == [].
+
+%% Repair one crashed compaction: append the whole tmp file onto the
+%% end of its main file, then delete the tmp file.
+recover_crashed_compaction(Dir, TmpFileName, NonTmpRelatedFileName) ->
+ %% Because a msg can legitimately appear multiple times in the
+ %% same file, identifying the contents of the tmp file and where
+ %% they came from is non-trivial. If we are recovering a crashed
+ %% compaction then we will be rebuilding the index, which can cope
+ %% with duplicates appearing. Thus the simplest and safest thing
+ %% to do is to append the contents of the tmp file to its main
+ %% file.
+ {ok, TmpHdl} = open_file(Dir, TmpFileName, ?READ_MODE),
+ {ok, MainHdl} = open_file(Dir, NonTmpRelatedFileName,
+ ?READ_MODE ++ ?WRITE_MODE),
+ {ok, _End} = file_handle_cache:position(MainHdl, eof),
+ Size = filelib:file_size(form_filename(Dir, TmpFileName)),
+ {ok, Size} = file_handle_cache:copy(TmpHdl, MainHdl, Size),
+ ok = file_handle_cache:close(MainHdl),
+ ok = file_handle_cache:delete(TmpHdl),
+ ok.
+
+%% Scan a store file collecting {MsgId, TotalSize, Offset} tuples for
+%% every valid message (via scan_fun/2). A missing file scans as
+%% empty; other open errors are returned as an error tuple.
+scan_file_for_valid_messages(Dir, FileName) ->
+ case open_file(Dir, FileName, ?READ_MODE) of
+ {ok, Hdl} -> Valid = rabbit_msg_file:scan(
+ Hdl, filelib:file_size(
+ form_filename(Dir, FileName)),
+ fun scan_fun/2, []),
+ ok = file_handle_cache:close(Hdl),
+ Valid;
+ {error, enoent} -> {ok, [], 0};
+ {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}}
+ end.
+
+%% Fold function for rabbit_msg_file:scan/4: keep only the location
+%% triple (id, size, offset), discarding the message body; entries are
+%% prepended, so the accumulator is in reverse scan order.
+scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) ->
+    Entry = {MsgId, TotalSize, Offset},
+    [Entry | Acc].
+
+%% Takes the list in *ascending* order (i.e. eldest message
+%% first). This is the opposite of what scan_file_for_valid_messages
+%% produces. The list of msgs that is produced is youngest first.
+%%
+%% Drops the leading run of messages that sit back-to-back from offset
+%% 0, returning {EndOfThatRun, RemainingMsgsAfterFirstGap}.
+drop_contiguous_block_prefix(L) -> drop_contiguous_block_prefix(L, 0).
+
+drop_contiguous_block_prefix([], ExpectedOffset) ->
+ {ExpectedOffset, []};
+drop_contiguous_block_prefix([#msg_location { offset = ExpectedOffset,
+ total_size = TotalSize } | Tail],
+ ExpectedOffset) ->
+ ExpectedOffset1 = ExpectedOffset + TotalSize,
+ drop_contiguous_block_prefix(Tail, ExpectedOffset1);
+drop_contiguous_block_prefix(MsgsAfterGap, ExpectedOffset) ->
+ {ExpectedOffset, MsgsAfterGap}.
+
+%% Fast path (clean recovery): the file summary was recovered, so just
+%% total up sizes and find the current (last) file by folding over it.
+build_index(true, _StartupFunState,
+ State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ ets:foldl(
+ fun (#file_summary { valid_total_size = ValidTotalSize,
+ file_size = FileSize,
+ file = File },
+ {_Offset, State1 = #msstate { sum_valid_data = SumValid,
+ sum_file_size = SumFileSize }}) ->
+ {FileSize, State1 #msstate {
+ sum_valid_data = SumValid + ValidTotalSize,
+ sum_file_size = SumFileSize + FileSize,
+ current_file = File }}
+ end, {0, State}, FileSummaryEts);
+%% Slow path: seed ref counts from the queues, then scan every store
+%% file in parallel (via a gatherer + worker pool) to rebuild the
+%% summary, finally dropping any files that turn out empty.
+build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
+ State = #msstate { dir = Dir }) ->
+ ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
+ {ok, Pid} = gatherer:start_link(),
+ case [filename_to_num(FileName) ||
+ FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
+ [] -> build_index(Pid, undefined, [State #msstate.current_file],
+ State);
+ Files -> {Offset, State1} = build_index(Pid, undefined, Files, State),
+ {Offset, lists:foldl(fun delete_file_if_empty/2,
+ State1, Files)}
+ end.
+
+%% With no files left to submit, drain the gatherer: each produced
+%% file_summary is inserted, sizes are accumulated, and once empty we
+%% clean up placeholder index entries (file = undefined) and return
+%% the write offset of the last (Left) file.
+build_index(Gatherer, Left, [],
+ State = #msstate { file_summary_ets = FileSummaryEts,
+ sum_valid_data = SumValid,
+ sum_file_size = SumFileSize }) ->
+ case gatherer:out(Gatherer) of
+ empty ->
+ unlink(Gatherer),
+ ok = gatherer:stop(Gatherer),
+ ok = index_delete_by_file(undefined, State),
+ Offset = case ets:lookup(FileSummaryEts, Left) of
+ [] -> 0;
+ [#file_summary { file_size = FileSize }] -> FileSize
+ end,
+ {Offset, State #msstate { current_file = Left }};
+ {value, #file_summary { valid_total_size = ValidTotalSize,
+ file_size = FileSize } = FileSummary} ->
+ true = ets:insert_new(FileSummaryEts, FileSummary),
+ build_index(Gatherer, Left, [],
+ State #msstate {
+ sum_valid_data = SumValid + ValidTotalSize,
+ sum_file_size = SumFileSize + FileSize })
+ end;
+%% Submit a scan of File to the worker pool, tracking it in the
+%% gatherer, and recurse with File as the new left neighbour.
+build_index(Gatherer, Left, [File|Files], State) ->
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () -> build_index_worker(Gatherer, State,
+ Left, File, Files)
+ end),
+ build_index(Gatherer, File, Files, State).
+
+%% Worker-pool job: scan one file, fill in location info for messages
+%% the ref-count pass marked (file = undefined), and emit a
+%% file_summary for the gatherer. For the last file, the size is
+%% clipped to the end of the last valid message (later truncation
+%% removes trailing rubbish).
+build_index_worker(Gatherer, State = #msstate { dir = Dir },
+ Left, File, Files) ->
+ {ok, Messages, FileSize} =
+ scan_file_for_valid_messages(Dir, filenum_to_name(File)),
+ {ValidMessages, ValidTotalSize} =
+ lists:foldl(
+ fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) ->
+ case index_lookup(MsgId, State) of
+ #msg_location { file = undefined } = StoreEntry ->
+ ok = index_update(StoreEntry #msg_location {
+ file = File, offset = Offset,
+ total_size = TotalSize },
+ State),
+ {[Obj | VMAcc], VTSAcc + TotalSize};
+ _ ->
+ {VMAcc, VTSAcc}
+ end
+ end, {[], 0}, Messages),
+ {Right, FileSize1} =
+ case Files of
+ %% if it's the last file, we'll truncate to remove any
+ %% rubbish above the last valid message. This affects the
+ %% file size.
+ [] -> {undefined, case ValidMessages of
+ [] -> 0;
+ _ -> {_MsgId, TotalSize, Offset} =
+ lists:last(ValidMessages),
+ Offset + TotalSize
+ end};
+ [F|_] -> {F, FileSize}
+ end,
+ ok = gatherer:in(Gatherer, #file_summary {
+ file = File,
+ valid_total_size = ValidTotalSize,
+ left = Left,
+ right = Right,
+ file_size = FileSize1,
+ locked = false,
+ readers = 0 }),
+ ok = gatherer:finish(Gatherer).
+
+%%----------------------------------------------------------------------------
+%% garbage collection / compaction / aggregation -- internal
+%%----------------------------------------------------------------------------
+
+%% When the current file reaches the size limit: sync and close it,
+%% open the next file, link it into the file-summary double-linked
+%% list, purge fully-confirmed entries (ref count 0) from the current
+%% file cache, and consider compaction. Otherwise a no-op.
+maybe_roll_to_new_file(
+ Offset,
+ State = #msstate { dir = Dir,
+ current_file_handle = CurHdl,
+ current_file = CurFile,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ file_size_limit = FileSizeLimit })
+ when Offset >= FileSizeLimit ->
+ State1 = internal_sync(State),
+ ok = file_handle_cache:close(CurHdl),
+ NextFile = CurFile + 1,
+ {ok, NextHdl} = open_file(Dir, filenum_to_name(NextFile), ?WRITE_MODE),
+ true = ets:insert_new(FileSummaryEts, #file_summary {
+ file = NextFile,
+ valid_total_size = 0,
+ left = CurFile,
+ right = undefined,
+ file_size = 0,
+ locked = false,
+ readers = 0 }),
+ true = ets:update_element(FileSummaryEts, CurFile,
+ {#file_summary.right, NextFile}),
+ true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}),
+ maybe_compact(State1 #msstate { current_file_handle = NextHdl,
+ current_file = NextFile });
+maybe_roll_to_new_file(_, State) ->
+ State.
+
+%% Kick off a GC combine when enough garbage has accumulated (total
+%% size beyond twice the file limit and garbage fraction above the
+%% threshold), there is capacity for more simultaneous GCs, and a
+%% suitable adjacent pair of files can be found. Both files are
+%% locked and given empty pending-op queues before the GC is asked to
+%% combine them.
+maybe_compact(State = #msstate { sum_valid_data = SumValid,
+ sum_file_size = SumFileSize,
+ gc_pid = GCPid,
+ pending_gc_completion = Pending,
+ file_summary_ets = FileSummaryEts,
+ file_size_limit = FileSizeLimit })
+ when SumFileSize > 2 * FileSizeLimit andalso
+ (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION ->
+ %% TODO: the algorithm here is sub-optimal - it may result in a
+ %% complete traversal of FileSummaryEts.
+ First = ets:first(FileSummaryEts),
+ case First =:= '$end_of_table' orelse
+ orddict:size(Pending) >= ?MAXIMUM_SIMULTANEOUS_GC_FILES of
+ true ->
+ State;
+ false ->
+ case find_files_to_combine(FileSummaryEts, FileSizeLimit,
+ ets:lookup(FileSummaryEts, First)) of
+ not_found ->
+ State;
+ {Src, Dst} ->
+ Pending1 = orddict_store(Dst, [],
+ orddict_store(Src, [], Pending)),
+ State1 = close_handle(Src, close_handle(Dst, State)),
+ true = ets:update_element(FileSummaryEts, Src,
+ {#file_summary.locked, true}),
+ true = ets:update_element(FileSummaryEts, Dst,
+ {#file_summary.locked, true}),
+ ok = rabbit_msg_store_gc:combine(GCPid, Src, Dst),
+ State1 #msstate { pending_gc_completion = Pending1 }
+ end
+ end;
+maybe_compact(State) ->
+ State.
+
+%% Walk the file chain (via right pointers) looking for an adjacent
+%% {Src, Dst} pair whose valid data fits in one file, where both hold
+%% some valid data, neither is locked, and Src is not the rightmost
+%% (current) file. Returns {Src, Dst} or not_found.
+find_files_to_combine(FileSummaryEts, FileSizeLimit,
+ [#file_summary { file = Dst,
+ valid_total_size = DstValid,
+ right = Src,
+ locked = DstLocked }]) ->
+ case Src of
+ undefined ->
+ not_found;
+ _ ->
+ [#file_summary { file = Src,
+ valid_total_size = SrcValid,
+ left = Dst,
+ right = SrcRight,
+ locked = SrcLocked }] = Next =
+ ets:lookup(FileSummaryEts, Src),
+ case SrcRight of
+ undefined -> not_found;
+ _ -> case (DstValid + SrcValid =< FileSizeLimit) andalso
+ (DstValid > 0) andalso (SrcValid > 0) andalso
+ not (DstLocked orelse SrcLocked) of
+ true -> {Src, Dst};
+ false -> find_files_to_combine(
+ FileSummaryEts, FileSizeLimit, Next)
+ end
+ end
+ end.
+
+%% The current file is never deleted, even if empty.
+delete_file_if_empty(File, State = #msstate { current_file = File }) ->
+ State;
+%% If File holds no valid data, lock it and hand it to the GC for
+%% deletion, parking subsequent operations on it until the GC reports
+%% back. The file must not already be locked (asserted).
+delete_file_if_empty(File, State = #msstate {
+ gc_pid = GCPid,
+ file_summary_ets = FileSummaryEts,
+ pending_gc_completion = Pending }) ->
+ [#file_summary { valid_total_size = ValidData,
+ locked = false }] =
+ ets:lookup(FileSummaryEts, File),
+ case ValidData of
+ %% don't delete the file_summary_ets entry for File here
+ %% because we could have readers which need to be able to
+ %% decrement the readers count.
+ 0 -> true = ets:update_element(FileSummaryEts, File,
+ {#file_summary.locked, true}),
+ ok = rabbit_msg_store_gc:delete(GCPid, File),
+ Pending1 = orddict_store(File, [], Pending),
+ close_handle(File,
+ State #msstate { pending_gc_completion = Pending1 });
+ _ -> State
+ end.
+
+%% After the GC has deleted File: force clients to close their handles
+%% to it, splice it out of the file-summary double-linked list, and
+%% drop its summary row. The file must be locked with no readers
+%% (asserted).
+cleanup_after_file_deletion(File,
+ #msstate { file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ clients = Clients }) ->
+ %% Ensure that any clients that have open fhs to the file close
+ %% them before using them again. This has to be done here (given
+ %% it's done in the msg_store, and not the gc), and not when
+ %% starting up the GC, because if done when starting up the GC,
+ %% the client could find the close, and close and reopen the fh,
+ %% whilst the GC is waiting for readers to disappear, before it's
+ %% actually done the GC.
+ true = mark_handle_to_close(Clients, FileHandlesEts, File, true),
+ [#file_summary { left = Left,
+ right = Right,
+ locked = true,
+ readers = 0 }] = ets:lookup(FileSummaryEts, File),
+ %% We'll never delete the current file, so right is never undefined
+ true = Right =/= undefined, %% ASSERTION
+ true = ets:update_element(FileSummaryEts, Right,
+ {#file_summary.left, Left}),
+ %% ensure the double linked list is maintained
+ true = case Left of
+ undefined -> true; %% File is the eldest file (left-most)
+ _ -> ets:update_element(FileSummaryEts, Left,
+ {#file_summary.right, Right})
+ end,
+ true = ets:delete(FileSummaryEts, File),
+ ok.
+
+%%----------------------------------------------------------------------------
+%% garbage collection / compaction / aggregation -- external
+%%----------------------------------------------------------------------------
+
+%% True iff some client still has the file open for reading. The
+%% assertive match on locked = true documents that the msg_store must
+%% have locked the file before the GC asks this question.
+has_readers(File, #gc_state { file_summary_ets = FileSummaryEts }) ->
+    [#file_summary { locked = true, readers = Readers }] =
+        ets:lookup(FileSummaryEts, File),
+    Readers =/= 0.
+
+%% Runs in the GC process. Compact Destination in place (via a tmp
+%% file when its live data is not one contiguous prefix), append
+%% Source's surviving messages after it, update the index and file
+%% summary, tell the msg_store how many bytes were reclaimed, and
+%% return the result of safe_file_delete_fun/3 — used by the GC as a
+%% retry thunk for actually unlinking Source.
+%% Both ets lookups assert the files are locked with no readers.
+combine_files(Source, Destination,
+ State = #gc_state { file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ dir = Dir,
+ msg_store = Server }) ->
+ [#file_summary {
+ readers = 0,
+ left = Destination,
+ valid_total_size = SourceValid,
+ file_size = SourceFileSize,
+ locked = true }] = ets:lookup(FileSummaryEts, Source),
+ [#file_summary {
+ readers = 0,
+ right = Source,
+ valid_total_size = DestinationValid,
+ file_size = DestinationFileSize,
+ locked = true }] = ets:lookup(FileSummaryEts, Destination),
+
+ SourceName = filenum_to_name(Source),
+ DestinationName = filenum_to_name(Destination),
+ {ok, SourceHdl} = open_file(Dir, SourceName,
+ ?READ_AHEAD_MODE),
+ {ok, DestinationHdl} = open_file(Dir, DestinationName,
+ ?READ_AHEAD_MODE ++ ?WRITE_MODE),
+ TotalValidData = SourceValid + DestinationValid,
+ %% if DestinationValid =:= DestinationContiguousTop then we don't
+ %% need a tmp file
+ %% if they're not equal, then we need to write out everything past
+ %% the DestinationContiguousTop to a tmp file then truncate,
+ %% copy back in, and then copy over from Source
+ %% otherwise we just truncate straight away and copy over from Source
+ %% NB: matching the already-bound DestinationValid here asserts that
+ %% the scan agrees with the file summary.
+ {DestinationWorkList, DestinationValid} =
+ load_and_vacuum_message_file(Destination, State),
+ {DestinationContiguousTop, DestinationWorkListTail} =
+ drop_contiguous_block_prefix(DestinationWorkList),
+ case DestinationWorkListTail of
+ [] -> ok = truncate_and_extend_file(
+ DestinationHdl, DestinationContiguousTop, TotalValidData);
+ _ -> Tmp = filename:rootname(DestinationName) ++ ?FILE_EXTENSION_TMP,
+ {ok, TmpHdl} = open_file(Dir, Tmp, ?READ_AHEAD_MODE++?WRITE_MODE),
+ ok = copy_messages(
+ DestinationWorkListTail, DestinationContiguousTop,
+ DestinationValid, DestinationHdl, TmpHdl, Destination,
+ State),
+ TmpSize = DestinationValid - DestinationContiguousTop,
+ %% so now Tmp contains everything we need to salvage
+ %% from Destination, and index_state has been updated to
+ %% reflect the compaction of Destination so truncate
+ %% Destination and copy from Tmp back to the end
+ {ok, 0} = file_handle_cache:position(TmpHdl, 0),
+ ok = truncate_and_extend_file(
+ DestinationHdl, DestinationContiguousTop, TotalValidData),
+ {ok, TmpSize} =
+ file_handle_cache:copy(TmpHdl, DestinationHdl, TmpSize),
+ %% position in DestinationHdl should now be DestinationValid
+ ok = file_handle_cache:sync(DestinationHdl),
+ ok = file_handle_cache:delete(TmpHdl)
+ end,
+ %% again, the match on the bound SourceValid is an assertion
+ {SourceWorkList, SourceValid} = load_and_vacuum_message_file(Source, State),
+ ok = copy_messages(SourceWorkList, DestinationValid, TotalValidData,
+ SourceHdl, DestinationHdl, Destination, State),
+ %% tidy up
+ ok = file_handle_cache:close(DestinationHdl),
+ ok = file_handle_cache:close(SourceHdl),
+
+ %% don't update dest.right, because it could be changing at the
+ %% same time
+ true = ets:update_element(
+ FileSummaryEts, Destination,
+ [{#file_summary.valid_total_size, TotalValidData},
+ {#file_summary.file_size, TotalValidData}]),
+
+ Reclaimed = SourceFileSize + DestinationFileSize - TotalValidData,
+ gen_server2:cast(Server, {combine_files, Source, Destination, Reclaimed}),
+ safe_file_delete_fun(Source, Dir, FileHandlesEts).
+
+%% Runs in the GC process. Delete a file that the summary says holds
+%% no valid data (asserted, along with locked/no-readers); the
+%% load-and-vacuum re-scan asserts the same from the file contents.
+%% Notifies the msg_store of the reclaimed bytes and returns the
+%% deferred-delete result of safe_file_delete_fun/3.
+delete_file(File, State = #gc_state { file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ dir = Dir,
+ msg_store = Server }) ->
+ [#file_summary { valid_total_size = 0,
+ locked = true,
+ file_size = FileSize,
+ readers = 0 }] = ets:lookup(FileSummaryEts, File),
+ {[], 0} = load_and_vacuum_message_file(File, State),
+ gen_server2:cast(Server, {delete_file, File, FileSize}),
+ safe_file_delete_fun(File, Dir, FileHandlesEts).
+
+%% Scan File and return {LiveEntries, TotalLiveBytes} with entries in
+%% ascending offset order. As a side effect, index entries for this
+%% file with ref_count 0 are deleted (vacuumed); index entries that
+%% point elsewhere (message has moved) are ignored. The fun-head
+%% matches on the outer File/TotalSize/Offset bindings, asserting the
+%% index agrees with what was found on disk.
+load_and_vacuum_message_file(File, #gc_state { dir = Dir,
+ index_module = Index,
+ index_state = IndexState }) ->
+ %% Messages here will be end-of-file at start-of-list
+ {ok, Messages, _FileSize} =
+ scan_file_for_valid_messages(Dir, filenum_to_name(File)),
+ %% foldl will reverse so will end up with msgs in ascending offset order
+ lists:foldl(
+ fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) ->
+ case Index:lookup(MsgId, IndexState) of
+ #msg_location { file = File, total_size = TotalSize,
+ offset = Offset, ref_count = 0 } = Entry ->
+ ok = Index:delete_object(Entry, IndexState),
+ Acc;
+ #msg_location { file = File, total_size = TotalSize,
+ offset = Offset } = Entry ->
+ {[ Entry | List ], TotalSize + Size};
+ _ ->
+ Acc
+ end
+ end, {[], 0}, Messages).
+
+%% Copy the messages in WorkList (ascending offset order) from
+%% SourceHdl into DestinationHdl starting at InitOffset, coalescing
+%% runs of adjacent messages into single copy operations, and update
+%% each message's index entry to its new file and offset. Returns ok
+%% on success, or a {gc_error, ...} tuple if the bytes written do not
+%% end exactly at FinalOffset (the first case clause matches the
+%% already-bound FinalOffset; the second catches any mismatch).
+copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl,
+ Destination, #gc_state { index_module = Index,
+ index_state = IndexState }) ->
+ Copy = fun ({BlockStart, BlockEnd}) ->
+ BSize = BlockEnd - BlockStart,
+ {ok, BlockStart} =
+ file_handle_cache:position(SourceHdl, BlockStart),
+ {ok, BSize} =
+ file_handle_cache:copy(SourceHdl, DestinationHdl, BSize)
+ end,
+ case
+ lists:foldl(
+ fun (#msg_location { msg_id = MsgId, offset = Offset,
+ total_size = TotalSize },
+ {CurOffset, Block = {BlockStart, BlockEnd}}) ->
+ %% CurOffset is in the DestinationFile.
+ %% Offset, BlockStart and BlockEnd are in the SourceFile
+ %% update MsgLocation to reflect change of file and offset
+ ok = Index:update_fields(MsgId,
+ [{#msg_location.file, Destination},
+ {#msg_location.offset, CurOffset}],
+ IndexState),
+ {CurOffset + TotalSize,
+ case BlockEnd of
+ undefined ->
+ %% base case, called only for the first list elem
+ {Offset, Offset + TotalSize};
+ Offset ->
+ %% extend the current block because the
+ %% next msg follows straight on
+ {BlockStart, BlockEnd + TotalSize};
+ _ ->
+ %% found a gap, so actually do the work for
+ %% the previous block
+ Copy(Block),
+ {Offset, Offset + TotalSize}
+ end}
+ end, {InitOffset, {undefined, undefined}}, WorkList) of
+ {FinalOffset, Block} ->
+ case WorkList of
+ [] -> ok;
+ _ -> Copy(Block), %% do the last remaining block
+ ok = file_handle_cache:sync(DestinationHdl)
+ end;
+ {FinalOffsetZ, _Block} ->
+ {gc_error, [{expected, FinalOffset},
+ {got, FinalOffsetZ},
+ {destination, Destination}]}
+ end.
+
+%% Force a full scan on the store's next startup: remove the clean
+%% shutdown marker (a missing marker is fine — recovery is then forced
+%% anyway) and repair any compactions interrupted by a crash.
+force_recovery(BaseDir, Store) ->
+    Dir = filename:join(BaseDir, atom_to_list(Store)),
+    CleanMarker = filename:join(Dir, ?CLEAN_FILENAME),
+    case file:delete(CleanMarker) of
+        ok              -> ok;
+        {error, enoent} -> ok
+    end,
+    recover_crashed_compactions(BaseDir),
+    ok.
+
+%% Apply Fun to each file name joined onto directory D; each call must
+%% return ok (asserted). Returns the list of ok results.
+foreach_file(D, Fun, Files) ->
+    lists:map(fun (File) -> ok = Fun(filename:join(D, File)) end, Files).
+
+%% Pairwise variant: apply Fun to {D1/File, D2/File} for each file;
+%% each call must return ok (asserted). Returns the list of ok results.
+foreach_file(D1, D2, Fun, Files) ->
+    lists:map(fun (File) ->
+                      ok = Fun(filename:join(D1, File),
+                               filename:join(D2, File))
+              end, Files).
+
+%% Rewrite every message file under BaseDir/Store through TransformFun,
+%% staging results in a tmp sub-directory: transform into TmpDir,
+%% delete the originals, copy the transformed files back, then remove
+%% the tmp copies and directory. A leftover tmp dir from a previous
+%% run means that run died part-way, so we throw rather than risk
+%% mixing transformed and untransformed files.
+transform_dir(BaseDir, Store, TransformFun) ->
+ Dir = filename:join(BaseDir, atom_to_list(Store)),
+ TmpDir = filename:join(Dir, ?TRANSFORM_TMP),
+ TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end,
+ CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end,
+ case filelib:is_dir(TmpDir) of
+ true -> throw({error, transform_failed_previously});
+ false -> FileList = list_sorted_filenames(Dir, ?FILE_EXTENSION),
+ foreach_file(Dir, TmpDir, TransformFile, FileList),
+ foreach_file(Dir, fun file:delete/1, FileList),
+ foreach_file(TmpDir, Dir, CopyFile, FileList),
+ foreach_file(TmpDir, fun file:delete/1, FileList),
+ ok = file:del_dir(TmpDir)
+ end.
+
+%% Stream every message in FileOld through TransformFun and append the
+%% results to FileNew. The <<>> "dying client" marker entries are
+%% preserved verbatim rather than transformed.
+transform_msg_file(FileOld, FileNew, TransformFun) ->
+ ok = rabbit_file:ensure_parent_dirs_exist(FileNew),
+ {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []),
+ {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write],
+ [{write_buffer,
+ ?HANDLE_CACHE_BUFFER_SIZE}]),
+ {ok, _Acc, _IgnoreSize} =
+ rabbit_msg_file:scan(
+ RefOld, filelib:file_size(FileOld),
+ fun({MsgId, _Size, _Offset, BinMsg}, ok) ->
+ {ok, MsgNew} = case binary_to_term(BinMsg) of
+ <<>> -> {ok, <<>>}; %% dying client marker
+ Msg -> TransformFun(Msg)
+ end,
+ {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew),
+ ok
+ end, ok),
+ ok = file_handle_cache:close(RefOld),
+ ok = file_handle_cache:close(RefNew),
+ ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_msg_store_ets_index).
+
+-include("rabbit_msg_store.hrl").
+
+-behaviour(rabbit_msg_store_index).
+
+-export([new/1, recover/1,
+ lookup/2, insert/2, update/2, update_fields/3, delete/2,
+ delete_object/2, delete_by_file/2, terminate/1]).
+
+-define(MSG_LOC_NAME, rabbit_msg_store_ets_index).
+-define(FILENAME, "msg_store_index.ets").
+
+-record(state, { table, dir }).
+
+%% Create a fresh, empty index table, first discarding any index image
+%% previously saved to disk (it only describes a clean shutdown).
+new(Dir) ->
+    file:delete(filename:join(Dir, ?FILENAME)),
+    Table = ets:new(?MSG_LOC_NAME,
+                    [set, public, {keypos, #msg_location.msg_id}]),
+    #state { table = Table, dir = Dir }.
+
+%% Reload the index table saved by terminate/1, deleting the on-disk
+%% image once loaded so a later crash cannot recover stale data.
+recover(Dir) ->
+    Path = filename:join(Dir, ?FILENAME),
+    case ets:file2tab(Path) of
+        {ok, Table} ->
+            file:delete(Path),
+            {ok, #state { table = Table, dir = Dir }};
+        Error ->
+            Error
+    end.
+
+%% Fetch the #msg_location stored under Key, or 'not_found'.
+lookup(Key, #state { table = Table }) ->
+    case ets:lookup(Table, Key) of
+        [Entry] -> Entry;
+        []      -> not_found
+    end.
+
+%% Insert a brand-new entry; crashes (badmatch) if the key exists.
+insert(Obj, #state { table = Table }) ->
+    true = ets:insert_new(Table, Obj),
+    ok.
+
+%% Insert or overwrite an entry unconditionally.
+update(Obj, #state { table = Table }) ->
+    true = ets:insert(Table, Obj),
+    ok.
+
+%% Apply {Position, Value} updates (one or a list) to the entry at
+%% Key; the key must exist (asserted).
+update_fields(Key, Updates, #state { table = Table }) ->
+    true = ets:update_element(Table, Key, Updates),
+    ok.
+
+%% Remove the entry at Key (a no-op if absent).
+delete(Key, #state { table = Table }) ->
+    true = ets:delete(Table, Key),
+    ok.
+
+%% Remove an entry only when it matches Obj exactly.
+delete_object(Obj, #state { table = Table }) ->
+    true = ets:delete_object(Table, Obj),
+    ok.
+
+%% Remove every entry whose #msg_location.file equals File.
+delete_by_file(File, #state { table = Table }) ->
+    MatchHead = #msg_location { file = File, _ = '_' },
+    ets:select_delete(Table, [{MatchHead, [], [true]}]),
+    ok.
+
+%% Save the index to disk (with object_count for integrity checking on
+%% reload) so recover/1 can restore it, then drop the in-memory table.
+terminate(#state { table = Table, dir = Dir }) ->
+    FileName = filename:join(Dir, ?FILENAME),
+    ok = ets:tab2file(Table, FileName, [{extended_info, [object_count]}]),
+    ets:delete(Table).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_msg_store_gc).
+
+-behaviour(gen_server2).
+
+-export([start_link/1, combine/3, delete/2, no_readers/2, stop/1]).
+
+-export([set_maximum_since_use/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, prioritise_cast/3]).
+
+-record(state,
+ { pending_no_readers,
+ on_action,
+ msg_store_state
+ }).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 :: (rabbit_msg_store:gc_state()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(combine/3 :: (pid(), rabbit_msg_store:file_num(),
+ rabbit_msg_store:file_num()) -> 'ok').
+-spec(delete/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok').
+-spec(no_readers/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok').
+-spec(stop/1 :: (pid()) -> 'ok').
+-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Start the GC process for one message store; MsgStoreState is the
+%% #gc_state handed to us by the rabbit_msg_store.
+start_link(MsgStoreState) ->
+ gen_server2:start_link(?MODULE, [MsgStoreState],
+ [{timeout, infinity}]).
+
+%% Asynchronously request that Source be combined into Destination.
+combine(Server, Source, Destination) ->
+ gen_server2:cast(Server, {combine, Source, Destination}).
+
+%% Asynchronously request deletion of a (valid-data-free) file.
+delete(Server, File) ->
+ gen_server2:cast(Server, {delete, File}).
+
+%% Notification from the msg_store that File has lost its last reader,
+%% allowing any action parked on it to be retried.
+no_readers(Server, File) ->
+ gen_server2:cast(Server, {no_readers, File}).
+
+%% Synchronously stop the GC process (no timeout).
+stop(Server) ->
+ gen_server2:call(Server, stop, infinity).
+
+%% file_handle_cache callback: ask us to age out fds unused for Age.
+set_maximum_since_use(Pid, Age) ->
+ gen_server2:cast(Pid, {set_maximum_since_use, Age}).
+
+%%----------------------------------------------------------------------------
+
+%% Register with the file_handle_cache so it can reclaim our fds, then
+%% sit hibernating between (comparatively rare) GC requests.
+init([MsgStoreState]) ->
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+ {ok, #state { pending_no_readers = dict:new(),
+ on_action = [],
+ msg_store_state = MsgStoreState }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% fd-reclamation requests jump the queue (priority 8 vs default 0) so
+%% handles are released promptly even while GC work is pending.
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
+prioritise_cast(_Msg, _Len, _State) -> 0.
+
+%% 'stop' is the only synchronous request this server accepts.
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State}.
+
+%% Attempt the combine now; it is parked if either file has readers.
+handle_cast({combine, Source, Destination}, State) ->
+ {noreply, attempt_action(combine, [Source, Destination], State), hibernate};
+
+%% Attempt the delete now; parked if the file still has readers.
+handle_cast({delete, File}, State) ->
+ {noreply, attempt_action(delete, [File], State), hibernate};
+
+%% File has lost its last reader: if an action was parked on it,
+%% unpark and retry that action.
+handle_cast({no_readers, File},
+ State = #state { pending_no_readers = Pending }) ->
+ {noreply, case dict:find(File, Pending) of
+ error ->
+ State;
+ {ok, {Action, Files}} ->
+ Pending1 = dict:erase(File, Pending),
+ attempt_action(
+ Action, Files,
+ State #state { pending_no_readers = Pending1 })
+ end, hibernate};
+
+%% file_handle_cache callback relay: close fds unused for Age.
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ {noreply, State, hibernate}.
+
+%% No raw messages are expected; crash loudly on anything unexpected.
+handle_info(Info, State) ->
+ {stop, {unhandled_info, Info}, State}.
+
+%% Nothing to clean up; gen_server2 ignores terminate's return value,
+%% so returning State (rather than the conventional 'ok') is harmless.
+terminate(_Reason, State) ->
+ State.
+
+%% No state migration is needed between code versions.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Run Action on Files if none of them currently has readers;
+%% otherwise park the action keyed on the first still-read file, to be
+%% retried on its no_readers notification. do_action/3 returns a
+%% zero-arity thunk (the deferred file deletion); we keep re-running
+%% accumulated thunks, dropping each once it reports true.
+attempt_action(Action, Files,
+ State = #state { pending_no_readers = Pending,
+ on_action = Thunks,
+ msg_store_state = MsgStoreState }) ->
+ case [File || File <- Files,
+ rabbit_msg_store:has_readers(File, MsgStoreState)] of
+ [] -> State #state {
+ on_action = lists:filter(
+ fun (Thunk) -> not Thunk() end,
+ [do_action(Action, Files, MsgStoreState) |
+ Thunks]) };
+ [File | _] -> Pending1 = dict:store(File, {Action, Files}, Pending),
+ State #state { pending_no_readers = Pending1 }
+ end.
+
+%% Dispatch each GC action to its rabbit_msg_store implementation;
+%% both return the deferred-delete thunk used by attempt_action/3.
+do_action(combine, [Source, Destination], MsgStoreState) ->
+ rabbit_msg_store:combine_files(Source, Destination, MsgStoreState);
+do_action(delete, [File], MsgStoreState) ->
+ rabbit_msg_store:delete_file(File, MsgStoreState).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_msg_store_index).
+
+-include("rabbit_msg_store.hrl").
+
+-ifdef(use_specs).
+
+-type(dir() :: any()).
+-type(index_state() :: any()).
+-type(keyvalue() :: any()).
+-type(fieldpos() :: non_neg_integer()).
+-type(fieldvalue() :: any()).
+
+-callback new(dir()) -> index_state().
+-callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()).
+-callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | keyvalue()).
+-callback insert(keyvalue(), index_state()) -> 'ok'.
+-callback update(keyvalue(), index_state()) -> 'ok'.
+-callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} |
+ [{fieldpos(), fieldvalue()}]),
+ index_state()) -> 'ok'.
+-callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'.
+-callback delete_object(keyvalue(), index_state()) -> 'ok'.
+-callback delete_by_file(fieldvalue(), index_state()) -> 'ok'.
+-callback terminate(index_state()) -> any().
+
+-else.
+
+-export([behaviour_info/1]).
+
+%% Pre-callback-attribute (pre-R15) behaviour definition; must mirror
+%% the -callback declarations above. Fix: {delete_object, 2} was
+%% missing from this list although it is declared as a -callback and is
+%% called by rabbit_msg_store (Index:delete_object/2), so old compilers
+%% would not warn about index modules that failed to implement it.
+behaviour_info(callbacks) ->
+    [{new, 1},
+     {recover, 1},
+     {lookup, 2},
+     {insert, 2},
+     {update, 2},
+     {update_fields, 3},
+     {delete, 2},
+     {delete_object, 2},
+     {delete_by_file, 2},
+     {terminate, 1}];
+behaviour_info(_Other) ->
+    undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_net).
+-include("rabbit.hrl").
+
+-export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2,
+ recv/1, sync_recv/2, async_recv/3, port_command/2, getopts/2,
+ setopts/2, send/2, close/1, fast_close/1, sockname/1, peername/1,
+ peercert/1, connection_string/2, socket_ends/2, is_loopback/1]).
+
+%%---------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([socket/0]).
+
+-type(stat_option() ::
+ 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
+ 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend').
+-type(ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any())).
+-type(ok_or_any_error() :: rabbit_types:ok_or_error(any())).
+-type(socket() :: port() | #ssl_socket{}).
+-type(opts() :: [{atom(), any()} |
+ {raw, non_neg_integer(), non_neg_integer(), binary()}]).
+-type(host_or_ip() :: binary() | inet:ip_address()).
+-spec(is_ssl/1 :: (socket()) -> boolean()).
+-spec(ssl_info/1 :: (socket())
+ -> 'nossl' | ok_val_or_error(
+ {atom(), {atom(), atom(), atom()}})).
+-spec(controlling_process/2 :: (socket(), pid()) -> ok_or_any_error()).
+-spec(getstat/2 ::
+ (socket(), [stat_option()])
+ -> ok_val_or_error([{stat_option(), integer()}])).
+-spec(recv/1 :: (socket()) ->
+ {'data', [char()] | binary()} | 'closed' |
+ rabbit_types:error(any()) | {'other', any()}).
+-spec(sync_recv/2 :: (socket(), integer()) -> rabbit_types:ok(binary()) |
+ rabbit_types:error(any())).
+-spec(async_recv/3 ::
+ (socket(), integer(), timeout()) -> rabbit_types:ok(any())).
+-spec(port_command/2 :: (socket(), iolist()) -> 'true').
+-spec(getopts/2 :: (socket(), [atom() | {raw,
+ non_neg_integer(),
+ non_neg_integer(),
+ non_neg_integer() | binary()}])
+ -> ok_val_or_error(opts())).
+-spec(setopts/2 :: (socket(), opts()) -> ok_or_any_error()).
+-spec(send/2 :: (socket(), binary() | iolist()) -> ok_or_any_error()).
+-spec(close/1 :: (socket()) -> ok_or_any_error()).
+-spec(fast_close/1 :: (socket()) -> ok_or_any_error()).
+-spec(sockname/1 ::
+ (socket())
+ -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
+-spec(peername/1 ::
+ (socket())
+ -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
+-spec(peercert/1 ::
+ (socket())
+ -> 'nossl' | ok_val_or_error(rabbit_ssl:certificate())).
+-spec(connection_string/2 ::
+ (socket(), 'inbound' | 'outbound') -> ok_val_or_error(string())).
+-spec(socket_ends/2 ::
+ (socket(), 'inbound' | 'outbound')
+ -> ok_val_or_error({host_or_ip(), rabbit_networking:ip_port(),
+ host_or_ip(), rabbit_networking:ip_port()})).
+-spec(is_loopback/1 :: (socket() | inet:ip_address()) -> boolean()).
+
+-endif.
+
+%%---------------------------------------------------------------------------
+
+-define(SSL_CLOSE_TIMEOUT, 5000).
+
+-define(IS_SSL(Sock), is_record(Sock, ssl_socket)).
+
+is_ssl(Sock) -> ?IS_SSL(Sock).
+
+%% Negotiated protocol/cipher info for an SSL socket; 'nossl' for a
+%% plain TCP socket.
+ssl_info(Sock) when ?IS_SSL(Sock) ->
+ ssl:connection_info(Sock#ssl_socket.ssl);
+ssl_info(_Sock) ->
+ nossl.
+
+%% Reassign the process that receives the socket's messages.
+controlling_process(Sock, Pid) when ?IS_SSL(Sock) ->
+ ssl:controlling_process(Sock#ssl_socket.ssl, Pid);
+controlling_process(Sock, Pid) when is_port(Sock) ->
+ gen_tcp:controlling_process(Sock, Pid).
+
+%% Socket statistics; for SSL we read them from the underlying TCP
+%% port, since the ssl socket itself carries no inet stats.
+getstat(Sock, Stats) when ?IS_SSL(Sock) ->
+ inet:getstat(Sock#ssl_socket.tcp, Stats);
+getstat(Sock, Stats) when is_port(Sock) ->
+ inet:getstat(Sock, Stats).
+
+%% Receive the next active-mode message for this socket, normalising
+%% the ssl/tcp tag families to {data,_} | closed | {error,_} |
+%% {other,_} via recv/2.
+recv(Sock) when ?IS_SSL(Sock) ->
+ recv(Sock#ssl_socket.ssl, {ssl, ssl_closed, ssl_error});
+recv(Sock) when is_port(Sock) ->
+ recv(Sock, {tcp, tcp_closed, tcp_error}).
+
+%% Blocking receive on the given tag triple. Note the catch-all clause
+%% consumes (and returns, tagged 'other') any unrelated message, so
+%% the mailbox cannot accumulate unmatched messages here.
+recv(S, {DataTag, ClosedTag, ErrorTag}) ->
+ receive
+ {DataTag, S, Data} -> {data, Data};
+ {ClosedTag, S} -> closed;
+ {ErrorTag, S, Reason} -> {error, Reason};
+ Other -> {other, Other}
+ end.
+
+%% Blocking (passive-mode) read of Length bytes.
+%% NOTE(review): the second clause carries no is_port/1 guard, unlike
+%% the other functions in this module — presumably deliberate; confirm.
+sync_recv(Sock, Length) when ?IS_SSL(Sock) ->
+ ssl:recv(Sock#ssl_socket.ssl, Length);
+sync_recv(Sock, Length) ->
+ gen_tcp:recv(Sock, Length).
+
+%% Asynchronous read: the result arrives later as an
+%% {inet_async, Sock, Ref, Result} message. ssl has no native async
+%% recv, so for SSL sockets we emulate it with a helper process that
+%% performs a blocking ssl:recv and posts the result back.
+async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) ->
+ Pid = self(),
+ Ref = make_ref(),
+
+ spawn(fun () -> Pid ! {inet_async, Sock, Ref,
+ ssl:recv(Sock#ssl_socket.ssl, Length, Timeout)}
+ end),
+
+ {ok, Ref};
+%% prim_inet encodes 'infinity' as -1
+async_recv(Sock, Length, infinity) when is_port(Sock) ->
+ prim_inet:async_recv(Sock, Length, -1);
+async_recv(Sock, Length, Timeout) when is_port(Sock) ->
+ prim_inet:async_recv(Sock, Length, Timeout).
+
+%% Emulate erlang:port_command/2 for SSL sockets: send Data and
+%% synthesise the {inet_reply, Sock, ok} message that callers of the
+%% real port_command expect; raise on send errors.
+port_command(Sock, Data) when ?IS_SSL(Sock) ->
+ case ssl:send(Sock#ssl_socket.ssl, Data) of
+ ok -> self() ! {inet_reply, Sock, ok},
+ true;
+ {error, Reason} -> erlang:error(Reason)
+ end;
+port_command(Sock, Data) when is_port(Sock) ->
+ erlang:port_command(Sock, Data).
+
+%% Read socket options, dispatching on socket flavour.
+getopts(Sock, Options) when ?IS_SSL(Sock) ->
+ ssl:getopts(Sock#ssl_socket.ssl, Options);
+getopts(Sock, Options) when is_port(Sock) ->
+ inet:getopts(Sock, Options).
+
+%% Set socket options, dispatching on socket flavour.
+setopts(Sock, Options) when ?IS_SSL(Sock) ->
+ ssl:setopts(Sock#ssl_socket.ssl, Options);
+setopts(Sock, Options) when is_port(Sock) ->
+ inet:setopts(Sock, Options).
+
+%% Send Data on the socket, dispatching on socket flavour.
+send(Sock, Data) when ?IS_SSL(Sock) -> ssl:send(Sock#ssl_socket.ssl, Data);
+send(Sock, Data) when is_port(Sock) -> gen_tcp:send(Sock, Data).
+
+%% Orderly close; may block for SSL (see fast_close/1 for the bounded
+%% variant).
+close(Sock) when ?IS_SSL(Sock) -> ssl:close(Sock#ssl_socket.ssl);
+close(Sock) when is_port(Sock) -> gen_tcp:close(Sock).
+
+%% Close the socket with a bounded worst-case delay (see below for why
+%% ssl:close/1 alone is not sufficient).
+fast_close(Sock) when ?IS_SSL(Sock) ->
+ %% We cannot simply port_close the underlying tcp socket since the
+ %% TLS protocol is quite insistent that a proper closing handshake
+ %% should take place (see RFC 5246 s7.2.1). So we call ssl:close
+ %% instead, but that can block for a very long time, e.g. when
+ %% there is lots of pending output and there is tcp backpressure,
+ %% or the ssl_connection process has entered the
+ %% workaround_transport_delivery_problems function during
+ %% termination, which, inexplicably, does a gen_tcp:recv(Socket,
+ %% 0), which may never return if the client doesn't send a FIN or
+ %% that gets swallowed by the network. Since there is no timeout
+ %% variant of ssl:close, we construct our own.
+ {Pid, MRef} = spawn_monitor(fun () -> ssl:close(Sock#ssl_socket.ssl) end),
+ erlang:send_after(?SSL_CLOSE_TIMEOUT, self(), {Pid, ssl_close_timeout}),
+ receive
+ {Pid, ssl_close_timeout} ->
+ %% timed out: kill the closer and fall through to brute force
+ erlang:demonitor(MRef, [flush]),
+ exit(Pid, kill);
+ {'DOWN', MRef, process, Pid, _Reason} ->
+ ok
+ end,
+ %% ignore errors: the port may already be gone
+ catch port_close(Sock#ssl_socket.tcp),
+ ok;
+fast_close(Sock) when is_port(Sock) ->
+ catch port_close(Sock), ok.
+
+%% {ok, {LocalAddr, LocalPort}} of our end of the socket.
+sockname(Sock) when ?IS_SSL(Sock) -> ssl:sockname(Sock#ssl_socket.ssl);
+sockname(Sock) when is_port(Sock) -> inet:sockname(Sock).
+
+%% {ok, {PeerAddr, PeerPort}} of the remote end of the socket.
+peername(Sock) when ?IS_SSL(Sock) -> ssl:peername(Sock#ssl_socket.ssl);
+peername(Sock) when is_port(Sock) -> inet:peername(Sock).
+
+%% Peer certificate for SSL sockets; 'nossl' for plain TCP.
+peercert(Sock) when ?IS_SSL(Sock) -> ssl:peercert(Sock#ssl_socket.ssl);
+peercert(Sock) when is_port(Sock) -> nossl.
+
+%% Human-readable "from:port -> to:port" description of the socket;
+%% for 'inbound' the peer end comes first (see sock_funs/1).
+connection_string(Sock, Direction) ->
+ case socket_ends(Sock, Direction) of
+ {ok, {FromAddress, FromPort, ToAddress, ToPort}} ->
+ {ok, rabbit_misc:format(
+ "~s:~p -> ~s:~p",
+ [maybe_ntoab(FromAddress), FromPort,
+ maybe_ntoab(ToAddress), ToPort])};
+ Error ->
+ Error
+ end.
+
+%% Addresses and ports of both ends of the socket, ordered according
+%% to Direction; addresses may be replaced by hostnames when reverse
+%% DNS lookups are enabled (see rdns/1). The first failing lookup's
+%% error is returned as-is.
+socket_ends(Sock, Direction) ->
+ {From, To} = sock_funs(Direction),
+ case {From(Sock), To(Sock)} of
+ {{ok, {FromAddress, FromPort}}, {ok, {ToAddress, ToPort}}} ->
+ {ok, {rdns(FromAddress), FromPort,
+ rdns(ToAddress), ToPort}};
+ {{error, _Reason} = Error, _} ->
+ Error;
+ {_, {error, _Reason} = Error} ->
+ Error
+ end.
+
+%% Format an IP address tuple as text via rabbit_misc:ntoab/1;
+%% anything else (already a hostname) passes through untouched.
+maybe_ntoab(Addr) when is_tuple(Addr) -> rabbit_misc:ntoab(Addr);
+maybe_ntoab(Host) -> Host.
+
+%% Resolve Addr to a hostname binary when the reverse_dns_lookups app
+%% env is set to true; for any other setting return Addr unchanged.
+rdns(Addr) ->
+ case application:get_env(rabbit, reverse_dns_lookups) of
+ {ok, true} -> list_to_binary(rabbit_networking:tcp_host(Addr));
+ _ -> Addr
+ end.
+
+%% {FromFun, ToFun} for socket_ends/2: inbound connections originate
+%% at the peer, outbound connections at our own end.
+sock_funs(inbound) -> {fun peername/1, fun sockname/1};
+sock_funs(outbound) -> {fun sockname/1, fun peername/1}.
+
+%% True iff the socket is bound to (or the address is on) a loopback
+%% interface: IPv4 127.0.0.0/8, IPv6 ::1, or an IPv4-mapped IPv6
+%% address (::ffff:a.b.c.d) whose embedded IPv4 address is loopback.
+is_loopback(Sock) when is_port(Sock) ; ?IS_SSL(Sock) ->
+ case sockname(Sock) of
+ {ok, {Addr, _Port}} -> is_loopback(Addr);
+ {error, _} -> false
+ end;
+%% We could parse the results of inet:getifaddrs() instead. But that
+%% would be more complex and less maybe Windows-compatible...
+is_loopback({127,_,_,_}) -> true;
+is_loopback({0,0,0,0,0,0,0,1}) -> true;
+is_loopback({0,0,0,0,0,65535,AB,CD}) -> is_loopback(ipv4(AB, CD));
+is_loopback(_) -> false.
+
+ipv4(AB, CD) -> {AB bsr 8, AB band 255, CD bsr 8, CD band 255}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_networking).
+
+-export([boot/0, start/0, start_tcp_listener/1, start_ssl_listener/2,
+ stop_tcp_listener/1, on_node_down/1, active_listeners/0,
+ node_listeners/1, register_connection/1, unregister_connection/1,
+ connections/0, connection_info_keys/0,
+ connection_info/1, connection_info/2,
+ connection_info_all/0, connection_info_all/1,
+ close_connection/2, force_connection_event_refresh/1, tcp_host/1]).
+
+%% Used by TCP-based transports, e.g. the STOMP adapter.
+-export([tcp_listener_addresses/1, tcp_listener_spec/6,
+ ensure_ssl/0, ssl_transform_fun/1]).
+
+-export([tcp_listener_started/3, tcp_listener_stopped/3,
+ start_client/1, start_ssl_client/2]).
+
+%% Internal
+-export([connections_local/0]).
+
+-include("rabbit.hrl").
+-include_lib("kernel/include/inet.hrl").
+
+-define(SSL_TIMEOUT, 5). %% seconds
+
+-define(FIRST_TEST_BIND_PORT, 10000).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([ip_port/0, hostname/0]).
+
+-type(hostname() :: inet:hostname()).
+-type(ip_port() :: inet:port_number()).
+
+-type(family() :: atom()).
+-type(listener_config() :: ip_port() |
+ {hostname(), ip_port()} |
+ {hostname(), ip_port(), family()}).
+-type(address() :: {inet:ip_address(), ip_port(), family()}).
+-type(name_prefix() :: atom()).
+-type(protocol() :: atom()).
+-type(label() :: string()).
+
+-spec(start/0 :: () -> 'ok').
+-spec(start_tcp_listener/1 :: (listener_config()) -> 'ok').
+-spec(start_ssl_listener/2 ::
+ (listener_config(), rabbit_types:infos()) -> 'ok').
+-spec(stop_tcp_listener/1 :: (listener_config()) -> 'ok').
+-spec(active_listeners/0 :: () -> [rabbit_types:listener()]).
+-spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]).
+-spec(register_connection/1 :: (pid()) -> ok).
+-spec(unregister_connection/1 :: (pid()) -> ok).
+-spec(connections/0 :: () -> [rabbit_types:connection()]).
+-spec(connections_local/0 :: () -> [rabbit_types:connection()]).
+-spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(connection_info/1 ::
+ (rabbit_types:connection()) -> rabbit_types:infos()).
+-spec(connection_info/2 ::
+ (rabbit_types:connection(), rabbit_types:info_keys())
+ -> rabbit_types:infos()).
+-spec(connection_info_all/0 :: () -> [rabbit_types:infos()]).
+-spec(connection_info_all/1 ::
+ (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
+-spec(close_connection/2 :: (pid(), string()) -> 'ok').
+-spec(force_connection_event_refresh/1 :: (reference()) -> 'ok').
+
+-spec(on_node_down/1 :: (node()) -> 'ok').
+-spec(tcp_listener_addresses/1 :: (listener_config()) -> [address()]).
+-spec(tcp_listener_spec/6 ::
+ (name_prefix(), address(), [gen_tcp:listen_option()], protocol(),
+ label(), rabbit_types:mfargs()) -> supervisor:child_spec()).
+-spec(ensure_ssl/0 :: () -> rabbit_types:infos()).
+-spec(ssl_transform_fun/1 ::
+ (rabbit_types:infos())
+ -> fun ((rabbit_net:socket())
+ -> rabbit_types:ok_or_error(#ssl_socket{}))).
+
+-spec(boot/0 :: () -> 'ok').
+-spec(start_client/1 ::
+ (port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
+ atom() | pid() | port() | {atom(),atom()}).
+-spec(start_ssl_client/2 ::
+ (_,port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
+ atom() | pid() | port() | {atom(),atom()}).
+-spec(tcp_listener_started/3 ::
+ (_,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()},
+ _) ->
+ 'ok').
+-spec(tcp_listener_stopped/3 ::
+ (_,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()},
+ _) ->
+ 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Boot-time entry point: record the Erlang distribution listener,
+%% start the per-connection client supervisor, then bring up all
+%% configured TCP and SSL listeners. Any failure aborts the boot via
+%% the ok assertions.
+boot() ->
+ ok = record_distribution_listener(),
+ ok = start(),
+ ok = boot_tcp(),
+ ok = boot_ssl().
+
+%% Start every listener configured in the tcp_listeners app env; a
+%% listener that fails to start crashes the boot (ok assertion).
+boot_tcp() ->
+ {ok, TcpListeners} = application:get_env(tcp_listeners),
+ [ok = start_tcp_listener(Listener) || Listener <- TcpListeners],
+ ok.
+
+%% Start every listener configured in the ssl_listeners app env,
+%% setting up the ssl applications/options first (only when there is
+%% at least one listener). Fix: assert ok on each listener startup,
+%% matching boot_tcp/0 — previously failures were silently discarded.
+%% (start_ssl_listener/2 is specced to return 'ok'.)
+boot_ssl() ->
+    case application:get_env(ssl_listeners) of
+        {ok, []} ->
+            ok;
+        {ok, SslListeners} ->
+            SslOpts = ensure_ssl(),
+            [ok = start_ssl_listener(Listener, SslOpts)
+             || Listener <- SslListeners],
+            ok
+    end.
+
+%% Start the locally-registered supervisor that owns one
+%% rabbit_connection_sup per accepted client connection.
+start() -> rabbit_sup:start_supervisor_child(
+ rabbit_tcp_client_sup, rabbit_client_sup,
+ [{local, rabbit_tcp_client_sup},
+ {rabbit_connection_sup,start_link,[]}]).
+
+%% Start the applications ssl depends on and return the configured
+%% ssl_options, normalising verify_fun: an {M,F} setting is wrapped in
+%% a one-argument fun; when absent and verify_peer is set, install a
+%% verify_fun that rejects any non-empty list of certificate errors.
+ensure_ssl() ->
+ {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
+ ok = app_utils:start_applications(SslAppsConfig),
+ {ok, SslOptsConfig} = application:get_env(rabbit, ssl_options),
+
+ case rabbit_misc:pget(verify_fun, SslOptsConfig) of
+ {Module, Function} ->
+ rabbit_misc:pset(verify_fun,
+ fun (ErrorList) ->
+ Module:Function(ErrorList)
+ end, SslOptsConfig);
+ undefined ->
+ % unknown_ca errors are silently ignored prior to R14B unless we
+ % supply this verify_fun - remove when at least R14B is required
+ case proplists:get_value(verify, SslOptsConfig, verify_none) of
+ verify_none -> SslOptsConfig;
+ verify_peer -> [{verify_fun, fun([]) -> true;
+ ([_|_]) -> false
+ end}
+ | SslOptsConfig]
+ end
+ end.
+
%% Return a fun that upgrades an accepted TCP socket to SSL with the
%% given options, wrapping the pair in #ssl_socket{} on success.
%% NOTE(review): the old-style 'catch' turns an abnormal termination of
%% ssl_accept into {'EXIT', Reason} instead of crashing the acceptor;
%% a thrown (non-exit) value would hit no clause and raise case_clause.
ssl_transform_fun(SslOpts) ->
    fun (Sock) ->
            case catch ssl:ssl_accept(Sock, SslOpts, ?SSL_TIMEOUT * 1000) of
                {ok, SslSock} ->
                    {ok, #ssl_socket{tcp = Sock, ssl = SslSock}};
                {error, timeout} ->
                    {error, {ssl_upgrade_error, timeout}};
                {error, Reason} ->
                    %% We have no idea what state the ssl_connection
                    %% process is in - it could still be happily
                    %% going, it might be stuck, or it could be just
                    %% about to fail. There is little that our caller
                    %% can do but close the TCP socket, but this could
                    %% cause ssl alerts to get dropped (which is bad
                    %% form, according to the TLS spec). So we give
                    %% the ssl_connection a little bit of time to send
                    %% such alerts.
                    timer:sleep(?SSL_TIMEOUT * 1000),
                    {error, {ssl_upgrade_error, Reason}};
                {'EXIT', Reason} ->
                    {error, {ssl_upgrade_failure, Reason}}
            end
    end.
+
%% Normalise a listener configuration (bare port, {"auto", Port},
%% {Host, Port} or {Host, Port, Family}) into a list of
%% {IPAddress, Port, Family} tuples, resolving hostnames as needed.
tcp_listener_addresses(Port) when is_integer(Port) ->
    tcp_listener_addresses_auto(Port);
tcp_listener_addresses({"auto", Port}) ->
    %% Variant to prevent lots of hacking around in bash and batch files
    tcp_listener_addresses_auto(Port);
tcp_listener_addresses({Host, Port}) ->
    %% auto: determine family IPv4 / IPv6 after converting to IP address
    tcp_listener_addresses({Host, Port, auto});
tcp_listener_addresses({Host, Port, Family0})
  when is_integer(Port) andalso (Port >= 0) andalso (Port =< 65535) ->
    [{IPAddress, Port, Family} ||
        {IPAddress, Family} <- getaddr(Host, Family0)];
%% Anything else has an invalid (non-integer or out-of-range) port.
tcp_listener_addresses({_Host, Port, _Family0}) ->
    error_logger:error_msg("invalid port ~p - not 0..65535~n", [Port]),
    throw({error, {invalid_port, Port}}).
+
%% Expand a bare port into one address tuple per address family this
%% machine needs (as decided by port_to_listeners/1).
tcp_listener_addresses_auto(Port) ->
    [Address || Listener <- port_to_listeners(Port),
                Address  <- tcp_listener_addresses(Listener)].
+
%% Build a supervisor child spec for a tcp_listener_sup listening on
%% one address. The started/stopped callbacks record the listener in
%% mnesia; OnConnect is invoked for every accepted socket.
tcp_listener_spec(NamePrefix, {IPAddress, Port, Family}, SocketOpts,
                  Protocol, Label, OnConnect) ->
    {rabbit_misc:tcp_name(NamePrefix, IPAddress, Port),
     {tcp_listener_sup, start_link,
      [IPAddress, Port, [Family | SocketOpts],
       {?MODULE, tcp_listener_started, [Protocol]},
       {?MODULE, tcp_listener_stopped, [Protocol]},
       OnConnect, Label]},
     transient, infinity, supervisor, [tcp_listener_sup]}.
+
%% Start a plain AMQP-over-TCP listener for the given configuration.
start_tcp_listener(Listener) ->
    start_listener(Listener, amqp, "TCP Listener",
                   {?MODULE, start_client, []}).

%% Start an AMQP-over-SSL listener; SslOpts is closed over into the
%% per-connection callback.
start_ssl_listener(Listener, SslOpts) ->
    start_listener(Listener, 'amqp/ssl', "SSL Listener",
                   {?MODULE, start_ssl_client, [SslOpts]}).
+
%% Start a listener of the given protocol and label on every address
%% the listener configuration resolves to.
start_listener(Listener, Protocol, Label, OnConnect) ->
    lists:foreach(
      fun (Address) ->
              start_listener0(Address, Protocol, Label, OnConnect)
      end,
      tcp_listener_addresses(Listener)),
    ok.
+
%% Start one listener child under rabbit_sup; a {shutdown, _} start
%% failure (e.g. the address is already in use) aborts the broker with
%% a descriptive exit reason.
start_listener0(Address, Protocol, Label, OnConnect) ->
    Spec = tcp_listener_spec(rabbit_tcp_listener_sup, Address, tcp_opts(),
                             Protocol, Label, OnConnect),
    case supervisor:start_child(rabbit_sup, Spec) of
        {ok, _}                -> ok;
        {error, {shutdown, _}} -> {IPAddress, Port, _Family} = Address,
                                  exit({could_not_start_tcp_listener,
                                        {rabbit_misc:ntoa(IPAddress), Port}})
    end.
+
%% Stop the listener(s) for the given configuration, one per resolved
%% address.
stop_tcp_listener(Listener) ->
    [stop_tcp_listener0(Address) ||
        Address <- tcp_listener_addresses(Listener)],
    ok.

%% Terminate and remove the supervisor child for one listening address;
%% both steps are asserted.
stop_tcp_listener0({IPAddress, Port, _Family}) ->
    Name = rabbit_misc:tcp_name(rabbit_tcp_listener_sup, IPAddress, Port),
    ok = supervisor:terminate_child(rabbit_sup, Name),
    ok = supervisor:delete_child(rabbit_sup, Name).
+
%% Callback run when a listener comes up: record it in the
%% rabbit_listener mnesia table.
tcp_listener_started(Protocol, IPAddress, Port) ->
    %% We need the ip to distinguish e.g. 0.0.0.0 and 127.0.0.1
    %% We need the host so we can distinguish multiple instances of the above
    %% in a cluster.
    ok = mnesia:dirty_write(
           rabbit_listener,
           #listener{node = node(),
                     protocol = Protocol,
                     host = tcp_host(IPAddress),
                     ip_address = IPAddress,
                     port = Port}).

%% Callback run when a listener goes down: delete the exact record
%% written by tcp_listener_started/3.
tcp_listener_stopped(Protocol, IPAddress, Port) ->
    ok = mnesia:dirty_delete_object(
           rabbit_listener,
           #listener{node = node(),
                     protocol = Protocol,
                     host = tcp_host(IPAddress),
                     ip_address = IPAddress,
                     port = Port}).
+
%% Record the Erlang distribution port (looked up via epmd) as a
%% 'clustering' listener on the IPv6 wildcard address.
%% NOTE(review): erl_epmd:port_please/2 can also return 'noport' -
%% the match would then crash boot; presumably epmd is always
%% reachable at this point - confirm.
record_distribution_listener() ->
    {Name, Host} = rabbit_nodes:parts(node()),
    {port, Port, _Version} = erl_epmd:port_please(Name, Host),
    tcp_listener_started(clustering, {0,0,0,0,0,0,0,0}, Port).

%% All listener records across the cluster (dirty read).
active_listeners() ->
    rabbit_misc:dirty_read_all(rabbit_listener).

%% Listener records for one node.
node_listeners(Node) ->
    mnesia:dirty_read(rabbit_listener, Node).

%% Cluster hook: forget all listeners of a node that went down.
on_node_down(Node) ->
    ok = mnesia:dirty_delete(rabbit_listener, Node).
+
%% Hand an accepted socket to a fresh reader process: spawn it under
%% rabbit_tcp_client_sup, transfer socket ownership, and kick it off
%% with the socket and its transform fun (e.g. the SSL upgrade).
%% Returns the reader pid.
start_client(Sock, SockTransform) ->
    {ok, _Child, Reader} = supervisor:start_child(rabbit_tcp_client_sup, []),
    ok = rabbit_net:controlling_process(Sock, Reader),
    Reader ! {go, Sock, SockTransform},

    %% In the event that somebody floods us with connections, the
    %% reader processes can spew log events at error_logger faster
    %% than it can keep up, causing its mailbox to grow unbounded
    %% until we eat all the memory available and crash. So here is a
    %% meaningless synchronous call to the underlying gen_event
    %% mechanism. When it returns the mailbox is drained, and we
    %% return to our caller to accept more connections.
    gen_event:which_handlers(error_logger),

    Reader.
+
%% Plain TCP client: no socket transformation needed.
start_client(Sock) ->
    start_client(Sock, fun (S) -> {ok, S} end).

%% SSL client: the transform fun performs the SSL handshake/upgrade.
start_ssl_client(SslOpts, Sock) ->
    start_client(Sock, ssl_transform_fun(SslOpts)).
+
%% Connection registry: readers join/leave the local pg_local group.
register_connection(Pid) -> pg_local:join(rabbit_connections, Pid).

unregister_connection(Pid) -> pg_local:leave(rabbit_connections, Pid).

%% All connection pids across every running cluster node.
connections() ->
    rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
                                     rabbit_networking, connections_local, []).

%% Connection pids on this node only (also the RPC target above).
connections_local() -> pg_local:get_members(rabbit_connections).

connection_info_keys() -> rabbit_reader:info_keys().

%% Per-connection info, delegated to the reader process.
connection_info(Pid) -> rabbit_reader:info(Pid).
connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items).

%% Info for every connection; cmap/1 tolerates connections that die
%% while being queried.
connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
+
%% Ask a reader to shut down with the given explanation; throws if Pid
%% is not a registered connection. Note the close is logged before the
%% membership check.
close_connection(Pid, Explanation) ->
    rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
    case lists:member(Pid, connections()) of
        true  -> rabbit_reader:shutdown(Pid, Explanation);
        false -> throw({error, {not_a_connection_pid, Pid}})
    end.
+
%% Ask every known connection reader to re-emit its creation event,
%% tagged with Ref.
force_connection_event_refresh(Ref) ->
    lists:foreach(fun (C) -> rabbit_reader:force_event_refresh(C, Ref) end,
                  connections()),
    ok.
+
+%%--------------------------------------------------------------------
+
%% Human-readable host for a listening address: wildcard addresses map
%% to our own hostname; otherwise try a reverse lookup, falling back
%% to the dotted/colon form.
tcp_host({0,0,0,0}) ->
    hostname();

tcp_host({0,0,0,0,0,0,0,0}) ->
    hostname();

tcp_host(IPAddress) ->
    case inet:gethostbyaddr(IPAddress) of
        {ok, #hostent{h_name = Name}} -> Name;
        {error, _Reason} -> rabbit_misc:ntoa(IPAddress)
    end.
+
%% Our own hostname, preferring the fully-qualified name from the
%% resolver over the bare inet:gethostname/0 result.
hostname() ->
    {ok, Hostname} = inet:gethostname(),
    case inet:gethostbyname(Hostname) of
        {ok, #hostent{h_name = Name}} -> Name;
        {error, _Reason} -> Hostname
    end.
+
%% Map F over all connections, dropping any that exit mid-query.
cmap(F) -> rabbit_misc:filter_exit_map(F, connections()).

%% Configured socket options for listeners.
tcp_opts() ->
    {ok, Opts} = application:get_env(rabbit, tcp_listen_options),
    Opts.
+
+%% inet_parse:address takes care of ip string, like "0.0.0.0"
+%% inet:getaddr returns immediately for ip tuple {0,0,0,0},
+%% and runs 'inet_gethost' port process for dns lookups.
+%% On Windows inet:getaddr runs dns resolver for ip string, which may fail.
%% Resolve Host to [{IPAddress, Family}]: literal IP strings are
%% parsed locally (with the family inferred when 'auto'), anything
%% else goes through DNS via gethostaddr/2.
getaddr(Host, Family) ->
    case inet_parse:address(Host) of
        {ok, IPAddress} -> [{IPAddress, resolve_family(IPAddress, Family)}];
        {error, _}      -> gethostaddr(Host, Family)
    end.
+
%% DNS-resolve Host. With 'auto' we try both inet and inet6 and return
%% every family that resolves; with an explicit family only that one.
%% No result at all is a fatal configuration error.
gethostaddr(Host, auto) ->
    Lookups = [{Family, inet:getaddr(Host, Family)} || Family <- [inet, inet6]],
    case [{IP, Family} || {Family, {ok, IP}} <- Lookups] of
        []  -> host_lookup_error(Host, Lookups);
        IPs -> IPs
    end;

gethostaddr(Host, Family) ->
    case inet:getaddr(Host, Family) of
        {ok, IPAddress} -> [{IPAddress, Family}];
        {error, Reason} -> host_lookup_error(Host, Reason)
    end.
+
%% Log and abort (via throw) on an unresolvable listener host.
host_lookup_error(Host, Reason) ->
    error_logger:error_msg("invalid host ~p - ~p~n", [Host, Reason]),
    throw({error, {invalid_host, Host, Reason}}).
+
%% Determine the inet family of an IP address tuple when the
%% configuration said 'auto'; an explicitly requested family is
%% passed straight through.
resolve_family(IP, auto) when tuple_size(IP) =:= 4 -> inet;
resolve_family(IP, auto) when tuple_size(IP) =:= 8 -> inet6;
resolve_family(IP, auto)                           -> throw({error, {strange_family, IP}});
resolve_family(_IP, Family)                        -> Family.
+
+%%--------------------------------------------------------------------
+
+%% There are three kinds of machine (for our purposes).
+%%
+%% * Those which treat IPv4 addresses as a special kind of IPv6 address
+%% ("Single stack")
+%% - Linux by default, Windows Vista and later
+%% - We also treat any (hypothetical?) IPv6-only machine the same way
+%% * Those which consider IPv6 and IPv4 to be completely separate things
+%% ("Dual stack")
+%% - OpenBSD, Windows XP / 2003, Linux if so configured
+%% * Those which do not support IPv6.
+%% - Ancient/weird OSes, Linux if so configured
+%%
+%% How to reconfigure Linux to test this:
+%% Single stack (default):
+%% echo 0 > /proc/sys/net/ipv6/bindv6only
+%% Dual stack:
+%% echo 1 > /proc/sys/net/ipv6/bindv6only
+%% IPv4 only:
+%% add ipv6.disable=1 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub then
+%% sudo update-grub && sudo reboot
+%%
+%% This matters in (and only in) the case where the sysadmin (or the
+%% app descriptor) has only supplied a port and we wish to bind to
+%% "all addresses". This means different things depending on whether
+%% we're single or dual stack. On single stack binding to "::"
+%% implicitly includes all IPv4 addresses, and subsequently attempting
+%% to bind to "0.0.0.0" will fail. On dual stack, binding to "::" will
+%% only bind to IPv6 addresses, and we need another listener bound to
+%% "0.0.0.0" for IPv4. Finally, on IPv4-only systems we of course only
+%% want to bind to "0.0.0.0".
+%%
+%% Unfortunately it seems there is no way to detect single vs dual stack
+%% apart from attempting to bind to the port.
%% Expand a bare port into the wildcard listener(s) this machine needs
%% (see the long comment above for the single/dual-stack rationale).
%% On dual stack, IPv6 is listed first.
port_to_listeners(Port) ->
    IPv4 = {"0.0.0.0", Port, inet},
    IPv6 = {"::",      Port, inet6},
    case ipv6_status(?FIRST_TEST_BIND_PORT) of
        single_stack -> [IPv6];
        ipv6_only    -> [IPv6];
        dual_stack   -> [IPv6, IPv4];
        ipv4_only    -> [IPv4]
    end.
+
%% Probe - by actually binding TestPort - whether this machine is
%% single stack, dual stack, IPv6-only or IPv4-only. If the test port
%% turns out to be in use we retry on the next port. The order of the
%% listen/close calls is significant; do not reorder.
ipv6_status(TestPort) ->
    IPv4 = [inet,  {ip, {0,0,0,0}}],
    IPv6 = [inet6, {ip, {0,0,0,0,0,0,0,0}}],
    case gen_tcp:listen(TestPort, IPv6) of
        {ok, LSock6} ->
            case gen_tcp:listen(TestPort, IPv4) of
                {ok, LSock4} ->
                    %% Dual stack
                    gen_tcp:close(LSock6),
                    gen_tcp:close(LSock4),
                    dual_stack;
                %% Checking the error here would only let us
                %% distinguish single stack IPv6 / IPv4 vs IPv6 only,
                %% which we figure out below anyway.
                {error, _} ->
                    gen_tcp:close(LSock6),
                    case gen_tcp:listen(TestPort, IPv4) of
                        %% Single stack
                        {ok, LSock4}            -> gen_tcp:close(LSock4),
                                                   single_stack;
                        %% IPv6-only machine. Welcome to the future.
                        {error, eafnosupport}   -> ipv6_only; %% Linux
                        {error, eprotonosupport}-> ipv6_only; %% FreeBSD
                        %% Dual stack machine with something already
                        %% on IPv4.
                        {error, _}              -> ipv6_status(TestPort + 1)
                    end
            end;
        %% IPv4-only machine. Welcome to the 90s.
        {error, eafnosupport} -> %% Linux
            ipv4_only;
        {error, eprotonosupport} -> %% FreeBSD
            ipv4_only;
        %% Port in use
        {error, _} ->
            ipv6_status(TestPort + 1)
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_node_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([running_nodes_filename/0,
+ cluster_status_filename/0, prepare_cluster_status_files/0,
+ write_cluster_status/1, read_cluster_status/0,
+ update_cluster_status/0, reset_cluster_status/0]).
+-export([notify_node_up/0, notify_joined_cluster/0, notify_left_cluster/1]).
+-export([partitions/0, partitions/1, subscribe/1]).
+-export([pause_minority_guard/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
%% Utils
+-export([all_rabbit_nodes_up/0, run_outside_applications/1, ping_all/0]).
+
+-define(SERVER, ?MODULE).
+-define(RABBIT_UP_RPC_TIMEOUT, 2000).
+-define(RABBIT_DOWN_PING_INTERVAL, 1000).
+
+-record(state, {monitors, partitions, subscribers, down_ping_timer, autoheal}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+
+-spec(running_nodes_filename/0 :: () -> string()).
+-spec(cluster_status_filename/0 :: () -> string()).
+-spec(prepare_cluster_status_files/0 :: () -> 'ok').
+-spec(write_cluster_status/1 :: (rabbit_mnesia:cluster_status()) -> 'ok').
+-spec(read_cluster_status/0 :: () -> rabbit_mnesia:cluster_status()).
+-spec(update_cluster_status/0 :: () -> 'ok').
+-spec(reset_cluster_status/0 :: () -> 'ok').
+
+-spec(notify_node_up/0 :: () -> 'ok').
+-spec(notify_joined_cluster/0 :: () -> 'ok').
+-spec(notify_left_cluster/1 :: (node()) -> 'ok').
+
+-spec(partitions/0 :: () -> [node()]).
+-spec(partitions/1 :: ([node()]) -> [{node(), [node()]}]).
+-spec(subscribe/1 :: (pid()) -> 'ok').
+-spec(pause_minority_guard/0 :: () -> 'ok' | 'pausing').
+
+-spec(all_rabbit_nodes_up/0 :: () -> boolean()).
+-spec(run_outside_applications/1 :: (fun (() -> any())) -> pid()).
+-spec(ping_all/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Start
+%%----------------------------------------------------------------------------
+
%% Start the singleton node monitor, registered locally as ?SERVER.
start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%----------------------------------------------------------------------------
+%% Cluster file operations
+%%----------------------------------------------------------------------------
+
+%% The cluster file information is kept in two files. The "cluster
+%% status file" contains all the clustered nodes and the disc nodes.
+%% The "running nodes file" contains the currently running nodes or
+%% the running nodes at shutdown when the node is down.
+%%
+%% We strive to keep the files up to date and we rely on this
+%% assumption in various situations. Obviously when mnesia is offline
+%% the information we have will be outdated, but it cannot be
+%% otherwise.
+
%% Path of the "running nodes file": the nodes running now, or at the
%% moment of shutdown when this node is down.
running_nodes_filename() ->
    filename:join(rabbit_mnesia:dir(), "nodes_running_at_shutdown").
+
%% Path of the "cluster status file", holding a single
%% {AllNodes, DiscNodes} term.
%%
%% Fix: build the path with filename:join/2, consistent with
%% running_nodes_filename/0, instead of ad-hoc string concatenation.
cluster_status_filename() ->
    filename:join(rabbit_mnesia:dir(), "cluster_nodes.config").
+
%% Run at startup, before mnesia: ensure both cluster files exist and
%% are in the current format. Legacy file contents (a bare node list)
%% are translated, this node is folded into the running set, and the
%% result is written back. Corrupt files abort with a throw.
prepare_cluster_status_files() ->
    rabbit_mnesia:ensure_mnesia_dir(),
    Corrupt = fun(F) -> throw({error, corrupt_cluster_status_files, F}) end,
    RunningNodes1 = case try_read_file(running_nodes_filename()) of
                        {ok, [Nodes]} when is_list(Nodes) -> Nodes;
                        {ok, Other}                       -> Corrupt(Other);
                        {error, enoent}                   -> []
                    end,
    ThisNode = [node()],
    %% The running nodes file might contain a set or a list, in case
    %% of the legacy file
    RunningNodes2 = lists:usort(ThisNode ++ RunningNodes1),
    {AllNodes1, DiscNodes} =
        case try_read_file(cluster_status_filename()) of
            {ok, [{AllNodes, DiscNodes0}]} ->
                {AllNodes, DiscNodes0};
            {ok, [AllNodes0]} when is_list(AllNodes0) ->
                {legacy_cluster_nodes(AllNodes0), legacy_disc_nodes(AllNodes0)};
            {ok, Files} ->
                Corrupt(Files);
            {error, enoent} ->
                LegacyNodes = legacy_cluster_nodes([]),
                {LegacyNodes, LegacyNodes}
        end,
    AllNodes2 = lists:usort(AllNodes1 ++ RunningNodes2),
    ok = write_cluster_status({AllNodes2, DiscNodes, RunningNodes2}).
+
%% Persist the cluster status: write the status file first, then the
%% running-nodes file; on failure, throw naming whichever file could
%% not be written.
write_cluster_status({All, Disc, Running}) ->
    ClusterStatusFN = cluster_status_filename(),
    Res = case rabbit_file:write_term_file(ClusterStatusFN, [{All, Disc}]) of
              ok ->
                  RunningNodesFN = running_nodes_filename(),
                  {RunningNodesFN,
                   rabbit_file:write_term_file(RunningNodesFN, [Running])};
              E1 = {error, _} ->
                  {ClusterStatusFN, E1}
          end,
    case Res of
        {_, ok}           -> ok;
        {FN, {error, E2}} -> throw({error, {could_not_write_file, FN, E2}})
    end.
+
%% Read both cluster files, returning {All, Disc, Running}; anything
%% other than the current file format is treated as corruption.
read_cluster_status() ->
    case {try_read_file(cluster_status_filename()),
          try_read_file(running_nodes_filename())} of
        {{ok, [{All, Disc}]}, {ok, [Running]}} when is_list(Running) ->
            {All, Disc, Running};
        {Stat, Run} ->
            throw({error, {corrupt_or_missing_cluster_files, Stat, Run}})
    end.
+
%% Refresh the files from mnesia's view of the cluster.
update_cluster_status() ->
    {ok, Status} = rabbit_mnesia:cluster_status_from_mnesia(),
    write_cluster_status(Status).

%% Reset the files to a single-node cluster of just this node.
reset_cluster_status() ->
    write_cluster_status({[node()], [node()], [node()]}).
+
+%%----------------------------------------------------------------------------
+%% Cluster notifications
+%%----------------------------------------------------------------------------
+
%% Tell every other running node's monitor that our rabbit is up, and
%% symmetrically register each of them with our own monitor (we will
%% not receive node_up casts for rabbits that started before us).
notify_node_up() ->
    Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
    gen_server:abcast(Nodes, ?SERVER,
                      {node_up, node(), rabbit_mnesia:node_type()}),
    %% register other active rabbits with this rabbit
    DiskNodes = rabbit_mnesia:cluster_nodes(disc),
    [gen_server:cast(?SERVER, {node_up, N, case lists:member(N, DiskNodes) of
                                               true  -> disc;
                                               false -> ram
                                           end}) || N <- Nodes],
    ok.
+
%% Broadcast to the other running monitors that this node has joined
%% the cluster (its rabbit may not be running yet).
notify_joined_cluster() ->
    Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
    gen_server:abcast(Nodes, ?SERVER,
                      {joined_cluster, node(), rabbit_mnesia:node_type()}),
    ok.

%% Broadcast that Node has left the cluster. Sent to all running nodes
%% (including ourselves), since Node may be a third party.
notify_left_cluster(Node) ->
    Nodes = rabbit_mnesia:cluster_nodes(running),
    gen_server:abcast(Nodes, ?SERVER, {left_cluster, Node}),
    ok.
+
+%%----------------------------------------------------------------------------
+%% Server calls
+%%----------------------------------------------------------------------------
+
%% Nodes this node believes it is partitioned from.
partitions() ->
    gen_server:call(?SERVER, partitions, infinity).

%% Partition lists as seen by each of the given nodes.
partitions(Nodes) ->
    {Replies, _} = gen_server:multi_call(Nodes, ?SERVER, partitions, infinity),
    Replies.

%% Subscribe Pid to {node_down, Node} notifications.
subscribe(Pid) ->
    gen_server:cast(?SERVER, {subscribe, Pid}).
+
+%%----------------------------------------------------------------------------
+%% pause_minority safety
+%%----------------------------------------------------------------------------
+
+%% If we are in a minority and pause_minority mode then a) we are
+%% going to shut down imminently and b) we should not confirm anything
+%% until then, since anything we confirm is likely to be lost.
+%%
+%% We could confirm something by having an HA queue see the minority
+%% state (and fail over into it) before the node monitor stops us, or
+%% by using unmirrored queues and just having them vanish (and
%% confirming messages as thrown away).
+%%
+%% So we have channels call in here before issuing confirms, to do a
+%% lightweight check that we have not entered a minority state.
+
%% Called by channels before issuing confirms (see comment above).
%% Deliberately caches in the caller's process dictionary so the
%% common non-pause_minority case costs one pdict read and no
%% messaging.
pause_minority_guard() ->
    case get(pause_minority_guard) of
        not_minority_mode ->
            ok;
        undefined ->
            {ok, M} = application:get_env(rabbit, cluster_partition_handling),
            case M of
                pause_minority -> pause_minority_guard([]);
                _              -> put(pause_minority_guard, not_minority_mode),
                                  ok
            end;
        {minority_mode, Nodes} ->
            pause_minority_guard(Nodes)
    end.
+
%% Re-evaluate majority only when the set of visible nodes has changed
%% since the cached check; returns 'pausing' when we are in a minority
%% and therefore about to shut down.
pause_minority_guard(LastNodes) ->
    case nodes() of
        LastNodes -> ok;
        _         -> put(pause_minority_guard, {minority_mode, nodes()}),
                     case majority() of
                         false -> pausing;
                         true  -> ok
                     end
    end.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
%% gen_server init: subscribe to net_kernel node events (with down
%% reasons) and mnesia system events (for partition detection).
init([]) ->
    %% We trap exits so that the supervisor will not just kill us. We
    %% want to be sure that we are not going to be killed while
    %% writing out the cluster status files - bad things can then
    %% happen.
    process_flag(trap_exit, true),
    net_kernel:monitor_nodes(true, [nodedown_reason]),
    {ok, _} = mnesia:subscribe(system),
    {ok, #state{monitors    = pmon:new(),
                subscribers = pmon:new(),
                partitions  = [],
                autoheal    = rabbit_autoheal:init()}}.
+
%% Only supported call: the current partition list.
handle_call(partitions, _From, State = #state{partitions = Partitions}) ->
    {reply, Partitions, State};

%% NOTE(review): unknown calls get {noreply, _} with no reply ever
%% sent, so a caller (here always using 'infinity') would block
%% forever - presumably no such calls are made; confirm before
%% changing.
handle_call(_Request, _From, State) ->
    {noreply, State}.
+
+%% Note: when updating the status file, we can't simply write the
+%% mnesia information since the message can (and will) overtake the
+%% mnesia propagation.
%% Another node's rabbit is up: start monitoring its rabbit
%% application, fold it into the status files, and run the live-rabbit
%% hooks. Idempotent - a second node_up for a monitored node is a
%% no-op.
handle_cast({node_up, Node, NodeType},
            State = #state{monitors = Monitors}) ->
    case pmon:is_monitored({rabbit, Node}, Monitors) of
        true  -> {noreply, State};
        false -> rabbit_log:info("rabbit on node ~p up~n", [Node]),
                 {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
                 write_cluster_status({add_node(Node, AllNodes),
                                       case NodeType of
                                           disc -> add_node(Node, DiscNodes);
                                           ram  -> DiscNodes
                                       end,
                                       add_node(Node, RunningNodes)}),
                 ok = handle_live_rabbit(Node),
                 {noreply, State#state{
                             monitors = pmon:monitor({rabbit, Node}, Monitors)}}
    end;
%% A node joined the cluster (its rabbit may not be running yet):
%% record it in the all/disc node lists only.
handle_cast({joined_cluster, Node, NodeType}, State) ->
    {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
    write_cluster_status({add_node(Node, AllNodes),
                          case NodeType of
                              disc -> add_node(Node, DiscNodes);
                              ram  -> DiscNodes
                          end,
                          RunningNodes}),
    {noreply, State};
%% A node left the cluster: scrub it from all three lists.
handle_cast({left_cluster, Node}, State) ->
    {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
    write_cluster_status({del_node(Node, AllNodes), del_node(Node, DiscNodes),
                          del_node(Node, RunningNodes)}),
    {noreply, State};
%% Subscribers are monitored so we can clean up when they die.
handle_cast({subscribe, Pid}, State = #state{subscribers = Subscribers}) ->
    {noreply, State#state{subscribers = pmon:monitor(Pid, Subscribers)}};
handle_cast(_Msg, State) ->
    {noreply, State}.
+
%% The rabbit application on Node went down (we monitor {rabbit, Node}
%% pairs): mark it as not running, notify subscribers, and run the
%% dead-rabbit hooks.
handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
            State = #state{monitors = Monitors, subscribers = Subscribers}) ->
    rabbit_log:info("rabbit on node ~p down~n", [Node]),
    {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
    write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}),
    [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)],
    {noreply, handle_dead_rabbit(
                Node,
                State#state{monitors = pmon:erase({rabbit, Node}, Monitors)})};

%% A subscriber died: forget it.
handle_info({'DOWN', _MRef, process, Pid, _Reason},
            State = #state{subscribers = Subscribers}) ->
    {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};

%% net_kernel reports the whole node down (regardless of the rabbit
%% application's state).
handle_info({nodedown, Node, Info}, State) ->
    rabbit_log:info("node ~p down: ~p~n",
                    [Node, proplists:get_value(nodedown_reason, Info)]),
    {noreply, handle_dead_node(Node, State)};

%% Mnesia detected a partition with Node that has now healed at the
%% network level: treat the node as up and possibly kick off autoheal.
handle_info({mnesia_system_event,
             {inconsistent_database, running_partitioned_network, Node}},
            State = #state{partitions = Partitions,
                           monitors   = Monitors,
                           autoheal   = AState}) ->
    %% We will not get a node_up from this node - yet we should treat it as
    %% up (mostly).
    State1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
                 true  -> State;
                 false -> State#state{
                            monitors = pmon:monitor({rabbit, Node}, Monitors)}
             end,
    ok = handle_live_rabbit(Node),
    Partitions1 = ordsets:to_list(
                    ordsets:add_element(Node, ordsets:from_list(Partitions))),
    {noreply, State1#state{partitions = Partitions1,
                           autoheal = rabbit_autoheal:maybe_start(AState)}};

%% Forward autoheal protocol messages to the autoheal state machine.
handle_info({autoheal_msg, Msg}, State = #state{autoheal   = AState,
                                                partitions = Partitions}) ->
    AState1 = rabbit_autoheal:handle_msg(Msg, AState, Partitions),
    {noreply, State#state{autoheal = AState1}};

handle_info(ping_nodes, State) ->
    %% We ping nodes when some are down to ensure that we find out
    %% about healed partitions quickly. We ping all nodes rather than
    %% just the ones we know are down for simplicity; it's not expensive
    %% to ping the nodes that are up, after all.
    State1 = State#state{down_ping_timer = undefined},
    Self = self(),
    %% We ping in a separate process since in a partition it might
    %% take some noticeable length of time and we don't want to block
    %% the node monitor for that long.
    spawn_link(fun () ->
                       ping_all(),
                       case all_nodes_up() of
                           true  -> ok;
                           false -> Self ! ping_again
                       end
               end),
    {noreply, State1};

%% Not everyone is back yet: rearm the ping timer.
handle_info(ping_again, State) ->
    {noreply, ensure_ping_timer(State)};

%% Drain anything unexpected so the mailbox cannot grow unboundedly.
handle_info(_Info, State) ->
    {noreply, State}.
+
%% Cancel any outstanding ping timer on shutdown.
terminate(_Reason, State) ->
    rabbit_misc:stop_timer(State, #state.down_ping_timer),
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Functions that call the module specific hooks when nodes go up/down
+%%----------------------------------------------------------------------------
+
%% React to a *node* (not just rabbit) going down, according to the
%% configured cluster_partition_handling mode; an unrecognised mode is
%% logged and treated as 'ignore'.
handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
    %% In general in rabbit_node_monitor we care about whether the
    %% rabbit application is up rather than the node; we do this so
    %% that we can respond in the same way to "rabbitmqctl stop_app"
    %% and "rabbitmqctl stop" as much as possible.
    %%
    %% However, for pause_minority mode we can't do this, since we
    %% depend on looking at whether other nodes are up to decide
    %% whether to come back up ourselves - if we decide that based on
    %% the rabbit application we would go down and never come back.
    case application:get_env(rabbit, cluster_partition_handling) of
        {ok, pause_minority} ->
            case majority() of
                true  -> ok;
                false -> await_cluster_recovery()
            end,
            State;
        {ok, ignore} ->
            State;
        {ok, autoheal} ->
            State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
        {ok, Term} ->
            rabbit_log:warning("cluster_partition_handling ~p unrecognised, "
                               "assuming 'ignore'~n", [Term]),
            State
    end.
+
%% pause_minority action: stop the rabbit application and poll (from a
%% process outside the application) until we see a majority again.
await_cluster_recovery() ->
    rabbit_log:warning("Cluster minority status detected - awaiting recovery~n",
                       []),
    run_outside_applications(fun () ->
                                     rabbit:stop(),
                                     wait_for_cluster_recovery()
                             end),
    ok.
+
%% Run Fun in a process detached from the application master (group
%% leader re-pointed at init) so it survives - and can drive -
%% application shutdown. At most one such process runs at a time.
run_outside_applications(Fun) ->
    spawn(fun () ->
                  %% If our group leader is inside an application we are about
                  %% to stop, application:stop/1 does not return.
                  group_leader(whereis(init), self()),
                  %% Ensure only one such process at a time, the
                  %% exit(badarg) is harmless if one is already running
                  try register(rabbit_outside_app_process, self()) of
                      true -> Fun()
                  catch error:badarg -> ok
                  end
          end).
+
%% Poll (ping + majority check) every ?RABBIT_DOWN_PING_INTERVAL ms
%% until we are in the majority again, then restart rabbit.
wait_for_cluster_recovery() ->
    ping_all(),
    case majority() of
        true  -> rabbit:start();
        false -> timer:sleep(?RABBIT_DOWN_PING_INTERVAL),
                 wait_for_cluster_recovery()
    end.
+
%% Run the per-subsystem on_node_down hooks for a dead rabbit, clear
%% the partition list if we are now the only remaining partition, and
%% arm the ping timer so we notice healing quickly.
handle_dead_rabbit(Node, State = #state{partitions = Partitions,
                                        autoheal   = Autoheal}) ->
    %% TODO: This may turn out to be a performance hog when there are
    %% lots of nodes.  We really only need to execute some of these
    %% statements on *one* node, rather than all of them.
    ok = rabbit_networking:on_node_down(Node),
    ok = rabbit_amqqueue:on_node_down(Node),
    ok = rabbit_alarm:on_node_down(Node),
    ok = rabbit_mnesia:on_node_down(Node),
    %% If we have been partitioned, and we are now in the only remaining
    %% partition, we no longer care about partitions - forget them. Note
    %% that we do not attempt to deal with individual (other) partitions
    %% going away. It's only safe to forget anything about partitions when
    %% there are no partitions.
    Partitions1 = case Partitions -- (Partitions -- alive_rabbit_nodes()) of
                      [] -> [];
                      _  -> Partitions
                  end,
    ensure_ping_timer(
      State#state{partitions = Partitions1,
                  autoheal = rabbit_autoheal:rabbit_down(Node, Autoheal)}).
+
%% Arm (if not already armed) the timer that sends us 'ping_nodes'.
ensure_ping_timer(State) ->
    rabbit_misc:ensure_timer(
      State, #state.down_ping_timer, ?RABBIT_DOWN_PING_INTERVAL, ping_nodes).

%% Run the per-subsystem on_node_up hooks for a newly-live rabbit.
handle_live_rabbit(Node) ->
    ok = rabbit_alarm:on_node_up(Node),
    ok = rabbit_mnesia:on_node_up(Node).
+
+%%--------------------------------------------------------------------
+%% Internal utils
+%%--------------------------------------------------------------------
+
%% Read a term file, passing through success and "no such file", and
%% escalating any other error to a throw identifying the file.
try_read_file(FileName) ->
    case rabbit_file:read_term_file(FileName) of
        {ok, _Term} = Ok    -> Ok;
        {error, enoent} = E -> E;
        {error, Reason}     -> throw({error,
                                      {cannot_read_file, FileName, Reason}})
    end.
+
%% Best-effort cluster membership for legacy status files.
legacy_cluster_nodes(Nodes) ->
    %% We get all the info that we can, including the nodes from
    %% mnesia, which will be there if the node is a disc node (empty
    %% list otherwise)
    lists:usort(Nodes ++ mnesia:system_info(db_nodes)).
+
%% Legacy disc-node inference: with no recorded nodes, or when this
%% node appears among them, assume this node is (the only known) disc
%% node; otherwise we know of none.
legacy_disc_nodes([]) ->
    [node()];
legacy_disc_nodes(AllNodes) ->
    case lists:member(node(), AllNodes) of
        true  -> [node()];
        false -> []
    end.
+
%% Insert Node into a node list, keeping it sorted and duplicate-free.
add_node(Node, Nodes) -> lists:umerge([Node], lists:usort(Nodes)).

%% Remove (the first occurrence of) Node from a node list.
del_node(Node, Nodes) -> lists:delete(Node, Nodes).
+
+%%--------------------------------------------------------------------
+
+%% mnesia:system_info(db_nodes) (and hence
+%% rabbit_mnesia:cluster_nodes(running)) does not give reliable
+%% results when partitioned. So we have a small set of replacement
+%% functions here. "rabbit" in a function's name implies we test if
+%% the rabbit application is up, not just the node.
+
+%% As we use these functions to decide what to do in pause_minority
+%% state, they *must* be fast, even in the case where TCP connections
+%% are timing out. So that means we should be careful about whether we
+%% connect to nodes which are currently disconnected.
+
%% True when strictly more than half of all cluster nodes are
%% reachable from here (this node counts as reachable).
majority() ->
    Nodes = rabbit_mnesia:cluster_nodes(all),
    length(alive_nodes(Nodes)) / length(Nodes) > 0.5.

%% True when every cluster node is reachable.
all_nodes_up() ->
    Nodes = rabbit_mnesia:cluster_nodes(all),
    length(alive_nodes(Nodes)) =:= length(Nodes).

%% True when every cluster node is reachable AND running rabbit.
all_rabbit_nodes_up() ->
    Nodes = rabbit_mnesia:cluster_nodes(all),
    length(alive_rabbit_nodes(Nodes)) =:= length(Nodes).
+
%% Filter Nodes down to those currently reachable from this node (the
%% local node always counts). Consults only the already-established
%% connection list - it never attempts to connect (see comment above).
%%
%% Fix: hoist [node() | nodes()] out of the comprehension - previously
%% it was rebuilt for every element of Nodes.
alive_nodes(Nodes) ->
    Reachable = [node() | nodes()],
    [N || N <- Nodes, lists:member(N, Reachable)].
+
%% Reachable cluster nodes that are also running the rabbit application.
alive_rabbit_nodes() -> alive_rabbit_nodes(rabbit_mnesia:cluster_nodes(all)).

alive_rabbit_nodes(Nodes) ->
    [N || N <- alive_nodes(Nodes), rabbit:is_running(N)].
+
+%% This one is allowed to connect!
%% Ping every cluster node. Unlike the checks above, this function IS
%% allowed to establish connections to currently-disconnected nodes.
ping_all() ->
    lists:foreach(fun net_adm:ping/1, rabbit_mnesia:cluster_nodes(all)),
    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_nodes).
+
+-export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0,
+ is_running/2, is_process_running/2,
+ cluster_name/0, set_cluster_name/1]).
+
+-include_lib("kernel/include/inet.hrl").
+
+-define(EPMD_TIMEOUT, 30000).
+-define(TCP_DIAGNOSTIC_TIMEOUT, 5000).
+
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(names/1 :: (string()) -> rabbit_types:ok_or_error2(
+ [{string(), integer()}], term())).
+-spec(diagnostics/1 :: ([node()]) -> string()).
+-spec(make/1 :: ({string(), string()} | string()) -> node()).
+-spec(parts/1 :: (node() | string()) -> {string(), string()}).
+-spec(cookie_hash/0 :: () -> string()).
+-spec(is_running/2 :: (node(), atom()) -> boolean()).
+-spec(is_process_running/2 :: (node(), atom()) -> boolean()).
+-spec(cluster_name/0 :: () -> binary()).
+-spec(set_cluster_name/1 :: (binary()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% Ask the epmd daemon on Hostname which Erlang nodes it knows about.
%% Returns {ok, [{NameString, Port}]} or {error, Reason}.  The query is
%% done in a spawned worker so a wedged epmd cannot block the caller:
%% timer:exit_after/3 kills the worker after ?EPMD_TIMEOUT, which
%% arrives here as a 'DOWN' message and becomes {error, timeout}.
names(Hostname) ->
    Self = self(),
    Ref = make_ref(),
    {Pid, MRef} = spawn_monitor(
                    fun () -> Self ! {Ref, net_adm:names(Hostname)} end),
    timer:exit_after(?EPMD_TIMEOUT, Pid, timeout),
    receive
        %% Success: drop the now-redundant monitor (flush in case the
        %% 'DOWN' message already arrived).
        {Ref, Names} -> erlang:demonitor(MRef, [flush]),
                        Names;
        {'DOWN', MRef, process, Pid, Reason} -> {error, Reason}
    end.
+
%% Render a human-readable connectivity report for the given nodes,
%% followed by details about the local node.
diagnostics(Nodes) ->
    Header = [{"~nDIAGNOSTICS~n===========~n~n"
               "attempted to contact: ~p~n", [Nodes]}],
    PerNode = [diagnostics_node(Node) || Node <- Nodes],
    Sections = Header ++ PerNode ++ current_node_details(),
    rabbit_misc:format_many(lists:flatten(Sections)).
+
%% Report sections about the local node: name, home directory (where
%% .erlang.cookie normally lives) and the cookie hash.
current_node_details() ->
    HomeEntry = case init:get_argument(home) of
                    {ok, [[Home]]} -> {"- home dir: ~s", [Home]};
                    Other          -> {"- no home dir: ~p", [Other]}
                end,
    [{"~ncurrent node details:~n- node name: ~w", [node()]},
     HomeEntry,
     {"- cookie hash: ~s", [cookie_hash()]}].
+
%% Build the report section for a single node: first check epmd
%% reachability on the node's host; if epmd answered, check whether
%% Erlang distribution to the node itself works.
diagnostics_node(Node) ->
    {Name, Host} = parts(Node),
    [{"~s:", [Node]} |
     case names(Host) of
         {error, Reason} ->
             [{" * unable to connect to epmd (port ~s) on ~s: ~s~n",
               [epmd_port(), Host, rabbit_misc:format_inet_error(Reason)]}];
         {ok, NamePorts} ->
             [{" * connected to epmd (port ~s) on ~s",
               [epmd_port(), Host]}] ++
                 %% epmd is fine; now distinguish a working node from a
                 %% broken distribution link.
                 case net_adm:ping(Node) of
                     pong -> dist_working_diagnostics(Node);
                     pang -> dist_broken_diagnostics(Name, Host, NamePorts)
                 end
     end].
+
%% epmd port as a string: the -epmd_port emulator argument if given,
%% otherwise the standard 4369.
epmd_port() ->
    Default = "4369",
    case init:get_argument(epmd_port) of
        {ok, [[Port | _] | _]} when is_list(Port) -> Port;
        error                                     -> Default
    end.
+
%% Distribution to Node works; report whether 'rabbit' itself is up
%% there, and if not, what IS running (to aid diagnosis).
dist_working_diagnostics(Node) ->
    case rabbit:is_running(Node) of
        true ->
            [{" * node ~s up, 'rabbit' application running", [Node]}];
        false ->
            [{" * node ~s up, 'rabbit' application not running~n"
              " * running applications on ~s: ~p~n"
              " * suggestion: start_app on ~s",
              [Node, Node, remote_apps(Node), Node]}]
    end.
+
%% Application names running on Node.  The 5s timeout is passed to
%% application:which_applications/1 because really, we don't trust the
%% node: the last thing we want to do is hang.  On rpc failure the
%% {badrpc, _} tuple is returned as-is for display.
remote_apps(Node) ->
    case rpc:call(Node, application, which_applications, [5000]) of
        {badrpc, _} = Err -> Err;
        Running           -> [Name || {Name, _Descr, _Vsn} <- Running]
    end.
+
%% Distribution to the node failed even though epmd answered.  Either
%% epmd has no entry for Name (node not started - list what IS
%% registered on that host), or it has one, in which case we probe the
%% advertised port over raw TCP to separate firewall problems from
%% cookie/hostname problems.
dist_broken_diagnostics(Name, Host, NamePorts) ->
    case [{N, P} || {N, P} <- NamePorts, N =:= Name] of
        [] ->
            {SelfName, SelfHost} = parts(node()),
            %% Exclude our own name from the listing only when we are
            %% on the same host; never_matches makes the comparison a
            %% no-op otherwise.
            Others = [list_to_atom(N) || {N, _} <- NamePorts,
                                         N =/= case SelfHost of
                                                   Host -> SelfName;
                                                   _    -> never_matches
                                               end],
            OthersDiag = case Others of
                             [] -> [{" no other nodes on ~s",
                                     [Host]}];
                             _  -> [{" other nodes on ~s: ~p",
                                     [Host, Others]}]
                         end,
            [{" * epmd reports: node '~s' not running at all", [Name]},
             OthersDiag, {" * suggestion: start the node", []}];
        [{Name, Port}] ->
            [{" * epmd reports node '~s' running on port ~b", [Name, Port]} |
             case diagnose_connect(Host, Port) of
                 ok ->
                     %% TCP is fine, so the failure is at the Erlang
                     %% distribution layer: wrong hostname or cookie.
                     [{" * TCP connection succeeded but Erlang distribution "
                       "failed~n"
                       " * suggestion: hostname mismatch?~n"
                       " * suggestion: is the cookie set correctly?", []}];
                 {error, Reason} ->
                     [{" * can't establish TCP connection, reason: ~s~n"
                       " * suggestion: blocked by firewall?",
                       [rabbit_misc:format_inet_error(Reason)]}]
             end]
    end.
+
%% Attempt a bare TCP connection to Host:Port so connectivity failures
%% can be told apart from distribution/cookie failures.  Resolves the
%% host first to pick the right address family.
diagnose_connect(Host, Port) ->
    case inet:gethostbyname(Host) of
        {error, _} = Err ->
            Err;
        {ok, #hostent{h_addrtype = Family}} ->
            case gen_tcp:connect(Host, Port, [Family],
                                 ?TCP_DIAGNOSTIC_TIMEOUT) of
                {ok, Sock} ->
                    gen_tcp:close(Sock),
                    ok;
                {error, _} = Err ->
                    Err
            end
    end.
+
%% Build a node atom from a {Name, Host} pair or a "name[@host]"
%% string (the host defaults to the local node's host via parts/1).
make({Prefix, Suffix}) -> list_to_atom(Prefix ++ "@" ++ Suffix);
make(NodeStr)          -> make(parts(NodeStr)).
+
%% Split a node name (atom or string) into {"name", "host"}.  When the
%% "@host" part is missing, the local node's own host is substituted.
parts(Node) when is_atom(Node) ->
    parts(atom_to_list(Node));
parts(NodeStr) ->
    {Prefix, Rest} = lists:splitwith(fun (C) -> C =/= $@ end, NodeStr),
    case Rest of
        []            -> {_Me, Suffix} = parts(node()),
                         {Prefix, Suffix};
        [$@ | Suffix] -> {Prefix, Suffix}
    end.
+
%% Base64 of the MD5 of the Erlang cookie: lets operators compare
%% cookies across nodes without revealing the cookie itself.
cookie_hash() ->
    Cookie = atom_to_list(erlang:get_cookie()),
    base64:encode_to_string(erlang:md5(Cookie)).
+
%% True when Application appears in Node's running-applications list;
%% an unreachable node counts as "not running".
is_running(Node, Application) ->
    case rpc:call(Node, rabbit_misc, which_applications, []) of
        {badrpc, _} -> false;
        Running     -> proplists:is_defined(Application, Running)
    end.
+
%% True when a process is registered under the name Process on Node;
%% rpc failures and unregistered names both yield false.
is_process_running(Node, Process) ->
    case rpc:call(Node, erlang, whereis, [Process]) of
        {badrpc, _}          -> false;
        undefined            -> false;
        Pid when is_pid(Pid) -> true
    end.
+
%% The cluster name global runtime parameter, falling back to a
%% generated default based on this node's FQDN.
cluster_name() ->
    Default = cluster_name_default(),
    rabbit_runtime_parameters:value_global(cluster_name, Default).
+
%% Default cluster name: this node's name rebuilt with the fully
%% qualified hostname, as a binary.
cluster_name_default() ->
    {ID, _Host} = rabbit_nodes:parts(node()),
    {ok, ShortHost} = inet:gethostname(),
    {ok, #hostent{h_name = FQDN}} = inet:gethostbyname(ShortHost),
    list_to_binary(atom_to_list(rabbit_nodes:make({ID, FQDN}))).
+
%% Persist a new cluster name as a global runtime parameter.
set_cluster_name(Name) ->
    rabbit_runtime_parameters:set_global(cluster_name, Name).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_parameter_validation).
+
+-export([number/2, binary/2, boolean/2, list/2, regex/2, proplist/3, enum/1]).
+
%% Validate that Term is numeric; Name only labels the error report.
number(_Name, Term) when is_number(Term) ->
    ok;
number(Name, Term) ->
    {error, "~s should be number, actually was ~p", [Name, Term]}.
+
%% Validate that Term is a binary; Name only labels the error report.
binary(_Name, Term) when is_binary(Term) ->
    ok;
binary(Name, Term) ->
    {error, "~s should be binary, actually was ~p", [Name, Term]}.
+
%% Validate that Term is true or false; Name only labels the error.
boolean(_Name, Term) when is_boolean(Term) ->
    ok;
boolean(Name, Term) ->
    {error, "~s should be boolean, actually was ~p", [Name, Term]}.
+
%% Validate that Term is a list; Name only labels the error report.
list(_Name, Term) when is_list(Term) ->
    ok;
list(Name, Term) ->
    {error, "~s should be list, actually was ~p", [Name, Term]}.
+
%% Validate that Term is a binary holding a compilable regular
%% expression; non-binaries and broken patterns both yield errors.
regex(Name, Term) when is_binary(Term) ->
    case re:compile(Term) of
        {ok, _Compiled} -> ok;
        {error, Reason} -> {error, "~s should be regular expression "
                                   "but is invalid: ~p", [Name, Reason]}
    end;
regex(Name, Term) ->
    {error, "~s should be a binary but was ~p", [Name, Term]}.
+
%% Validate a proplist Term against Constraints, each being
%% {Key, ValidatorFun, mandatory | optional}.  Returns the list of
%% per-key validator results (ok or {error, Fmt, Args}); keys left
%% over after all constraints are consumed produce an extra error
%% entry at the head of the result.
proplist(Name, Constraints, Term) when is_list(Term) ->
    Step = fun ({Key, Fun, Needed}, {Acc, Rest0}) ->
                   case {lists:keytake(Key, 1, Rest0), Needed} of
                       {{value, {Key, Value}, Rest1}, _} ->
                           {[Fun(Key, Value) | Acc], Rest1};
                       {false, mandatory} ->
                           Err = {error, "Key \"~s\" not found in ~s",
                                  [Key, Name]},
                           {[Err | Acc], Rest0};
                       {false, optional} ->
                           {Acc, Rest0}
                   end
           end,
    {Results, Leftover} = lists:foldl(Step, {[], Term}, Constraints),
    case Leftover of
        [] -> Results;
        _  -> [{error, "Unrecognised terms ~p in ~s", [Leftover, Name]}
               | Results]
    end;
proplist(Name, _Constraints, Term) ->
    {error, "~s not a list ~p", [Name, Term]}.
+
%% Build a validator fun that accepts only the binary forms of the
%% given option atoms; non-binary values are rejected outright.
enum(OptionsA) ->
    Allowed = [list_to_binary(atom_to_list(O)) || O <- OptionsA],
    fun (Name, Term) when is_binary(Term) ->
            case lists:member(Term, Allowed) of
                true  -> ok;
                false -> {error, "~s should be one of ~p, actually was ~p",
                          [Name, Allowed, Term]}
            end;
        (Name, Term) ->
            {error, "~s should be binary, actually was ~p", [Name, Term]}
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_plugins).
+-include("rabbit.hrl").
+
+-export([setup/0, active/0, read_enabled/1, list/1, dependencies/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(plugin_name() :: atom()).
+
+-spec(setup/0 :: () -> [plugin_name()]).
+-spec(active/0 :: () -> [plugin_name()]).
+-spec(list/1 :: (string()) -> [#plugin{}]).
+-spec(read_enabled/1 :: (file:filename()) -> [plugin_name()]).
+-spec(dependencies/3 :: (boolean(), [plugin_name()], [#plugin{}]) ->
+ [plugin_name()]).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% @doc Prepares the file system and installs all enabled plugins.
%% All three locations come from the rabbit application environment.
setup() ->
    Env = fun (Key) ->
                  {ok, Value} = application:get_env(rabbit, Key),
                  Value
          end,
    prepare_plugins(Env(enabled_plugins_file),
                    Env(plugins_dir),
                    Env(plugins_expand_dir)).
+
%% @doc Lists the plugins which are currently running: the
%% intersection of what is unpacked in the expand dir and what the
%% runtime reports as running applications.
active() ->
    {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
    Installed = plugin_names(list(ExpandDir)),
    [App || {App, _Descr, _Vsn} <- rabbit_misc:which_applications(),
            lists:member(App, Installed)].
+
%% @doc Get the list of plugins which are ready to be enabled:
%% .ez archives and free-standing app directories under PluginsDir.
%% Unreadable candidates are logged as warnings, not fatal errors.
list(PluginsDir) ->
    Candidates = [{ez, EZ} || EZ <- filelib:wildcard("*.ez", PluginsDir)] ++
                 [{app, App} ||
                     App <- filelib:wildcard("*/ebin/*.app", PluginsDir)],
    Split = fun ({error, EZ, Reason}, {Ok, Bad}) -> {Ok, [{EZ, Reason} | Bad]};
                (#plugin{} = Plugin, {Ok, Bad})  -> {[Plugin | Ok], Bad}
            end,
    {Plugins, Problems} =
        lists:foldl(Split, {[], []},
                    [plugin_info(PluginsDir, Cand) || Cand <- Candidates]),
    case Problems of
        [] -> ok;
        _  -> error_logger:warning_msg(
                "Problem reading some plugins: ~p~n", [Problems])
    end,
    Plugins.
+
%% @doc Read the list of enabled plugins from the supplied term file.
%% The file must contain exactly one term (a list of plugin name
%% atoms).  A missing file means "no plugins enabled"; several terms
%% mean the file is corrupt.  Note the clause order is significant:
%% {ok, [Plugins]} must precede the {ok, [_|_]} malformed case.
read_enabled(PluginsFile) ->
    case rabbit_file:read_term_file(PluginsFile) of
        {ok, [Plugins]} -> Plugins;
        {ok, []}        -> [];
        {ok, [_|_]}     -> throw({error, {malformed_enabled_plugins_file,
                                          PluginsFile}});
        {error, enoent} -> [];
        {error, Reason} -> throw({error, {cannot_read_enabled_plugins_file,
                                          PluginsFile, Reason}})
    end.
+
%% @doc Calculate the dependency graph from <i>Sources</i>.
%% With Reverse =:= false, returns Sources plus everything they depend
%% on (the set to enable); with Reverse =:= true, returns Sources plus
%% everything that depends on them (the set to disable).
dependencies(Reverse, Sources, AllPlugins) ->
    {ok, G} = rabbit_misc:build_acyclic_graph(
                fun (App, _Deps) -> [{App, App}] end,
                fun (App, Deps) -> [{App, Dep} || Dep <- Deps] end,
                %% Every dependency also gets a vertex of its own (with
                %% no declared deps) so reachability works even for
                %% apps that are not themselves plugins.
                lists:ukeysort(
                  1, [{Name, Deps} ||
                         #plugin{name = Name,
                                 dependencies = Deps} <- AllPlugins] ++
                     [{Dep, []} ||
                         #plugin{dependencies = Deps} <- AllPlugins,
                         Dep <- Deps])),
    Dests = case Reverse of
                false -> digraph_utils:reachable(Sources, G);
                true  -> digraph_utils:reaching(Sources, G)
            end,
    %% digraph state lives in ETS; delete explicitly to avoid leaking.
    true = digraph:delete(G),
    Dests.
+
+%%----------------------------------------------------------------------------
+
%% Unpack every enabled plugin (plus its dependencies) into a freshly
%% recreated expansion directory, then put each plugin's ebin dir on
%% the code path.  Returns the list of application name atoms made
%% available.
prepare_plugins(EnabledFile, PluginsDistDir, ExpandDir) ->
    AllPlugins = list(PluginsDistDir),
    Enabled = read_enabled(EnabledFile),
    ToUnpack = dependencies(false, Enabled, AllPlugins),
    ToUnpackPlugins = lookup_plugins(ToUnpack, AllPlugins),

    %% Warn (but don't fail) about enabled plugins that are absent.
    case Enabled -- plugin_names(ToUnpackPlugins) of
        [] -> ok;
        Missing -> error_logger:warning_msg(
                     "The following enabled plugins were not found: ~p~n",
                     [Missing])
    end,

    %% Eliminate the contents of the destination directory
    case delete_recursively(ExpandDir) of
        ok -> ok;
        {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
                                      [ExpandDir, E1]}})
    end,
    %% Trailing "/" makes ensure_dir create ExpandDir itself.
    case filelib:ensure_dir(ExpandDir ++ "/") of
        ok -> ok;
        {error, E2} -> throw({error, {cannot_create_plugins_expand_dir,
                                      [ExpandDir, E2]}})
    end,

    [prepare_plugin(Plugin, ExpandDir) || Plugin <- ToUnpackPlugins],

    [prepare_dir_plugin(PluginAppDescPath) ||
        PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")].
+
%% Put a plugin's ebin directory on the code path and return the
%% application name atom derived from its .app file name.
prepare_dir_plugin(PluginAppDescPath) ->
    EbinDir = filename:dirname(PluginAppDescPath),
    code:add_path(EbinDir),
    list_to_atom(filename:basename(PluginAppDescPath, ".app")).
+
+%%----------------------------------------------------------------------------
+
%% Delete Path and everything under it, normalising the error shape
%% to {error, {cannot_delete, Path, Reason}}.
delete_recursively(Path) ->
    case rabbit_file:recursive_delete([Path]) of
        ok                    -> ok;
        {error, {Where, Why}} -> {error, {cannot_delete, Where, Why}}
    end.
+
%% Materialise one plugin into the expansion directory: .ez archives
%% are unzipped, directory plugins are copied wholesale.
prepare_plugin(#plugin{type = ez, location = Location}, ExpandDir) ->
    zip:unzip(Location, [{cwd, ExpandDir}]);
prepare_plugin(#plugin{type = dir, name = Name, location = Location},
               ExpandDir) ->
    rabbit_file:recursive_copy(Location, filename:join([ExpandDir, Name])).
+
%% Read a plugin candidate's .app metadata and build a #plugin{}
%% record; failures become {error, Path, Reason} tuples.
plugin_info(Base, {ez, EZ0}) ->
    EZ = filename:join([Base, EZ0]),
    case read_app_file(EZ) of
        {application, Name, Props} -> mkplugin(Name, Props, ez, EZ);
        {error, Reason}            -> {error, EZ, Reason}
    end;
plugin_info(Base, {app, App0}) ->
    App = filename:join([Base, App0]),
    case rabbit_file:read_term_file(App) of
        {ok, [{application, Name, Props}]} ->
            %% The plugin root is two levels above .../ebin/name.app.
            Location = filename:absname(
                         filename:dirname(filename:dirname(App))),
            mkplugin(Name, Props, dir, Location);
        {error, Reason} ->
            {error, App, {invalid_app, Reason}}
    end.
+
%% Assemble a #plugin{} record from .app file properties, filtering
%% out dependencies already satisfied by the base release.
mkplugin(Name, Props, Type, Location) ->
    Get = fun (Key, Default) -> proplists:get_value(Key, Props, Default) end,
    #plugin{name         = Name,
            version      = Get(vsn, "0"),
            description  = Get(description, ""),
            dependencies = filter_applications(Get(applications, [])),
            location     = Location,
            type         = Type}.
+
%% Locate, extract (into memory) and parse the .app file inside a
%% plugin .ez archive.
%% NOTE(review): the [_|ZippedFiles] match assumes the first entry
%% returned by zip:list_dir/1 is the archive's comment record rather
%% than a file entry - confirm this holds for all producers of .ez.
read_app_file(EZ) ->
    case zip:list_dir(EZ) of
        {ok, [_|ZippedFiles]} ->
            case find_app_files(ZippedFiles) of
                [AppPath|_] ->
                    %% Extract only the .app file, straight to memory.
                    {ok, [{AppPath, AppFile}]} =
                        zip:extract(EZ, [{file_list, [AppPath]}, memory]),
                    parse_binary(AppFile);
                [] ->
                    {error, no_app_file}
            end;
        {error, Reason} ->
            {error, {invalid_ez, Reason}}
    end.
+
%% Return the paths of .app files under ebin/ directories within a
%% zip listing.  Fix: the previous pattern ".*.app$" left the dot
%% before "app" unescaped, so any name merely *ending* in "app"
%% (e.g. "fooXapp") also matched; "\\.app$" requires a real extension.
find_app_files(ZippedFiles) ->
    {ok, RE} = re:compile("^.*/ebin/.*\\.app$"),
    [Path || {zip_file, Path, _, _, _, _} <- ZippedFiles,
             re:run(Path, RE, [{capture, none}]) =:= match].
+
%% Parse the binary contents of a .app file into its Erlang term,
%% returning {error, {invalid_app, Reason}} on failure.  Fix: the
%% previous "catch Err ->" clause trapped only class 'throw', but
%% scan/parse failures here surface as badmatch *errors*, which
%% therefore escaped uncaught; "_:Err" traps all exception classes.
parse_binary(Bin) ->
    try
        {ok, Tokens, _EndLoc} = erl_scan:string(binary_to_list(Bin)),
        {ok, Term} = erl_parse:parse_term(Tokens),
        Term
    catch
        _:Err -> {error, {invalid_app, Err}}
    end.
+
%% Keep only the declared dependencies that are NOT already available
%% as applications in the wider Erlang release.
filter_applications(Applications) ->
    lists:filter(fun (App) -> not is_available_app(App) end, Applications).
+
%% An application counts as available when it is already loaded, or
%% can be loaded from the code path (in which case it is unloaded
%% again immediately to leave no trace).
is_available_app(Application) ->
    case application:load(Application) of
        {error, {already_loaded, _}} -> true;
        ok                           -> application:unload(Application),
                                        true;
        _Other                       -> false
    end.
+
%% Project a list of #plugin{} records to their name atoms.
plugin_names(Plugins) ->
    lists:map(fun (#plugin{name = Name}) -> Name end, Plugins).
+
%% Select from AllPlugins the records whose name appears in Names.
lookup_plugins(Names, AllPlugins) ->
    [Plugin || Plugin = #plugin{name = N} <- AllPlugins,
               lists:member(N, Names)].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_plugins_main).
+-include("rabbit.hrl").
+
+-export([start/0, stop/0]).
+
+-define(VERBOSE_OPT, "-v").
+-define(MINIMAL_OPT, "-m").
+-define(ENABLED_OPT, "-E").
+-define(ENABLED_ALL_OPT, "-e").
+
+-define(VERBOSE_DEF, {?VERBOSE_OPT, flag}).
+-define(MINIMAL_DEF, {?MINIMAL_OPT, flag}).
+-define(ENABLED_DEF, {?ENABLED_OPT, flag}).
+-define(ENABLED_ALL_DEF, {?ENABLED_ALL_OPT, flag}).
+
+-define(GLOBAL_DEFS, []).
+
+-define(COMMANDS,
+ [{list, [?VERBOSE_DEF, ?MINIMAL_DEF, ?ENABLED_DEF, ?ENABLED_ALL_DEF]},
+ enable,
+ disable]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start/0 :: () -> no_return()).
+-spec(stop/0 :: () -> 'ok').
+-spec(usage/0 :: () -> no_return()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% CLI entry point.  Reads the plugins file/dir locations from emulator
%% arguments, parses the command line, runs the action, and exits:
%% 0 on success, 1 via usage() on a bad command, 2 on failure.
start() ->
    {ok, [[PluginsFile|_]|_]} =
        init:get_argument(enabled_plugins_file),
    {ok, [[PluginsDir|_]|_]} = init:get_argument(plugins_dist_dir),
    {Command, Opts, Args} =
        case rabbit_misc:parse_arguments(?COMMANDS, ?GLOBAL_DEFS,
                                         init:get_plain_arguments())
        of
            {ok, Res}  -> Res;
            no_command -> print_error("could not recognise command", []),
                          usage()
        end,

    PrintInvalidCommandError =
        fun () ->
                print_error("invalid command '~s'",
                            [string:join([atom_to_list(Command) | Args], " ")])
        end,

    %% A function_clause from action/5 means the command/arity combo is
    %% unknown; the two stack-frame shapes cover different OTP stack
    %% trace formats ({M,F,A} vs {M,F,A,Location}).
    case catch action(Command, Args, Opts, PluginsFile, PluginsDir) of
        ok ->
            rabbit_misc:quit(0);
        {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} ->
            PrintInvalidCommandError(),
            usage();
        {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} ->
            PrintInvalidCommandError(),
            usage();
        {error, Reason} ->
            print_error("~p", [Reason]),
            rabbit_misc:quit(2);
        {error_string, Reason} ->
            print_error("~s", [Reason]),
            rabbit_misc:quit(2);
        Other ->
            print_error("~p", [Other]),
            rabbit_misc:quit(2)
    end.
+
%% Nothing to tear down; present to satisfy the CLI entry contract.
stop() -> ok.
+
+%%----------------------------------------------------------------------------
+
%% list: show plugins matching an optional name regex (default ".*").
action(list, [], Opts, PluginsFile, PluginsDir) ->
    action(list, [".*"], Opts, PluginsFile, PluginsDir);
action(list, [Pat], Opts, PluginsFile, PluginsDir) ->
    format_plugins(Pat, Opts, PluginsFile, PluginsDir);

%% enable: add plugins to the enabled-plugins file.  Fails up front if
%% any requested plugin, or any dependency it drags in, is not present.
action(enable, ToEnable0, _Opts, PluginsFile, PluginsDir) ->
    case ToEnable0 of
        [] -> throw({error_string, "Not enough arguments for 'enable'"});
        _ -> ok
    end,
    AllPlugins = rabbit_plugins:list(PluginsDir),
    Enabled = rabbit_plugins:read_enabled(PluginsFile),
    ImplicitlyEnabled = rabbit_plugins:dependencies(false,
                                                    Enabled, AllPlugins),
    ToEnable = [list_to_atom(Name) || Name <- ToEnable0],
    Missing = ToEnable -- plugin_names(AllPlugins),
    NewEnabled = lists:usort(Enabled ++ ToEnable),
    NewImplicitlyEnabled = rabbit_plugins:dependencies(false,
                                                       NewEnabled, AllPlugins),
    %% Dependencies missing beyond the directly-missing plugins.
    MissingDeps = (NewImplicitlyEnabled -- plugin_names(AllPlugins)) -- Missing,
    case {Missing, MissingDeps} of
        {[], []} -> ok;
        {Miss, []} -> throw({error_string, fmt_missing("plugins", Miss)});
        {[], Miss} -> throw({error_string, fmt_missing("dependencies", Miss)});
        {_, _} -> throw({error_string,
                         fmt_missing("plugins", Missing) ++
                             fmt_missing("dependencies", MissingDeps)})
    end,
    write_enabled_plugins(PluginsFile, NewEnabled),
    %% Only report the delta in the *implicitly* enabled set, which is
    %% what actually changes runtime behaviour.
    case NewEnabled -- ImplicitlyEnabled of
        [] -> io:format("Plugin configuration unchanged.~n");
        _ -> print_list("The following plugins have been enabled:",
                        NewImplicitlyEnabled -- ImplicitlyEnabled),
             report_change()
    end;

%% disable: remove plugins plus everything that depends on them
%% (reverse dependency closure).
action(disable, ToDisable0, _Opts, PluginsFile, PluginsDir) ->
    case ToDisable0 of
        [] -> throw({error_string, "Not enough arguments for 'disable'"});
        _ -> ok
    end,
    ToDisable = [list_to_atom(Name) || Name <- ToDisable0],
    Enabled = rabbit_plugins:read_enabled(PluginsFile),
    AllPlugins = rabbit_plugins:list(PluginsDir),
    Missing = ToDisable -- plugin_names(AllPlugins),
    %% Unknown names are only a warning here: disabling something that
    %% does not exist is harmless.
    case Missing of
        [] -> ok;
        _ -> print_list("Warning: the following plugins could not be found:",
                        Missing)
    end,
    ToDisableDeps = rabbit_plugins:dependencies(true, ToDisable, AllPlugins),
    NewEnabled = Enabled -- ToDisableDeps,
    case length(Enabled) =:= length(NewEnabled) of
        true -> io:format("Plugin configuration unchanged.~n");
        false -> ImplicitlyEnabled =
                     rabbit_plugins:dependencies(false, Enabled, AllPlugins),
                 NewImplicitlyEnabled =
                     rabbit_plugins:dependencies(false,
                                                 NewEnabled, AllPlugins),
                 print_list("The following plugins have been disabled:",
                            ImplicitlyEnabled -- NewImplicitlyEnabled),
                 write_enabled_plugins(PluginsFile, NewEnabled),
                 report_change()
    end.
+
+%%----------------------------------------------------------------------------
+
%% Emit a formatted, "Error: "-prefixed line on stderr.
print_error(Fmt, Args) ->
    rabbit_misc:format_stderr("Error: " ++ Fmt ++ "~n", Args).
+
%% Print the tool's usage text and terminate with exit code 1
%% (does not return; see the usage/0 spec above).
usage() ->
    io:format("~s", [rabbit_plugins_usage:usage()]),
    rabbit_misc:quit(1).
+
%% Pretty print a list of plugins.
%% Pattern is a regex filter on plugin names.  Opts select the output
%% format (-v verbose / -m minimal, mutually exclusive; default is a
%% one-line-per-plugin "normal" format) and filtering (-E only
%% explicitly enabled, -e explicitly or implicitly enabled).
format_plugins(Pattern, Opts, PluginsFile, PluginsDir) ->
    Verbose = proplists:get_bool(?VERBOSE_OPT, Opts),
    Minimal = proplists:get_bool(?MINIMAL_OPT, Opts),
    Format = case {Verbose, Minimal} of
                 {false, false} -> normal;
                 {true, false} -> verbose;
                 {false, true} -> minimal;
                 {true, true} -> throw({error_string,
                                        "Cannot specify -m and -v together"})
             end,
    OnlyEnabled = proplists:get_bool(?ENABLED_OPT, Opts),
    OnlyEnabledAll = proplists:get_bool(?ENABLED_ALL_OPT, Opts),

    AvailablePlugins = rabbit_plugins:list(PluginsDir),
    EnabledExplicitly = rabbit_plugins:read_enabled(PluginsFile),
    EnabledImplicitly =
        rabbit_plugins:dependencies(false, EnabledExplicitly,
                                    AvailablePlugins) -- EnabledExplicitly,
    %% Enabled-but-absent plugins are listed too, as bare placeholder
    %% records, so the operator can see what is broken.
    Missing = [#plugin{name = Name, dependencies = []} ||
                  Name <- ((EnabledExplicitly ++ EnabledImplicitly) --
                               plugin_names(AvailablePlugins))],
    {ok, RE} = re:compile(Pattern),
    Plugins = [ Plugin ||
                  Plugin = #plugin{name = Name} <- AvailablePlugins ++ Missing,
                  re:run(atom_to_list(Name), RE, [{capture, none}]) =:= match,
                  if OnlyEnabled -> lists:member(Name, EnabledExplicitly);
                     OnlyEnabledAll -> (lists:member(Name,
                                                     EnabledExplicitly) or
                                        lists:member(Name, EnabledImplicitly));
                     true -> true
                  end],
    Plugins1 = usort_plugins(Plugins),
    %% Column width for the "normal" format; [0] guards lists:max
    %% against an empty plugin list.
    MaxWidth = lists:max([length(atom_to_list(Name)) ||
                             #plugin{name = Name} <- Plugins1] ++ [0]),
    [format_plugin(P, EnabledExplicitly, EnabledImplicitly,
                   plugin_names(Missing), Format, MaxWidth) || P <- Plugins1],
    ok.
+
%% Print one plugin entry.  Glyph legend: [E] explicitly enabled,
%% [e] enabled as a dependency, [!] enabled but missing on disk,
%% [ ] disabled.
format_plugin(#plugin{name = Name, version = Version,
                      description = Description, dependencies = Deps},
              EnabledExplicitly, EnabledImplicitly, Missing,
              Format, MaxWidth) ->
    Glyph = case {lists:member(Name, EnabledExplicitly),
                  lists:member(Name, EnabledImplicitly),
                  lists:member(Name, Missing)} of
                {true, false, false} -> "[E]";
                {false, true, false} -> "[e]";
                {_, _, true} -> "[!]";
                _ -> "[ ]"
            end,
    %% Opt prints F with argument A unless A equals the third argument
    %% (a "default/absent" sentinel), in which case nothing is printed.
    Opt = fun (_F, A, A) -> ok;
              ( F, A, _) -> io:format(F, [A])
          end,
    case Format of
        minimal -> io:format("~s~n", [Name]);
        %% normal: glyph, name padded to MaxWidth, optional version.
        normal -> io:format("~s ~-" ++ integer_to_list(MaxWidth) ++ "w ",
                            [Glyph, Name]),
                  Opt("~s", Version, undefined),
                  io:format("~n");
        verbose -> io:format("~s ~w~n", [Glyph, Name]),
                   Opt(" Version: \t~s~n", Version, undefined),
                   Opt(" Dependencies:\t~p~n", Deps, []),
                   Opt(" Description: \t~s~n", Description, undefined),
                   io:format("~n")
    end.
+
%% Pretty print a header plus one indented line per plugin.
%% Fix: the rendered text is now passed to io:format as DATA (via ~s)
%% rather than as the format string itself, so a plugin name
%% containing "~" can no longer crash or corrupt the output.
print_list(Header, Plugins) ->
    io:format("~s", [fmt_list(Header, Plugins)]).
+
%% Render Header plus one indented line per element as a flat string.
fmt_list(Header, Plugins) ->
    Lines = [io_lib:format(" ~s~n", [P]) || P <- Plugins],
    lists:flatten([Header, $\n | Lines]).
+
%% Render a "could not be found" report for the given kind of thing.
fmt_missing(Desc, Missing) ->
    Header = "The following " ++ Desc ++ " could not be found:",
    fmt_list(Header, Missing).
+
%% Sort plugins by {name, version} ascending, dropping duplicates.
usort_plugins(Plugins) ->
    lists:usort(fun plugins_cmp/2, Plugins).
+
%% Ordering fun for usort_plugins/1: compare by name, then version.
plugins_cmp(#plugin{name = NameA, version = VsnA},
            #plugin{name = NameB, version = VsnB}) ->
    {NameA, VsnA} =< {NameB, VsnB}.
+
%% Return the names of the given plugins.
plugin_names(Plugins) ->
    lists:map(fun (#plugin{name = Name}) -> Name end, Plugins).
+
%% Write the enabled plugin names to disk as a single list term;
%% failure to write is fatal for the current command.
write_enabled_plugins(PluginsFile, Plugins) ->
    case rabbit_file:write_term_file(PluginsFile, [Plugins]) of
        ok              -> ok;
        {error, Reason} -> throw({error, {cannot_write_enabled_plugins_file,
                                          PluginsFile, Reason}})
    end.
+
%% Remind the operator that changes only apply after a broker restart.
report_change() ->
    io:format("Plugin configuration has changed. "
              "Restart RabbitMQ for changes to take effect.~n").
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_policies).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([register/0, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "internal policies"},
+ {mfa, {rabbit_policies, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
%% Register this module as the validator for each built-in policy key.
register() ->
    Keys = [<<"alternate-exchange">>,
            <<"dead-letter-exchange">>,
            <<"dead-letter-routing-key">>,
            <<"message-ttl">>,
            <<"expires">>,
            <<"max-length">>],
    [rabbit_registry:register(policy_validator, Key, ?MODULE) || Key <- Keys],
    ok.
+
%% Validate each {Key, Value} entry in turn; the first failure is
%% carried through unchanged, skipping the remaining entries.
validate_policy(Terms) ->
    lists:foldl(fun ({Key, Value}, ok) -> validate_policy0(Key, Value);
                    (_Entry, Error)    -> Error
                end, ok, Terms).
+
%% Per-key validators.  Exchange names and routing keys need only be
%% binaries; the numeric keys are bounded: message-ttl and expires by
%% ?MAX_EXPIRY_TIMER (Erlang timer limit), expires must be >= 1ms,
%% max-length must be non-negative.  For each key the guarded success
%% clause must precede its catch-all error clause.
validate_policy0(<<"alternate-exchange">>, Value)
  when is_binary(Value) ->
    ok;
validate_policy0(<<"alternate-exchange">>, Value) ->
    {error, "~p is not a valid alternate exchange name", [Value]};

validate_policy0(<<"dead-letter-exchange">>, Value)
  when is_binary(Value) ->
    ok;
validate_policy0(<<"dead-letter-exchange">>, Value) ->
    {error, "~p is not a valid dead letter exchange name", [Value]};

validate_policy0(<<"dead-letter-routing-key">>, Value)
  when is_binary(Value) ->
    ok;
validate_policy0(<<"dead-letter-routing-key">>, Value) ->
    {error, "~p is not a valid dead letter routing key", [Value]};

validate_policy0(<<"message-ttl">>, Value)
  when is_integer(Value), Value >= 0, Value =< ?MAX_EXPIRY_TIMER ->
    ok;
validate_policy0(<<"message-ttl">>, Value) ->
    {error, "~p is not a valid message TTL", [Value]};

validate_policy0(<<"expires">>, Value)
  when is_integer(Value), Value >= 1, Value =< ?MAX_EXPIRY_TIMER ->
    ok;
validate_policy0(<<"expires">>, Value) ->
    {error, "~p is not a valid queue expiry", [Value]};

validate_policy0(<<"max-length">>, Value)
  when is_integer(Value), Value >= 0 ->
    ok;
validate_policy0(<<"max-length">>, Value) ->
    {error, "~p is not a valid maximum length", [Value]}.
+
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_policy).
+
+%% TODO specs
+
+-behaviour(rabbit_runtime_parameter).
+
+-include("rabbit.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-export([register/0]).
+-export([invalidate/0, recover/0]).
+-export([name/1, get/2, get_arg/3, set/1]).
+-export([validate/5, notify/4, notify_clear/3]).
+-export([parse_set/6, set/6, delete/2, lookup/2, list/0, list/1,
+ list_formatted/1, info_keys/0]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "policy parameters"},
+ {mfa, {rabbit_policy, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
%% Hook this module in as the runtime-parameter handler for policies.
register() ->
    rabbit_registry:register(runtime_parameter, <<"policy">>, ?MODULE).
+
%% Name of the policy currently applied to a queue or exchange,
%% or 'none' when no policy matches.
name(#amqqueue{policy = Policy}) -> name0(Policy);
name(#exchange{policy = Policy}) -> name0(Policy).

name0(undefined) -> none;
name0(Policy) -> pget(name, Policy).
+
%% Attach the matching policy (if any) to a queue or exchange record,
%% re-running the relevant decorator hooks afterwards.
set(Q = #amqqueue{name = Name}) -> rabbit_queue_decorator:set(
                                     Q#amqqueue{policy = set0(Name)});
set(X = #exchange{name = Name}) -> rabbit_exchange_decorator:set(
                                     X#exchange{policy = set0(Name)}).

%% Find the policy matching a resource name within its own vhost.
set0(Name = #resource{virtual_host = VHost}) -> match(Name, list(VHost)).

%% As set/1, but matching against a caller-supplied policy list
%% (used during recovery to avoid re-reading policies per entity).
set(Q = #amqqueue{name = Name}, Ps) -> Q#amqqueue{policy = match(Name, Ps)};
set(X = #exchange{name = Name}, Ps) -> rabbit_exchange_decorator:set(
                                         X#exchange{policy = match(Name, Ps)}).
+
%% Value of policy key Name for a queue, an exchange, or a bare
%% resource name (the latter re-matches against all vhost policies).
get(Name, #amqqueue{policy = Policy}) -> get0(Name, Policy);
get(Name, #exchange{policy = Policy}) -> get0(Name, Policy);
%% Caution - SLOW.
get(Name, EntityName = #resource{virtual_host = VHost}) ->
    get0(Name, match(EntityName, list(VHost))).

%% Dig the key out of the policy's definition proplist; undefined at
%% any level propagates as undefined.
get0(_Name, undefined) -> undefined;
get0(Name, List) -> case pget(definition, List) of
                        undefined -> undefined;
                        Policy -> pget(Name, Policy)
                    end.
+
%% Look up exchange argument AName, falling back to policy key PName
%% when the argument is absent.  Many heads for optimisation: the
%% common no-args cases avoid the table lookup entirely.
get_arg(_AName, _PName, #exchange{arguments = [], policy = undefined}) ->
    undefined;
get_arg(_AName, PName, X = #exchange{arguments = []}) ->
    get(PName, X);
get_arg(AName, PName, X = #exchange{arguments = Args}) ->
    case rabbit_misc:table_lookup(Args, AName) of
        undefined -> get(PName, X);
        {_Type, Arg} -> Arg
    end.
+
+%%----------------------------------------------------------------------------
+
%% Gets called during upgrades - therefore must not assume anything
%% about the state of Mnesia.  Drops a marker file that recover/0
%% checks for on the next boot.
invalidate() ->
    rabbit_file:write_file(invalid_file(), <<"">>).
+
%% If the invalidation marker exists, rewrite all durable entities'
%% policies and then remove the marker.
recover() ->
    case rabbit_file:is_file(invalid_file()) of
        false -> ok;
        true  -> recover0(),
                 rabbit_file:delete(invalid_file())
    end.
+
%% To get here we have to have just completed an Mnesia upgrade - i.e. we are
%% the first node starting. So we can rewrite the whole database. Note that
%% recovery has not yet happened; we must work with the rabbit_durable_<thing>
%% variants.
recover0() ->
    Xs = mnesia:dirty_match_object(rabbit_durable_exchange, #exchange{_ = '_'}),
    Qs = mnesia:dirty_match_object(rabbit_durable_queue, #amqqueue{_ = '_'}),
    Policies = list(),
    %% One transaction per entity keeps each write small; set/2 re-runs
    %% the policy match against the full Policies list.
    [rabbit_misc:execute_mnesia_transaction(
       fun () ->
               mnesia:write(rabbit_durable_exchange, set(X, Policies), write)
       end) || X <- Xs],
    [rabbit_misc:execute_mnesia_transaction(
       fun () ->
               mnesia:write(rabbit_durable_queue, set(Q, Policies), write)
       end) || Q <- Qs],
    ok.
+
%% Path of the marker file that flags policies as needing a rewrite.
invalid_file() ->
    filename:join(rabbit_mnesia:dir(), "policies_are_invalid").
+
+%%----------------------------------------------------------------------------
+
%% Set a policy from CLI-style string arguments.  try...of keeps only
%% list_to_integer/1 under protection: a badarg raised inside
%% parse_set0 must not be misreported as a bad priority.
parse_set(VHost, Name, Pattern, Definition, Priority, ApplyTo) ->
    try list_to_integer(Priority) of
        Num -> parse_set0(VHost, Name, Pattern, Definition, Num, ApplyTo)
    catch
        error:badarg -> {error, "~p priority must be a number", [Priority]}
    end.
+
%% Decode the JSON definition string and store the policy; a decode
%% failure is reported rather than crashing the CLI.
parse_set0(VHost, Name, Pattern, Defn, Priority, ApplyTo) ->
    case rabbit_misc:json_decode(Defn) of
        error ->
            {error_string, "JSON decoding error"};
        {ok, JSON} ->
            Body = [{<<"pattern">>,    list_to_binary(Pattern)},
                    {<<"definition">>, rabbit_misc:json_to_term(JSON)},
                    {<<"priority">>,   Priority},
                    {<<"apply-to">>,   ApplyTo}],
            set0(VHost, Name, Body)
    end.
+
%% Store a policy built from already-decoded values; 'undefined'
%% priority defaults to 0 and 'undefined' apply-to to <<"all">>.
set(VHost, Name, Pattern, Definition, Priority, ApplyTo) ->
    Prio = case Priority of
               undefined -> 0;
               _         -> Priority
           end,
    Target = case ApplyTo of
                 undefined -> <<"all">>;
                 _         -> ApplyTo
             end,
    set0(VHost, Name, [{<<"pattern">>,    Pattern},
                       {<<"definition">>, Definition},
                       {<<"priority">>,   Prio},
                       {<<"apply-to">>,   Target}]).
+
%% Persist the policy under the "policy" runtime-parameter component.
set0(VHost, Name, Term) ->
    rabbit_runtime_parameters:set_any(VHost, <<"policy">>, Name, Term, none).
+
+delete(VHost, Name) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"policy">>, Name).
+
+%% Fetch one policy as a proplist (definition untouched), or 'not_found'.
+lookup(VHost, Name) ->
+    Param = rabbit_runtime_parameters:lookup(VHost, <<"policy">>, Name),
+    case Param of
+        not_found -> not_found;
+        _         -> p(Param, fun ident/1)
+    end.
+
+%% All policies across every vhost.
+list() ->
+ list('_').
+
+%% All policies in one vhost, definitions left as terms.
+list(VHost) ->
+ list0(VHost, fun ident/1).
+
+%% As list/1, but with definitions JSON-encoded and entries sorted by
+%% priority.
+list_formatted(VHost) ->
+ order_policies(list0(VHost, fun format/1)).
+
+%% Fetch raw <<"policy">> runtime parameters and convert each with p/2,
+%% applying DefnFun to the definition field.
+list0(VHost, DefnFun) ->
+ [p(P, DefnFun) || P <- rabbit_runtime_parameters:list(VHost, <<"policy">>)].
+
+%% Sort policies into ascending priority order for display.
+%% lists:sort/2 requires an ordering fun that is true when A is less than
+%% OR EQUAL to B; the previous strict '<' violated that contract and made
+%% the relative order of equal-priority policies unstable. '=<' keeps the
+%% sort stable.
+order_policies(PropList) ->
+    lists:sort(fun (A, B) -> pget(priority, A) =< pget(priority, B) end,
+               PropList).
+
+%% Convert a runtime-parameter proplist into the flat policy proplist used
+%% throughout this module; DefnFun post-processes the definition field
+%% (identity, or JSON formatting via format/1).
+p(Parameter, DefnFun) ->
+ Value = pget(value, Parameter),
+ [{vhost, pget(vhost, Parameter)},
+ {name, pget(name, Parameter)},
+ {pattern, pget(<<"pattern">>, Value)},
+ {'apply-to', pget(<<"apply-to">>, Value)},
+ {definition, DefnFun(pget(<<"definition">>, Value))},
+ {priority, pget(<<"priority">>, Value)}].
+
+%% JSON-encode a policy definition term into a binary.
+format(Term) ->
+    JsonTerm = rabbit_misc:term_to_json(Term),
+    {ok, Encoded} = rabbit_misc:json_encode(JsonTerm),
+    list_to_binary(Encoded).
+
+%% Identity; used where a definition formatter is required but no
+%% transformation is wanted.
+ident(X) -> X.
+
+%% Keys present in the proplists produced by p/2.
+info_keys() -> [vhost, name, 'apply-to', pattern, definition, priority].
+
+%%----------------------------------------------------------------------------
+
+%% rabbit_runtime_parameters callback: validate a policy term against
+%% policy_validation/0 before it is stored.
+validate(_VHost, <<"policy">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, policy_validation(), Term).
+
+%% Callback on policy set: emit an event, then re-match everything in the
+%% vhost.
+notify(VHost, <<"policy">>, Name, Term) ->
+ rabbit_event:notify(policy_set, [{name, Name} | Term]),
+ update_policies(VHost).
+
+%% Callback on policy clear: likewise.
+notify_clear(VHost, <<"policy">>, Name) ->
+ rabbit_event:notify(policy_cleared, [{name, Name}]),
+ update_policies(VHost).
+
+%%----------------------------------------------------------------------------
+
+%% Re-match every queue and exchange in VHost against the current policy
+%% set inside one transaction, then fire policy_changed notifications
+%% outside it.
+%% [1] We need to prevent this from becoming O(n^2) in a similar
+%% manner to rabbit_binding:remove_for_{source,destination}. So see
+%% the comment in rabbit_binding:lock_route_tables/0 for more rationale.
+%% [2] We could be here in a post-tx fun after the vhost has been
+%% deleted; in which case it's fine to do nothing. We must still return
+%% an (empty) pair of lists: the previous code returned 'ok' here, which
+%% made the {Xs, Qs} match below fail with badmatch.
+update_policies(VHost) ->
+    Tabs = [rabbit_queue,    rabbit_durable_queue,
+            rabbit_exchange, rabbit_durable_exchange],
+    {Xs, Qs} = rabbit_misc:execute_mnesia_transaction(
+                 fun() ->
+                         [mnesia:lock({table, T}, write) || T <- Tabs], %% [1]
+                         case catch list(VHost) of
+                             {error, {no_such_vhost, _}} ->
+                                 {[], []}; %% [2]
+                             Policies ->
+                                 {[update_exchange(X, Policies) ||
+                                      X <- rabbit_exchange:list(VHost)],
+                                  [update_queue(Q, Policies) ||
+                                      Q <- rabbit_amqqueue:list(VHost)]}
+                         end
+                 end),
+    [catch notify(X) || X <- Xs],
+    [catch notify(Q) || Q <- Qs],
+    ok.
+
+%% Recompute one exchange's policy. Returns 'no_change' when the matched
+%% policy is the same as before, otherwise {OldX, NewX} for notification;
+%% if the exchange disappeared under us we return {X, X}, a no-op pair.
+update_exchange(X = #exchange{name = XName, policy = OldPolicy}, Policies) ->
+ case match(XName, Policies) of
+ OldPolicy -> no_change;
+ NewPolicy -> case rabbit_exchange:update(
+ XName, fun (X0) ->
+ rabbit_exchange_decorator:set(
+ X0 #exchange{policy = NewPolicy})
+ end) of
+ #exchange{} = X1 -> {X, X1};
+ not_found -> {X, X }
+ end
+ end.
+
+%% As update_exchange/2, but for queues.
+update_queue(Q = #amqqueue{name = QName, policy = OldPolicy}, Policies) ->
+ case match(QName, Policies) of
+ OldPolicy -> no_change;
+ NewPolicy -> case rabbit_amqqueue:update(
+ QName, fun(Q1) ->
+ rabbit_queue_decorator:set(
+ Q1#amqqueue{policy = NewPolicy})
+ end) of
+ #amqqueue{} = Q1 -> {Q, Q1};
+ not_found -> {Q, Q }
+ end
+ end.
+
+%% Fire the policy_changed callback for the pairs produced above;
+%% 'no_change' entries are ignored.
+notify(no_change)->
+ ok;
+notify({X1 = #exchange{}, X2 = #exchange{}}) ->
+ rabbit_exchange:policy_changed(X1, X2);
+notify({Q1 = #amqqueue{}, Q2 = #amqqueue{}}) ->
+ rabbit_amqqueue:policy_changed(Q1, Q2).
+
+%% The policy applying to a resource: of all matching policies, the one
+%% with the highest priority (see sort_pred/2), or 'undefined' if none
+%% match.
+match(Name, Policies) ->
+    Matching = [P || P <- Policies, matches(Name, P)],
+    case lists:sort(fun sort_pred/2, Matching) of
+        [Best | _] -> Best;
+        []         -> undefined
+    end.
+
+%% A policy applies to a resource iff its apply-to covers the resource
+%% kind, its regex matches the resource name, and it lives in the same
+%% vhost.
+matches(#resource{name = Name, kind = Kind, virtual_host = VHost}, Policy) ->
+ matches_type(Kind, pget('apply-to', Policy)) andalso
+ match =:= re:run(Name, pget(pattern, Policy), [{capture, none}]) andalso
+ VHost =:= pget(vhost, Policy).
+
+%% Does a policy with the given apply-to value cover this resource kind?
+matches_type(exchange, ApplyTo) ->
+    lists:member(ApplyTo, [<<"exchanges">>, <<"all">>]);
+matches_type(queue, ApplyTo) ->
+    lists:member(ApplyTo, [<<"queues">>, <<"all">>]);
+matches_type(_Kind, _ApplyTo) ->
+    false.
+
+sort_pred(A, B) -> pget(priority, A) >= pget(priority, B).
+
+%%----------------------------------------------------------------------------
+
+%% Field validators for a policy term (used by validate/5); 'apply-to' is
+%% optional, everything else mandatory.
+policy_validation() ->
+ [{<<"priority">>, fun rabbit_parameter_validation:number/2, mandatory},
+ {<<"pattern">>, fun rabbit_parameter_validation:regex/2, mandatory},
+ {<<"apply-to">>, fun apply_to_validation/2, optional},
+ {<<"definition">>, fun validation/2, mandatory}].
+
+%% Validate the <<"definition">> value: it must be a non-empty proplist
+%% with no duplicate keys, whose keys are each owned by some registered
+%% policy_validator (see validation0/2).
+validation(_Name, []) ->
+ {error, "no policy provided", []};
+validation(_Name, Terms) when is_list(Terms) ->
+ {Keys, Modules} = lists:unzip(
+ rabbit_registry:lookup_all(policy_validator)),
+ [] = dups(Keys), %% ASSERTION
+ Validators = lists:zipwith(fun (M, K) -> {M, a2b(K)} end, Modules, Keys),
+ case is_proplist(Terms) of
+ true -> {TermKeys, _} = lists:unzip(Terms),
+ case dups(TermKeys) of
+ [] -> validation0(Validators, Terms);
+ Dup -> {error, "~p duplicate keys not allowed", [Dup]}
+ end;
+ false -> {error, "definition must be a dictionary: ~p", [Terms]}
+ end;
+validation(_Name, Term) ->
+ {error, "parse error while reading policy: ~p", [Term]}.
+
+%% Hand each registered validator module the subset of definition keys it
+%% owns. Once any validator returns an error the accumulator no longer
+%% matches {ok, _}, so the remaining modules fall through the second fun
+%% clause untouched; keys claimed by no validator are reported at the end.
+validation0(Validators, Terms) ->
+ case lists:foldl(
+ fun (Mod, {ok, TermsLeft}) ->
+ ModKeys = proplists:get_all_values(Mod, Validators),
+ case [T || {Key, _} = T <- TermsLeft,
+ lists:member(Key, ModKeys)] of
+ [] -> {ok, TermsLeft};
+ Scope -> {Mod:validate_policy(Scope), TermsLeft -- Scope}
+ end;
+ (_, Acc) ->
+ Acc
+ end, {ok, Terms}, proplists:get_keys(Validators)) of
+ {ok, []} ->
+ ok;
+ {ok, Unvalidated} ->
+ {error, "~p are not recognised policy settings", [Unvalidated]};
+ {Error, _} ->
+ Error
+ end.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+dups(L) -> L -- lists:usort(L).
+
+is_proplist(L) -> length(L) =:= length([I || I = {_, _} <- L]).
+
+%% The apply-to field may only be one of the three known scopes.
+apply_to_validation(_Name, Term) when Term =:= <<"all">>;
+                                      Term =:= <<"exchanges">>;
+                                      Term =:= <<"queues">> ->
+    ok;
+apply_to_validation(_Name, Term) ->
+    {error, "apply-to '~s' unrecognised; should be 'queues', 'exchanges' "
+     "or 'all'", [Term]}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_policy_validator).
+
+%% Behaviour for modules that validate policy definition keys; they are
+%% looked up via rabbit_registry under 'policy_validator' and called back
+%% through validate_policy/1 (see rabbit_policy:validation/2).
+
+-ifdef(use_specs).
+
+-export_type([validate_results/0]).
+
+-type(validate_results() ::
+ 'ok' | {error, string(), [term()]} | [validate_results()]).
+
+-callback validate_policy([{binary(), term()}]) -> validate_results().
+
+-else.
+
+%% Fallback for Erlang releases without -callback support: describe the
+%% behaviour via behaviour_info/1.
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+ [
+ {validate_policy, 1}
+ ];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_prelaunch).
+
+-export([start/0, stop/0]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-include("rabbit.hrl").
+
+%% Exit codes passed to rabbit_misc:quit/1 — presumably interpreted by
+%% the wrapper scripts that invoke this module; confirm against them.
+-define(DIST_PORT_NOT_CONFIGURED, 0).
+-define(ERROR_CODE, 1).
+-define(DIST_PORT_CONFIGURED, 2).
+
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start/0 :: () -> no_return()).
+-spec(stop/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Pre-launch checks. Given a node name in the plain arguments: fail fast
+%% if a node with that name is already running or the distribution port is
+%% unusable. With no arguments (Windows service installation): only check
+%% whether a distribution port range is configured. Always halts the
+%% emulator: the checks quit with ?ERROR_CODE or ?DIST_PORT_CONFIGURED,
+%% otherwise we quit with ?DIST_PORT_NOT_CONFIGURED (0) here.
+start() ->
+ case init:get_plain_arguments() of
+ [NodeStr] ->
+ Node = rabbit_nodes:make(NodeStr),
+ {NodeName, NodeHost} = rabbit_nodes:parts(Node),
+ ok = duplicate_node_check(NodeName, NodeHost),
+ ok = dist_port_set_check(),
+ ok = dist_port_use_check(NodeHost);
+ [] ->
+ %% Ignore running node while installing windows service
+ ok = dist_port_set_check(),
+ ok
+ end,
+ rabbit_misc:quit(?DIST_PORT_NOT_CONFIGURED),
+ ok.
+
+%% Nothing to tear down; present to satisfy the start/stop convention.
+stop() ->
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% Check whether a node with the same name is already running
+%% (by asking epmd on the target host); quits with ?ERROR_CODE on a clash
+%% or if epmd cannot be reached.
+duplicate_node_check(NodeName, NodeHost) ->
+ case rabbit_nodes:names(NodeHost) of
+ {ok, NamePorts} ->
+ case proplists:is_defined(NodeName, NamePorts) of
+ true -> io:format(
+ "ERROR: node with name ~p already running on ~p~n",
+ [NodeName, NodeHost]),
+ rabbit_misc:quit(?ERROR_CODE);
+ false -> ok
+ end;
+ {error, EpmdReason} ->
+ io:format("ERROR: epmd error for host ~s: ~s~n",
+ [NodeHost, rabbit_misc:format_inet_error(EpmdReason)]),
+ rabbit_misc:quit(?ERROR_CODE)
+ end.
+
+%% If the RabbitMQ config file sets kernel inet_dist_listen_min/max, quit
+%% with ?DIST_PORT_CONFIGURED so the caller knows a distribution port is
+%% configured; in every other case (no config file, unreadable or
+%% unexpected contents, no kernel settings) return ok.
+dist_port_set_check() ->
+    case os:getenv("RABBITMQ_CONFIG_FILE") of
+        false ->
+            ok;
+        File ->
+            Range = case file:consult(File ++ ".config") of
+                        {ok, [Config]} ->
+                            Kernel = pget(kernel, Config, []),
+                            {pget(inet_dist_listen_min, Kernel, none),
+                             pget(inet_dist_listen_max, Kernel, none)};
+                        _Other ->
+                            {none, none}
+                    end,
+            case Range of
+                {none, none} -> ok;
+                _            -> rabbit_misc:quit(?DIST_PORT_CONFIGURED)
+            end
+    end.
+
+%% If RABBITMQ_DIST_PORT is set, probe that the port can be bound locally
+%% (listen then immediately close); if not, report who holds it and quit.
+dist_port_use_check(NodeHost) ->
+ case os:getenv("RABBITMQ_DIST_PORT") of
+ false -> ok;
+ PortStr -> Port = list_to_integer(PortStr),
+ case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of
+ {ok, Sock} -> gen_tcp:close(Sock);
+ {error, _} -> dist_port_use_check_fail(Port, NodeHost)
+ end
+ end.
+
+-ifdef(use_specs).
+-spec(dist_port_use_check_fail/2 :: (non_neg_integer(), string()) ->
+ no_return()).
+-endif.
+%% Ask epmd which registered node (if any) owns the port, print an
+%% appropriate diagnostic, and quit with ?ERROR_CODE.
+dist_port_use_check_fail(Port, Host) ->
+ {ok, Names} = rabbit_nodes:names(Host),
+ case [N || {N, P} <- Names, P =:= Port] of
+ [] -> io:format("ERROR: distribution port ~b in use on ~s "
+ "(by non-Erlang process?)~n", [Port, Host]);
+ [Name] -> io:format("ERROR: distribution port ~b in use by ~s@~s~n",
+ [Port, Name, Host])
+ end,
+ rabbit_misc:quit(?ERROR_CODE).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_collector).
+
+-behaviour(gen_server).
+
+-export([start_link/1, register/2, delete_all/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {monitors, delete_from}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 :: (rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(register/2 :: (pid(), pid()) -> 'ok').
+-spec(delete_all/1 :: (pid()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link(ProcName) ->
+ gen_server:start_link(?MODULE, [ProcName], []).
+
+%% Register a queue pid with the collector so delete_all/1 can later
+%% dispose of it.
+register(CollectorPid, Q) ->
+ gen_server:call(CollectorPid, {register, Q}, infinity).
+
+%% Synchronously delete every registered queue; blocks until all their
+%% processes have terminated (see handle_info below).
+delete_all(CollectorPid) ->
+ gen_server:call(CollectorPid, delete_all, infinity).
+
+%%----------------------------------------------------------------------------
+
+%% gen_server init: record the process name and start with an empty
+%% monitor set and no deletion in flight.
+init([ProcName]) ->
+ ?store_proc_name(ProcName),
+ {ok, #state{monitors = pmon:new(), delete_from = undefined}}.
+
+%%--------------------------------------------------------------------------
+
+%% register: if a delete_all is already in progress (delete_from set), a
+%% late-registering queue is deleted immediately; either way it is
+%% monitored so its termination is observed.
+handle_call({register, QPid}, _From,
+ State = #state{monitors = QMons, delete_from = Deleting}) ->
+ case Deleting of
+ undefined -> ok;
+ _ -> ok = rabbit_amqqueue:delete_immediately([QPid])
+ end,
+ {reply, ok, State#state{monitors = pmon:monitor(QPid, QMons)}};
+
+%% delete_all: delete every monitored queue. The reply is deferred
+%% ('noreply') until handle_info has seen the last 'DOWN'; with nothing
+%% monitored we reply immediately.
+handle_call(delete_all, From, State = #state{monitors = QMons,
+ delete_from = undefined}) ->
+ case pmon:monitored(QMons) of
+ [] -> {reply, ok, State#state{delete_from = From}};
+ QPids -> ok = rabbit_amqqueue:delete_immediately(QPids),
+ {noreply, State#state{delete_from = From}}
+ end.
+
+%% No casts are expected; stop loudly if one arrives.
+handle_cast(Msg, State) ->
+ {stop, {unhandled_cast, Msg}, State}.
+
+%% A monitored queue terminated: forget it, and if a delete_all is
+%% pending and this was the last queue, release the waiting caller via
+%% the deferred reply.
+handle_info({'DOWN', _MRef, process, DownPid, _Reason},
+ State = #state{monitors = QMons, delete_from = Deleting}) ->
+ QMons1 = pmon:erase(DownPid, QMons),
+ case Deleting =/= undefined andalso pmon:is_empty(QMons1) of
+ true -> gen_server:reply(Deleting, ok);
+ false -> ok
+ end,
+ {noreply, State#state{monitors = QMons1}}.
+
+%% No cleanup required on termination.
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_consumers).
+
+-export([new/0, max_active_priority/1, inactive/1, all/1, count/0,
+ unacknowledged_message_count/0, add/9, remove/3, erase_ch/2,
+ send_drained/0, deliver/3, record_ack/3, subtract_acks/3,
+ possibly_unblock/3,
+ resume_fun/0, notify_sent_fun/1, activate_limit_fun/0,
+ credit/6, utilisation/1]).
+
+%%----------------------------------------------------------------------------
+
+%% A channel with at least this many messages unsent to its writer is
+%% considered blocked (see is_ch_blocked/1).
+-define(UNSENT_MESSAGE_LIMIT, 200).
+
+%% Utilisation average calculations are all in μs.
+-define(USE_AVG_HALF_LIFE, 1000000.0).
+
+%% consumers: priority queue of {ChPid, #consumer{}};
+%% use: consumer-utilisation tracking (see update_use/2, utilisation/1).
+-record(state, {consumers, use}).
+
+%% One consumer: its tag, whether it acks, its prefetch and its arguments.
+-record(consumer, {tag, ack_required, prefetch, args}).
+
+%% These are held in our process dictionary
+-record(cr, {ch_pid,
+ monitor_ref,
+ %% Queue of {AckTag, CTag-or-'none'} pending acks
+ acktags,
+ consumer_count,
+ %% Queue of {ChPid, #consumer{}} for consumers which have
+ %% been blocked for any reason
+ blocked_consumers,
+ %% The limiter itself
+ limiter,
+ %% Internal flow control for queue -> writer
+ unsent_message_count}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type time_micros() :: non_neg_integer().
+-type ratio() :: float().
+-type state() :: #state{consumers ::priority_queue:q(),
+ use :: {'inactive',
+ time_micros(), time_micros(), ratio()} |
+ {'active', time_micros(), ratio()}}.
+-type ch() :: pid().
+-type ack() :: non_neg_integer().
+-type cr_fun() :: fun ((#cr{}) -> #cr{}).
+-type fetch_result() :: {rabbit_types:basic_message(), boolean(), ack()}.
+
+-spec new() -> state().
+-spec max_active_priority(state()) -> integer() | 'infinity' | 'empty'.
+-spec inactive(state()) -> boolean().
+-spec all(state()) -> [{ch(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table()}].
+-spec count() -> non_neg_integer().
+-spec unacknowledged_message_count() -> non_neg_integer().
+-spec add(ch(), rabbit_types:ctag(), boolean(), pid(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table(), boolean(), state())
+ -> state().
+-spec remove(ch(), rabbit_types:ctag(), state()) ->
+ 'not_found' | state().
+-spec erase_ch(ch(), state()) ->
+ 'not_found' | {[ack()], [rabbit_types:ctag()],
+ state()}.
+-spec send_drained() -> 'ok'.
+-spec deliver(fun ((boolean()) -> {fetch_result(), T}),
+ rabbit_amqqueue:name(), state()) ->
+ {'delivered', boolean(), T, state()} |
+ {'undelivered', boolean(), state()}.
+-spec record_ack(ch(), pid(), ack()) -> 'ok'.
+-spec subtract_acks(ch(), [ack()], state()) ->
+ 'not_found' | 'unchanged' | {'unblocked', state()}.
+-spec possibly_unblock(cr_fun(), ch(), state()) ->
+ 'unchanged' | {'unblocked', state()}.
+-spec resume_fun() -> cr_fun().
+-spec notify_sent_fun(non_neg_integer()) -> cr_fun().
+-spec activate_limit_fun() -> cr_fun().
+-spec credit(boolean(), integer(), boolean(), ch(), rabbit_types:ctag(),
+ state()) -> 'unchanged' | {'unblocked', state()}.
+-spec utilisation(state()) -> ratio().
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Initial state: no consumers; utilisation tracking starts 'active'.
+new() -> #state{consumers = priority_queue:new(),
+ use = {active, now_micros(), 1.0}}.
+
+%% Priority of the foremost unblocked consumer.
+max_active_priority(#state{consumers = Consumers}) ->
+ priority_queue:highest(Consumers).
+
+%% True iff no consumer is currently ready to take a message.
+inactive(#state{consumers = Consumers}) ->
+ priority_queue:is_empty(Consumers).
+
+%% Every consumer, active and blocked, as
+%% {ChPid, CTag, AckRequired, Prefetch, Args}.
+all(#state{consumers = Consumers}) ->
+ lists:foldl(fun (C, Acc) -> consumers(C#cr.blocked_consumers, Acc) end,
+ consumers(Consumers, []), all_ch_record()).
+
+%% Flatten a priority queue of {ChPid, #consumer{}} onto Acc.
+consumers(Consumers, Acc) ->
+ priority_queue:fold(
+ fun ({ChPid, Consumer}, _P, Acc1) ->
+ #consumer{tag = CTag, ack_required = Ack, prefetch = Prefetch,
+ args = Args} = Consumer,
+ [{ChPid, CTag, Ack, Prefetch, Args} | Acc1]
+ end, Acc, Consumers).
+
+%% Total consumer count across all of this queue's channels.
+count() -> lists:sum([Count || #cr{consumer_count = Count} <- all_ch_record()]).
+
+%% Total pending (unacknowledged) messages across all channels.
+unacknowledged_message_count() ->
+ lists:sum([queue:len(C#cr.acktags) || C <- all_ch_record()]).
+
+%% Register a new consumer on a channel: bump the channel's consumer
+%% count, activate its limiter if requested, apply any x-credit argument,
+%% and enqueue the consumer at its priority.
+add(ChPid, CTag, NoAck, LimiterPid, LimiterActive, Prefetch, Args, IsEmpty,
+ State = #state{consumers = Consumers}) ->
+ C = #cr{consumer_count = Count,
+ limiter = Limiter} = ch_record(ChPid, LimiterPid),
+ Limiter1 = case LimiterActive of
+ true -> rabbit_limiter:activate(Limiter);
+ false -> Limiter
+ end,
+ C1 = C#cr{consumer_count = Count + 1, limiter = Limiter1},
+ %% Zero-credit auto mode and no-ack auto consumers skip the credit
+ %% bookkeeping entirely.
+ update_ch_record(
+ case parse_credit_args(Prefetch, Args) of
+ {0, auto} -> C1;
+ {_Credit, auto} when NoAck -> C1;
+ {Credit, Mode} -> credit_and_drain(
+ C1, CTag, Credit, Mode, IsEmpty)
+ end),
+ Consumer = #consumer{tag = CTag,
+ ack_required = not NoAck,
+ prefetch = Prefetch,
+ args = Args},
+ State#state{consumers = add_consumer({ChPid, Consumer}, Consumers)}.
+
+%% Deregister one consumer: deactivate the limiter when the channel's last
+%% consumer goes, forget any per-consumer credit, and drop the consumer
+%% from both the blocked and active queues. 'not_found' for an unknown
+%% channel.
+remove(ChPid, CTag, State = #state{consumers = Consumers}) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{consumer_count = Count,
+ limiter = Limiter,
+ blocked_consumers = Blocked} ->
+ Blocked1 = remove_consumer(ChPid, CTag, Blocked),
+ Limiter1 = case Count of
+ 1 -> rabbit_limiter:deactivate(Limiter);
+ _ -> Limiter
+ end,
+ Limiter2 = rabbit_limiter:forget_consumer(Limiter1, CTag),
+ update_ch_record(C#cr{consumer_count = Count - 1,
+ limiter = Limiter2,
+ blocked_consumers = Blocked1}),
+ State#state{consumers =
+ remove_consumer(ChPid, CTag, Consumers)}
+ end.
+
+%% Drop an entire channel (e.g. it went down): erase its record and
+%% return its outstanding ack tags and all its consumer tags (blocked and
+%% active) so the caller can requeue/cancel.
+erase_ch(ChPid, State = #state{consumers = Consumers}) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ blocked_consumers = BlockedQ} ->
+ AllConsumers = priority_queue:join(Consumers, BlockedQ),
+ ok = erase_ch_record(C),
+ {[AckTag || {AckTag, _CTag} <- queue:to_list(ChAckTags)],
+ tags(priority_queue:to_list(AllConsumers)),
+ State#state{consumers = remove_consumers(ChPid, Consumers)}}
+ end.
+
+%% Notify every channel of any drained credit (see send_drained/1).
+send_drained() -> [update_ch_record(send_drained(C)) || C <- all_ch_record()],
+ ok.
+
+%% Try to deliver one message to some consumer.
+deliver(FetchFun, QName, State) -> deliver(FetchFun, QName, false, State).
+
+%% Walk consumers in priority order: a successful consumer is re-queued at
+%% the same priority (round-robin within a priority band); a blocked one
+%% is dropped from the active set and we retry with the rest. The boolean
+%% in the result reports whether the active consumer set changed.
+deliver(FetchFun, QName, ConsumersChanged,
+ State = #state{consumers = Consumers}) ->
+ case priority_queue:out_p(Consumers) of
+ {empty, _} ->
+ %% Nobody to deliver to: mark the queue inactive for
+ %% utilisation accounting.
+ {undelivered, ConsumersChanged,
+ State#state{use = update_use(State#state.use, inactive)}};
+ {{value, QEntry, Priority}, Tail} ->
+ case deliver_to_consumer(FetchFun, QEntry, QName) of
+ {delivered, R} ->
+ {delivered, ConsumersChanged, R,
+ State#state{consumers = priority_queue:in(QEntry, Priority,
+ Tail)}};
+ undelivered ->
+ deliver(FetchFun, QName, true,
+ State#state{consumers = Tail})
+ end
+ end.
+
+%% Try one consumer: a blocked channel, or a 'suspend' verdict from the
+%% limiter, parks the consumer on the channel's blocked queue and yields
+%% 'undelivered'; otherwise we fetch and ship a message.
+deliver_to_consumer(FetchFun, E = {ChPid, Consumer}, QName) ->
+ C = lookup_ch(ChPid),
+ case is_ch_blocked(C) of
+ true -> block_consumer(C, E),
+ undelivered;
+ false -> case rabbit_limiter:can_send(C#cr.limiter,
+ Consumer#consumer.ack_required,
+ Consumer#consumer.tag) of
+ {suspend, Limiter} ->
+ block_consumer(C#cr{limiter = Limiter}, E),
+ undelivered;
+ {continue, Limiter} ->
+ {delivered, deliver_to_consumer(
+ FetchFun, Consumer,
+ C#cr{limiter = Limiter}, QName)}
+ end
+ end.
+
+%% Actually deliver: fetch via FetchFun, hand the message to the channel,
+%% record the pending ack (when acks are required) and bump the channel's
+%% unsent-message counter.
+deliver_to_consumer(FetchFun,
+ #consumer{tag = CTag,
+ ack_required = AckRequired},
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ unsent_message_count = Count},
+ QName) ->
+ {{Message, IsDelivered, AckTag}, R} = FetchFun(AckRequired),
+ rabbit_channel:deliver(ChPid, CTag, AckRequired,
+ {QName, self(), AckTag, IsDelivered, Message}),
+ ChAckTags1 = case AckRequired of
+ true -> queue:in({AckTag, CTag}, ChAckTags);
+ false -> ChAckTags
+ end,
+ update_ch_record(C#cr{acktags = ChAckTags1,
+ unsent_message_count = Count + 1}),
+ R.
+
+%% Record a pending ack that has no associated consumer tag (stored as
+%% 'none'), creating the channel record if needed.
+record_ack(ChPid, LimiterPid, AckTag) ->
+ C = #cr{acktags = ChAckTags} = ch_record(ChPid, LimiterPid),
+ update_ch_record(C#cr{acktags = queue:in({AckTag, none}, ChAckTags)}),
+ ok.
+
+%% Remove AckTags from the channel's pending-ack queue, counting acks per
+%% consumer tag, and feed those counts to the limiter; returns
+%% {unblocked, State} if that released any blocked consumers.
+subtract_acks(ChPid, AckTags, State) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{acktags = ChAckTags, limiter = Lim} ->
+ {CTagCounts, AckTags2} = subtract_acks(
+ AckTags, [], orddict:new(), ChAckTags),
+ {Unblocked, Lim2} =
+ orddict:fold(
+ fun (CTag, Count, {UnblockedN, LimN}) ->
+ {Unblocked1, LimN1} =
+ rabbit_limiter:ack_from_queue(LimN, CTag, Count),
+ {UnblockedN orelse Unblocked1, LimN1}
+ end, {false, Lim}, CTagCounts),
+ C2 = C#cr{acktags = AckTags2, limiter = Lim2},
+ case Unblocked of
+ true -> unblock(C2, State);
+ false -> update_ch_record(C2),
+ unchanged
+ end
+ end.
+
+%% Walk the ack queue: matched tags are counted per consumer tag and
+%% dropped; unmatched entries are kept (Prefix, in reverse) and re-joined
+%% at the front once every requested tag has been found.
+subtract_acks([], [], CTagCounts, AckQ) ->
+ {CTagCounts, AckQ};
+subtract_acks([], Prefix, CTagCounts, AckQ) ->
+ {CTagCounts, queue:join(queue:from_list(lists:reverse(Prefix)), AckQ)};
+subtract_acks([T | TL] = AckTags, Prefix, CTagCounts, AckQ) ->
+ case queue:out(AckQ) of
+ {{value, {T, CTag}}, QTail} ->
+ subtract_acks(TL, Prefix,
+ orddict:update_counter(CTag, 1, CTagCounts), QTail);
+ {{value, V}, QTail} ->
+ subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail)
+ end.
+
+%% Apply Update (one of the *_fun/0,1 below) to a channel record and, if
+%% that took the channel from blocked to unblocked, move its eligible
+%% consumers back into the active set.
+possibly_unblock(Update, ChPid, State) ->
+ case lookup_ch(ChPid) of
+ not_found -> unchanged;
+ C -> C1 = Update(C),
+ case is_ch_blocked(C) andalso not is_ch_blocked(C1) of
+ false -> update_ch_record(C1),
+ unchanged;
+ true -> unblock(C1, State)
+ end
+ end.
+
+%% Split the channel's blocked consumers into those still blocked by a
+%% per-consumer limiter block and those now free; requeue the free ones
+%% into the active set and mark the queue 'active' for utilisation.
+unblock(C = #cr{blocked_consumers = BlockedQ, limiter = Limiter},
+ State = #state{consumers = Consumers, use = Use}) ->
+ case lists:partition(
+ fun({_P, {_ChPid, #consumer{tag = CTag}}}) ->
+ rabbit_limiter:is_consumer_blocked(Limiter, CTag)
+ end, priority_queue:to_list(BlockedQ)) of
+ {_, []} ->
+ update_ch_record(C),
+ unchanged;
+ {Blocked, Unblocked} ->
+ BlockedQ1 = priority_queue:from_list(Blocked),
+ UnblockedQ = priority_queue:from_list(Unblocked),
+ update_ch_record(C#cr{blocked_consumers = BlockedQ1}),
+ {unblocked,
+ State#state{consumers = priority_queue:join(Consumers, UnblockedQ),
+ use = update_use(Use, active)}}
+ end.
+
+%% Channel-record update funs, handed to possibly_unblock/3 by the queue.
+
+%% Resume a suspended limiter.
+resume_fun() ->
+ fun (C = #cr{limiter = Limiter}) ->
+ C#cr{limiter = rabbit_limiter:resume(Limiter)}
+ end.
+
+%% The channel confirmed receipt of Credit messages: shrink the
+%% unsent-message count accordingly.
+notify_sent_fun(Credit) ->
+ fun (C = #cr{unsent_message_count = Count}) ->
+ C#cr{unsent_message_count = Count - Credit}
+ end.
+
+%% Switch the limiter on for this channel.
+activate_limit_fun() ->
+ fun (C = #cr{limiter = Limiter}) ->
+ C#cr{limiter = rabbit_limiter:activate(Limiter)}
+ end.
+
+%% Apply a credit update for one consumer. We only unblock when this very
+%% grant is what frees the consumer: the channel itself must not be
+%% blocked, the consumer must have been limiter-blocked before, and must
+%% no longer be blocked afterwards.
+credit(IsEmpty, Credit, Drain, ChPid, CTag, State) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ unchanged;
+ #cr{limiter = Limiter} = C ->
+ C1 = #cr{limiter = Limiter1} =
+ credit_and_drain(C, CTag, Credit, drain_mode(Drain), IsEmpty),
+ case is_ch_blocked(C1) orelse
+ (not rabbit_limiter:is_consumer_blocked(Limiter, CTag)) orelse
+ rabbit_limiter:is_consumer_blocked(Limiter1, CTag) of
+ true -> update_ch_record(C1),
+ unchanged;
+ false -> unblock(C1, State)
+ end
+ end.
+
+%% Map the boolean drain flag onto the limiter's credit mode atom.
+drain_mode(Drain) ->
+    case Drain of
+        true  -> drain;
+        false -> manual
+    end.
+
+%% Fraction of time this queue has had active consumers, as a moving
+%% average; the in-progress active or inactive period is folded in before
+%% reporting.
+utilisation(#state{use = {active, Since, Avg}}) ->
+ use_avg(now_micros() - Since, 0, Avg);
+utilisation(#state{use = {inactive, Since, Active, Avg}}) ->
+ use_avg(Active, now_micros() - Since, Avg).
+
+%%----------------------------------------------------------------------------
+
+%% Extract {InitialCredit, Mode} from the x-credit consumer argument
+%% (a table with 'credit' and 'drain' fields); absent or malformed
+%% arguments yield {Default, auto}.
+parse_credit_args(Default, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-credit">>) of
+ {table, T} -> case {rabbit_misc:table_lookup(T, <<"credit">>),
+ rabbit_misc:table_lookup(T, <<"drain">>)} of
+ {{long, C}, {bool, D}} -> {C, drain_mode(D)};
+ _ -> {Default, auto}
+ end;
+ undefined -> {Default, auto}
+ end.
+
+%% The channel's #cr{} from the process dictionary, or 'not_found'.
+lookup_ch(ChPid) ->
+    Key = {ch, ChPid},
+    case get(Key) of
+        undefined -> not_found;
+        Record    -> Record
+    end.
+
+%% Fetch-or-create the per-channel #cr{} in the process dictionary,
+%% monitoring the channel pid on first sight.
+ch_record(ChPid, LimiterPid) ->
+ Key = {ch, ChPid},
+ case get(Key) of
+ undefined -> MonitorRef = erlang:monitor(process, ChPid),
+ Limiter = rabbit_limiter:client(LimiterPid),
+ C = #cr{ch_pid = ChPid,
+ monitor_ref = MonitorRef,
+ acktags = queue:new(),
+ consumer_count = 0,
+ blocked_consumers = priority_queue:new(),
+ limiter = Limiter,
+ unsent_message_count = 0},
+ put(Key, C),
+ C;
+ C = #cr{} -> C
+ end.
+
+%% Write back a #cr{}, or garbage-collect it entirely once the channel has
+%% no pending acks, no consumers and no unsent messages.
+update_ch_record(C = #cr{consumer_count = ConsumerCount,
+ acktags = ChAckTags,
+ unsent_message_count = UnsentMessageCount}) ->
+ case {queue:is_empty(ChAckTags), ConsumerCount, UnsentMessageCount} of
+ {true, 0, 0} -> ok = erase_ch_record(C);
+ _ -> ok = store_ch_record(C)
+ end,
+ C.
+
+%% Persist a #cr{} under its {ch, ChPid} process-dictionary key.
+store_ch_record(C = #cr{ch_pid = ChPid}) ->
+ put({ch, ChPid}, C),
+ ok.
+
+%% Drop a channel record and its monitor.
+erase_ch_record(#cr{ch_pid = ChPid, monitor_ref = MonitorRef}) ->
+ erlang:demonitor(MonitorRef),
+ erase({ch, ChPid}),
+ ok.
+
+%% Every #cr{} currently in the process dictionary.
+all_ch_record() -> [C || {{ch, _}, C} <- get()].
+
+%% Park a consumer on its channel's blocked queue.
+block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) ->
+ update_ch_record(C#cr{blocked_consumers = add_consumer(QEntry, Blocked)}).
+
+%% A channel is blocked when its queue->writer backlog hits the limit or
+%% its limiter is suspended.
+is_ch_blocked(#cr{unsent_message_count = Count, limiter = Limiter}) ->
+ Count >= ?UNSENT_MESSAGE_LIMIT orelse rabbit_limiter:is_suspended(Limiter).
+
+%% Forward any newly-drained per-consumer credit reported by the limiter
+%% to the channel; returns the (possibly updated) channel record.
+send_drained(C = #cr{ch_pid = ChPid, limiter = Limiter}) ->
+ case rabbit_limiter:drained(Limiter) of
+ {[], Limiter} -> C;
+ {CTagCredit, Limiter2} -> rabbit_channel:send_drained(
+ ChPid, CTagCredit),
+ C#cr{limiter = Limiter2}
+ end.
+
+%% Grant credit for one consumer; if rabbit_limiter:credit/5 reports true
+%% (the drain completed, given the queue's emptiness) tell the channel
+%% immediately.
+credit_and_drain(C = #cr{ch_pid = ChPid, limiter = Limiter},
+ CTag, Credit, Mode, IsEmpty) ->
+ case rabbit_limiter:credit(Limiter, CTag, Credit, Mode, IsEmpty) of
+ {true, Limiter1} -> rabbit_channel:send_drained(ChPid,
+ [{CTag, Credit}]),
+ C#cr{limiter = Limiter1};
+ {false, Limiter1} -> C#cr{limiter = Limiter1}
+ end.
+
+%% Consumer tags from a priority_queue:to_list/1 dump.
+tags(CList) -> [CTag || {_P, {_ChPid, #consumer{tag = CTag}}} <- CList].
+
+%% Enqueue a consumer at the priority given by its x-priority argument
+%% (default 0).
+add_consumer({ChPid, Consumer = #consumer{args = Args}}, Queue) ->
+ Priority = case rabbit_misc:table_lookup(Args, <<"x-priority">>) of
+ {_, P} -> P;
+ _ -> 0
+ end,
+ priority_queue:in({ChPid, Consumer}, Priority, Queue).
+
+%% Drop one specific consumer (channel + tag) from a consumer queue.
+remove_consumer(ChPid, CTag, Queue) ->
+ priority_queue:filter(fun ({CP, #consumer{tag = CT}}) ->
+ (CP /= ChPid) or (CT /= CTag)
+ end, Queue).
+
+%% Drop every consumer belonging to a channel.
+remove_consumers(ChPid, Queue) ->
+ priority_queue:filter(fun ({CP, _Consumer}) when CP =:= ChPid -> false;
+ (_) -> true
+ end, Queue).
+
+%% Utilisation bookkeeping: track transitions between 'active' (consumers
+%% available) and 'inactive', folding each completed period into the
+%% moving average on the inactive->active edge.
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = now_micros(),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = now_micros(),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
+
+%% Fold one active/inactive period into the exponentially-weighted moving
+%% average of the active fraction (half-life ?USE_AVG_HALF_LIFE μs).
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ rabbit_misc:moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
+
+%% Wall-clock microseconds since the Unix epoch.
+now_micros() -> timer:now_diff(now(), {0,0,0}).
--- /dev/null
+-module(rabbit_queue_decorator).
+
+-include("rabbit.hrl").
+
+-export([select/1, set/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-callback startup(rabbit_types:amqqueue()) -> 'ok'.
+
+-callback shutdown(rabbit_types:amqqueue()) -> 'ok'.
+
+-callback policy_changed(rabbit_types:amqqueue(), rabbit_types:amqqueue()) ->
+ 'ok'.
+
+-callback active_for(rabbit_types:amqqueue()) -> boolean().
+
+%% called with Queue, MaxActivePriority, IsEmpty
+-callback consumer_state_changed(
+ rabbit_types:amqqueue(), integer(), boolean()) -> 'ok'.
+
+%% NOTE(review): behaviour_info/1 below also lists {description, 0}, but
+%% no corresponding -callback is declared here — confirm whether
+%% description/0 should be added to this branch.
+
+-else.
+
+%% Fallback for Erlang releases without -callback support.
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+ [{description, 0}, {startup, 1}, {shutdown, 1}, {policy_changed, 2},
+ {active_for, 1}, {consumer_state_changed, 3}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Keep only those decorator modules whose code can actually be located.
+select(Modules) ->
+    lists:filter(fun (Mod) -> code:which(Mod) =/= non_existing end, Modules).
+
+%% Recompute a queue record's decorators field from the registered
+%% decorators that report themselves active for it.
+set(Q) -> Q#amqqueue{decorators = [D || D <- list(), D:active_for(Q)]}.
+
+%% All registered queue decorator modules.
+list() -> [M || {_, M} <- rabbit_registry:lookup_all(queue_decorator)].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_index).
+
+-export([init/2, recover/5,
+ terminate/2, delete_and_terminate/1,
+ publish/5, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
+ read/3, next_segment_boundary/1, bounds/1, start/1, stop/0]).
+
+-export([add_queue_ttl/0, avoid_zeroes/0]).
+
+-define(CLEAN_FILENAME, "clean.dot").
+
+%%----------------------------------------------------------------------------
+
+%% The queue index is responsible for recording the order of messages
+%% within a queue on disk.
+%%
+%% Because of the fact that the queue can decide at any point to send
+%% a queue entry to disk, you can not rely on publishes appearing in
+%% order. The only thing you can rely on is a message being published,
+%% then delivered, then ack'd.
+%%
+%% In order to be able to clean up ack'd messages, we write to segment
+%% files. These files have a fixed maximum size: ?SEGMENT_ENTRY_COUNT
+%% publishes, delivers and acknowledgements. They are numbered, and so
+%% it is known that the 0th segment contains messages 0 ->
+%% ?SEGMENT_ENTRY_COUNT - 1, the 1st segment contains messages
+%% ?SEGMENT_ENTRY_COUNT -> 2*?SEGMENT_ENTRY_COUNT - 1 and so on. As
+%% such, in the segment files, we only refer to message sequence ids
+%% by the LSBs as SeqId rem ?SEGMENT_ENTRY_COUNT. This gives them a
+%% fixed size.
+%%
+%% However, transient messages which are not sent to disk at any point
+%% will cause gaps to appear in segment files. Therefore, we delete a
+%% segment file whenever the number of publishes == number of acks
+%% (note that although it is not fully enforced, it is assumed that a
+%% message will never be ack'd before it is delivered, thus this test
+%% also implies == number of delivers). In practice, this does not
+%% cause disk churn in the pathological case because of the journal
+%% and caching (see below).
+%%
+%% Because of the fact that publishes, delivers and acks can occur all
+%% over, we wish to avoid lots of seeking. Therefore we have a fixed
+%% sized journal to which all actions are appended. When the number of
+%% entries in this journal reaches max_journal_entries, the journal
+%% entries are scattered out to their relevant files, and the journal
+%% is truncated to zero size. Note that entries in the journal must
+%% carry the full sequence id, thus the format of entries in the
+%% journal is different to that in the segments.
+%%
+%% The journal is also kept fully in memory, pre-segmented: the state
+%% contains a mapping from segment numbers to state-per-segment (this
+%% state is held for all segments which have been "seen": thus a
+%% segment which has been read but has no pending entries in the
+%% journal is still held in this mapping. Also note that a dict is
+%% used for this mapping, not an array because with an array, you will
+%% always have entries from 0). Actions are stored directly in this
+%% state. Thus at the point of flushing the journal, firstly no
+%% reading from disk is necessary, but secondly if the known number of
+%% acks and publishes in a segment are equal, given the known state of
+%% the segment file combined with the journal, no writing needs to be
+%% done to the segment file either (in fact it is deleted if it exists
+%% at all). This is safe given that the set of acks is a subset of the
+%% set of publishes. When it is necessary to sync messages, it is
+%% sufficient to fsync on the journal: when entries are distributed
+%% from the journal to segment files, those segments appended to are
+%% fsync'd prior to the journal being truncated.
+%%
+%% This module is also responsible for scanning the queue index files
+%% and seeding the message store on start up.
+%%
+%% Note that in general, the representation of a message's state as
+%% the tuple: {('no_pub'|{MsgId, MsgProps, IsPersistent}),
+%% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly
+%% necessary for most operations. However, for startup, and to ensure
+%% the safe and correct combination of journal entries with entries
+%% read from the segment on disk, this richer representation vastly
+%% simplifies and clarifies the code.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% variable_queue.
+%%
+%%----------------------------------------------------------------------------
+
+%% ---- Journal details ----
+
+-define(JOURNAL_FILENAME, "journal.jif").
+
+-define(PUB_PERSIST_JPREFIX, 2#00).
+-define(PUB_TRANS_JPREFIX, 2#01).
+-define(DEL_JPREFIX, 2#10).
+-define(ACK_JPREFIX, 2#11).
+-define(JPREFIX_BITS, 2).
+-define(SEQ_BYTES, 8).
+-define(SEQ_BITS, ((?SEQ_BYTES * 8) - ?JPREFIX_BITS)).
+
+%% ---- Segment details ----
+
+-define(SEGMENT_EXTENSION, ".idx").
+
+%% TODO: The segment size could be made configurable, but deriving all
+%% the other values is quite hairy and quite possibly noticeably less
+%% efficient, depending on how clever the compiler is when it comes to
+%% binary generation/matching with constant vs variable lengths.
+
+-define(REL_SEQ_BITS, 14).
+-define(SEGMENT_ENTRY_COUNT, 16384). %% trunc(math:pow(2, ?REL_SEQ_BITS))
+
+%% seq only is binary 01 followed by 14 bits of rel seq id
+%% (range: 0 - 16383)
+-define(REL_SEQ_ONLY_PREFIX, 01).
+-define(REL_SEQ_ONLY_PREFIX_BITS, 2).
+-define(REL_SEQ_ONLY_RECORD_BYTES, 2).
+
+%% publish record is binary 1 followed by a bit for is_persistent,
+%% then 14 bits of rel seq id, 64 bits for message expiry and 128 bits
+%% of md5sum msg id
+-define(PUB_PREFIX, 1).
+-define(PUB_PREFIX_BITS, 1).
+
+-define(EXPIRY_BYTES, 8).
+-define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)).
+-define(NO_EXPIRY, 0).
+
+-define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes
+-define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)).
+
+%% 16 bytes for md5sum + 8 for expiry
+-define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)).
+%% + 2 for seq, bits and prefix
+-define(PUB_RECORD_BYTES, (?PUB_RECORD_BODY_BYTES + 2)).
+
+%% 1 publish, 1 deliver, 1 ack per msg
+-define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT *
+ (?PUB_RECORD_BYTES + (2 * ?REL_SEQ_ONLY_RECORD_BYTES))).
+
+%% ---- misc ----
+
+-define(PUB, {_, _, _}). %% {MsgId, MsgProps, IsPersistent}
+
+-define(READ_MODE, [binary, raw, read]).
+-define(READ_AHEAD_MODE, [{read_ahead, ?SEGMENT_TOTAL_SIZE} | ?READ_MODE]).
+-define(WRITE_MODE, [write | ?READ_MODE]).
+
+%%----------------------------------------------------------------------------
+
+-record(qistate, { dir, segments, journal_handle, dirty_count,
+ max_journal_entries, on_sync, unconfirmed }).
+
+-record(segment, { num, path, journal_entries, unacked }).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({add_queue_ttl, local, []}).
+-rabbit_upgrade({avoid_zeroes, local, [add_queue_ttl]}).
+
+-ifdef(use_specs).
+
+-type(hdl() :: ('undefined' | any())).
+-type(segment() :: ('undefined' |
+ #segment { num :: non_neg_integer(),
+ path :: file:filename(),
+ journal_entries :: array(),
+ unacked :: non_neg_integer()
+ })).
+-type(seq_id() :: integer()).
+-type(seg_dict() :: {dict(), [segment()]}).
+-type(on_sync_fun() :: fun ((gb_set()) -> ok)).
+-type(qistate() :: #qistate { dir :: file:filename(),
+ segments :: 'undefined' | seg_dict(),
+ journal_handle :: hdl(),
+ dirty_count :: integer(),
+ max_journal_entries :: non_neg_integer(),
+ on_sync :: on_sync_fun(),
+ unconfirmed :: gb_set()
+ }).
+-type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())).
+-type(walker(A) :: fun ((A) -> 'finished' |
+ {rabbit_types:msg_id(), non_neg_integer(), A})).
+-type(shutdown_terms() :: [term()] | 'non_clean_shutdown').
+
+-spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()).
+-spec(recover/5 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(),
+ contains_predicate(), on_sync_fun()) ->
+ {'undefined' | non_neg_integer(), qistate()}).
+-spec(terminate/2 :: ([any()], qistate()) -> qistate()).
+-spec(delete_and_terminate/1 :: (qistate()) -> qistate()).
+-spec(publish/5 :: (rabbit_types:msg_id(), seq_id(),
+ rabbit_types:message_properties(), boolean(), qistate())
+ -> qistate()).
+-spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()).
+-spec(ack/2 :: ([seq_id()], qistate()) -> qistate()).
+-spec(sync/1 :: (qistate()) -> qistate()).
+-spec(needs_sync/1 :: (qistate()) -> 'confirms' | 'other' | 'false').
+-spec(flush/1 :: (qistate()) -> qistate()).
+-spec(read/3 :: (seq_id(), seq_id(), qistate()) ->
+ {[{rabbit_types:msg_id(), seq_id(),
+ rabbit_types:message_properties(),
+ boolean(), boolean()}], qistate()}).
+-spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()).
+-spec(bounds/1 :: (qistate()) ->
+ {non_neg_integer(), non_neg_integer(), qistate()}).
+-spec(start/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}).
+
+-spec(add_queue_ttl/0 :: () -> 'ok').
+
+-endif.
+
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+%% Create a fresh, empty index for queue Name, installing OnSyncFun as
+%% the confirm callback. The queue's directory must not exist yet; we
+%% assert that rather than silently reuse stale on-disk state.
+init(Name, OnSyncFun) ->
+    #qistate { dir = Dir } = State = blank_state(Name),
+    %% rabbit_file:is_file/1 is also true for directories, so this
+    %% asserts that nothing at all exists at Dir.
+    false = rabbit_file:is_file(Dir),
+    State #qistate { on_sync = OnSyncFun }.
+
+%% Bring the index for queue Name back after a restart. If both this
+%% queue's terms and the message store were recovered cleanly we can
+%% trust the per-segment counts saved at shutdown; otherwise we must
+%% rebuild state by scanning the segments.
+recover(Name, Terms, MsgStoreRecovered, ContainsCheckFun, OnSyncFun) ->
+    State = (blank_state(Name)) #qistate { on_sync = OnSyncFun },
+    CleanShutdown = Terms /= non_clean_shutdown,
+    case CleanShutdown andalso MsgStoreRecovered of
+        true  -> init_clean(proplists:get_value(segments, Terms, []), State);
+        false -> init_dirty(CleanShutdown, ContainsCheckFun, State)
+    end.
+
+%% Shut the index down cleanly, persisting the per-segment unacked
+%% counts alongside the caller's Terms so a later recover/5 can avoid
+%% rescanning the segment files.
+terminate(Terms, State = #qistate { dir = Dir }) ->
+    {SegmentCounts, State1} = terminate(State),
+    RecoveryTerms = [{segments, SegmentCounts} | Terms],
+    rabbit_recovery_terms:store(filename:basename(Dir), RecoveryTerms),
+    State1.
+
+%% Shut down and then remove the queue's directory (journal plus all
+%% segment files) from disk.
+delete_and_terminate(State) ->
+    {_SegmentCounts, State1} = terminate(State),
+    #qistate { dir = Dir } = State1,
+    ok = rabbit_file:recursive_delete([Dir]),
+    State1.
+
+%% Record the publish of MsgId at SeqId: append a publish record to
+%% the journal file, mirror it into the in-memory journal, and track
+%% the message in 'unconfirmed' if it needs confirming. May trigger a
+%% journal flush.
+publish(MsgId, SeqId, MsgProps, IsPersistent,
+        State = #qistate { unconfirmed = Unconfirmed })
+  when is_binary(MsgId) ->
+    %% msg ids are md5sums and must therefore be exactly 16 bytes
+    ?MSG_ID_BYTES = size(MsgId),
+    State0 = case MsgProps#message_properties.needs_confirming of
+                 true  -> Unconfirmed1 = gb_sets:add_element(MsgId,
+                                                             Unconfirmed),
+                          State #qistate { unconfirmed = Unconfirmed1 };
+                 false -> State
+             end,
+    {JournalHdl, State1} = get_journal_handle(State0),
+    Prefix = case IsPersistent of
+                 true  -> ?PUB_PERSIST_JPREFIX;
+                 false -> ?PUB_TRANS_JPREFIX
+             end,
+    ok = file_handle_cache:append(
+           JournalHdl, [<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>,
+                        create_pub_record_body(MsgId, MsgProps)]),
+    maybe_flush_journal(
+      add_to_journal(SeqId, {MsgId, MsgProps, IsPersistent}, State1)).
+
+%% Record a 'deliver' journal entry for each seq id in SeqIds.
+deliver(SeqIds, State) ->
+ deliver_or_ack(del, SeqIds, State).
+
+%% Record an 'ack' journal entry for each seq id in SeqIds.
+ack(SeqIds, State) ->
+ deliver_or_ack(ack, SeqIds, State).
+
+%% This is called when there are outstanding confirms or when the
+%% queue is idle and the journal needs syncing (see needs_sync/1).
+sync(State = #qistate { journal_handle = undefined }) ->
+ %% No journal handle open means nothing has been written, so there
+ %% is nothing to fsync.
+ State;
+sync(State = #qistate { journal_handle = JournalHdl }) ->
+ ok = file_handle_cache:sync(JournalHdl),
+ notify_sync(State).
+
+%% What, if anything, sync/1 should be called for: 'confirms' when
+%% there are unconfirmed publishes outstanding, 'other' when the
+%% journal merely has unsynced writes, 'false' when neither.
+needs_sync(#qistate { journal_handle = undefined }) ->
+    false;
+needs_sync(#qistate { journal_handle = JournalHdl, unconfirmed = UC }) ->
+    case gb_sets:is_empty(UC) of
+        false -> confirms;
+        true  -> case file_handle_cache:needs_sync(JournalHdl) of
+                     true  -> other;
+                     false -> false
+                 end
+    end.
+
+%% Push the in-memory journal out to the segment files, unless there
+%% is nothing pending (dirty_count of 0).
+flush(State = #qistate { dirty_count = 0 }) -> State;
+flush(State) -> flush_journal(State).
+
+%% Return all unacked entries with Start =< SeqId < End together with
+%% the updated state. Start is inclusive, End is exclusive; an empty
+%% range yields no messages.
+read(StartEnd, StartEnd, State) ->
+    {[], State};
+read(Start, End, State = #qistate { segments = Segments,
+                                    dir = Dir }) when Start =< End ->
+    LowerB = {StartSeg, _StartRelSeq} = seq_id_to_seg_and_rel_seq_id(Start),
+    UpperB = {EndSeg, _EndRelSeq} = seq_id_to_seg_and_rel_seq_id(End - 1),
+    ReadSeg = fun (Seg, Acc) ->
+                      read_bounded_segment(Seg, LowerB, UpperB, Acc, Dir)
+              end,
+    %% foldr so messages come out in ascending seq_id order
+    {Messages, Segments1} =
+        lists:foldr(ReadSeg, {[], Segments}, lists:seq(StartSeg, EndSeg)),
+    {Messages, State #qistate { segments = Segments1 }}.
+
+%% The first seq_id of the segment after the one containing SeqId.
+next_segment_boundary(SeqId) ->
+    {Seg, _RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+    reconstruct_seq_id(1 + Seg, 0).
+
+%% Return {LowSeqId, NextSeqId, State}. LowSeqId is the seq_id at the
+%% start of the lowest known segment (which may not itself exist as a
+%% message — callers only need a lower bound on a segment boundary);
+%% NextSeqId is the seq_id at the start of the segment after the
+%% highest known one. Not particularly efficient, but only invoked on
+%% queue initialisation.
+bounds(State = #qistate { segments = Segments }) ->
+    case lists:sort(segment_nums(Segments)) of
+        [] ->
+            {0, 0, State};
+        [MinSeg | _] = SortedSegNums ->
+            MaxSeg = lists:last(SortedSegNums),
+            {reconstruct_seq_id(MinSeg, 0),
+             reconstruct_seq_id(MaxSeg + 1, 0),
+             State}
+    end.
+
+%% Subsystem startup: read the recovery terms for every durable queue,
+%% delete the directory of any queue we were not asked to recover, and
+%% hand back the terms — in the same order as DurableQueueNames, as
+%% the backing queue interface requires — plus a walker fun for
+%% seeding the message store.
+start(DurableQueueNames) ->
+    ok = rabbit_recovery_terms:start(),
+    ReadTerms =
+        fun (QName, {TermsAcc, DirSet}) ->
+                DirName = queue_name_to_dir_name(QName),
+                Terms = case rabbit_recovery_terms:read(DirName) of
+                            {error, _}   -> non_clean_shutdown;
+                            {ok, Terms0} -> Terms0
+                        end,
+                {[Terms | TermsAcc], sets:add_element(DirName, DirSet)}
+        end,
+    {DurableTerms, DurableDirectories} =
+        lists:foldl(ReadTerms, {[], sets:new()}, DurableQueueNames),
+
+    %% Any queue directory we've not been asked to recover is garbage.
+    QueuesDir = queues_dir(),
+    Garbage = [filename:join(QueuesDir, DirName) ||
+                  DirName <- all_queue_directory_names(QueuesDir),
+                  not sets:is_element(DirName, DurableDirectories)],
+    rabbit_file:recursive_delete(Garbage),
+
+    rabbit_recovery_terms:clear(),
+
+    %% The fold above accumulated the terms in reverse order.
+    OrderedTerms = lists:reverse(DurableTerms),
+    {OrderedTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
+
+%% Subsystem shutdown: stop the recovery-terms server.
+stop() -> rabbit_recovery_terms:stop().
+
+%% Names of the immediate subdirectories of Dir (one per queue); a
+%% missing Dir simply yields no names.
+all_queue_directory_names(Dir) ->
+    case rabbit_file:list_dir(Dir) of
+        {error, enoent} -> [];
+        {ok, Entries}   -> [Entry || Entry <- Entries,
+                                     rabbit_file:is_dir(
+                                       filename:join(Dir, Entry))]
+    end.
+
+%%----------------------------------------------------------------------------
+%% startup and shutdown
+%%----------------------------------------------------------------------------
+
+%% A pristine qistate for QueueName, rooted at its hashed directory.
+blank_state(QueueName) ->
+    Dir = filename:join(queues_dir(), queue_name_to_dir_name(QueueName)),
+    blank_state_dir(Dir).
+
+%% A pristine qistate rooted at an explicit directory. The journal
+%% size limit is taken from the application environment.
+blank_state_dir(Dir) ->
+    {ok, MaxJournal} =
+        application:get_env(rabbit, queue_index_max_journal_entries),
+    #qistate { dir                 = Dir,
+               segments            = segments_new(),
+               journal_handle      = undefined,
+               dirty_count         = 0,
+               max_journal_entries = MaxJournal,
+               on_sync             = fun (_) -> ok end,
+               unconfirmed         = gb_sets:new() }.
+
+init_clean(RecoveredCounts, State) ->
+    %% Load the journal. Since this is a clean recovery this (almost)
+    %% gets us back to where we were on shutdown.
+    State1 = #qistate { dir = Dir, segments = Segments } =
+        load_journal(State),
+    %% Journal loading only creates segment records for segments the
+    %% journal touches, and their counts come from journal entries
+    %% alone. We need complete counts for all segments — which is
+    %% exactly what terminate/2 stored on shutdown, so restore them.
+    RestoreCount =
+        fun ({Seg, UnackedCount}, SegmentsN) ->
+                Segment = segment_find_or_new(Seg, Dir, SegmentsN),
+                segment_store(Segment #segment { unacked = UnackedCount },
+                              SegmentsN)
+        end,
+    Segments1 = lists:foldl(RestoreCount, Segments, RecoveredCounts),
+    %% The counts above include transient messages, which would be the
+    %% wrong thing to return, hence 'undefined' for the count.
+    {undefined, State1 #qistate { segments = Segments1 }}.
+
+%% Rebuild the index the hard way: replay the journal, then scan every
+%% segment, reconciling the index against the message store via
+%% ContainsCheckFun. Returns {Count, State} where Count is the number
+%% of unacked messages found.
+init_dirty(CleanShutdown, ContainsCheckFun, State) ->
+ %% Recover the journal completely. This will also load segments
+ %% which have entries in the journal and remove duplicates. The
+ %% counts will correctly reflect the combination of the segment
+ %% and the journal.
+ State1 = #qistate { dir = Dir, segments = Segments } =
+ recover_journal(State),
+ {Segments1, Count, DirtyCount} =
+ %% Load each segment in turn and filter out messages that are
+ %% not in the msg_store, by adding acks to the journal. These
+ %% acks only go to the RAM journal as it doesn't matter if we
+ %% lose them. Also mark delivered if not clean shutdown. Also
+ %% find the number of unacked messages. Also accumulate the
+ %% dirty count here, so we can call maybe_flush_journal below
+ %% and avoid unnecessary file system operations.
+ lists:foldl(
+ fun (Seg, {Segments2, CountAcc, DirtyCount}) ->
+ {Segment = #segment { unacked = UnackedCount }, Dirty} =
+ recover_segment(ContainsCheckFun, CleanShutdown,
+ segment_find_or_new(Seg, Dir, Segments2)),
+ {segment_store(Segment, Segments2),
+ CountAcc + UnackedCount, DirtyCount + Dirty}
+ end, {Segments, 0, 0}, all_segment_nums(State1)),
+ State2 = maybe_flush_journal(State1 #qistate { segments = Segments1,
+ dirty_count = DirtyCount }),
+ {Count, State2}.
+
+%% Common shutdown path: close the journal handle if one is open, and
+%% collect the per-segment unacked counts for persisting. The returned
+%% state has released both the handle and the segment mapping.
+terminate(State = #qistate { journal_handle = JournalHdl,
+                             segments = Segments }) ->
+    case JournalHdl of
+        undefined -> ok;
+        _         -> ok = file_handle_cache:close(JournalHdl)
+    end,
+    CountSeg = fun (#segment { num = Seg, unacked = UnackedCount }, Acc) ->
+                       [{Seg, UnackedCount} | Acc]
+               end,
+    SegmentCounts = segment_fold(CountSeg, [], Segments),
+    {SegmentCounts, State #qistate { journal_handle = undefined,
+                                     segments = undefined }}.
+
+%% Load one segment, apply its pending journal entries, then walk the
+%% combined entries adding del/ack journal entries as required (see
+%% recover_message/5). Returns {Segment, DirtyCount} where DirtyCount
+%% is the number of journal entries added. The inner fun matches only
+%% unacked publishes: acked entries have already been dropped by
+%% load_segment(false, _) / segment_plus_journal, so anything else
+%% crashing here is deliberate.
+recover_segment(ContainsCheckFun, CleanShutdown,
+ Segment = #segment { journal_entries = JEntries }) ->
+ {SegEntries, UnackedCount} = load_segment(false, Segment),
+ {SegEntries1, UnackedCountDelta} =
+ segment_plus_journal(SegEntries, JEntries),
+ array:sparse_foldl(
+ fun (RelSeq, {{MsgId, _MsgProps, _IsPersistent}, Del, no_ack},
+ SegmentAndDirtyCount) ->
+ recover_message(ContainsCheckFun(MsgId), CleanShutdown,
+ Del, RelSeq, SegmentAndDirtyCount)
+ end,
+ {Segment #segment { unacked = UnackedCount + UnackedCountDelta }, 0},
+ SegEntries1).
+
+%% recover_message(InMsgStore, CleanShutdown, Del, RelSeq,
+%%                 {Segment, DirtyCount}) -> {Segment1, DirtyCount1}
+%%
+%% In the store and shutdown was clean: trust the entry as-is.
+recover_message( true, true, _Del, _RelSeq, SegmentAndDirtyCount) ->
+ SegmentAndDirtyCount;
+%% In the store, dirty shutdown, already marked delivered: nothing to do.
+recover_message( true, false, del, _RelSeq, SegmentAndDirtyCount) ->
+ SegmentAndDirtyCount;
+%% In the store, dirty shutdown, not marked delivered: mark it now.
+recover_message( true, false, no_del, RelSeq, {Segment, DirtyCount}) ->
+ {add_to_journal(RelSeq, del, Segment), DirtyCount + 1};
+%% Not in the message store: ack it (adding the del first if missing)
+%% so the entry is removed from the index.
+recover_message(false, _, del, RelSeq, {Segment, DirtyCount}) ->
+ {add_to_journal(RelSeq, ack, Segment), DirtyCount + 1};
+recover_message(false, _, no_del, RelSeq, {Segment, DirtyCount}) ->
+ {add_to_journal(RelSeq, ack,
+ add_to_journal(RelSeq, del, Segment)),
+ DirtyCount + 2}.
+
+%% Map a queue resource name to a stable directory name: the md5 of
+%% the name term, rendered in base 36.
+queue_name_to_dir_name(Name = #resource { kind = queue }) ->
+ <<Num:128>> = erlang:md5(term_to_binary(Name)),
+ rabbit_misc:format("~.36B", [Num]).
+
+%% All queue index directories live under <mnesia dir>/queues.
+queues_dir() ->
+ filename:join(rabbit_mnesia:dir(), "queues").
+
+%%----------------------------------------------------------------------------
+%% msg store startup delta function
+%%----------------------------------------------------------------------------
+
+%% Walker used to seed the message store at startup. The first clause
+%% spawns one reader per durable queue via the worker pool (each
+%% forked in the gatherer); the second repeatedly pulls {MsgId, Count}
+%% results out of the gatherer until every reader has finished.
+queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) ->
+    {ok, Gatherer} = gatherer:start_link(),
+    lists:foreach(
+      fun (QueueName) ->
+              ok = gatherer:fork(Gatherer),
+              ok = worker_pool:submit_async(
+                     fun () ->
+                             link(Gatherer),
+                             ok = queue_index_walker_reader(QueueName,
+                                                            Gatherer),
+                             unlink(Gatherer),
+                             ok
+                     end)
+      end, DurableQueues),
+    queue_index_walker({next, Gatherer});
+
+queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
+    case gatherer:out(Gatherer) of
+        empty ->
+            unlink(Gatherer),
+            ok = gatherer:stop(Gatherer),
+            finished;
+        {value, {MsgId, Count}} ->
+            {MsgId, Count, {next, Gatherer}}
+    end.
+
+%% Reader body: stream {MsgId, 1} into the gatherer for every
+%% persistent, unacked message in QueueName's index, then signal
+%% completion.
+queue_index_walker_reader(QueueName, Gatherer) ->
+    State = blank_state(QueueName),
+    EmitPersistent =
+        fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok) ->
+                gatherer:sync_in(Gatherer, {MsgId, 1});
+            (_SeqId, _MsgId, _MsgProps, _IsPersistent, _IsDelivered,
+             _IsAcked, Acc) ->
+                Acc
+        end,
+    ok = scan_segments(EmitPersistent, ok, State),
+    ok = gatherer:finish(Gatherer).
+
+%% Fold Fun over every entry of every segment of the (journal-
+%% recovered) index, tearing the state down again afterwards. Only
+%% used by the queue_index_walker readers above.
+scan_segments(Fun, Acc, State) ->
+ State1 = #qistate { segments = Segments, dir = Dir } =
+ recover_journal(State),
+ Result = lists:foldr(
+ fun (Seg, AccN) ->
+ segment_entries_foldr(
+ fun (RelSeq, {{MsgId, MsgProps, IsPersistent},
+ IsDelivered, IsAcked}, AccM) ->
+ Fun(reconstruct_seq_id(Seg, RelSeq), MsgId, MsgProps,
+ IsPersistent, IsDelivered, IsAcked, AccM)
+ end, AccN, segment_find_or_new(Seg, Dir, Segments))
+ end, Acc, all_segment_nums(State1)),
+ {_SegmentCounts, _State} = terminate(State1),
+ Result.
+
+%%----------------------------------------------------------------------------
+%% expiry/binary manipulation
+%%----------------------------------------------------------------------------
+
+%% Body of a publish record: the 16-byte msg id followed by 8 bytes of
+%% expiry, with ?NO_EXPIRY (0) encoding "no expiry set".
+create_pub_record_body(MsgId, #message_properties { expiry = Expiry }) ->
+ [MsgId, expiry_to_binary(Expiry)].
+
+expiry_to_binary(undefined) -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
+expiry_to_binary(Expiry) -> <<Expiry:?EXPIRY_BITS>>.
+
+%% Decode a publish record body produced by create_pub_record_body/2.
+parse_pub_record_body(<<MsgIdNum:?MSG_ID_BITS, Expiry:?EXPIRY_BITS>>) ->
+    %% work around for binary data fragmentation. See
+    %% rabbit_msg_file:read_next/2
+    <<MsgId:?MSG_ID_BYTES/binary>> = <<MsgIdNum:?MSG_ID_BITS>>,
+    Props = case Expiry of
+                ?NO_EXPIRY -> #message_properties { expiry = undefined };
+                _          -> #message_properties { expiry = Expiry }
+            end,
+    {MsgId, Props}.
+
+%%----------------------------------------------------------------------------
+%% journal manipulation
+%%----------------------------------------------------------------------------
+
+%% Three roles, dispatched on the type of the third argument:
+%%  * qistate: route Action for an absolute SeqId to the right
+%%    segment and bump the dirty count;
+%%  * segment: apply Action at RelSeq to the segment's journal
+%%    entries, adjusting the unacked count (+1 publish, -1 ack);
+%%  * journal-entries array: merge Action into the per-RelSeq
+%%    {Pub, Del, Ack} triple.
+add_to_journal(SeqId, Action, State = #qistate { dirty_count = DCount,
+ segments = Segments,
+ dir = Dir }) ->
+ {Seg, RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+ Segment = segment_find_or_new(Seg, Dir, Segments),
+ Segment1 = add_to_journal(RelSeq, Action, Segment),
+ State #qistate { dirty_count = DCount + 1,
+ segments = segment_store(Segment1, Segments) };
+
+add_to_journal(RelSeq, Action,
+ Segment = #segment { journal_entries = JEntries,
+ unacked = UnackedCount }) ->
+ Segment #segment {
+ journal_entries = add_to_journal(RelSeq, Action, JEntries),
+ unacked = UnackedCount + case Action of
+ ?PUB -> +1;
+ del -> 0;
+ ack -> -1
+ end};
+
+add_to_journal(RelSeq, Action, JEntries) ->
+ case array:get(RelSeq, JEntries) of
+ undefined ->
+ %% First action for this RelSeq in the journal.
+ array:set(RelSeq,
+ case Action of
+ ?PUB -> {Action, no_del, no_ack};
+ del -> {no_pub, del, no_ack};
+ ack -> {no_pub, no_del, ack}
+ end, JEntries);
+ ({Pub, no_del, no_ack}) when Action == del ->
+ array:set(RelSeq, {Pub, del, no_ack}, JEntries);
+ ({no_pub, del, no_ack}) when Action == ack ->
+ array:set(RelSeq, {no_pub, del, ack}, JEntries);
+ %% Publish, deliver and ack all within the journal cancel out
+ %% entirely: the entry never needs to reach the segment file.
+ ({?PUB, del, no_ack}) when Action == ack ->
+ array:reset(RelSeq, JEntries)
+ end.
+
+%% Flush the journal once the number of dirty entries exceeds the
+%% configured maximum; otherwise leave the state untouched.
+maybe_flush_journal(State = #qistate { dirty_count = DCount,
+                                       max_journal_entries = MaxJournal }) ->
+    case DCount > MaxJournal of
+        true  -> flush_journal(State);
+        false -> State
+    end.
+
+%% Scatter the in-memory journal out to the segment files, then
+%% truncate the journal file and fire any pending confirms.
+flush_journal(State = #qistate { segments = Segments }) ->
+    FlushSeg =
+        fun (#segment { unacked = 0, path = Path }, SegmentsN) ->
+                %% Publishes == acks for this segment, so any file on
+                %% disk is now entirely garbage.
+                case rabbit_file:is_file(Path) of
+                    true  -> ok = rabbit_file:delete(Path);
+                    false -> ok
+                end,
+                SegmentsN;
+            (#segment {} = Segment, SegmentsN) ->
+                segment_store(append_journal_to_segment(Segment), SegmentsN)
+        end,
+    Segments1 = segment_fold(FlushSeg, segments_new(), Segments),
+    {JournalHdl, State1} =
+        get_journal_handle(State #qistate { segments = Segments1 }),
+    ok = file_handle_cache:clear(JournalHdl),
+    notify_sync(State1 #qistate { dirty_count = 0 }).
+
+%% Write a segment's pending journal entries to its file: a no-op when
+%% there are none, otherwise the entries are appended and then dropped
+%% from the in-memory journal.
+append_journal_to_segment(#segment { journal_entries = JEntries,
+                                     path = Path } = Segment) ->
+    case array:sparse_size(JEntries) of
+        0 ->
+            Segment;
+        _ ->
+            {ok, SegHdl} = file_handle_cache:open(
+                             Path, ?WRITE_MODE, [{write_buffer, infinity}]),
+            array:sparse_foldl(fun write_entry_to_segment/3, SegHdl,
+                               JEntries),
+            ok = file_handle_cache:close(SegHdl),
+            Segment #segment { journal_entries = array_new() }
+    end.
+
+%% Return an open handle on the journal file, opening it (and creating
+%% the queue directory) on first use; thereafter the cached handle in
+%% the state is returned.
+get_journal_handle(State = #qistate { journal_handle = undefined,
+                                      dir = Dir }) ->
+    Path = filename:join(Dir, ?JOURNAL_FILENAME),
+    ok = rabbit_file:ensure_dir(Path),
+    {ok, JHdl} = file_handle_cache:open(Path, ?WRITE_MODE,
+                                        [{write_buffer, infinity}]),
+    {JHdl, State #qistate { journal_handle = JHdl }};
+get_journal_handle(State = #qistate { journal_handle = JHdl }) ->
+    {JHdl, State}.
+
+%% Load the on-disk journal into the in-memory representation. This is
+%% not idempotent and will mess up the counts if called more than once
+%% on the same state; it assumes all counts start at 0.
+load_journal(State = #qistate { dir = Dir }) ->
+    case rabbit_file:is_file(filename:join(Dir, ?JOURNAL_FILENAME)) of
+        false ->
+            State;
+        true ->
+            {JournalHdl, State1} = get_journal_handle(State),
+            {ok, 0} = file_handle_cache:position(JournalHdl, 0),
+            load_journal_entries(State1)
+    end.
+
+%% Load the journal and reconcile it against the segment files:
+%% journal entries already present in a segment are removed as
+%% duplicates and the per-segment unacked counts corrected. Like
+%% load_journal/1, not idempotent.
+recover_journal(State) ->
+ State1 = #qistate { segments = Segments } = load_journal(State),
+ Segments1 =
+ segment_map(
+ fun (Segment = #segment { journal_entries = JEntries,
+ unacked = UnackedCountInJournal }) ->
+ %% We want to keep ack'd entries in so that we can
+ %% remove them if duplicates are in the journal. The
+ %% counts here are purely from the segment itself.
+ {SegEntries, UnackedCountInSeg} = load_segment(true, Segment),
+ {JEntries1, UnackedCountDuplicates} =
+ journal_minus_segment(JEntries, SegEntries),
+ Segment #segment { journal_entries = JEntries1,
+ unacked = (UnackedCountInJournal +
+ UnackedCountInSeg -
+ UnackedCountDuplicates) }
+ end, Segments),
+ State1 #qistate { segments = Segments1 }.
+
+%% Read the on-disk journal record by record, replaying each into the
+%% in-memory journal. Reading stops at EOF, on a failed/short read, or
+%% on an all-zero publish body (taken as untruncated leftover space
+%% from a dirty shutdown).
+load_journal_entries(State = #qistate { journal_handle = Hdl }) ->
+ case file_handle_cache:read(Hdl, ?SEQ_BYTES) of
+ {ok, <<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>} ->
+ case Prefix of
+ ?DEL_JPREFIX ->
+ load_journal_entries(add_to_journal(SeqId, del, State));
+ ?ACK_JPREFIX ->
+ load_journal_entries(add_to_journal(SeqId, ack, State));
+ _ ->
+ %% One of the two publish prefixes: the record
+ %% continues with the publish body.
+ case file_handle_cache:read(Hdl, ?PUB_RECORD_BODY_BYTES) of
+ %% Journal entry composed only of zeroes was probably
+ %% produced during a dirty shutdown so stop reading
+ {ok, <<0:?PUB_RECORD_BODY_BYTES/unit:8>>} ->
+ State;
+ {ok, <<Bin:?PUB_RECORD_BODY_BYTES/binary>>} ->
+ {MsgId, MsgProps} = parse_pub_record_body(Bin),
+ IsPersistent = case Prefix of
+ ?PUB_PERSIST_JPREFIX -> true;
+ ?PUB_TRANS_JPREFIX -> false
+ end,
+ load_journal_entries(
+ add_to_journal(
+ SeqId, {MsgId, MsgProps, IsPersistent}, State));
+ _ErrOrEoF -> %% err, we've lost at least a publish
+ State
+ end
+ end;
+ _ErrOrEoF -> State
+ end.
+
+%% Append one del/ack record to the journal file for every SeqId, then
+%% mirror the entries into the in-memory journal and maybe flush.
+deliver_or_ack(_Kind, [], State) ->
+    State;
+deliver_or_ack(Kind, SeqIds, State) ->
+    JPrefix = case Kind of
+                  ack -> ?ACK_JPREFIX;
+                  del -> ?DEL_JPREFIX
+              end,
+    {JournalHdl, State1} = get_journal_handle(State),
+    Records = [<<JPrefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>> ||
+                  SeqId <- SeqIds],
+    ok = file_handle_cache:append(JournalHdl, Records),
+    State2 = lists:foldl(fun (SeqId, StateN) ->
+                                 add_to_journal(SeqId, Kind, StateN)
+                         end, State1, SeqIds),
+    maybe_flush_journal(State2).
+
+%% Fire the on_sync callback with the set of unconfirmed msg ids (if
+%% any) and reset the set; the journal has just been synced or
+%% cleared, so those publishes are now safe.
+notify_sync(State = #qistate { unconfirmed = UC, on_sync = OnSyncFun }) ->
+    case gb_sets:is_empty(UC) of
+        false -> OnSyncFun(UC),
+                 State #qistate { unconfirmed = gb_sets:new() };
+        true  -> State
+    end.
+
+%%----------------------------------------------------------------------------
+%% segment manipulation
+%%----------------------------------------------------------------------------
+
+%% Split an absolute seq_id into {SegmentNumber, RelSeq-within-segment}.
+seq_id_to_seg_and_rel_seq_id(SeqId) ->
+ { SeqId div ?SEGMENT_ENTRY_COUNT, SeqId rem ?SEGMENT_ENTRY_COUNT }.
+
+%% Inverse of seq_id_to_seg_and_rel_seq_id/1.
+reconstruct_seq_id(Seg, RelSeq) ->
+ (Seg * ?SEGMENT_ENTRY_COUNT) + RelSeq.
+
+%% Every segment number known either from the in-memory state or from
+%% a segment file on disk, sorted ascending with duplicates removed.
+all_segment_nums(#qistate { dir = Dir, segments = Segments }) ->
+    IsDigit = fun (C) -> $0 =< C andalso C =< $9 end,
+    OnDisk = [list_to_integer(lists:takewhile(IsDigit, SegName)) ||
+                 SegName <- rabbit_file:wildcard(
+                              ".*\\" ++ ?SEGMENT_EXTENSION, Dir)],
+    lists:usort(OnDisk ++ segment_nums(Segments)).
+
+%% Fetch the state of segment Seg, or build a fresh empty one pointing
+%% at its (possibly not yet existing) file under Dir.
+segment_find_or_new(Seg, Dir, Segments) ->
+    case segment_find(Seg, Segments) of
+        {ok, Segment} ->
+            Segment;
+        error ->
+            SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION,
+            #segment { num             = Seg,
+                       path            = filename:join(Dir, SegName),
+                       journal_entries = array_new(),
+                       unacked         = 0 }
+    end.
+
+%% Segments are held as {Dict, CacheList} where CacheList caches (at
+%% most) the last two distinct segments stored, checked before the
+%% dict. The inline comments label which cache slot each clause
+%% handles (cache length 1 or 2, head or tail).
+segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) ->
+ {ok, Segment}; %% 1 or (2, matches head)
+segment_find(Seg, {_Segments, [_, Segment = #segment { num = Seg }]}) ->
+ {ok, Segment}; %% 2, matches tail
+segment_find(Seg, {Segments, _}) -> %% no match
+ dict:find(Seg, Segments).
+
+%% Store a segment at the head of the cache; when the cache already
+%% holds two other segments, the oldest is evicted back into the dict.
+%% The stored segment is always erased from the dict so it is never
+%% held in both places.
+segment_store(Segment = #segment { num = Seg }, %% 1 or (2, matches head)
+ {Segments, [#segment { num = Seg } | Tail]}) ->
+ {Segments, [Segment | Tail]};
+segment_store(Segment = #segment { num = Seg }, %% 2, matches tail
+ {Segments, [SegmentA, #segment { num = Seg }]}) ->
+ {Segments, [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg }, {Segments, []}) ->
+ {dict:erase(Seg, Segments), [Segment]};
+segment_store(Segment = #segment { num = Seg }, {Segments, [SegmentA]}) ->
+ {dict:erase(Seg, Segments), [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg },
+ {Segments, [SegmentA, SegmentB]}) ->
+ {dict:store(SegmentB#segment.num, SegmentB, dict:erase(Seg, Segments)),
+ [Segment, SegmentA]}.
+
+%% Fold Fun over every known segment, cached segments first.
+segment_fold(Fun, Acc, {Segments, CachedSegments}) ->
+    CachedAcc = lists:foldl(Fun, Acc, CachedSegments),
+    dict:fold(fun (_Seg, Segment, AccN) -> Fun(Segment, AccN) end,
+              CachedAcc, Segments).
+
+%% Map Fun over every known segment, preserving the cache structure.
+segment_map(Fun, {Segments, CachedSegments}) ->
+    {dict:map(fun (_Seg, Segment) -> Fun(Segment) end, Segments),
+     [Fun(Segment) || Segment <- CachedSegments]}.
+
+%% The numbers of all known segments, in no particular order.
+segment_nums({Segments, CachedSegments}) ->
+    [Num || #segment { num = Num } <- CachedSegments] ++
+        dict:fetch_keys(Segments).
+
+%% A fresh, empty segment mapping: dict plus empty cache list.
+segments_new() ->
+    {dict:new(), []}.
+
+%% Append one journal entry to an open segment file handle. An entry
+%% that was published, delivered and acked entirely within the journal
+%% is not written at all; otherwise a publish record and/or one or two
+%% rel-seq-only records (del and ack share the same record format, so
+%% del plus ack is simply the record written twice) are appended.
+write_entry_to_segment(_RelSeq, {?PUB, del, ack}, Hdl) ->
+ Hdl;
+write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) ->
+ ok = case Pub of
+ no_pub ->
+ ok;
+ {MsgId, MsgProps, IsPersistent} ->
+ file_handle_cache:append(
+ Hdl, [<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+ (bool_to_int(IsPersistent)):1,
+ RelSeq:?REL_SEQ_BITS>>,
+ create_pub_record_body(MsgId, MsgProps)])
+ end,
+ ok = case {Del, Ack} of
+ {no_del, no_ack} ->
+ ok;
+ _ ->
+ Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS>>,
+ file_handle_cache:append(
+ Hdl, case {Del, Ack} of
+ {del, ack} -> [Binary, Binary];
+ _ -> Binary
+ end)
+ end,
+ Hdl.
+
+%% Accumulate the unacked entries of segment Seg that fall within the
+%% inclusive bounds {StartSeg, StartRelSeq} .. {EndSeg, EndRelSeq}
+%% onto Messages (the caller folds right-to-left over segments, so the
+%% final list is in ascending seq_id order).
+read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq},
+                     {Messages, Segments}, Dir) ->
+    Segment = segment_find_or_new(Seg, Dir, Segments),
+    {segment_entries_foldr(
+       fun (RelSeq, {{MsgId, MsgProps, IsPersistent}, IsDelivered, no_ack},
+            Acc)
+             when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso
+                  (Seg < EndSeg orelse EndRelSeq >= RelSeq) ->
+               %% Reconstruct from Seg, not StartSeg: with a read that
+               %% spans segments, entries in segments after the first
+               %% would otherwise be given seq_ids computed from the
+               %% wrong segment. (Identical when Seg == StartSeg, the
+               %% usual single-segment read.)
+               [ {MsgId, reconstruct_seq_id(Seg, RelSeq), MsgProps,
+                  IsPersistent, IsDelivered == del} | Acc ];
+           (_RelSeq, _Value, Acc) ->
+               Acc
+       end, Messages, Segment),
+     segment_store(Segment, Segments)}.
+
+%% Fold Fun (right-to-left, i.e. descending RelSeq) over a segment's
+%% entries as they would appear once the pending journal entries have
+%% been applied to what is on disk.
+segment_entries_foldr(Fun, Init,
+                      Segment = #segment { journal_entries = JEntries }) ->
+    {OnDiskEntries, _UnackedCount} = load_segment(false, Segment),
+    {CombinedEntries, _Delta} =
+        segment_plus_journal(OnDiskEntries, JEntries),
+    array:sparse_foldr(Fun, Init, CombinedEntries).
+
+%% Read a whole segment file into an entries array together with its
+%% unacked message count. KeepAcked=true retains fully-acked entries
+%% (needed for duplicate removal during recovery). Does not do any
+%% combining with the journal at all.
+load_segment(KeepAcked, #segment { path = Path }) ->
+    Empty = {array_new(), 0},
+    case rabbit_file:is_file(Path) of
+        false ->
+            Empty;
+        true ->
+            {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []),
+            {ok, 0} = file_handle_cache:position(Hdl, bof),
+            Res = case file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE) of
+                      {ok, SegData} ->
+                          load_segment_entries(KeepAcked, SegData, Empty);
+                      eof ->
+                          Empty
+                  end,
+            ok = file_handle_cache:close(Hdl),
+            Res
+    end.
+
+%% Parse raw segment-file bytes into the entries array. First clause:
+%% a publish record. Second: a rel-seq-only record — a deliver if the
+%% entry is currently undelivered, otherwise an ack. Third: stop on
+%% anything else (end of data, or bytes matching neither prefix).
+load_segment_entries(KeepAcked,
+ <<?PUB_PREFIX:?PUB_PREFIX_BITS,
+ IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ PubRecordBody:?PUB_RECORD_BODY_BYTES/binary,
+ SegData/binary>>,
+ {SegEntries, UnackedCount}) ->
+ {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody),
+ Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack},
+ SegEntries1 = array:set(RelSeq, Obj, SegEntries),
+ load_segment_entries(KeepAcked, SegData, {SegEntries1, UnackedCount + 1});
+load_segment_entries(KeepAcked,
+ <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, SegData/binary>>,
+ {SegEntries, UnackedCount}) ->
+ {UnackedCountDelta, SegEntries1} =
+ case array:get(RelSeq, SegEntries) of
+ %% first rel-seq-only record for this entry: a deliver
+ {Pub, no_del, no_ack} ->
+ { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)};
+ %% second: an ack; retain the entry if KeepAcked...
+ {Pub, del, no_ack} when KeepAcked ->
+ {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)};
+ %% ...otherwise drop the fully-acked entry
+ {_Pub, del, no_ack} ->
+ {-1, array:reset(RelSeq, SegEntries)}
+ end,
+ load_segment_entries(KeepAcked, SegData,
+ {SegEntries1, UnackedCount + UnackedCountDelta});
+load_segment_entries(_KeepAcked, _SegData, Res) ->
+ Res.
+
+%% Entry arrays are fixed-size — one slot per possible RelSeq in a
+%% segment — with 'undefined' marking absent entries.
+array_new() ->
+ array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
+
+%% Encode a boolean as the single bit used in publish records.
+bool_to_int(true ) -> 1;
+bool_to_int(false) -> 0.
+
+%%----------------------------------------------------------------------------
+%% journal & segment combination
+%%----------------------------------------------------------------------------
+
+%% Combine what we have just read from a segment file with what we're
+%% holding for that segment in memory. There must be no duplicates.
+%% Returns the combined entries plus the number of additional unacked
+%% messages the journal contributes.
+segment_plus_journal(SegEntries, JEntries) ->
+    Combine =
+        fun (RelSeq, JObj, {SegEntriesOut, AdditionalUnacked}) ->
+                SegEntry = array:get(RelSeq, SegEntriesOut),
+                {Obj, Delta} = segment_plus_journal1(SegEntry, JObj),
+                SegEntriesOut1 =
+                    case Obj of
+                        undefined -> array:reset(RelSeq, SegEntriesOut);
+                        _         -> array:set(RelSeq, Obj, SegEntriesOut)
+                    end,
+                {SegEntriesOut1, AdditionalUnacked + Delta}
+        end,
+    array:sparse_foldl(Combine, {SegEntries, 0}, JEntries).
+
+%% Here, the result is a tuple with the first element containing the
+%% item which we may be adding to (for items only in the journal),
+%% modifying in (bits in both), or, when returning 'undefined',
+%% erasing from (ack in journal, not segment) the segment array. The
+%% other element of the tuple is the delta for AdditionalUnacked.
+
+%% Journal-only entries: an unacked publish adds one unacked message;
+%% a fully acked publish cancels out and leaves no entry at all.
+segment_plus_journal1(undefined, {?PUB, no_del, no_ack} = Obj) ->
+ {Obj, 1};
+segment_plus_journal1(undefined, {?PUB, del, no_ack} = Obj) ->
+ {Obj, 1};
+segment_plus_journal1(undefined, {?PUB, del, ack}) ->
+ {undefined, 0};
+
+%% The segment holds the publish; the journal contributes the deliver
+%% and/or ack bits. An entry completed by an ack is erased.
+segment_plus_journal1({?PUB = Pub, no_del, no_ack}, {no_pub, del, no_ack}) ->
+ {{Pub, del, no_ack}, 0};
+segment_plus_journal1({?PUB, no_del, no_ack}, {no_pub, del, ack}) ->
+ {undefined, -1};
+segment_plus_journal1({?PUB, del, no_ack}, {no_pub, no_del, ack}) ->
+ {undefined, -1}.
+
+%% Remove from the journal entries for a segment, items that are
+%% duplicates of entries found in the segment itself. Used on start up
+%% to clean up the journal. Returns {CleanedJournal, UnackedRemoved}.
+journal_minus_segment(JEntries, SegEntries) ->
+ Strip =
+ fun (RelSeq, JObj, {JAcc, UnackedRemoved}) ->
+ {Action, Delta} =
+ journal_minus_segment1(JObj, array:get(RelSeq, SegEntries)),
+ JAcc1 = case Action of
+ keep -> JAcc;
+ undefined -> array:reset(RelSeq, JAcc);
+ Obj -> array:set(RelSeq, Obj, JAcc)
+ end,
+ {JAcc1, UnackedRemoved + Delta}
+ end,
+ array:sparse_foldl(Strip, {JEntries, 0}, JEntries).
+
+%% Here, the result is a tuple with the first element containing the
+%% item we are adding to or modifying in the (initially fresh) journal
+%% array. If the item is 'undefined' we leave the journal array
+%% alone ('keep' leaves the existing journal entry untouched). The
+%% other element of the tuple is the delta for UnackedRemoved.
+
+%% Both the same. Must be at least the publish
+journal_minus_segment1({?PUB, _Del, no_ack} = Obj, Obj) ->
+ {undefined, 1};
+journal_minus_segment1({?PUB, _Del, ack} = Obj, Obj) ->
+ {undefined, 0};
+
+%% Just publish in journal
+journal_minus_segment1({?PUB, no_del, no_ack}, undefined) ->
+ {keep, 0};
+
+%% Publish and deliver in journal
+journal_minus_segment1({?PUB, del, no_ack}, undefined) ->
+ {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, no_ack}, {Pub, no_del, no_ack}) ->
+ {{no_pub, del, no_ack}, 1};
+
+%% Publish, deliver and ack in journal
+journal_minus_segment1({?PUB, del, ack}, undefined) ->
+ {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, no_del, no_ack}) ->
+ {{no_pub, del, ack}, 1};
+journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, del, no_ack}) ->
+ {{no_pub, no_del, ack}, 1};
+
+%% Just deliver in journal
+journal_minus_segment1({no_pub, del, no_ack}, {?PUB, no_del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, del, no_ack}, {?PUB, del, no_ack}) ->
+ {undefined, 0};
+
+%% Just ack in journal
+journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, ack}) ->
+ {undefined, -1};
+
+%% Deliver and ack in journal
+journal_minus_segment1({no_pub, del, ack}, {?PUB, no_del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, del, ack}, {?PUB, del, no_ack}) ->
+ {{no_pub, no_del, ack}, 0};
+journal_minus_segment1({no_pub, del, ack}, {?PUB, del, ack}) ->
+ {undefined, -1};
+
+%% Missing segment. If flush_journal/1 is interrupted after deleting
+%% the segment but before truncating the journal we can get these
+%% cases: a delivery and an acknowledgement in the journal, or just an
+%% acknowledgement in the journal, but with no segment. In both cases
+%% we have really forgotten the message; so ignore what's in the
+%% journal.
+journal_minus_segment1({no_pub, no_del, ack}, undefined) ->
+ {undefined, 0};
+journal_minus_segment1({no_pub, del, ack}, undefined) ->
+ {undefined, 0}.
+
+%%----------------------------------------------------------------------------
+%% upgrade
+%%----------------------------------------------------------------------------
+
+%% Upgrade step: rewrite every journal and segment file, appending an
+%% (absent, i.e. 'undefined') expiry field to each publish record.
+add_queue_ttl() ->
+ foreach_queue_index({fun add_queue_ttl_journal/1,
+ fun add_queue_ttl_segment/1}).
+
+%% Deliver and ack journal records carry no message body and pass
+%% through unchanged; publish records gain an expiry field.
+add_queue_ttl_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BYTES/binary, Rest/binary>>) ->
+ {[<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, MsgId,
+ expiry_to_binary(undefined)], Rest};
+%% Unparseable tail (e.g. padding): stop the rewrite here.
+add_queue_ttl_journal(_) ->
+ stop.
+
+%% Segment publish records gain an expiry field; rel-seq-only records
+%% pass through unchanged.
+add_queue_ttl_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BYTES/binary,
+ Rest/binary>>) ->
+ {[<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>,
+ MsgId, expiry_to_binary(undefined)], Rest};
+add_queue_ttl_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+add_queue_ttl_segment(_) ->
+ stop.
+
+%% Upgrade step: rewrite segment files only (journal untouched),
+%% replacing records whose prefix bits are all zero with the proper
+%% ?REL_SEQ_ONLY_PREFIX encoding.
+avoid_zeroes() ->
+ foreach_queue_index({none, fun avoid_zeroes_segment/1}).
+
+%% Publish records pass through unchanged.
+avoid_zeroes_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS>>, Rest};
+%% A zero prefix is re-emitted with the canonical rel-seq-only prefix.
+avoid_zeroes_segment(<<0:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+avoid_zeroes_segment(_) ->
+ stop.
+
+%%----------------------------------------------------------------------------
+
+%% Apply a {JournalFun, SegmentFun} transformation pair to every queue
+%% directory, fanning the work out over the worker pool and using a
+%% gatherer to wait for all of it to complete.
+foreach_queue_index(Funs) ->
+ QueuesDir = queues_dir(),
+ QueueDirNames = all_queue_directory_names(QueuesDir),
+ {ok, Gatherer} = gatherer:start_link(),
+ lists:foreach(
+ fun (QueueDirName) ->
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ transform_queue(filename:join(QueuesDir, QueueDirName),
+ Gatherer, Funs)
+ end)
+ end, QueueDirNames),
+ empty = gatherer:out(Gatherer),
+ unlink(Gatherer),
+ ok = gatherer:stop(Gatherer).
+
+%% Transform one queue directory: the journal file first, then every
+%% segment file, signalling the gatherer when finished.
+transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) ->
+ ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun),
+ Segments = rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir),
+ lists:foreach(
+ fun (Seg) ->
+ ok = transform_file(filename:join(Dir, Seg), SegmentFun)
+ end, Segments),
+ ok = gatherer:finish(Gatherer).
+
+%% Rewrite Path by feeding its entire contents through Fun and
+%% renaming the transformed copy over the original. A 'none' Fun or an
+%% empty file is a no-op.
+transform_file(_Path, none) ->
+ ok;
+transform_file(Path, Fun) when is_function(Fun)->
+ PathTmp = Path ++ ".upgrade",
+ case rabbit_file:file_size(Path) of
+ 0 -> ok;
+ Size -> {ok, PathTmpHdl} =
+ file_handle_cache:open(PathTmp, ?WRITE_MODE,
+ [{write_buffer, infinity}]),
+
+ %% Slurp the whole source file before driving the
+ %% transform, so the rename below never races a reader.
+ {ok, PathHdl} = file_handle_cache:open(
+ Path, [{read_ahead, Size} | ?READ_MODE], []),
+ {ok, Content} = file_handle_cache:read(PathHdl, Size),
+ ok = file_handle_cache:close(PathHdl),
+
+ ok = drive_transform_fun(Fun, PathTmpHdl, Content),
+
+ ok = file_handle_cache:close(PathTmpHdl),
+ ok = rabbit_file:rename(PathTmp, Path)
+ end.
+
+%% Repeatedly apply Fun to the remaining input, appending each chunk
+%% of output it produces to Hdl, until Fun signals 'stop'.
+drive_transform_fun(Fun, Hdl, Remaining) ->
+ case Fun(Remaining) of
+ stop ->
+ ok;
+ {Output, Rest} ->
+ ok = file_handle_cache:append(Hdl, Output),
+ drive_transform_fun(Fun, Hdl, Rest)
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_reader).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([start_link/1, info_keys/0, info/1, info/2, force_event_refresh/2,
+ shutdown/2]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-export([init/2, mainloop/4, recvloop/4]).
+
+-export([conserve_resources/3, server_properties/1]).
+
+-define(HANDSHAKE_TIMEOUT, 10).
+-define(NORMAL_TIMEOUT, 3).
+-define(CLOSING_TIMEOUT, 30).
+-define(CHANNEL_TERMINATION_TIMEOUT, 3).
+-define(SILENT_CLOSE_DELAY, 3).
+-define(CHANNEL_MIN, 1).
+
+%%--------------------------------------------------------------------------
+
+-record(v1, {parent, sock, connection, callback, recv_len, pending_recv,
+ connection_state, helper_sup, queue_collector, heartbeater,
+ stats_timer, channel_sup_sup_pid, channel_count, throttle}).
+
+-record(connection, {name, host, peer_host, port, peer_port,
+ protocol, user, timeout_sec, frame_max, channel_max, vhost,
+ client_properties, capabilities,
+ auth_mechanism, auth_state}).
+
+-record(throttle, {alarmed_by, last_blocked_by, last_blocked_at}).
+
+-define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt,
+ send_pend, state, channels]).
+
+-define(CREATION_EVENT_KEYS,
+ [pid, name, port, peer_port, host,
+ peer_host, ssl, peer_cert_subject, peer_cert_issuer,
+ peer_cert_validity, auth_mechanism, ssl_protocol,
+ ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
+ timeout, frame_max, channel_max, client_properties]).
+
+-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
+
+-define(IS_RUNNING(State),
+ (State#v1.connection_state =:= running orelse
+ State#v1.connection_state =:= blocking orelse
+ State#v1.connection_state =:= blocked)).
+
+-define(IS_STOPPING(State),
+ (State#v1.connection_state =:= closing orelse
+ State#v1.connection_state =:= closed)).
+
+%%--------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 :: (pid()) -> rabbit_types:ok(pid())).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/1 :: (pid()) -> rabbit_types:infos()).
+-spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
+-spec(force_event_refresh/2 :: (pid(), reference()) -> 'ok').
+-spec(shutdown/2 :: (pid(), string()) -> 'ok').
+-spec(conserve_resources/3 :: (pid(), atom(), boolean()) -> 'ok').
+-spec(server_properties/1 :: (rabbit_types:protocol()) ->
+ rabbit_framing:amqp_table()).
+
+%% These specs only exist to add no_return() to keep dialyzer happy
+-spec(init/2 :: (pid(), pid()) -> no_return()).
+-spec(start_connection/5 ::
+ (pid(), pid(), any(), rabbit_net:socket(),
+ fun ((rabbit_net:socket()) ->
+ rabbit_types:ok_or_error2(
+ rabbit_net:socket(), any()))) -> no_return()).
+
+-spec(mainloop/4 :: (_,[binary()], non_neg_integer(), #v1{}) -> any()).
+-spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
+-spec(system_continue/3 :: (_,_,{[binary()], non_neg_integer(), #v1{}}) ->
+ any()).
+-spec(system_terminate/4 :: (_,_,_,_) -> none()).
+
+-endif.
+
+%%--------------------------------------------------------------------------
+
+%% Spawn a new reader process; it waits in init/2 for a {go, ...}
+%% handover message before touching the socket.
+start_link(HelperSup) ->
+ {ok, proc_lib:spawn_link(?MODULE, init, [self(), HelperSup])}.
+
+%% Ask a reader to close its connection, giving Explanation to the
+%% client; blocks until the reader has replied.
+shutdown(Pid, Explanation) ->
+ gen_server:call(Pid, {shutdown, Explanation}, infinity).
+
+%% Process entry point: block until the acceptor hands us the socket,
+%% then run the connection (never returns normally).
+init(Parent, HelperSup) ->
+ Deb = sys:debug_options([]),
+ receive
+ {go, Sock, SockTransform} ->
+ start_connection(Parent, HelperSup, Deb, Sock, SockTransform)
+ end.
+
+%% sys callbacks: this process speaks the special-process protocol
+%% (see OTP sys/proc_lib docs) rather than being a gen_server.
+system_continue(Parent, Deb, {Buf, BufLen, State}) ->
+ mainloop(Deb, Buf, BufLen, State#v1{parent = Parent}).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+%% All info item keys this reader can report.
+info_keys() -> ?INFO_KEYS.
+
+%% Fetch all info items from the reader (synchronous).
+info(Pid) ->
+ gen_server:call(Pid, info, infinity).
+
+%% Fetch the given info Items; an unknown item is thrown back to the
+%% caller as an exception.
+info(Pid, Items) ->
+ case gen_server:call(Pid, {info, Items}, infinity) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end.
+
+%% Ask the reader to re-emit its connection_created event tagged with
+%% Ref (fire-and-forget).
+force_event_refresh(Pid, Ref) ->
+ gen_server:cast(Pid, {force_event_refresh, Ref}).
+
+%% Notify the reader that resource Source went into/out of an alarmed
+%% state; sent as a plain message so it works from alarm handlers.
+conserve_resources(Pid, Source, Conserve) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+%% Build the server-properties table announced to clients during the
+%% AMQP handshake: built-in properties merged with any configured
+%% under the 'server_properties' application env key.
+server_properties(Protocol) ->
+ {ok, Product} = application:get_key(rabbit, id),
+ {ok, Version} = application:get_key(rabbit, vsn),
+
+ %% Get any configuration-specified server properties
+ {ok, RawConfigServerProps} = application:get_env(rabbit,
+ server_properties),
+
+ %% Normalize the simplified (2-tuple) and unsimplified (3-tuple) forms
+ %% from the config and merge them with the generated built-in properties
+ NormalizedConfigServerProps =
+ [{<<"capabilities">>, table, server_capabilities(Protocol)} |
+ [case X of
+ {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)),
+ longstr,
+ maybe_list_to_binary(Value)};
+ {BinKey, Type, Value} -> {BinKey, Type, Value}
+ end || X <- RawConfigServerProps ++
+ [{product, Product},
+ {version, Version},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {platform, "Erlang/OTP"},
+ {copyright, ?COPYRIGHT_MESSAGE},
+ {information, ?INFORMATION_MESSAGE}]]],
+
+ %% Filter duplicated properties in favour of config file provided
+ %% values: config entries come first, and usort/2 keeps the first of
+ %% any key-equal pair.
+ lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end,
+ NormalizedConfigServerProps).
+
+%% Accept either a binary or a string (byte list); always return a
+%% binary.
+maybe_list_to_binary(Value) when is_list(Value) -> list_to_binary(Value);
+maybe_list_to_binary(Value) when is_binary(Value) -> Value.
+
+%% The capabilities table advertised in server-properties; only the
+%% 0-9-1 protocol gets any capabilities.
+server_capabilities(rabbit_framing_amqp_0_9_1) ->
+ [{<<"publisher_confirms">>, bool, true},
+ {<<"exchange_exchange_bindings">>, bool, true},
+ {<<"basic.nack">>, bool, true},
+ {<<"consumer_cancel_notify">>, bool, true},
+ {<<"connection.blocked">>, bool, true},
+ {<<"consumer_priorities">>, bool, true},
+ {<<"authentication_failure_close">>, bool, true},
+ {<<"per_consumer_qos">>, bool, true}];
+server_capabilities(_) ->
+ [].
+
+%%--------------------------------------------------------------------------
+
+%% All reader logging goes to the 'connection' log category.
+log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
+
+%% Log a socket-level failure; inet error atoms get a human-readable
+%% rendering, anything else is logged verbatim.
+socket_error(Reason) when is_atom(Reason) ->
+ log(error, "error on AMQP connection ~p: ~s~n",
+ [self(), rabbit_misc:format_inet_error(Reason)]);
+socket_error(Reason) ->
+ log(error, "error on AMQP connection ~p:~n~p~n", [self(), Reason]).
+
+%% Run F, converting an {error, _} return into a thrown inet_error.
+inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
+
+%% Run Fun against Sock; on error, log it, close the socket and exit
+%% quietly (the client is gone, nothing further to do).
+socket_op(Sock, Fun) ->
+ case Fun(Sock) of
+ {ok, Res} -> Res;
+ {error, Reason} -> socket_error(Reason),
+ %% NB: this is the TCP socket, even in the SSL case
+ rabbit_net:fast_close(Sock),
+ exit(normal)
+ end.
+
+%% Main body of the reader process: set up connection state, run the
+%% receive loop until the connection ends (normally or otherwise), and
+%% always close the socket and emit the closed event on the way out.
+start_connection(Parent, HelperSup, Deb, Sock, SockTransform) ->
+ process_flag(trap_exit, true),
+ Name = case rabbit_net:connection_string(Sock, inbound) of
+ {ok, Str} -> Str;
+ %% client disconnected before we could look at the
+ %% socket; nothing to log or report
+ {error, enotconn} -> rabbit_net:fast_close(Sock),
+ exit(normal);
+ {error, Reason} -> socket_error(Reason),
+ rabbit_net:fast_close(Sock),
+ exit(normal)
+ end,
+ log(info, "accepting AMQP connection ~p (~s)~n", [self(), Name]),
+ %% SockTransform e.g. upgrades the raw socket to SSL
+ ClientSock = socket_op(Sock, SockTransform),
+ erlang:send_after(?HANDSHAKE_TIMEOUT * 1000, self(), handshake_timeout),
+ {PeerHost, PeerPort, Host, Port} =
+ socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
+ ?store_proc_name(list_to_binary(Name)),
+ State = #v1{parent = Parent,
+ sock = ClientSock,
+ connection = #connection{
+ name = list_to_binary(Name),
+ host = Host,
+ peer_host = PeerHost,
+ port = Port,
+ peer_port = PeerPort,
+ protocol = none,
+ user = none,
+ timeout_sec = ?HANDSHAKE_TIMEOUT,
+ frame_max = ?FRAME_MIN_SIZE,
+ vhost = none,
+ client_properties = none,
+ capabilities = [],
+ auth_mechanism = none,
+ auth_state = none},
+ callback = uninitialized_callback,
+ recv_len = 0,
+ pending_recv = false,
+ connection_state = pre_init,
+ queue_collector = undefined, %% started on tune-ok
+ helper_sup = HelperSup,
+ heartbeater = none,
+ channel_sup_sup_pid = none,
+ channel_count = 0,
+ throttle = #throttle{
+ alarmed_by = [],
+ last_blocked_by = none,
+ last_blocked_at = never}},
+ try
+ %% expect the 8-byte protocol header first
+ run({?MODULE, recvloop,
+ [Deb, [], 0, switch_callback(rabbit_event:init_stats_timer(
+ State, #v1.stats_timer),
+ handshake, 8)]}),
+ log(info, "closing AMQP connection ~p (~s)~n", [self(), Name])
+ catch
+ Ex -> log(case Ex of
+ connection_closed_abruptly -> warning;
+ _ -> error
+ end, "closing AMQP connection ~p (~s):~n~p~n",
+ [self(), Name, Ex])
+ after
+ %% We don't call gen_tcp:close/1 here since it waits for
+ %% pending output to be sent, which results in unnecessary
+ %% delays. We could just terminate - the reader is the
+ %% controlling process and hence its termination will close
+ %% the socket. However, to keep the file_handle_cache
+ %% accounting as accurate as possible we ought to close the
+ %% socket w/o delay before termination.
+ rabbit_net:fast_close(ClientSock),
+ rabbit_networking:unregister_connection(self()),
+ rabbit_event:notify(connection_closed, [{pid, self()}])
+ end,
+ done.
+
+%% Run an MFA, restarting with a new MFA whenever it throws
+%% {become, MFA} (used to hand the socket over to another protocol
+%% implementation, e.g. a plugin).
+run({M, F, A}) ->
+ try apply(M, F, A)
+ catch {become, MFA} -> run(MFA)
+ end.
+
+%% Consume buffered socket data. Buf is a reversed list of binaries
+%% totalling BufLen bytes; recv_len is how many bytes the current
+%% callback needs before it can be invoked.
+%%
+%% Defer to mainloop while a receive is outstanding or we are blocked.
+recvloop(Deb, Buf, BufLen, State = #v1{pending_recv = true}) ->
+ mainloop(Deb, Buf, BufLen, State);
+recvloop(Deb, Buf, BufLen, State = #v1{connection_state = blocked}) ->
+ mainloop(Deb, Buf, BufLen, State);
+recvloop(Deb, Buf, BufLen, State = #v1{connection_state = {become, F}}) ->
+ throw({become, F(Deb, Buf, BufLen, State)});
+%% Not enough buffered yet: request more from the socket.
+recvloop(Deb, Buf, BufLen, State = #v1{sock = Sock, recv_len = RecvLen})
+ when BufLen < RecvLen ->
+ case rabbit_net:setopts(Sock, [{active, once}]) of
+ ok -> mainloop(Deb, Buf, BufLen,
+ State#v1{pending_recv = true});
+ {error, Reason} -> stop(Reason, State)
+ end;
+%% Fast path: a single buffered binary can be handed over whole.
+recvloop(Deb, [B], _BufLen, State) ->
+ {Rest, State1} = handle_input(State#v1.callback, B, State),
+ recvloop(Deb, [Rest], size(Rest), State1);
+%% General path: peel RecvLen bytes off the (reversed) buffer list.
+recvloop(Deb, Buf, BufLen, State = #v1{recv_len = RecvLen}) ->
+ {DataLRev, RestLRev} = binlist_split(BufLen - RecvLen, Buf, []),
+ Data = list_to_binary(lists:reverse(DataLRev)),
+ {<<>>, State1} = handle_input(State#v1.callback, Data, State),
+ recvloop(Deb, lists:reverse(RestLRev), BufLen - RecvLen, State1).
+
+%% Walk the binary list, moving whole binaries onto Acc until Len
+%% bytes have been consumed; if the boundary falls inside a binary,
+%% split it between the two output lists.
+binlist_split(0, Bins, Acc) ->
+ {Bins, Acc};
+binlist_split(Len, Bins, [Taken | Acc]) when Len < 0 ->
+ {Front, Back} = split_binary(Taken, -Len),
+ {[Front | Bins], [Back | Acc]};
+binlist_split(Len, [Bin | Rest], Acc) ->
+ binlist_split(Len - size(Bin), Rest, [Bin | Acc]).
+
+%% Wait for the next socket event or Erlang message and dispatch it,
+%% then return to recvloop to consume any newly buffered data.
+mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock}) ->
+ case rabbit_net:recv(Sock) of
+ {data, Data} ->
+ recvloop(Deb, [Data | Buf], BufLen + size(Data),
+ State#v1{pending_recv = false});
+ %% socket closed after we sent close_ok: a clean shutdown
+ closed when State#v1.connection_state =:= closed ->
+ ok;
+ closed ->
+ stop(closed, State);
+ {error, Reason} ->
+ stop(Reason, State);
+ %% sys messages (suspend/resume/code change) per special-process
+ %% protocol
+ {other, {system, From, Request}} ->
+ sys:handle_system_msg(Request, From, State#v1.parent,
+ ?MODULE, Deb, {Buf, BufLen, State});
+ {other, Other} ->
+ case handle_other(Other, State) of
+ stop -> ok;
+ NewState -> recvloop(Deb, Buf, BufLen, NewState)
+ end
+ end.
+
+%% Abort the connection, emitting final stats first.
+stop(closed, State) -> maybe_emit_stats(State),
+ throw(connection_closed_abruptly);
+stop(Reason, State) -> maybe_emit_stats(State),
+ throw({inet_error, Reason}).
+
+%% Dispatch a non-socket message. Returns the new state, or 'stop' to
+%% end the connection; throws/exits abort it.
+
+%% Resource alarm raised or cleared: update the alarm set, recompute
+%% throttling, and tell the client if its blocked status changed.
+handle_other({conserve_resources, Source, Conserve},
+ State = #v1{throttle = Throttle = #throttle{alarmed_by = CR}}) ->
+ CR1 = case Conserve of
+ true -> lists:usort([Source | CR]);
+ false -> CR -- [Source]
+ end,
+ State1 = control_throttle(
+ State#v1{throttle = Throttle#throttle{alarmed_by = CR1}}),
+ case {blocked_by_alarm(State), blocked_by_alarm(State1)} of
+ {false, true} -> ok = send_blocked(State1);
+ {true, false} -> ok = send_unblocked(State1);
+ {_, _} -> ok
+ end,
+ State1;
+handle_other({channel_closing, ChPid}, State) ->
+ ok = rabbit_channel:ready_for_close(ChPid),
+ {_, State1} = channel_cleanup(ChPid, State),
+ maybe_close(control_throttle(State1));
+handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
+ terminate(io_lib:format("broker forced connection closure "
+ "with reason '~w'", [Reason]), State),
+ %% this is what we are expected to do according to
+ %% http://www.erlang.org/doc/man/sys.html
+ %%
+ %% If we wanted to be *really* nice we should wait for a while for
+ %% clients to close the socket at their end, just as we do in the
+ %% ordinary error case. However, since this termination is
+ %% initiated by our parent it is probably more important to exit
+ %% quickly.
+ maybe_emit_stats(State),
+ exit(Reason);
+%% A writer failure is fatal to the whole connection.
+handle_other({channel_exit, _Channel, E = {writer, send_failed, _E}}, State) ->
+ maybe_emit_stats(State),
+ throw(E);
+handle_other({channel_exit, Channel, Reason}, State) ->
+ handle_exception(State, Channel, Reason);
+handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
+ handle_dependent_exit(ChPid, Reason, State);
+handle_other(terminate_connection, State) ->
+ maybe_emit_stats(State),
+ stop;
+%% The handshake timer always fires; ignore it if we got past the
+%% handshake in time.
+handle_other(handshake_timeout, State)
+ when ?IS_RUNNING(State) orelse ?IS_STOPPING(State) ->
+ State;
+handle_other(handshake_timeout, State) ->
+ maybe_emit_stats(State),
+ throw({handshake_timeout, State#v1.callback});
+handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
+ State;
+handle_other(heartbeat_timeout, State = #v1{connection_state = S}) ->
+ maybe_emit_stats(State),
+ throw({heartbeat_timeout, S});
+%% gen_server-style calls/casts arrive as raw messages since we are a
+%% special process; reply manually via gen_server:reply/2.
+handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
+ {ForceTermination, NewState} = terminate(Explanation, State),
+ gen_server:reply(From, ok),
+ case ForceTermination of
+ force -> stop;
+ normal -> NewState
+ end;
+handle_other({'$gen_call', From, info}, State) ->
+ gen_server:reply(From, infos(?INFO_KEYS, State)),
+ State;
+handle_other({'$gen_call', From, {info, Items}}, State) ->
+ gen_server:reply(From, try {ok, infos(Items, State)}
+ catch Error -> {error, Error}
+ end),
+ State;
+handle_other({'$gen_cast', {force_event_refresh, Ref}}, State)
+ when ?IS_RUNNING(State) ->
+ rabbit_event:notify(
+ connection_created,
+ [{type, network} | infos(?CREATION_EVENT_KEYS, State)], Ref),
+ State;
+handle_other({'$gen_cast', force_event_refresh}, State) ->
+ %% Ignore, we will emit a created event once we start running.
+ State;
+handle_other(ensure_stats, State) ->
+ ensure_stats_timer(State);
+handle_other(emit_stats, State) ->
+ emit_stats(State);
+handle_other({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ control_throttle(State);
+handle_other(Other, State) ->
+ %% internal error -> something worth dying for
+ maybe_emit_stats(State),
+ exit({unexpected_message, Other}).
+
+%% Arm the next parsing callback and the number of bytes it needs.
+switch_callback(State, Callback, Length) ->
+ State#v1{callback = Callback, recv_len = Length}.
+
+%% Start an orderly close with Explanation if the connection is up;
+%% otherwise tell the caller to force-terminate.
+terminate(Explanation, State) when ?IS_RUNNING(State) ->
+ {normal, handle_exception(State, 0,
+ rabbit_misc:amqp_error(
+ connection_forced, Explanation, [], none))};
+terminate(_Explanation, State) ->
+ {force, State}.
+
+%% Move the connection between running/blocking/blocked according to
+%% whether any resource alarm or credit-flow backpressure is active.
+%% Note: blocking -> blocked is performed elsewhere (maybe_block).
+control_throttle(State = #v1{connection_state = CS, throttle = Throttle}) ->
+ IsThrottled = ((Throttle#throttle.alarmed_by =/= []) orelse
+ credit_flow:blocked()),
+ case {CS, IsThrottled} of
+ {running, true} -> State#v1{connection_state = blocking};
+ {blocking, false} -> State#v1{connection_state = running};
+ {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
+ State#v1.heartbeater),
+ State#v1{connection_state = running};
+ {blocked, true} -> State#v1{throttle = update_last_blocked_by(
+ Throttle)};
+ {_, _} -> State
+ end.
+
+%% Complete the blocking -> blocked transition: pause heartbeat
+%% monitoring (a blocked client may legitimately go quiet) and tell
+%% the client if an alarm is the cause.
+%% NOTE(review): erlang:now() is deprecated in modern OTP; presumably
+%% only used here as a timestamp for reporting -- confirm before
+%% swapping in os:timestamp/0 or erlang:monotonic_time/1.
+maybe_block(State = #v1{connection_state = blocking,
+ throttle = Throttle}) ->
+ ok = rabbit_heartbeat:pause_monitor(State#v1.heartbeater),
+ State1 = State#v1{connection_state = blocked,
+ throttle = update_last_blocked_by(
+ Throttle#throttle{
+ last_blocked_at = erlang:now()})},
+ case {blocked_by_alarm(State), blocked_by_alarm(State1)} of
+ {false, true} -> ok = send_blocked(State1);
+ {_, _} -> ok
+ end,
+ State1;
+maybe_block(State) ->
+ State.
+
+
+%% True iff the connection is blocked *because of* a resource alarm
+%% (as opposed to credit flow).
+blocked_by_alarm(#v1{connection_state = blocked,
+ throttle = #throttle{alarmed_by = CR}})
+ when CR =/= [] ->
+ true;
+blocked_by_alarm(#v1{}) ->
+ false.
+
+%% Send connection.blocked to the client, naming the alarmed
+%% resources, but only if the client declared the connection.blocked
+%% capability.
+send_blocked(#v1{throttle = #throttle{alarmed_by = CR},
+ connection = #connection{protocol = Protocol,
+ capabilities = Capabilities},
+ sock = Sock}) ->
+ case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
+ {bool, true} ->
+ RStr = string:join([atom_to_list(A) || A <- CR], " & "),
+ Reason = list_to_binary(rabbit_misc:format("low on ~s", [RStr])),
+ ok = send_on_channel0(Sock, #'connection.blocked'{reason = Reason},
+ Protocol);
+ _ ->
+ ok
+ end.
+
+%% Counterpart of send_blocked/1, gated on the same capability.
+send_unblocked(#v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities},
+ sock = Sock}) ->
+ case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
+ {bool, true} ->
+ ok = send_on_channel0(Sock, #'connection.unblocked'{}, Protocol);
+ _ ->
+ ok
+ end.
+
+%% Record what is currently holding the connection back: a resource
+%% alarm if any is active, otherwise credit flow.
+update_last_blocked_by(Throttle = #throttle{alarmed_by = AlarmedBy}) ->
+ Blocker = case AlarmedBy of
+ [] -> flow;
+ _ -> resource
+ end,
+ Throttle#throttle{last_blocked_by = Blocker}.
+
+%%--------------------------------------------------------------------------
+%% error handling / termination
+
+%% Enter the 'closed' state: synchronously delete this connection's
+%% exclusive queues, then schedule final termination.
+close_connection(State = #v1{queue_collector = Collector,
+ connection = #connection{
+ timeout_sec = TimeoutSec}}) ->
+ %% The spec says "Exclusive queues may only be accessed by the
+ %% current connection, and are deleted when that connection
+ %% closes." This does not strictly imply synchrony, but in
+ %% practice it seems to be what people assume.
+ rabbit_queue_collector:delete_all(Collector),
+ %% We terminate the connection after the specified interval, but
+ %% no later than ?CLOSING_TIMEOUT seconds.
+ erlang:send_after((if TimeoutSec > 0 andalso
+ TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
+ true -> ?CLOSING_TIMEOUT
+ end) * 1000, self(), terminate_connection),
+ State#v1{connection_state = closed}.
+
+%% A monitored process died. A known channel dying normally may allow
+%% the connection to finish closing; an abnormal channel death becomes
+%% a connection exception; an unknown process dying abnormally kills
+%% the reader outright.
+handle_dependent_exit(ChPid, Reason, State) ->
+ {Channel, State1} = channel_cleanup(ChPid, State),
+ case {Channel, termination_kind(Reason)} of
+ {undefined, controlled} -> State1;
+ {undefined, uncontrolled} -> exit({abnormal_dependent_exit,
+ ChPid, Reason});
+ {_, controlled} -> maybe_close(control_throttle(State1));
+ {_, uncontrolled} -> State2 = handle_exception(
+ State1, Channel, Reason),
+ maybe_close(control_throttle(State2))
+ end.
+
+%% Ask every channel to shut down and wait (bounded by a timeout
+%% proportional to the channel count) for them all to go.
+terminate_channels(#v1{channel_count = 0} = State) ->
+ State;
+terminate_channels(#v1{channel_count = ChannelCount} = State) ->
+ lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
+ Timeout = 1000 * ?CHANNEL_TERMINATION_TIMEOUT * ChannelCount,
+ TimerRef = erlang:send_after(Timeout, self(), cancel_wait),
+ wait_for_channel_termination(ChannelCount, TimerRef, State).
+
+%% Wait for N channel 'DOWN's. When done, cancel the timer -- and if
+%% it already fired, drain the cancel_wait message so it cannot leak
+%% into a later receive. Hitting the timeout aborts the reader.
+wait_for_channel_termination(0, TimerRef, State) ->
+ case erlang:cancel_timer(TimerRef) of
+ false -> receive
+ cancel_wait -> State
+ end;
+ _ -> State
+ end;
+wait_for_channel_termination(N, TimerRef, State) ->
+ receive
+ {'DOWN', _MRef, process, ChPid, Reason} ->
+ {Channel, State1} = channel_cleanup(ChPid, State),
+ case {Channel, termination_kind(Reason)} of
+ {undefined, _} -> exit({abnormal_dependent_exit,
+ ChPid, Reason});
+ {_, controlled} -> wait_for_channel_termination(
+ N-1, TimerRef, State1);
+ {_, uncontrolled} -> log(error,
+ "AMQP connection ~p, channel ~p - "
+ "error while terminating:~n~p~n",
+ [self(), Channel, Reason]),
+ wait_for_channel_termination(
+ N-1, TimerRef, State1)
+ end;
+ cancel_wait ->
+ exit(channel_termination_timeout)
+ end.
+
+%% Once we are closing and the last channel has gone, confirm the
+%% close to the client and enter the 'closed' state.
+maybe_close(State = #v1{connection_state = closing,
+ channel_count = 0,
+ connection = #connection{protocol = Protocol},
+ sock = Sock}) ->
+ NewState = close_connection(State),
+ ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
+ NewState;
+maybe_close(State) ->
+ State.
+
+%% Classify an exit reason: 'normal' is a controlled shutdown,
+%% anything else is uncontrolled.
+termination_kind(Reason) ->
+ case Reason of
+ normal -> controlled;
+ _ -> uncontrolled
+ end.
+
+%% Turn an AMQP error into protocol traffic. Already closed: just log.
+%% Running or closing: shut the channels, send connection.close and
+%% enter the closed state. During handshake: stall briefly then abort
+%% (we don't trust the client yet).
+handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
+ log(error, "AMQP connection ~p (~p), channel ~p - error:~n~p~n",
+ [self(), closed, Channel, Reason]),
+ State;
+handle_exception(State = #v1{connection = #connection{protocol = Protocol},
+ connection_state = CS},
+ Channel, Reason)
+ when ?IS_RUNNING(State) orelse CS =:= closing ->
+ log(error, "AMQP connection ~p (~p), channel ~p - error:~n~p~n",
+ [self(), CS, Channel, Reason]),
+ %% map_exception escalates channel errors to channel 0 as needed
+ {0, CloseMethod} =
+ rabbit_binary_generator:map_exception(Channel, Reason, Protocol),
+ State1 = close_connection(terminate_channels(State)),
+ ok = send_on_channel0(State1#v1.sock, CloseMethod, Protocol),
+ State1;
+handle_exception(State, Channel, Reason) ->
+ %% We don't trust the client at this point - force them to wait
+ %% for a bit so they can't DOS us with repeated failed logins etc.
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw({handshake_error, State#v1.connection_state, Channel, Reason}).
+
+%% we've "lost sync" with the client and hence must not accept any
+%% more input
+%% we've "lost sync" with the client and hence must not accept any
+%% more input
+fatal_frame_error(Error, Type, Channel, Payload, State) ->
+ frame_error(Error, Type, Channel, Payload, State),
+ %% grace period to allow transmission of error
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw(fatal_frame_error).
+
+%% Report a malformed frame, quoting (a snippet of) the offending
+%% payload in the error sent to the client.
+frame_error(Error, Type, Channel, Payload, State) ->
+ {Str, Bin} = payload_snippet(Payload),
+ handle_exception(State, Channel,
+ rabbit_misc:amqp_error(frame_error,
+ "type ~p, ~s octets = ~p: ~p",
+ [Type, Str, Bin, Error], none)).
+
+%% Report a well-formed frame that is not allowed in the current
+%% connection/channel state.
+unexpected_frame(Type, Channel, Payload, State) ->
+ {Str, Bin} = payload_snippet(Payload),
+ handle_exception(State, Channel,
+ rabbit_misc:amqp_error(unexpected_frame,
+ "type ~p, ~s octets = ~p",
+ [Type, Str, Bin], none)).
+
+%% Return a labelled snippet of a frame payload for error messages:
+%% the whole payload when it is at most 16 octets, otherwise just the
+%% first 16. Uses byte_size/1 (the binary-specific guard BIF) rather
+%% than the unspecific size/1.
+payload_snippet(Payload) when byte_size(Payload) =< 16 ->
+ {"all", Payload};
+payload_snippet(<<Snippet:16/binary, _/binary>>) ->
+ {"first 16", Snippet}.
+
+%%--------------------------------------------------------------------------
+
+%% Start a channel process for the given channel number, registering
+%% it in the process dictionary under {channel, N} and {ch_pid, Pid}.
+%% Refused when the negotiated channel_max has been reached.
+create_channel(_Channel,
+ #v1{channel_count = ChannelCount,
+ connection = #connection{channel_max = ChannelMax}})
+ when ChannelMax /= 0 andalso ChannelCount >= ChannelMax ->
+ {error, rabbit_misc:amqp_error(
+ not_allowed, "number of channels opened (~w) has reached the "
+ "negotiated channel_max (~w)",
+ [ChannelCount, ChannelMax], 'none')};
+create_channel(Channel,
+ #v1{sock = Sock,
+ queue_collector = Collector,
+ channel_sup_sup_pid = ChanSupSup,
+ channel_count = ChannelCount,
+ connection =
+ #connection{name = Name,
+ protocol = Protocol,
+ frame_max = FrameMax,
+ user = User,
+ vhost = VHost,
+ capabilities = Capabilities}} = State) ->
+ {ok, _ChSupPid, {ChPid, AState}} =
+ rabbit_channel_sup_sup:start_channel(
+ ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), Name,
+ Protocol, User, VHost, Capabilities, Collector}),
+ %% monitor (not link) so handle_dependent_exit sees the 'DOWN'
+ MRef = erlang:monitor(process, ChPid),
+ put({ch_pid, ChPid}, {Channel, MRef}),
+ put({channel, Channel}, {ChPid, AState}),
+ {ok, {ChPid, AState}, State#v1{channel_count = ChannelCount + 1}}.
+
+%% Forget a channel process: drop its process-dictionary entries,
+%% demonitor it (flushing any pending 'DOWN'), and decrement the
+%% count. Returns {ChannelNumber | undefined, NewState}.
+channel_cleanup(ChPid, State = #v1{channel_count = ChannelCount}) ->
+ case get({ch_pid, ChPid}) of
+ undefined -> {undefined, State};
+ {Channel, MRef} -> credit_flow:peer_down(ChPid),
+ erase({channel, Channel}),
+ erase({ch_pid, ChPid}),
+ erlang:demonitor(MRef, [flush]),
+ {Channel, State#v1{channel_count = ChannelCount - 1}}
+ end.
+
+%% All live channel pids, straight from the process dictionary.
+all_channels() -> [ChPid || {{ch_pid, ChPid}, _ChannelMRef} <- get()].
+
+%%--------------------------------------------------------------------------
+
+%% Dispatch one decoded frame. Channel 0 carries connection-level
+%% methods; while stopping, only channel-0 methods are still
+%% processed (non-method and non-zero-channel frames are dropped).
+handle_frame(Type, 0, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}})
+ when ?IS_STOPPING(State) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ {method, MethodName, FieldsBin} ->
+ handle_method0(MethodName, FieldsBin, State);
+ _Other -> State
+ end;
+handle_frame(Type, 0, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}}) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ error -> frame_error(unknown_frame, Type, 0, Payload, State);
+ heartbeat -> State;
+ {method, MethodName, FieldsBin} ->
+ handle_method0(MethodName, FieldsBin, State);
+ _Other -> unexpected_frame(Type, 0, Payload, State)
+ end;
+%% Non-zero channels are only valid once the connection is running;
+%% heartbeats must never appear on them.
+handle_frame(Type, Channel, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}})
+ when ?IS_RUNNING(State) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ error -> frame_error(unknown_frame, Type, Channel, Payload, State);
+ heartbeat -> unexpected_frame(Type, Channel, Payload, State);
+ Frame -> process_frame(Frame, Channel, State)
+ end;
+handle_frame(_Type, _Channel, _Payload, State) when ?IS_STOPPING(State) ->
+ State;
+handle_frame(Type, Channel, Payload, State) ->
+ unexpected_frame(Type, Channel, Payload, State).
+
+%% Feed a frame to the assembler state of its channel, creating the
+%% channel on first use.  Content-bearing methods go through
+%% rabbit_channel:do_flow/3 (credit flow) and trigger a throttle
+%% re-evaluation; plain methods go through rabbit_channel:do/2.  The
+%% updated assembler state is stored back in the process dictionary.
+process_frame(Frame, Channel, State) ->
+ ChKey = {channel, Channel},
+ case (case get(ChKey) of
+ undefined -> create_channel(Channel, State);
+ Other -> {ok, Other, State}
+ end) of
+ {error, Error} ->
+ handle_exception(State, Channel, Error);
+ {ok, {ChPid, AState}, State1} ->
+ case rabbit_command_assembler:process(Frame, AState) of
+ {ok, NewAState} ->
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, State1);
+ {ok, Method, NewAState} ->
+ rabbit_channel:do(ChPid, Method),
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, State1);
+ {ok, Method, Content, NewAState} ->
+ rabbit_channel:do_flow(ChPid, Method, Content),
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, control_throttle(State1));
+ {error, Reason} ->
+ handle_exception(State1, Channel, Reason)
+ end
+ end.
+
+%% Per-frame follow-up work: clean up channel state on
+%% channel.close_ok, and re-check blocking after content frames (which
+%% may have pushed us over a resource alarm / flow limit).
+post_process_frame({method, 'channel.close_ok', _}, ChPid, State) ->
+ {_, State1} = channel_cleanup(ChPid, State),
+ %% This is not strictly necessary, but more obviously
+ %% correct. Also note that we do not need to call maybe_close/1
+ %% since we cannot possibly be in the 'closing' state.
+ control_throttle(State1);
+post_process_frame({content_header, _, _, _, _}, _ChPid, State) ->
+ maybe_block(State);
+post_process_frame({content_body, _}, _ChPid, State) ->
+ maybe_block(State);
+post_process_frame(_Frame, _ChPid, State) ->
+ State.
+
+%%--------------------------------------------------------------------------
+
+%% We allow clients to exceed the frame size a little bit since quite
+%% a few get it wrong - off-by-one or off-by-8 (the empty frame size)
+%% are typical.
+-define(FRAME_SIZE_FUDGE, ?EMPTY_FRAME_SIZE).
+
+%% Parser driven by the current callback: either we are expecting a
+%% 7-byte frame header (possibly with the payload already buffered, in
+%% which case the whole frame is handled in one step), a frame payload
+%% of a known size plus its end marker, or the initial 8-byte AMQP
+%% protocol handshake.  Returns {RestOfBuffer, NewState} or raises via
+%% the *_error / refuse_connection helpers.
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, _/binary>>,
+ State = #v1{connection = #connection{frame_max = FrameMax}})
+ when FrameMax /= 0 andalso
+ PayloadSize > FrameMax - ?EMPTY_FRAME_SIZE + ?FRAME_SIZE_FUDGE ->
+ fatal_frame_error(
+ {frame_too_large, PayloadSize, FrameMax - ?EMPTY_FRAME_SIZE},
+ Type, Channel, <<>>, State);
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32,
+ Payload:PayloadSize/binary, ?FRAME_END,
+ Rest/binary>>,
+ State) ->
+ {Rest, ensure_stats_timer(handle_frame(Type, Channel, Payload, State))};
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, Rest/binary>>,
+ State) ->
+ {Rest, ensure_stats_timer(
+ switch_callback(State,
+ {frame_payload, Type, Channel, PayloadSize},
+ PayloadSize + 1))};
+handle_input({frame_payload, Type, Channel, PayloadSize}, Data, State) ->
+ <<Payload:PayloadSize/binary, EndMarker, Rest/binary>> = Data,
+ case EndMarker of
+ ?FRAME_END -> State1 = handle_frame(Type, Channel, Payload, State),
+ {Rest, switch_callback(State1, frame_header, 7)};
+ _ -> fatal_frame_error({invalid_frame_end_marker, EndMarker},
+ Type, Channel, Payload, State)
+ end;
+handle_input(handshake, <<"AMQP", A, B, C, D, Rest/binary>>, State) ->
+ {Rest, handshake({A, B, C, D}, State)};
+handle_input(handshake, <<Other:8/binary, _/binary>>, #v1{sock = Sock}) ->
+ refuse_connection(Sock, {bad_header, Other});
+handle_input(Callback, Data, _State) ->
+ throw({bad_input, Callback, Data}).
+
+%% The two rules pertaining to version negotiation:
+%%
+%% * If the server cannot support the protocol specified in the
+%% protocol header, it MUST respond with a valid protocol header and
+%% then close the socket connection.
+%%
+%% * The server MUST provide a protocol version that is lower than or
+%% equal to that requested by the client in the protocol header.
+%%
+%% The argument is the last four bytes of the 8-byte "AMQP...." header
+%% sent by the client.
+handshake({0, 0, 9, 1}, State) ->
+ start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is the protocol header for 0-9, which we can safely treat as
+%% though it were 0-9-1.
+handshake({1, 1, 0, 9}, State) ->
+ start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is what most clients send for 0-8. The 0-8 spec, confusingly,
+%% defines the version as 8-0.
+handshake({1, 1, 8, 0}, State) ->
+ start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% The 0-8 spec as on the AMQP web site actually has this as the
+%% protocol header; some libraries e.g., py-amqplib, send it when they
+%% want 0-8.
+handshake({1, 1, 9, 1}, State) ->
+ start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% ... and finally, the 1.0 spec is crystal clear!
+%% (Id distinguishes plain AMQP 1.0 from its SASL variant.)
+handshake({Id, 1, 0, 0}, State) ->
+ become_1_0(Id, State);
+
+handshake(Vsn, #v1{sock = Sock}) ->
+ refuse_connection(Sock, {bad_version, Vsn}).
+
+%% Offer a protocol version to the client. Connection.start only
+%% includes a major and minor version number. Luckily, 0-9 and 0-9-1
+%% are similar enough that clients will be happy with either.
+%%
+%% Registers this reader with rabbit_networking, sends
+%% connection.start on channel 0, and switches the parser to expect a
+%% frame header next.
+start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision},
+ Protocol,
+ State = #v1{sock = Sock, connection = Connection}) ->
+ rabbit_networking:register_connection(self()),
+ Start = #'connection.start'{
+ version_major = ProtocolMajor,
+ version_minor = ProtocolMinor,
+ server_properties = server_properties(Protocol),
+ mechanisms = auth_mechanisms_binary(Sock),
+ locales = <<"en_US">> },
+ ok = send_on_channel0(Sock, Start, Protocol),
+ switch_callback(State#v1{connection = Connection#connection{
+ timeout_sec = ?NORMAL_TIMEOUT,
+ protocol = Protocol},
+ connection_state = starting},
+ frame_header, 7).
+
+%% Reject the connection attempt: send the protocol header we *do*
+%% support (per the version negotiation rules above) and then throw.
+refuse_connection(Sock, Exception, {A, B, C, D}) ->
+ ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end),
+ throw(Exception).
+
+-ifdef(use_specs).
+-spec(refuse_connection/2 :: (rabbit_net:socket(), any()) -> no_return()).
+-endif.
+%% Default to advertising AMQP 0-9-1.
+refuse_connection(Sock, Exception) ->
+ refuse_connection(Sock, Exception, {0, 0, 9, 1}).
+
+%% Arm the stats emission timer, but only once the connection is fully
+%% running; in any other state this is a no-op.
+ensure_stats_timer(State = #v1{connection_state = running}) ->
+ rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats);
+ensure_stats_timer(State) ->
+ State.
+
+%%--------------------------------------------------------------------------
+
+%% Decode a channel-0 method's fields and dispatch to the
+%% connection-state machine (handle_method0/2).  Exceptions are mapped
+%% back to the method that caused them; a closed-socket throw becomes
+%% connection_closed_abruptly after a final stats emission.
+%% NOTE: erlang:get_stacktrace/0 is the pre-OTP-21 idiom for obtaining
+%% the stacktrace inside a catch clause.
+handle_method0(MethodName, FieldsBin,
+ State = #v1{connection = #connection{protocol = Protocol}}) ->
+ try
+ handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin),
+ State)
+ catch throw:{inet_error, closed} ->
+ maybe_emit_stats(State),
+ throw(connection_closed_abruptly);
+ exit:#amqp_error{method = none} = Reason ->
+ handle_exception(State, 0, Reason#amqp_error{method = MethodName});
+ Type:Reason ->
+ Stack = erlang:get_stacktrace(),
+ handle_exception(State, 0, {Type, Reason, MethodName, Stack})
+ end.
+
+%% The connection-level state machine.  Clauses are matched against
+%% both the decoded method record and the current connection_state:
+%% start_ok (starting) -> securing, tune_ok (tuning) -> opening,
+%% open (opening) -> running, plus the close / close_ok handshakes.
+%% Anything else in a non-stopping state is a channel_error.
+handle_method0(#'connection.start_ok'{mechanism = Mechanism,
+ response = Response,
+ client_properties = ClientProperties},
+ State0 = #v1{connection_state = starting,
+ connection = Connection,
+ sock = Sock}) ->
+ AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
+ Capabilities =
+ case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of
+ {table, Capabilities1} -> Capabilities1;
+ _ -> []
+ end,
+ State = State0#v1{connection_state = securing,
+ connection =
+ Connection#connection{
+ client_properties = ClientProperties,
+ capabilities = Capabilities,
+ auth_mechanism = {Mechanism, AuthMechanism},
+ auth_state = AuthMechanism:init(Sock)}},
+ auth_phase(Response, State);
+
+handle_method0(#'connection.secure_ok'{response = Response},
+ State = #v1{connection_state = securing}) ->
+ auth_phase(Response, State);
+
+handle_method0(#'connection.tune_ok'{frame_max = FrameMax,
+ channel_max = ChannelMax,
+ heartbeat = ClientHeartbeat},
+ State = #v1{connection_state = tuning,
+ connection = Connection,
+ helper_sup = SupPid,
+ sock = Sock}) ->
+ ok = validate_negotiated_integer_value(
+ frame_max, ?FRAME_MIN_SIZE, FrameMax),
+ ok = validate_negotiated_integer_value(
+ channel_max, ?CHANNEL_MIN, ChannelMax),
+ {ok, Collector} = rabbit_connection_helper_sup:start_queue_collector(
+ SupPid, Connection#connection.name),
+ Frame = rabbit_binary_generator:build_heartbeat_frame(),
+ SendFun = fun() -> catch rabbit_net:send(Sock, Frame) end,
+ Parent = self(),
+ ReceiveFun = fun() -> Parent ! heartbeat_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ SupPid, Sock, Connection#connection.name,
+ ClientHeartbeat, SendFun, ClientHeartbeat, ReceiveFun),
+ State#v1{connection_state = opening,
+ connection = Connection#connection{
+ frame_max = FrameMax,
+ channel_max = ChannelMax,
+ timeout_sec = ClientHeartbeat},
+ queue_collector = Collector,
+ heartbeater = Heartbeater};
+
+handle_method0(#'connection.open'{virtual_host = VHostPath},
+ State = #v1{connection_state = opening,
+ connection = Connection = #connection{
+ user = User,
+ protocol = Protocol},
+ helper_sup = SupPid,
+ sock = Sock,
+ throttle = Throttle}) ->
+ ok = rabbit_access_control:check_vhost_access(User, VHostPath),
+ NewConnection = Connection#connection{vhost = VHostPath},
+ ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol),
+ Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ Throttle1 = Throttle#throttle{alarmed_by = Conserve},
+ {ok, ChannelSupSupPid} =
+ rabbit_connection_helper_sup:start_channel_sup_sup(SupPid),
+ State1 = control_throttle(
+ State#v1{connection_state = running,
+ connection = NewConnection,
+ channel_sup_sup_pid = ChannelSupSupPid,
+ throttle = Throttle1}),
+ rabbit_event:notify(connection_created,
+ [{type, network} |
+ infos(?CREATION_EVENT_KEYS, State1)]),
+ maybe_emit_stats(State1),
+ State1;
+handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
+ lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
+ maybe_close(State#v1{connection_state = closing});
+handle_method0(#'connection.close'{},
+ State = #v1{connection = #connection{protocol = Protocol},
+ sock = Sock})
+ when ?IS_STOPPING(State) ->
+ %% We're already closed or closing, so we don't need to cleanup
+ %% anything.
+ ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
+ State;
+handle_method0(#'connection.close_ok'{},
+ State = #v1{connection_state = closed}) ->
+ self() ! terminate_connection,
+ State;
+handle_method0(_Method, State) when ?IS_STOPPING(State) ->
+ State;
+handle_method0(_Method, #v1{connection_state = S}) ->
+ rabbit_misc:protocol_error(
+ channel_error, "unexpected method in connection state ~w", [S]).
+
+%% Validate a client's tune_ok value against the server's configured
+%% value.  The convention is that 0 means "unlimited": a client value
+%% of 0 is only acceptable when the server's limit is also 0, and a
+%% non-zero client value must lie within [Min, ServerValue].
+validate_negotiated_integer_value(Field, Min, ClientValue) ->
+ ServerValue = get_env(Field),
+ if ClientValue /= 0 andalso ClientValue < Min ->
+ fail_negotiation(Field, min, ServerValue, ClientValue);
+ ServerValue /= 0 andalso (ClientValue =:= 0 orelse
+ ClientValue > ServerValue) ->
+ fail_negotiation(Field, max, ServerValue, ClientValue);
+ true ->
+ ok
+ end.
+
+%% keep dialyzer happy
+-spec fail_negotiation(atom(), 'min' | 'max', integer(), integer()) ->
+ no_return().
+%% Raise a not_allowed protocol error describing how the client's
+%% tune_ok value violated the server's limit.
+fail_negotiation(Field, MinOrMax, ServerValue, ClientValue) ->
+ {S1, S2} = case MinOrMax of
+ min -> {lower, minimum};
+ max -> {higher, maximum}
+ end,
+ rabbit_misc:protocol_error(
+ not_allowed, "negotiated ~w = ~w is ~w than the ~w allowed value (~w)",
+ [Field, ClientValue, S1, S2, ServerValue], 'connection.tune').
+
+%% Fetch a mandatory 'rabbit' application environment value; crashes
+%% (badmatch) if the key is unset.
+get_env(Key) ->
+ {ok, Value} = application:get_env(rabbit, Key),
+ Value.
+
+%% Send a connection-level (channel 0) method to the client.
+send_on_channel0(Sock, Method, Protocol) ->
+ ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol).
+
+%% Resolve the client's requested SASL mechanism name to its
+%% implementing module.  The mechanism must both be registered and be
+%% among the mechanisms we are willing to offer on this socket;
+%% otherwise a command_invalid protocol error is raised.
+auth_mechanism_to_module(TypeBin, Sock) ->
+ case rabbit_registry:binary_to_type(TypeBin) of
+ {error, not_found} ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unknown authentication mechanism '~s'",
+ [TypeBin]);
+ T ->
+ case {lists:member(T, auth_mechanisms(Sock)),
+ rabbit_registry:lookup_module(auth_mechanism, T)} of
+ {true, {ok, Module}} ->
+ Module;
+ _ ->
+ rabbit_misc:protocol_error(
+ command_invalid,
+ "invalid authentication mechanism '~s'", [T])
+ end
+ end.
+
+%% The mechanisms we offer: registered mechanisms that are both
+%% enabled in configuration and willing to be offered on this
+%% particular socket (e.g. EXTERNAL may require TLS).
+auth_mechanisms(Sock) ->
+ {ok, Configured} = application:get_env(auth_mechanisms),
+ [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
+ Module:should_offer(Sock), lists:member(Name, Configured)].
+
+%% Space-separated mechanism list as required by connection.start.
+auth_mechanisms_binary(Sock) ->
+ list_to_binary(
+ string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")).
+
+%% Run one step of the SASL exchange.  A challenge sends
+%% connection.secure and stays in 'securing'; success sends
+%% connection.tune (after the loopback-user check) and moves to
+%% 'tuning'; a refusal or malformed response raises.
+auth_phase(Response,
+ State = #v1{connection = Connection =
+ #connection{protocol = Protocol,
+ auth_mechanism = {Name, AuthMechanism},
+ auth_state = AuthState},
+ sock = Sock}) ->
+ case AuthMechanism:handle_response(Response, AuthState) of
+ {refused, Msg, Args} ->
+ auth_fail(Msg, Args, Name, State);
+ {protocol_error, Msg, Args} ->
+ rabbit_misc:protocol_error(syntax_error, Msg, Args);
+ {challenge, Challenge, AuthState1} ->
+ Secure = #'connection.secure'{challenge = Challenge},
+ ok = send_on_channel0(Sock, Secure, Protocol),
+ State#v1{connection = Connection#connection{
+ auth_state = AuthState1}};
+ {ok, User = #user{username = Username}} ->
+ case rabbit_access_control:check_user_loopback(Username, Sock) of
+ ok -> ok;
+ not_allowed -> auth_fail("user '~s' can only connect via "
+ "localhost", [Username], Name, State)
+ end,
+ Tune = #'connection.tune'{frame_max = get_env(frame_max),
+ channel_max = get_env(channel_max),
+ heartbeat = get_env(heartbeat)},
+ ok = send_on_channel0(Sock, Tune, Protocol),
+ State#v1{connection_state = tuning,
+ connection = Connection#connection{user = User,
+ auth_state = none}}
+ end.
+
+-ifdef(use_specs).
+-spec(auth_fail/4 :: (string(), [any()], binary(), #v1{}) -> no_return()).
+-endif.
+%% Reject a login.  If the client advertised the
+%% authentication_failure_close capability we first send a
+%% connection.close carrying a deliberately vague explanation (details
+%% stay in the broker log); then raise the access_refused error.
+auth_fail(Msg, Args, AuthName,
+ State = #v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities}}) ->
+ AmqpError = rabbit_misc:amqp_error(
+ access_refused, "~s login refused: ~s",
+ [AuthName, io_lib:format(Msg, Args)], none),
+ case rabbit_misc:table_lookup(Capabilities,
+ <<"authentication_failure_close">>) of
+ {bool, true} ->
+ SafeMsg = io_lib:format(
+ "Login was refused using authentication "
+ "mechanism ~s. For details see the broker "
+ "logfile.", [AuthName]),
+ AmqpError1 = AmqpError#amqp_error{explanation = SafeMsg},
+ {0, CloseMethod} = rabbit_binary_generator:map_exception(
+ 0, AmqpError1, Protocol),
+ ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol);
+ _ -> ok
+ end,
+ rabbit_misc:protocol_error(AmqpError).
+
+%%--------------------------------------------------------------------------
+
+%% Collect the requested info items as a proplist.
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+%% Produce a single info item.  Items not handled here are delegated
+%% to ic/2 which reads them from the #connection{} record.
+i(pid, #v1{}) -> self();
+i(SockStat, S) when SockStat =:= recv_oct;
+ SockStat =:= recv_cnt;
+ SockStat =:= send_oct;
+ SockStat =:= send_cnt;
+ SockStat =:= send_pend ->
+ socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end,
+ fun ([{_, I}]) -> I end, S);
+i(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock);
+i(ssl_protocol, S) -> ssl_info(fun ({P, _}) -> P end, S);
+i(ssl_key_exchange, S) -> ssl_info(fun ({_, {K, _, _}}) -> K end, S);
+i(ssl_cipher, S) -> ssl_info(fun ({_, {_, C, _}}) -> C end, S);
+i(ssl_hash, S) -> ssl_info(fun ({_, {_, _, H}}) -> H end, S);
+i(peer_cert_issuer, S) -> cert_info(fun rabbit_ssl:peer_cert_issuer/1, S);
+i(peer_cert_subject, S) -> cert_info(fun rabbit_ssl:peer_cert_subject/1, S);
+i(peer_cert_validity, S) -> cert_info(fun rabbit_ssl:peer_cert_validity/1, S);
+i(channels, #v1{channel_count = ChannelCount}) -> ChannelCount;
+%% Report 'flow' if we are, or recently (within 5s) were, blocked by
+%% credit flow while not under a resource alarm; otherwise the real
+%% connection state.  (erlang:now/0 matches last_blocked_at's format.)
+i(state, #v1{connection_state = ConnectionState,
+ throttle = #throttle{alarmed_by = Alarms,
+ last_blocked_by = WasBlockedBy,
+ last_blocked_at = T}}) ->
+ case Alarms =:= [] andalso %% not throttled by resource alarms
+ (credit_flow:blocked() %% throttled by flow now
+ orelse %% throttled by flow recently
+ (WasBlockedBy =:= flow andalso T =/= never andalso
+ timer:now_diff(erlang:now(), T) < 5000000)) of
+ true -> flow;
+ false -> ConnectionState
+ end;
+i(Item, #v1{connection = Conn}) -> ic(Item, Conn).
+
+%% Info items read directly from the #connection{} record; unknown
+%% items are a caller error.
+ic(name, #connection{name = Name}) -> Name;
+ic(host, #connection{host = Host}) -> Host;
+ic(peer_host, #connection{peer_host = PeerHost}) -> PeerHost;
+ic(port, #connection{port = Port}) -> Port;
+ic(peer_port, #connection{peer_port = PeerPort}) -> PeerPort;
+ic(protocol, #connection{protocol = none}) -> none;
+ic(protocol, #connection{protocol = P}) -> P:version();
+ic(user, #connection{user = none}) -> '';
+ic(user, #connection{user = U}) -> U#user.username;
+ic(vhost, #connection{vhost = VHost}) -> VHost;
+ic(timeout, #connection{timeout_sec = Timeout}) -> Timeout;
+ic(frame_max, #connection{frame_max = FrameMax}) -> FrameMax;
+ic(channel_max, #connection{channel_max = ChMax}) -> ChMax;
+ic(client_properties, #connection{client_properties = CP}) -> CP;
+ic(auth_mechanism, #connection{auth_mechanism = none}) -> none;
+ic(auth_mechanism, #connection{auth_mechanism = {Name, _Mod}}) -> Name;
+ic(Item, #connection{}) -> throw({bad_argument, Item}).
+
+%% Apply Get to the socket and Select to the result; socket errors
+%% yield the empty atom so info listings degrade gracefully.
+socket_info(Get, Select, #v1{sock = Sock}) ->
+ case Get(Sock) of
+ {ok, T} -> Select(T);
+ {error, _} -> ''
+ end.
+
+%% Extract an item from the negotiated SSL parameters ('' if not SSL).
+ssl_info(F, #v1{sock = Sock}) ->
+ %% The first ok form is R14
+ %% The second is R13 - the extra term is exportability (by inspection,
+ %% the docs are wrong)
+ case rabbit_net:ssl_info(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, {P, {K, C, H}}} -> F({P, {K, C, H}});
+ {ok, {P, {K, C, H, _}}} -> F({P, {K, C, H}})
+ end.
+
+%% Extract an item from the peer certificate ('' if none/not SSL).
+cert_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:peercert(Sock) of
+ nossl -> '';
+ {error, no_peercert} -> '';
+ {ok, Cert} -> list_to_binary(F(Cert))
+ end.
+
+%% Emit connection stats only if stats collection is enabled.
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #v1.stats_timer,
+ fun() -> emit_stats(State) end).
+
+%% Emit a connection_stats event and reset the stats timer.
+emit_stats(State) ->
+ Infos = infos(?STATISTICS_KEYS, State),
+ rabbit_event:notify(connection_stats, Infos),
+ State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer),
+ %% If we emit an event which looks like we are in flow control, it's not a
+ %% good idea for it to be our last one, even if we go idle. Keep emitting
+ %% events, either we stay busy or we drop out of flow control.
+ case proplists:get_value(state, Infos) of
+ flow -> ensure_stats_timer(State1);
+ _ -> State1
+ end.
+
+%% 1.0 stub
+-ifdef(use_specs).
+-spec(become_1_0/2 :: (non_neg_integer(), #v1{}) -> no_return()).
+-endif.
+%% Hand the socket over to the AMQP 1.0 plugin's reader, if loaded.
+%% Id 0 selects plain AMQP 1.0, 3 the SASL variant; any other id is
+%% refused with a header advertising id 3.  The actual takeover
+%% happens via the {become, F} connection state.
+become_1_0(Id, State = #v1{sock = Sock}) ->
+ case code:is_loaded(rabbit_amqp1_0_reader) of
+ false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled);
+ _ -> Mode = case Id of
+ 0 -> amqp;
+ 3 -> sasl;
+ _ -> refuse_connection(
+ Sock, {unsupported_amqp1_0_protocol_id, Id},
+ {3, 1, 0, 0})
+ end,
+ F = fun (_Deb, Buf, BufLen, S) ->
+ {rabbit_amqp1_0_reader, init,
+ [Mode, pack_for_1_0(Buf, BufLen, S)]}
+ end,
+ State#v1{connection_state = {become, F}}
+ end.
+
+%% Bundle the reader state (plus the unparsed buffer) into the tuple
+%% expected by rabbit_amqp1_0_reader:init/2.
+pack_for_1_0(Buf, BufLen, #v1{parent = Parent,
+ sock = Sock,
+ recv_len = RecvLen,
+ pending_recv = PendingRecv,
+ helper_sup = SupPid}) ->
+ {Parent, Sock, RecvLen, PendingRecv, SupPid, Buf, BufLen}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% We use a gen_server simply so that during the terminate/2 call
+%% (i.e., during shutdown), we can sync/flush the dets table to disk.
+
+-module(rabbit_recovery_terms).
+
+-behaviour(gen_server).
+
+-export([start/0, stop/0, store/2, read/1, clear/0]).
+
+-export([upgrade_recovery_terms/0, start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-rabbit_upgrade({upgrade_recovery_terms, local, []}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start() -> rabbit_types:ok_or_error(term())).
+-spec(stop() -> rabbit_types:ok_or_error(term())).
+-spec(store(file:filename(), term()) -> rabbit_types:ok_or_error(term())).
+-spec(read(file:filename()) -> rabbit_types:ok_or_error2(term(), not_found)).
+-spec(clear() -> 'ok').
+
+-endif. % use_specs
+
+%%----------------------------------------------------------------------------
+
+-define(SERVER, ?MODULE).
+
+%% Start/stop this server under the main rabbit supervisor.
+start() -> rabbit_sup:start_child(?MODULE).
+
+stop() -> rabbit_sup:stop_child(?MODULE).
+
+%% Store the recovery terms for a queue directory (overwrites).
+store(DirBaseName, Terms) -> dets:insert(?MODULE, {DirBaseName, Terms}).
+
+%% Look up the recovery terms for a queue directory.
+read(DirBaseName) ->
+ case dets:lookup(?MODULE, DirBaseName) of
+ [{_, Terms}] -> {ok, Terms};
+ _ -> {error, not_found}
+ end.
+
+%% Remove all stored terms and sync the table to disk.
+clear() ->
+ dets:delete_all_objects(?MODULE),
+ flush().
+
+%%----------------------------------------------------------------------------
+
+%% One-off upgrade step: migrate legacy per-queue "clean.dot" term
+%% files into the dets table, deleting each file afterwards.  Queues
+%% whose file is missing or unreadable are skipped.  The table is
+%% always closed again, even if migration fails part-way.
+upgrade_recovery_terms() ->
+ open_table(),
+ try
+ QueuesDir = filename:join(rabbit_mnesia:dir(), "queues"),
+ Dirs = case rabbit_file:list_dir(QueuesDir) of
+ {ok, Entries} -> Entries;
+ {error, _} -> []
+ end,
+ [begin
+ File = filename:join([QueuesDir, Dir, "clean.dot"]),
+ case rabbit_file:read_term_file(File) of
+ {ok, Terms} -> ok = store(Dir, Terms);
+ {error, _} -> ok
+ end,
+ file:delete(File)
+ end || Dir <- Dirs],
+ ok
+ after
+ close_table()
+ end.
+
+%% Started unregistered; clients use the dets table name directly.
+start_link() -> gen_server:start_link(?MODULE, [], []).
+
+%%----------------------------------------------------------------------------
+
+%% The gen_server exists only so terminate/2 gets a chance to flush
+%% and close the dets table on shutdown (hence trap_exit).
+init(_) ->
+ process_flag(trap_exit, true),
+ open_table(),
+ {ok, undefined}.
+
+%% No calls or casts are expected; treat any as a bug and stop.
+handle_call(Msg, _, State) -> {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(_Info, State) -> {noreply, State}.
+
+terminate(_Reason, _State) ->
+ close_table().
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Open recovery.dets as a RAM-backed table with auto_save disabled;
+%% persistence happens only on explicit flush/0 (see clear/0 and
+%% close_table/0).
+open_table() ->
+ File = filename:join(rabbit_mnesia:dir(), "recovery.dets"),
+ {ok, _} = dets:open_file(?MODULE, [{file, File},
+ {ram_file, true},
+ {auto_save, infinity}]).
+
+%% Sync the table's contents to disk.
+flush() -> dets:sync(?MODULE).
+
+close_table() ->
+ ok = flush(),
+ ok = dets:close(?MODULE).
+
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_registry).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([register/3, unregister/2,
+ binary_to_type/1, lookup_module/2, lookup_all/1]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(register/3 :: (atom(), binary(), atom()) -> 'ok').
+-spec(unregister/2 :: (atom(), binary()) -> 'ok').
+-spec(binary_to_type/1 ::
+ (binary()) -> atom() | rabbit_types:error('not_found')).
+-spec(lookup_module/2 ::
+ (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found')).
+-spec(lookup_all/1 :: (atom()) -> [{atom(), atom()}]).
+
+-endif.
+
+%%---------------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%---------------------------------------------------------------------------
+
+%% Register / unregister a plugin module under (Class, TypeName).
+%% Serialized through the gen_server so ETS writes are single-writer.
+register(Class, TypeName, ModuleName) ->
+ gen_server:call(?SERVER, {register, Class, TypeName, ModuleName}, infinity).
+
+unregister(Class, TypeName) ->
+ gen_server:call(?SERVER, {unregister, Class, TypeName}, infinity).
+
+%% This is used with user-supplied arguments (e.g., on exchange
+%% declare), so we restrict it to existing atoms only. This means it
+%% can throw a badarg, indicating that the type cannot have been
+%% registered.
+%% This is used with user-supplied arguments (e.g., on exchange
+%% declare), so we restrict it to existing atoms only: an unknown name
+%% cannot correspond to a registered type, and never creating new
+%% atoms avoids exhausting the atom table from untrusted input.
+%% list_to_existing_atom/1 raises badarg for unknown names, which we
+%% map to {error, not_found}.  A scoped try/catch replaces the
+%% old-style 'case catch ...' so only that error is intercepted.
+binary_to_type(TypeBin) when is_binary(TypeBin) ->
+    try
+        list_to_existing_atom(binary_to_list(TypeBin))
+    catch
+        error:badarg -> {error, not_found}
+    end.
+
+%% Find the module registered for (Class, Type), reading the ETS
+%% table directly (no gen_server round-trip).
+lookup_module(Class, T) when is_atom(T) ->
+ case ets:lookup(?ETS_NAME, {Class, T}) of
+ [{_, Module}] ->
+ {ok, Module};
+ [] ->
+ {error, not_found}
+ end.
+
+%% All {Type, Module} registrations for a given class.
+lookup_all(Class) ->
+ [{K, V} || [K, V] <- ets:match(?ETS_NAME, {{Class, '$1'}, '$2'})].
+
+%%---------------------------------------------------------------------------
+
+%% Unrestricted binary-to-atom conversion.  Only reached via the
+%% register/unregister API, whose callers are presumed trusted (plugin
+%% boot code), so creating new atoms here is acceptable.
+internal_binary_to_type(TypeBin) when is_binary(TypeBin) ->
+ list_to_atom(binary_to_list(TypeBin)).
+
+%% Check the module claims the right behaviour, insert the mapping,
+%% and maybe add the decorator-route fast-path entry.
+internal_register(Class, TypeName, ModuleName)
+ when is_atom(Class), is_binary(TypeName), is_atom(ModuleName) ->
+ ok = sanity_check_module(class_module(Class), ModuleName),
+ RegArg = {{Class, internal_binary_to_type(TypeName)}, ModuleName},
+ true = ets:insert(?ETS_NAME, RegArg),
+ conditional_register(RegArg),
+ ok.
+
+%% Remove the mapping, dropping any fast-path entry first.
+internal_unregister(Class, TypeName) ->
+ UnregArg = {Class, internal_binary_to_type(TypeName)},
+ conditional_unregister(UnregArg),
+ true = ets:delete(?ETS_NAME, UnregArg),
+ ok.
+
+%% register exchange decorator route callback only when implemented,
+%% in order to avoid unnecessary decorator calls on the fast
+%% publishing path
+%% register exchange decorator route callback only when implemented,
+%% in order to avoid unnecessary decorator calls on the fast
+%% publishing path
+conditional_register({{exchange_decorator, Type}, ModuleName}) ->
+ case erlang:function_exported(ModuleName, route, 2) of
+ true -> true = ets:insert(?ETS_NAME,
+ {{exchange_decorator_route, Type},
+ ModuleName});
+ false -> ok
+ end;
+conditional_register(_) ->
+ ok.
+
+%% Drop the fast-path entry; deleting a non-existent key is a no-op,
+%% so this is safe whether or not route/2 was registered.
+conditional_unregister({exchange_decorator, Type}) ->
+ true = ets:delete(?ETS_NAME, {exchange_decorator_route, Type}),
+ ok;
+conditional_unregister(_) ->
+ ok.
+
+%% Verify that Module declares ClassModule among its behaviour
+%% attributes (either US or UK spelling).  Returns ok,
+%% {error, not_module} if the module cannot be loaded (module_info
+%% raises undef), or {error, {not_type, ClassModule}} if it loads but
+%% lacks the behaviour.  The scoped try/catch replaces an old-style
+%% 'case catch ...' which intercepted *every* exception and turned
+%% unrelated errors into an opaque case_clause crash; now only undef
+%% is handled and anything else propagates with its real reason.
+sanity_check_module(ClassModule, Module) ->
+    try
+        Behaviours = lists:flatten(
+                       [Bs || {Attr, Bs} <- Module:module_info(attributes),
+                              Attr =:= behavior orelse
+                              Attr =:= behaviour]),
+        case lists:member(ClassModule, Behaviours) of
+            true  -> ok;
+            false -> {error, {not_type, ClassModule}}
+        end
+    catch
+        error:undef -> {error, not_module}
+    end.
+
+%% Map a registry class to the behaviour module its implementations
+%% must declare; unknown classes crash with function_clause.
+class_module(exchange) -> rabbit_exchange_type;
+class_module(auth_mechanism) -> rabbit_auth_mechanism;
+class_module(runtime_parameter) -> rabbit_runtime_parameter;
+class_module(exchange_decorator) -> rabbit_exchange_decorator;
+class_module(queue_decorator) -> rabbit_queue_decorator;
+class_module(policy_validator) -> rabbit_policy_validator;
+class_module(ha_mode) -> rabbit_mirror_queue_mode;
+class_module(channel_interceptor) -> rabbit_channel_interceptor.
+
+%%---------------------------------------------------------------------------
+
+%% The table is protected: only this process writes, anyone may read.
+init([]) ->
+ ?ETS_NAME = ets:new(?ETS_NAME, [protected, set, named_table]),
+ {ok, none}.
+
+handle_call({register, Class, TypeName, ModuleName}, _From, State) ->
+ ok = internal_register(Class, TypeName, ModuleName),
+ {reply, ok, State};
+
+handle_call({unregister, Class, TypeName}, _From, State) ->
+ ok = internal_unregister(Class, TypeName),
+ {reply, ok, State};
+
+%% Any other traffic is a programming error; crash loudly.
+handle_call(Request, _From, State) ->
+ {stop, {unhandled_call, Request}, State}.
+
+handle_cast(Request, State) ->
+ {stop, {unhandled_cast, Request}, State}.
+
+handle_info(Message, State) ->
+ {stop, {unhandled_info, Message}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_restartable_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/3]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(DELAY, 2).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/3 :: (atom(), rabbit_types:mfargs(), boolean()) ->
+ rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Supervise a single transient worker started via the given MFA.
+%% The third argument (a boolean, despite the name Delay/Fun reuse
+%% below) selects {transient, 1} - transient with a 1-second restart
+%% delay - instead of plain transient.
+start_link(Name, {_M, _F, _A} = Fun, Delay) ->
+ supervisor2:start_link({local, Name}, ?MODULE, [Fun, Delay]).
+
+init([{Mod, _F, _A} = Fun, Delay]) ->
+ {ok, {{one_for_one, 10, 10},
+ [{Mod, Fun, case Delay of
+ true -> {transient, 1};
+ false -> transient
+ end, ?MAX_WAIT, worker, [Mod]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_router).
+-include_lib("stdlib/include/qlc.hrl").
+-include("rabbit.hrl").
+
+-export([match_bindings/2, match_routing_key/2]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([routing_key/0, match_result/0]).
+
+-type(routing_key() :: binary()).
+-type(match_result() :: [rabbit_types:binding_destination()]).
+
+-spec(match_bindings/2 :: (rabbit_types:binding_source(),
+ fun ((rabbit_types:binding()) -> boolean())) ->
+ match_result()).
+-spec(match_routing_key/2 :: (rabbit_types:binding_source(),
+ [routing_key()] | ['_']) ->
+ match_result()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% TODO: Maybe this should be handled by a cursor instead.
+%% TODO: This causes a full scan for each entry with the same source
+%%
+%% Destinations of all bindings from SrcName for which the supplied
+%% predicate Match(Binding) holds, evaluated as a dirty qlc query over
+%% rabbit_route.
+match_bindings(SrcName, Match) ->
+ Query = qlc:q([DestinationName ||
+ #route{binding = Binding = #binding{
+ source = SrcName1,
+ destination = DestinationName}} <-
+ mnesia:table(rabbit_route),
+ SrcName == SrcName1,
+ Match(Binding)]),
+ mnesia:async_dirty(fun qlc:e/1, [Query]).
+
+%% Destinations of bindings from SrcName whose key is one of the
+%% given routing keys.  The single-key case matches the key directly
+%% in the select head; the multi-key case binds the key to '$2' and
+%% builds an 'orelse' guard of '=:=' comparisons over all keys.
+match_routing_key(SrcName, [RoutingKey]) ->
+ find_routes(#route{binding = #binding{source = SrcName,
+ destination = '$1',
+ key = RoutingKey,
+ _ = '_'}},
+ []);
+match_routing_key(SrcName, [_|_] = RoutingKeys) ->
+ find_routes(#route{binding = #binding{source = SrcName,
+ destination = '$1',
+ key = '$2',
+ _ = '_'}},
+ [list_to_tuple(['orelse' | [{'=:=', '$2', RKey} ||
+ RKey <- RoutingKeys]])]).
+
+%%--------------------------------------------------------------------
+
%% Run a match-spec select for the destination ('$1') directly against
%% the underlying ets table.
%%
%% We deliberately bypass mnesia:dirty_select/2: it is comparatively
%% expensive, and it additionally "fixes" the table with
%% ets:safe_fixtable/2, which is unnecessary here.  Per the ets
%% documentation (and erl_db.c), select/match already give the same
%% traversal guarantee as safe_fixtable, and on ordered_set tables -
%% which rabbit_route is - even the low-level first/1 and next/2
%% iterators are safe without fixing the table.
find_routes(MatchHead, Conditions) ->
    MatchSpec = [{MatchHead, Conditions, ['$1']}],
    ets:select(rabbit_route, MatchSpec).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameter).
+
+-ifdef(use_specs).
+
+-type(validate_results() ::
+ 'ok' | {error, string(), [term()]} | [validate_results()]).
+
+-callback validate(rabbit_types:vhost(), binary(), binary(),
+ term(), rabbit_types:user()) -> validate_results().
+-callback notify(rabbit_types:vhost(), binary(), binary(), term()) -> 'ok'.
+-callback notify_clear(rabbit_types:vhost(), binary(), binary()) -> 'ok'.
+
+-else.
+
+-export([behaviour_info/1]).
+
%% Pre-OTP-R15 behaviour definition (used when use_specs is not set):
%% enumerate the callbacks a runtime-parameter component module must
%% export; any other query yields 'undefined'.
behaviour_info(callbacks) ->
    [{validate, 4}, {notify, 4}, {notify_clear, 3}];
behaviour_info(_Other) ->
    undefined.
+
+-endif.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameters).
+
+-include("rabbit.hrl").
+
+-export([parse_set/5, set/5, set_any/5, clear/3, clear_any/3, list/0, list/1,
+ list_component/1, list/2, list_formatted/1, lookup/3,
+ value/3, value/4, info_keys/0]).
+
+-export([set_global/2, value_global/1, value_global/2]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(ok_or_error_string() :: 'ok' | {'error_string', string()}).
+-type(ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok')).
+
+-spec(parse_set/5 :: (rabbit_types:vhost(), binary(), binary(), string(),
+ rabbit_types:user() | 'none') -> ok_or_error_string()).
+-spec(set/5 :: (rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:user() | 'none') -> ok_or_error_string()).
+-spec(set_any/5 :: (rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:user() | 'none') -> ok_or_error_string()).
+-spec(set_global/2 :: (atom(), term()) -> 'ok').
+-spec(clear/3 :: (rabbit_types:vhost(), binary(), binary())
+ -> ok_thunk_or_error_string()).
+-spec(clear_any/3 :: (rabbit_types:vhost(), binary(), binary())
+ -> ok_thunk_or_error_string()).
+-spec(list/0 :: () -> [rabbit_types:infos()]).
+-spec(list/1 :: (rabbit_types:vhost() | '_') -> [rabbit_types:infos()]).
+-spec(list_component/1 :: (binary()) -> [rabbit_types:infos()]).
+-spec(list/2 :: (rabbit_types:vhost() | '_', binary() | '_')
+ -> [rabbit_types:infos()]).
+-spec(list_formatted/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(lookup/3 :: (rabbit_types:vhost(), binary(), binary())
+ -> rabbit_types:infos() | 'not_found').
+-spec(value/3 :: (rabbit_types:vhost(), binary(), binary()) -> term()).
+-spec(value/4 :: (rabbit_types:vhost(), binary(), binary(), term()) -> term()).
+-spec(value_global/1 :: (atom()) -> term() | 'not_found').
+-spec(value_global/2 :: (atom(), term()) -> term()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+
+-endif.
+
+%%---------------------------------------------------------------------------
+
+-import(rabbit_misc, [pget/2, pset/3]).
+
+-define(TABLE, rabbit_runtime_parameters).
+
+%%---------------------------------------------------------------------------
+
+%% Set a runtime parameter supplied as a JSON string (e.g. from
+%% rabbitmqctl).  Policies have their own dedicated code path and are
+%% rejected here outright.
+parse_set(_, <<"policy">>, _, _, _) ->
+ {error_string, "policies may not be set using this method"};
+parse_set(VHost, Component, Name, String, User) ->
+ case rabbit_misc:json_decode(String) of
+ {ok, JSON} -> set(VHost, Component, Name,
+ rabbit_misc:json_to_term(JSON), User);
+ error -> {error_string, "JSON decoding error"}
+ end.
+
+%% As set_any/5 but, like parse_set/5, refuses to touch policies.
+set(_, <<"policy">>, _, _, _) ->
+ {error_string, "policies may not be set using this method"};
+set(VHost, Component, Name, Term, User) ->
+ set_any(VHost, Component, Name, Term, User).
+
+%% Store a global (non-vhost-scoped) parameter keyed by Name and emit a
+%% parameter_set event.  No component validation is performed.
+set_global(Name, Term) ->
+ mnesia_update(Name, Term),
+ event_notify(parameter_set, none, global, [{name, Name},
+ {value, Term}]),
+ ok.
+
+%% Render a list of {Format, Args} validation errors as a single
+%% {error_string, ...} reply.
+format_error(L) ->
+ {error_string, rabbit_misc:format_many([{"Validation failed~n", []} | L])}.
+
+%% Set any parameter, including policies.  Errors accumulated by
+%% set_any0/5 are flattened into one error string for the caller.
+set_any(VHost, Component, Name, Term, User) ->
+ case set_any0(VHost, Component, Name, Term, User) of
+ ok -> ok;
+ {errors, L} -> format_error(L)
+ end.
+
+%% Look up the component module, let it validate the new value, then
+%% write it.  The component is only notified (and an event emitted)
+%% when the stored value actually changed - note the deliberate re-use
+%% of the already-bound Term in the {old, Term} match to detect an
+%% unchanged value.
+set_any0(VHost, Component, Name, Term, User) ->
+ case lookup_component(Component) of
+ {ok, Mod} ->
+ case flatten_errors(
+ Mod:validate(VHost, Component, Name, Term, User)) of
+ ok ->
+ case mnesia_update(VHost, Component, Name, Term) of
+ {old, Term} -> ok;
+ _ -> event_notify(
+ parameter_set, VHost, Component,
+ [{name, Name},
+ {value, Term}]),
+ Mod:notify(VHost, Component, Name, Term)
+ end,
+ ok;
+ E ->
+ E
+ end;
+ E ->
+ E
+ end.
+
+%% Write a global parameter in an mnesia transaction.
+mnesia_update(Key, Term) ->
+ rabbit_misc:execute_mnesia_transaction(mnesia_update_fun(Key, Term)).
+
+%% Write a vhost-scoped parameter in a transaction; rabbit_vhost:with/2
+%% presumably aborts the transaction if the vhost does not exist -
+%% confirm against rabbit_vhost.
+mnesia_update(VHost, Comp, Name, Term) ->
+ rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with(VHost, mnesia_update_fun({VHost, Comp, Name}, Term))).
+
+%% Transaction body: overwrite the row and report whether the key was
+%% 'new' or what the previous value was ({old, Value}), so callers can
+%% suppress notifications for no-op updates.
+mnesia_update_fun(Key, Term) ->
+ fun () ->
+ Res = case mnesia:read(?TABLE, Key, read) of
+ [] -> new;
+ [Params] -> {old, Params#runtime_parameters.value}
+ end,
+ ok = mnesia:write(?TABLE, c(Key, Term), write),
+ Res
+ end.
+
+%% Clear a parameter; policies are managed elsewhere and are rejected.
+clear(_, <<"policy">> , _) ->
+ {error_string, "policies may not be cleared using this method"};
+clear(VHost, Component, Name) ->
+ clear_any(VHost, Component, Name).
+
+%% Delete any existing parameter and notify its component module.  If
+%% we are already inside an mnesia transaction the notification thunk
+%% is returned for the caller to run after the transaction commits;
+%% otherwise it is invoked immediately.
+clear_any(VHost, Component, Name) ->
+ Notify = fun () ->
+ case lookup_component(Component) of
+ {ok, Mod} -> event_notify(
+ parameter_cleared, VHost, Component,
+ [{name, Name}]),
+ Mod:notify_clear(VHost, Component, Name);
+ _ -> ok
+ end
+ end,
+ case lookup(VHost, Component, Name) of
+ not_found -> {error_string, "Parameter does not exist"};
+ _ -> mnesia_clear(VHost, Component, Name),
+ case mnesia:is_transaction() of
+ true -> Notify;
+ false -> Notify()
+ end
+ end.
+
+%% Transactionally delete the row (the vhost must still exist).
+mnesia_clear(VHost, Component, Name) ->
+ F = fun () ->
+ ok = mnesia:delete(?TABLE, {VHost, Component, Name}, write)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(rabbit_vhost:with(VHost, F)).
+
+%% Emit a rabbit_event for a parameter change.  Clause order matters:
+%% policy events are suppressed here (the policy code emits its own),
+%% and global parameters (VHost = none) carry no vhost item.
+event_notify(_Event, _VHost, <<"policy">>, _Props) ->
+ ok;
+event_notify(Event, none, Component, Props) ->
+ rabbit_event:notify(Event, [{component, Component} | Props]);
+event_notify(Event, VHost, Component, Props) ->
+ rabbit_event:notify(Event, [{vhost, VHost},
+ {component, Component} | Props]).
+
+%% List all parameters in all vhosts, excluding policies.
+list() ->
+ [p(P) || #runtime_parameters{ key = {_VHost, Comp, _Name}} = P <-
+ rabbit_misc:dirty_read_all(?TABLE), Comp /= <<"policy">>].
+
+list(VHost) -> list(VHost, '_').
+list_component(Component) -> list('_', Component).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+%%
+%% List parameters restricted by vhost and/or component ('_' matches
+%% any).  Policies are excluded unless asked for explicitly.
+list(VHost, Component) ->
+ mnesia:async_dirty(
+ fun () ->
+ case VHost of
+ '_' -> ok;
+ _ -> rabbit_vhost:assert(VHost)
+ end,
+ Match = #runtime_parameters{key = {VHost, Component, '_'},
+ _ = '_'},
+ [p(P) || #runtime_parameters{key = {_VHost, Comp, _Name}} = P <-
+ mnesia:match_object(?TABLE, Match, read),
+ Comp =/= <<"policy">> orelse Component =:= <<"policy">>]
+ end).
+
+%% As list/1 but with each value rendered as a JSON binary for display.
+list_formatted(VHost) ->
+ [pset(value, format(pget(value, P)), P) || P <- list(VHost)].
+
+%% Fetch one parameter as an infos proplist, or 'not_found'.
+lookup(VHost, Component, Name) ->
+ case lookup0({VHost, Component, Name}, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> p(Params)
+ end.
+
+%% Raw value access.  value/4 stores Def as the parameter's value if it
+%% is currently missing (see lookup_missing/2).
+value(VHost, Comp, Name) -> value0({VHost, Comp, Name}).
+value(VHost, Comp, Name, Def) -> value0({VHost, Comp, Name}, Def).
+
+%% Global (non-vhost) variants of value/3,4, keyed directly by Key.
+value_global(Key) -> value0(Key).
+value_global(Key, Default) -> value0(Key, Default).
+
+value0(Key) ->
+ case lookup0(Key, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> Params#runtime_parameters.value
+ end.
+
+value0(Key, Default) ->
+ Params = lookup0(Key, fun () -> lookup_missing(Key, Default) end),
+ Params#runtime_parameters.value.
+
+%% Dirty read, with a thunk supplying the result on a miss.
+lookup0(Key, DefaultFun) ->
+ case mnesia:dirty_read(?TABLE, Key) of
+ [] -> DefaultFun();
+ [R] -> R
+ end.
+
+%% On a miss, transactionally store Default - re-reading inside the
+%% transaction in case another process raced us and wrote first.
+lookup_missing(Key, Default) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read(?TABLE, Key, read) of
+ [] -> Record = c(Key, Default),
+ mnesia:write(?TABLE, Record, write),
+ Record;
+ [R] -> R
+ end
+ end).
+
+%% Construct a #runtime_parameters record.
+c(Key, Default) ->
+ #runtime_parameters{key = Key,
+ value = Default}.
+
+%% Render a record as the infos proplist returned by list/lookup.
+p(#runtime_parameters{key = {VHost, Component, Name}, value = Value}) ->
+ [{vhost, VHost},
+ {component, Component},
+ {name, Name},
+ {value, Value}].
+
+info_keys() -> [component, name, value].
+
+%%---------------------------------------------------------------------------
+
+%% Map a component name to its registered callback module.
+%% NOTE(review): list_to_atom/1 on the externally-supplied component
+%% name grows the atom table (atoms are never GC'd); presumably
+%% acceptable because parameters are set by administrators only -
+%% confirm.
+lookup_component(Component) ->
+ case rabbit_registry:lookup_module(
+ runtime_parameter, list_to_atom(binary_to_list(Component))) of
+ {error, not_found} -> {errors,
+ [{"component ~s not found", [Component]}]};
+ {ok, Module} -> {ok, Module}
+ end.
+
+%% Encode a term as a JSON binary for display purposes.
+format(Term) ->
+ {ok, JSON} = rabbit_misc:json_encode(rabbit_misc:term_to_json(Term)),
+ list_to_binary(JSON).
+
%% Collapse a (possibly nested) validation result into 'ok' or
%% {errors, [{Fmt, Args}]}.  Anything that is not an {error, Fmt, Args}
%% triple (e.g. a plain 'ok') is discarded.
flatten_errors(Results) ->
    Errors = [{Fmt, Args} ||
                 {error, Fmt, Args} <- lists:flatten([Results])],
    case Errors of
        [] -> ok;
        _  -> {errors, Errors}
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameters_test).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([validate/5, notify/4, notify_clear/3]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%%----------------------------------------------------------------------------
+
+%% Register/unregister this module as the runtime-parameter component
+%% named <<"test">> (used by the broker test suite).
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+ rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+%% rabbit_runtime_parameter callbacks.  Clause order matters: "good" is
+%% always valid, "maybe" only with the value <<"good">>, "admin" only
+%% for administrators (or internal callers passing User = none); any
+%% other name is rejected.
+validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+ case lists:member(administrator, User#user.tags) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+validate(_, <<"test">>, _, _, _) -> {error, "meh", []}.
+
+%% No-op notifications: the test component has nothing to react to.
+notify(_, _, _, _) -> ok.
+notify_clear(_, _, _) -> ok.
+
+%%----------------------------------------------------------------------------
+
+%% Register/unregister the two test policy validators implemented by
+%% validate_policy/1 below.
+register_policy_validator() ->
+ rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+ rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
+
+unregister_policy_validator() ->
+ rabbit_registry:unregister(policy_validator, <<"testeven">>),
+ rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
%% Policy validators used by the test suite:
%%   <<"testeven">> accepts a definition list with an even number of
%%   entries; <<"testpos">> accepts a list of strictly positive
%%   integers; anything else is rejected.
validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
    case length(Terms) rem 2 of
        0 -> ok;
        _ -> {error, "meh", []}
    end;
validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
    IsPos = fun (N) -> is_integer(N) andalso N > 0 end,
    case lists:all(IsPos, Terms) of
        true  -> ok;
        false -> {error, "meh", []}
    end;
validate_policy(_) ->
    {error, "meh", []}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_sasl_report_file_h).
+-include("rabbit.hrl").
+
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-import(rabbit_error_logger_file_h, [safe_handle_event/3]).
+
+%% rabbit_sasl_report_file_h is a wrapper around the sasl_report_file_h
+%% module because the original's init/1 does not match properly
+%% with the result of closing the old handler when swapping handlers.
+%% The first init/1 additionally allows for simple log rotation
+%% when the suffix is not the empty string.
+%% The original init/1 also opened the file in 'write' mode, thus
+%% overwriting old logs. To remedy this, init/1 from
+%% lib/sasl/src/sasl_report_file_h.erl from R14B3 was copied as
+%% init_file/1 and changed so that it opens the file in 'append' mode.
+
+%% Used only when swapping handlers and performing log rotation: move
+%% the current log aside (append it to File ++ Suffix), delete the
+%% original, then reopen.  A failed rotation is logged but does not
+%% prevent the handler from (re)initialising.
+init({{File, Suffix}, []}) ->
+ case rabbit_file:append_file(File, Suffix) of
+ ok -> file:delete(File),
+ ok;
+ {error, Error} ->
+ rabbit_log:error("Failed to append contents of "
+ "sasl log file '~s' to '~s':~n~p~n",
+ [File, [File, Suffix], Error])
+ end,
+ init(File);
+%% Used only when swapping handlers and the original handler
+%% failed to terminate or was never installed
+init({{File, _}, error}) ->
+ init(File);
+%% Used only when swapping handlers without
+%% doing any log rotation
+init({File, []}) ->
+ init(File);
+%% Normal startup with an explicit {File, Type} pair.
+init({File, _Type} = FileInfo) ->
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file(FileInfo);
+%% Normal startup with just a file name; the report type comes from
+%% sasl's errlog_type configuration (see sasl_error_logger_type/0).
+init(File) ->
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file({File, sasl_error_logger_type()}).
+
+%% Adapted from sasl_report_file_h:init/1 (R14B3, see module comment)
+%% but opens the log in 'append' mode so existing logs survive.  State
+%% matches what sasl_report_file_h expects: {Fd, File, Type}.
+init_file({File, Type}) ->
+ process_flag(trap_exit, true),
+ case file:open(File, [append]) of
+ {ok,Fd} -> {ok, {Fd, File, Type}};
+ Error -> Error
+ end.
+
+%% gen_event callbacks: all delegate to sasl_report_file_h.  Events are
+%% first truncated to ?LOG_TRUNC and wrapped in safe_handle_event/3
+%% (imported from rabbit_error_logger_file_h).
+handle_event(Event, State) ->
+ safe_handle_event(fun handle_event0/2, Event, State).
+
+handle_event0(Event, State) ->
+ sasl_report_file_h:handle_event(
+ truncate:log_event(Event, ?LOG_TRUNC), State).
+
+handle_info(Info, State) ->
+ sasl_report_file_h:handle_info(Info, State).
+
+handle_call(Call, State) ->
+ sasl_report_file_h:handle_call(Call, State).
+
+terminate(Reason, State) ->
+ sasl_report_file_h:terminate(Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ %% There is no sasl_report_file_h:code_change/3
+ {ok, State}.
+
+%%----------------------------------------------------------------------
+
%% Mirror sasl's interpretation of its errlog_type application
%% environment key: accept the three documented values, throw on
%% anything else, and default to 'all' when the key is unset (or the
%% sasl application is not loaded).
sasl_error_logger_type() ->
    case application:get_env(sasl, errlog_type) of
        {ok, Type} when Type =:= error;
                        Type =:= progress;
                        Type =:= all ->
            Type;
        {ok, Bad} ->
            throw({error, {wrong_errlog_type, Bad}});
        undefined ->
            all
    end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_ssl).
+
+-include("rabbit.hrl").
+
+-include_lib("public_key/include/public_key.hrl").
+
+-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
+-export([peer_cert_subject_items/2, peer_cert_auth_name/1]).
+
+%%--------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([certificate/0]).
+
+-type(certificate() :: binary()).
+
+-spec(peer_cert_issuer/1 :: (certificate()) -> string()).
+-spec(peer_cert_subject/1 :: (certificate()) -> string()).
+-spec(peer_cert_validity/1 :: (certificate()) -> string()).
+-spec(peer_cert_subject_items/2 ::
+ (certificate(), tuple()) -> [string()] | 'not_found').
+-spec(peer_cert_auth_name/1 ::
+ (certificate()) -> binary() | 'not_found' | 'unsafe').
+
+-endif.
+
+%%--------------------------------------------------------------------------
+%% High-level functions used by reader
+%%--------------------------------------------------------------------------
+
+%% Return a string describing the certificate's issuer.
+peer_cert_issuer(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ issuer = Issuer }}) ->
+ format_rdn_sequence(Issuer)
+ end, Cert).
+
+%% Return a string describing the certificate's subject, as per RFC4514.
+peer_cert_subject(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ subject = Subject }}) ->
+ format_rdn_sequence(Subject)
+ end, Cert).
+
+%% Return the parts of the certificate's subject matching the given
+%% attribute-type OID (e.g. ?'id-at-commonName'), or 'not_found'.
+peer_cert_subject_items(Cert, Type) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ subject = Subject }}) ->
+ find_by_type(Type, Subject)
+ end, Cert).
+
+%% Return a string describing the certificate's validity period as
+%% "Start - End".
+peer_cert_validity(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ validity = {'Validity', Start, End} }}) ->
+ rabbit_misc:format("~s - ~s", [format_asn1_value(Start),
+ format_asn1_value(End)])
+ end, Cert).
+
+%% Extract a username from the certificate, according to the
+%% ssl_cert_login_from application setting (distinguished_name or
+%% common_name).  Returns 'unsafe' when peer verification is off,
+%% since then the certificate's contents cannot be trusted.
+peer_cert_auth_name(Cert) ->
+ {ok, Mode} = application:get_env(rabbit, ssl_cert_login_from),
+ peer_cert_auth_name(Mode, Cert).
+
+peer_cert_auth_name(distinguished_name, Cert) ->
+ case auth_config_sane() of
+ true -> iolist_to_binary(peer_cert_subject(Cert));
+ false -> unsafe
+ end;
+
+peer_cert_auth_name(common_name, Cert) ->
+ %% If there is more than one CN then we join them with "," in a
+ %% vaguely DN-like way. But this is more just so we do something
+ %% more intelligent than crashing, if you actually want to escape
+ %% things properly etc, use DN mode.
+ case auth_config_sane() of
+ true -> case peer_cert_subject_items(Cert, ?'id-at-commonName') of
+ not_found -> not_found;
+ CNs -> list_to_binary(string:join(CNs, ","))
+ end;
+ false -> unsafe
+ end.
+
+%% Certificate-based auth is only safe when the broker actually
+%% verifies the peer certificate; warn and refuse otherwise.
+auth_config_sane() ->
+ {ok, Opts} = application:get_env(rabbit, ssl_options),
+ case proplists:get_value(verify, Opts) of
+ verify_peer -> true;
+ V -> rabbit_log:warning("SSL certificate authentication "
+ "disabled, verify=~p~n", [V]),
+ false
+ end.
+
+%%--------------------------------------------------------------------------
+
+%% Decode a DER certificate and apply F to the #'OTPCertificate'{}
+%% record.  The case absorbs the {ok, _} wrapper older OTP releases
+%% returned from pkix_decode_cert/2.
+cert_info(F, Cert) ->
+ F(case public_key:pkix_decode_cert(Cert, otp) of
+ {ok, DecCert} -> DecCert; %%pre R14B
+ DecCert -> DecCert %%R14B onwards
+ end).
+
+%% All values of the given attribute type in an RDN sequence, each
+%% formatted as a string; 'not_found' if the type does not occur.
+find_by_type(Type, {rdnSequence, RDNs}) ->
+ case [V || #'AttributeTypeAndValue'{type = T, value = V}
+ <- lists:flatten(RDNs),
+ T == Type] of
+ [] -> not_found;
+ L -> [format_asn1_value(V) || V <- L]
+ end.
+
+%%--------------------------------------------------------------------------
+%% Formatting functions
+%%--------------------------------------------------------------------------
+
+%% Format an rdnSequence as a RFC4514 subject string.  The RDNs are
+%% reversed, matching RFC4514's most-specific-first presentation order.
+format_rdn_sequence({rdnSequence, Seq}) ->
+ string:join(lists:reverse([format_complex_rdn(RDN) || RDN <- Seq]), ",").
+
+%% Format an RDN set; multi-valued RDNs are joined with "+".
+format_complex_rdn(RDNs) ->
+ string:join([format_rdn(RDN) || RDN <- RDNs], "+").
+
+%% Format an RDN. If the type name is unknown, use the dotted decimal
+%% representation. See RFC4514, section 2.3.
+format_rdn(#'AttributeTypeAndValue'{type = T, value = V}) ->
+ FV = escape_rdn_value(format_asn1_value(V)),
+ %% OID -> short-name table; entries beyond RFC4514's core set use
+ %% the conventional upper-case long names.
+ Fmts = [{?'id-at-surname' , "SN"},
+ {?'id-at-givenName' , "GIVENNAME"},
+ {?'id-at-initials' , "INITIALS"},
+ {?'id-at-generationQualifier' , "GENERATIONQUALIFIER"},
+ {?'id-at-commonName' , "CN"},
+ {?'id-at-localityName' , "L"},
+ {?'id-at-stateOrProvinceName' , "ST"},
+ {?'id-at-organizationName' , "O"},
+ {?'id-at-organizationalUnitName' , "OU"},
+ {?'id-at-title' , "TITLE"},
+ {?'id-at-countryName' , "C"},
+ {?'id-at-serialNumber' , "SERIALNUMBER"},
+ {?'id-at-pseudonym' , "PSEUDONYM"},
+ {?'id-domainComponent' , "DC"},
+ {?'id-emailAddress' , "EMAILADDRESS"},
+ {?'street-address' , "STREET"},
+ {{0,9,2342,19200300,100,1,1} , "UID"}], %% Not in public_key.hrl
+ case proplists:lookup(T, Fmts) of
+ {_, Fmt} ->
+ rabbit_misc:format(Fmt ++ "=~s", [FV]);
+ none when is_tuple(T) ->
+ %% Unknown OID tuple: dotted decimal, e.g. "0.9.2342...=value".
+ TypeL = [rabbit_misc:format("~w", [X]) || X <- tuple_to_list(T)],
+ rabbit_misc:format("~s=~s", [string:join(TypeL, "."), FV]);
+ none ->
+ rabbit_misc:format("~p=~s", [T, FV])
+ end.
+
%% Escape a string as per RFC4514 (section 2.4):
%%   - a leading space or '#' is escaped;
%%   - the special characters " + , ; < > \ are escaped anywhere;
%%   - a trailing space is escaped;
%%   - control and non-ASCII bytes are rendered as \XX hex pairs.
%% Note the original guard also hex-escapes 126 ($~); that behaviour is
%% preserved here unchanged.
escape_rdn_value(Value) ->
    escape_rdn_value(Value, start).

escape_rdn_value([], _Pos) ->
    [];
escape_rdn_value([Char | Rest], start) when Char =:= $ ; Char =:= $# ->
    [$\\, Char | escape_rdn_value(Rest, middle)];
escape_rdn_value(Value, start) ->
    escape_rdn_value(Value, middle);
escape_rdn_value([$ ], middle) ->
    %% Lone remaining space is a trailing space: escape it.
    [$\\, $ ];
escape_rdn_value([Char | Rest], middle) when Char =:= $"; Char =:= $+;
                                             Char =:= $,; Char =:= $;;
                                             Char =:= $<; Char =:= $>;
                                             Char =:= $\\ ->
    [$\\, Char | escape_rdn_value(Rest, middle)];
escape_rdn_value([Char | Rest], middle) when Char < 32; Char >= 126 ->
    %% Non-printable (and any byte of an already-UTF-8-encoded
    %% character) becomes a two-digit hex escape, byte by byte.
    rabbit_misc:format("\\~2.16.0B", [Char]) ++
        escape_rdn_value(Rest, middle);
escape_rdn_value([Char | Rest], middle) ->
    [Char | escape_rdn_value(Rest, middle)].
+
+%% Get the string representation of an OTPCertificate field.
+format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString;
+ ST =:= universalString; ST =:= utf8String;
+ ST =:= bmpString ->
+ format_directory_string(ST, S);
+%% Render a utcTime as ISO 8601; the two-digit year is assumed to be
+%% 20xx (the "20" prefix is hard-coded).
+format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2,
+ Min1, Min2, S1, S2, $Z]}) ->
+ rabbit_misc:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ",
+ [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]);
+%% We appear to get an untagged value back for an ia5string
+%% (e.g. domainComponent).
+format_asn1_value(V) when is_list(V) ->
+ V;
+format_asn1_value(V) when is_binary(V) ->
+ %% OTP does not decode some values when combined with an unknown
+ %% type. That's probably wrong, so as a last ditch effort let's
+ %% try manually decoding. 'DirectoryString' is semi-arbitrary -
+ %% but it is the type which covers the various string types we
+ %% handle above; fall back to ~p formatting if decoding fails.
+ try
+ {ST, S} = public_key:der_decode('DirectoryString', V),
+ format_directory_string(ST, S)
+ catch _:_ ->
+ rabbit_misc:format("~p", [V])
+ end;
+format_asn1_value(V) ->
+ rabbit_misc:format("~p", [V]).
+
+%% DirectoryString { INTEGER : maxSize } ::= CHOICE {
+%% teletexString TeletexString (SIZE (1..maxSize)),
+%% printableString PrintableString (SIZE (1..maxSize)),
+%% bmpString BMPString (SIZE (1..maxSize)),
+%% universalString UniversalString (SIZE (1..maxSize)),
+%% uTF8String UTF8String (SIZE (1..maxSize)) }
+%%
+%% Precise definitions of printable / teletexString are hard to come
+%% by. This is what I reconstructed:
+%%
+%% printableString:
+%% "intended to represent the limited character sets available to
+%% mainframe input terminals"
+%% A-Z a-z 0-9 ' ( ) + , - . / : = ? [space]
+%% http://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx
+%%
+%% teletexString:
+%% "a sizable volume of software in the world treats TeletexString
+%% (T61String) as a simple 8-bit string with mostly Windows Latin 1
+%% (superset of iso-8859-1) encoding"
+%% http://www.mail-archive.com/asn1@asn1.org/msg00460.html
+%%
+%% (However according to that link X.680 actually defines
+%% TeletexString in some much more involved and crazy way. I suggest
+%% we treat it as ISO-8859-1 since Erlang does not support Windows
+%% Latin 1).
+%%
+%% bmpString:
+%% UCS-2 according to RFC 3641. Hence cannot represent Unicode
+%% characters above 65535 (outside the "Basic Multilingual Plane").
+%%
+%% universalString:
+%% UCS-4 according to RFC 3641.
+%%
+%% utf8String:
+%% UTF-8 according to RFC 3641.
+%%
+%% Within Rabbit we assume UTF-8 encoding. Since printableString is a
+%% subset of ASCII it is also a subset of UTF-8. The others need
+%% converting. Fortunately since the Erlang SSL library does the
+%% decoding for us (albeit into a weird format, see below), we just
+%% need to handle encoding into UTF-8. Note also that utf8Strings come
+%% back as binary.
+%%
+%% Note for testing: the default Ubuntu configuration for openssl will
+%% only create printableString or teletexString types no matter what
+%% you do. Edit string_mask in the [req] section of
+%% /etc/ssl/openssl.cnf to change this (see comments there). You
+%% probably also need to set utf8 = yes to get it to accept UTF-8 on
+%% the command line. Also note I could not get openssl to generate a
+%% universalString.
+
%% Convert a decoded X.509 directory string to a UTF-8 byte list (see
%% the long comment above for the encoding rationale).  printableString
%% is an ASCII subset and passes straight through; utf8String arrives
%% as a binary and is flattened to bytes; the remaining types are
%% re-encoded from the SSL library's codepoint representation.
format_directory_string(printableString, Str) -> Str;
format_directory_string(teletexString,   Str) -> utf8_list_from(Str);
format_directory_string(bmpString,       Str) -> utf8_list_from(Str);
format_directory_string(universalString, Str) -> utf8_list_from(Str);
format_directory_string(utf8String,      Bin) -> binary_to_list(Bin).

%% Re-encode a list of Unicode codepoints as a UTF-8 byte list.
utf8_list_from(Str) ->
    Utf8 = unicode:characters_to_binary(flatten_ssl_list(Str), utf32, utf8),
    binary_to_list(Utf8).

%% The Erlang SSL implementation represents non-ASCII characters as
%% {A,B,C,D} four-byte tuples - e.g. [97,{0,0,3,187}] is LATIN SMALL
%% LETTER A followed by GREEK SMALL LETTER LAMDA.  Turn each element
%% into a plain integer codepoint so unicode:characters_to_binary/3
%% can treat the list as utf32 data.
flatten_ssl_list(Chars) ->
    lists:map(fun flatten_ssl_list_item/1, Chars).

flatten_ssl_list_item({A, B, C, D}) ->
    (A bsl 24) + (B bsl 16) + (C bsl 8) + D;
flatten_ssl_list_item(N) when is_number(N) ->
    N.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_child/1, start_child/2, start_child/3,
+ start_supervisor_child/1, start_supervisor_child/2,
+ start_supervisor_child/3,
+ start_restartable_child/1, start_restartable_child/2,
+ start_delayed_restartable_child/1, start_delayed_restartable_child/2,
+ stop_child/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_child/1 :: (atom()) -> 'ok').
+-spec(start_child/2 :: (atom(), [any()]) -> 'ok').
+-spec(start_child/3 :: (atom(), atom(), [any()]) -> 'ok').
+-spec(start_supervisor_child/1 :: (atom()) -> 'ok').
+-spec(start_supervisor_child/2 :: (atom(), [any()]) -> 'ok').
+-spec(start_supervisor_child/3 :: (atom(), atom(), [any()]) -> 'ok').
+-spec(start_restartable_child/1 :: (atom()) -> 'ok').
+-spec(start_restartable_child/2 :: (atom(), [any()]) -> 'ok').
+-spec(start_delayed_restartable_child/1 :: (atom()) -> 'ok').
+-spec(start_delayed_restartable_child/2 :: (atom(), [any()]) -> 'ok').
+-spec(stop_child/1 :: (atom()) -> rabbit_types:ok_or_error(any())).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link() -> supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%% Add a transient worker child to the singleton rabbit_sup.  The
+%% child id defaults to the module name and Args are passed to
+%% Mod:start_link/N; a successful start collapses to 'ok' via
+%% child_reply/1.
+start_child(Mod) -> start_child(Mod, []).
+
+start_child(Mod, Args) -> start_child(Mod, Mod, Args).
+
+start_child(ChildId, Mod, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, start_link, Args},
+ transient, ?MAX_WAIT, worker, [Mod]})).
+
+%% As start_child/1,2,3 but for supervisor children (type =
+%% supervisor, shutdown = infinity).
+start_supervisor_child(Mod) -> start_supervisor_child(Mod, []).
+
+start_supervisor_child(Mod, Args) -> start_supervisor_child(Mod, Mod, Args).
+
+start_supervisor_child(ChildId, Mod, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, start_link, Args},
+ transient, infinity, supervisor, [Mod]})).
+
+%% Start a child wrapped in its own rabbit_restartable_sup (registered
+%% as "<Mod>_sup"), so the child can be restarted there rather than
+%% escalating to rabbit_sup.  The Delay flag presumably selects delayed
+%% restarts inside rabbit_restartable_sup - confirm against that
+%% module.
+start_restartable_child(M) -> start_restartable_child(M, [], false).
+start_restartable_child(M, A) -> start_restartable_child(M, A, false).
+start_delayed_restartable_child(M) -> start_restartable_child(M, [], true).
+start_delayed_restartable_child(M, A) -> start_restartable_child(M, A, true).
+
+start_restartable_child(Mod, Args, Delay) ->
+ Name = list_to_atom(atom_to_list(Mod) ++ "_sup"),
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {Name, {rabbit_restartable_sup, start_link,
+ [Name, {Mod, start_link, Args}, Delay]},
+ transient, infinity, supervisor, [rabbit_restartable_sup]})).
+
+%% Terminate and then remove a child; returns the delete_child/2
+%% result on success, or the termination error unchanged.
+stop_child(ChildId) ->
+ case supervisor:terminate_child(?SERVER, ChildId) of
+ ok -> supervisor:delete_child(?SERVER, ChildId);
+ E -> E
+ end.
+
%% supervisor callback: one_for_all with restart intensity 0 in 1
%% second, i.e. any escalated child termination shuts the supervisor
%% down; the child list starts empty and is populated dynamically via
%% start_child/1,2,3 and friends above.
init([]) ->
    SupFlags = {one_for_all, 0, 1},
    {ok, {SupFlags, []}}.
+
+
+%%----------------------------------------------------------------------------
+
%% Collapse a successful {ok, Child} start result to plain 'ok'; pass
%% every other result (e.g. {error, Reason}) through unchanged.
child_reply({ok, _Child}) -> ok;
child_reply(Other)        -> Other.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_table).
+
+-export([create/0, create_local_copy/1, wait_for_replicated/0, wait/1,
+ force_load/0, is_present/0, is_empty/0,
+ check_schema_integrity/0, clear_ram_only_tables/0]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(create/0 :: () -> 'ok').
+-spec(create_local_copy/1 :: ('disc' | 'ram') -> 'ok').
+-spec(wait_for_replicated/0 :: () -> 'ok').
+-spec(wait/1 :: ([atom()]) -> 'ok').
+-spec(force_load/0 :: () -> 'ok').
+-spec(is_present/0 :: () -> boolean()).
+-spec(is_empty/0 :: () -> boolean()).
+-spec(check_schema_integrity/0 :: () -> rabbit_types:ok_or_error(any())).
+-spec(clear_ram_only_tables/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Main interface
+%%----------------------------------------------------------------------------
+
%% Create every rabbit mnesia table. The 'match' entry in each
%% definition is only used by our own integrity checks — mnesia does not
%% understand it — so it is stripped before table creation. Throws a
%% descriptive error on the first failure.
create() ->
    CreateTab =
        fun ({Tab, TabDef}) ->
                TabDef1 = proplists:delete(match, TabDef),
                case mnesia:create_table(Tab, TabDef1) of
                    {atomic, ok} ->
                        ok;
                    {aborted, Reason} ->
                        throw({error, {table_creation_failed,
                                       Tab, TabDef1, Reason}})
                end
        end,
    lists:foreach(CreateTab, definitions()),
    ok.
+
%% The sequence in which we delete the schema and then the other
%% tables is important: if we delete the schema first when moving to
%% RAM mnesia will loudly complain since it doesn't make much sense to
%% do that. But when moving to disc, we need to move the schema first.
create_local_copy(disc) ->
    create_local_copy(schema, disc_copies),
    create_local_copies(disc);
create_local_copy(ram) ->
    create_local_copies(ram),
    create_local_copy(schema, ram_copies).
+
%% Wait only for tables that are replicated across the cluster;
%% local-content tables are excluded since other nodes never hold them.
wait_for_replicated() ->
    IsLocal = fun (TabDef) -> lists:member({local_content, true}, TabDef) end,
    wait([Tab || {Tab, TabDef} <- definitions(), not IsLocal(TabDef)]).
+
%% Wait for the given tables to become available, throwing a descriptive
%% error on timeout or failure. The previously hard-coded 30 second
%% timeout is now the default of a (non-exported) wait/2, so the limit
%% can be varied without changing existing call sites.
wait(TableNames) ->
    wait(TableNames, 30000).

%% Timeout is in milliseconds, as accepted by mnesia:wait_for_tables/2.
wait(TableNames, Timeout) ->
    case mnesia:wait_for_tables(TableNames, Timeout) of
        ok ->
            ok;
        {timeout, BadTabs} ->
            throw({error, {timeout_waiting_for_tables, BadTabs}});
        {error, Reason} ->
            throw({error, {failed_waiting_for_tables, Reason}})
    end.
+
%% Ask mnesia to load every rabbit table even when some replicas are
%% unavailable; best effort — individual results are ignored.
force_load() ->
    lists:foreach(fun mnesia:force_load_table/1, names()),
    ok.
+
%% True iff every rabbit table is known to this mnesia instance.
is_present() ->
    Known = mnesia:system_info(tables),
    lists:all(fun (Tab) -> lists:member(Tab, Known) end, names()).
+
%% True iff every rabbit table holds no records (its dirty_first is the
%% '$end_of_table' sentinel). Uses =:= rather than == since both sides
%% are atoms and no numeric coercion is wanted.
is_empty() ->
    lists:all(fun (Tab) -> mnesia:dirty_first(Tab) =:= '$end_of_table' end,
              names()).
+
%% Verify that every expected table exists with the expected attribute
%% list; only if that passes, wait for the tables to load and then
%% spot-check their contents against each definition's 'match' pattern.
%% Returns ok or {error, Reasons}.
check_schema_integrity() ->
    Tables = mnesia:system_info(tables),
    case check(fun (Tab, TabDef) ->
                       case lists:member(Tab, Tables) of
                           false -> {error, {table_missing, Tab}};
                           true  -> check_attributes(Tab, TabDef)
                       end
               end) of
        ok    -> ok = wait(names()),
                 check(fun check_content/2);
        Other -> Other
    end.
+
%% Empty every rabbit table that is held only in RAM on this node;
%% tables with a local disc copy are left alone.
clear_ram_only_tables() ->
    ThisNode = node(),
    [begin
         {atomic, ok} = mnesia:clear_table(Tab)
     end || Tab <- names(),
            lists:member(ThisNode, mnesia:table_info(Tab, ram_copies))],
    ok.
+
+%%--------------------------------------------------------------------
+%% Internal helpers
+%%--------------------------------------------------------------------
+
%% Create a local replica of every table from definitions(Type). When
%% becoming a disc node — or for local-content tables, which always live
%% on this node — the copy type is taken from the table's own definition
%% (defaulting to ram_copies); when becoming a ram node everything is
%% kept in RAM.
create_local_copies(Type) ->
    lists:foreach(
      fun ({Tab, TabDef}) ->
              HasDiscCopies = has_copy_type(TabDef, disc_copies),
              HasDiscOnlyCopies = has_copy_type(TabDef, disc_only_copies),
              LocalTab = proplists:get_bool(local_content, TabDef),
              StorageType =
                  if
                      Type =:= disc orelse LocalTab ->
                          if
                              HasDiscCopies     -> disc_copies;
                              HasDiscOnlyCopies -> disc_only_copies;
                              true              -> ram_copies
                          end;
                      Type =:= ram ->
                          ram_copies
                  end,
              ok = create_local_copy(Tab, StorageType)
      end, definitions(Type)),
    ok.
+
%% Ensure this node holds a copy of Tab stored as Type: add a copy if
%% none exists yet, convert an existing copy of a different storage
%% type, and do nothing when the copy is already of the right type.
%% Asserts the mnesia operation succeeds.
create_local_copy(Tab, Type) ->
    case mnesia:table_info(Tab, storage_type) of
        unknown ->
            {atomic, ok} = mnesia:add_table_copy(Tab, node(), Type);
        Type ->
            ok;
        _OtherType ->
            {atomic, ok} = mnesia:change_table_copy_type(Tab, node(), Type)
    end,
    ok.
+
%% True iff this node appears in the table definition's node list for
%% the given copy type (e.g. disc_copies, disc_only_copies).
has_copy_type(TabDef, DiscType) ->
    Nodes = proplists:get_value(DiscType, TabDef, []),
    lists:member(node(), Nodes).
+
%% Compare the live table's attribute list against the one in our
%% definition; report any mismatch with both lists for diagnosis.
check_attributes(Tab, TabDef) ->
    {attributes, ExpAttrs} = proplists:lookup(attributes, TabDef),
    case mnesia:table_info(Tab, attributes) of
        ExpAttrs -> ok;
        Actual   -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Actual}}
    end.
+
%% Spot-check a table's contents: read only the objects stored under the
%% table's first key (a sample, not a full scan) and run them through
%% the definition's 'match' pattern as a compiled match spec. All
%% sampled objects must match or the table is reported invalid.
check_content(Tab, TabDef) ->
    {_, Match} = proplists:lookup(match, TabDef),
    case mnesia:dirty_first(Tab) of
        '$end_of_table' ->
            %% empty table: trivially valid
            ok;
        Key ->
            ObjList = mnesia:dirty_read(Tab, Key),
            MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]),
            case ets:match_spec_run(ObjList, MatchComp) of
                ObjList -> ok;
                _       -> {error, {table_content_invalid, Tab, Match, ObjList}}
            end
    end.
+
%% Run Fun(Tab, TabDef) over every table definition, collecting the
%% reasons from any {error, Reason} results. Returns ok when every check
%% passed, else {error, Reasons}. The previous version relied on the
%% obscure trick of binding a variable inside a list comprehension
%% filter; lists:filtermap/2 expresses the same thing directly.
check(Fun) ->
    case lists:filtermap(
           fun ({Tab, TabDef}) ->
                   case Fun(Tab, TabDef) of
                       ok              -> false;
                       {error, Reason} -> {true, Reason}
                   end
           end, definitions()) of
        []     -> ok;
        Errors -> {error, Errors}
    end.
+
+%%--------------------------------------------------------------------
+%% Table definitions
+%%--------------------------------------------------------------------
+
%% The names of all rabbit tables, in definition order.
names() -> [Tab || {Tab, _} <- definitions()].
+
%% The tables aren't supposed to be on disk on a ram node
definitions(disc) ->
    definitions();
definitions(ram) ->
    %% Rewrite each definition to carry no disc copies and a ram copy on
    %% this node, overriding whatever the base definition specified.
    [{Tab, [{disc_copies, []}, {ram_copies, [node()]} |
            proplists:delete(
              ram_copies, proplists:delete(disc_copies, TabDef))]} ||
        {Tab, TabDef} <- definitions()].
+
%% One {TableName, TabDef} pair per table. Each TabDef is a mnesia
%% create_table definition plus our own {match, Pattern} entry, which is
%% stripped before table creation (see create/0) and used by the schema
%% integrity checks. gm and mirrored_supervisor contribute their own
%% table definitions at the end.
definitions() ->
    [{rabbit_user,
      [{record_name, internal_user},
       {attributes, record_info(fields, internal_user)},
       {disc_copies, [node()]},
       {match, #internal_user{_='_'}}]},
     {rabbit_user_permission,
      [{record_name, user_permission},
       {attributes, record_info(fields, user_permission)},
       {disc_copies, [node()]},
       {match, #user_permission{user_vhost = #user_vhost{_='_'},
                                permission = #permission{_='_'},
                                _='_'}}]},
     {rabbit_vhost,
      [{record_name, vhost},
       {attributes, record_info(fields, vhost)},
       {disc_copies, [node()]},
       {match, #vhost{_='_'}}]},
     %% 'bag' table: several listeners may be recorded per key
     {rabbit_listener,
      [{record_name, listener},
       {attributes, record_info(fields, listener)},
       {type, bag},
       {match, #listener{_='_'}}]},
     {rabbit_durable_route,
      [{record_name, route},
       {attributes, record_info(fields, route)},
       {disc_copies, [node()]},
       {match, #route{binding = binding_match(), _='_'}}]},
     {rabbit_semi_durable_route,
      [{record_name, route},
       {attributes, record_info(fields, route)},
       {type, ordered_set},
       {match, #route{binding = binding_match(), _='_'}}]},
     {rabbit_route,
      [{record_name, route},
       {attributes, record_info(fields, route)},
       {type, ordered_set},
       {match, #route{binding = binding_match(), _='_'}}]},
     {rabbit_reverse_route,
      [{record_name, reverse_route},
       {attributes, record_info(fields, reverse_route)},
       {type, ordered_set},
       {match, #reverse_route{reverse_binding = reverse_binding_match(),
                              _='_'}}]},
     {rabbit_topic_trie_node,
      [{record_name, topic_trie_node},
       {attributes, record_info(fields, topic_trie_node)},
       {type, ordered_set},
       {match, #topic_trie_node{trie_node = trie_node_match(), _='_'}}]},
     {rabbit_topic_trie_edge,
      [{record_name, topic_trie_edge},
       {attributes, record_info(fields, topic_trie_edge)},
       {type, ordered_set},
       {match, #topic_trie_edge{trie_edge = trie_edge_match(), _='_'}}]},
     {rabbit_topic_trie_binding,
      [{record_name, topic_trie_binding},
       {attributes, record_info(fields, topic_trie_binding)},
       {type, ordered_set},
       {match, #topic_trie_binding{trie_binding = trie_binding_match(),
                                   _='_'}}]},
     {rabbit_durable_exchange,
      [{record_name, exchange},
       {attributes, record_info(fields, exchange)},
       {disc_copies, [node()]},
       {match, #exchange{name = exchange_name_match(), _='_'}}]},
     {rabbit_exchange,
      [{record_name, exchange},
       {attributes, record_info(fields, exchange)},
       {match, #exchange{name = exchange_name_match(), _='_'}}]},
     {rabbit_exchange_serial,
      [{record_name, exchange_serial},
       {attributes, record_info(fields, exchange_serial)},
       {match, #exchange_serial{name = exchange_name_match(), _='_'}}]},
     {rabbit_runtime_parameters,
      [{record_name, runtime_parameters},
       {attributes, record_info(fields, runtime_parameters)},
       {disc_copies, [node()]},
       {match, #runtime_parameters{_='_'}}]},
     {rabbit_durable_queue,
      [{record_name, amqqueue},
       {attributes, record_info(fields, amqqueue)},
       {disc_copies, [node()]},
       {match, #amqqueue{name = queue_name_match(), _='_'}}]},
     {rabbit_queue,
      [{record_name, amqqueue},
       {attributes, record_info(fields, amqqueue)},
       {match, #amqqueue{name = queue_name_match(), _='_'}}]}]
        ++ gm:table_definitions()
        ++ mirrored_supervisor:table_definitions().
+
%% Match patterns used by the {match, _} entries above: each constrains
%% the embedded #resource{} records to the right 'kind' while leaving
%% all other fields as '_' wildcards.
binding_match() ->
    #binding{source = exchange_name_match(),
             destination = binding_destination_match(),
             _='_'}.
reverse_binding_match() ->
    #reverse_binding{destination = binding_destination_match(),
                     source = exchange_name_match(),
                     _='_'}.
%% A binding destination may be an exchange or a queue, so any kind goes.
binding_destination_match() ->
    resource_match('_').
trie_node_match() ->
    #trie_node{ exchange_name = exchange_name_match(), _='_'}.
trie_edge_match() ->
    #trie_edge{ exchange_name = exchange_name_match(), _='_'}.
trie_binding_match() ->
    #trie_binding{exchange_name = exchange_name_match(), _='_'}.
exchange_name_match() ->
    resource_match(exchange).
queue_name_match() ->
    resource_match(queue).
resource_match(Kind) ->
    #resource{kind = Kind, _='_'}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_tests).
+
+-compile([export_all]).
+
+-export([all_tests/0]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+-define(TIMEOUT, 5000).
+
%% Entry point for the whole suite. Order matters: the cluster (if a
%% secondary node is up) is configured first, the file handle limit is
%% lowered to 10 to exercise the file_handle_cache, and then the tests
%% run in sequence — each one must return 'passed' (or 'ok') so any
%% failure aborts the run via a badmatch.
all_tests() ->
    ok = setup_cluster(),
    ok = truncate:test(),
    ok = supervisor2_tests:test_all(),
    passed = gm_tests:all_tests(),
    passed = mirrored_supervisor_tests:all_tests(),
    application:set_env(rabbit, file_handles_high_watermark, 10, infinity),
    ok = file_handle_cache:set_limit(10),
    passed = test_version_equivalance(),
    passed = test_file_handle_cache(),
    passed = test_backing_queue(),
    passed = test_rabbit_basic_header_handling(),
    passed = test_priority_queue(),
    passed = test_pg_local(),
    passed = test_unfold(),
    passed = test_supervisor_delayed_restart(),
    passed = test_table_codec(),
    passed = test_content_framing(),
    passed = test_content_transcoding(),
    passed = test_topic_matching(),
    passed = test_log_management(),
    passed = test_app_management(),
    passed = test_log_management_during_startup(),
    passed = test_statistics(),
    passed = test_arguments_parser(),
    passed = test_dynamic_mirroring(),
    passed = test_user_management(),
    passed = test_runtime_parameters(),
    passed = test_policy_validation(),
    passed = test_policy_opts_validation(),
    passed = test_ha_policy_validation(),
    passed = test_server_status(),
    passed = test_amqp_connection_refusal(),
    passed = test_confirms(),
    passed = test_with_state(),
    passed = test_mcall(),
    %% cluster-dependent tests only run when the secondary node is up
    passed =
        do_if_secondary_node(
          fun run_cluster_dependent_tests/1,
          fun (SecondaryNode) ->
                  io:format("Skipping cluster dependent tests with node ~p~n",
                            [SecondaryNode]),
                  passed
          end),
    passed = test_configurable_server_properties(),
    passed = vm_memory_monitor_tests:all_tests(),
    passed.
+
+
%% Run Up(Node) if the secondary test node ("hare") answers a ping,
%% otherwise Down(Node); either way the node name is passed along.
do_if_secondary_node(Up, Down) ->
    Secondary = rabbit_nodes:make("hare"),
    Action = case net_adm:ping(Secondary) of
                 pong -> Up;
                 pang -> Down
             end,
    Action(Secondary).
+
%% If the secondary node is reachable, cluster this node with it: the
%% app must be stopped before joining, then both apps are (re)started.
%% No-op when the secondary node is down.
setup_cluster() ->
    do_if_secondary_node(
      fun (SecondaryNode) ->
              ok = control_action(stop_app, []),
              ok = control_action(join_cluster,
                                  [atom_to_list(SecondaryNode)]),
              ok = control_action(start_app, []),
              ok = control_action(start_app, SecondaryNode, [], [])
      end,
      fun (_) -> ok end).
+
%% Run the cluster-dependent tests when the secondary node is up;
%% otherwise just log that they are being skipped.
maybe_run_cluster_dependent_tests() ->
    RunTests = fun (Node) -> passed = run_cluster_dependent_tests(Node) end,
    Skip = fun (Node) ->
                   io:format("Skipping cluster dependent tests with node ~p~n",
                             [Node])
           end,
    do_if_secondary_node(RunTests, Skip).
+
%% Run the cluster tests twice: once locally against SecondaryNode, then
%% once remotely (spawned on SecondaryNode, pointed back at this node)
%% so coverage on the local node exercises the receiving side of the
%% delegate mechanism as well.
run_cluster_dependent_tests(SecondaryNode) ->
    io:format("Running cluster dependent tests with node ~p~n", [SecondaryNode]),
    passed = test_delegates_async(SecondaryNode),
    passed = test_delegates_sync(SecondaryNode),
    passed = test_queue_cleanup(SecondaryNode),
    passed = test_declare_on_dead_queue(SecondaryNode),
    passed = test_refresh_events(SecondaryNode),

    %% we now run the tests remotely, so that code coverage on the
    %% local node picks up more of the delegate
    Node = node(),
    Self = self(),
    Remote = spawn(SecondaryNode,
                   fun () -> Rs = [ test_delegates_async(Node),
                                    test_delegates_sync(Node),
                                    test_queue_cleanup(Node),
                                    test_declare_on_dead_queue(Node),
                                    test_refresh_events(Node) ],
                             Self ! {self(), Rs}
                   end),
    %% every remote test must have reported 'passed'
    receive
        {Remote, Result} ->
            Result = lists:duplicate(length(Result), passed)
    after 30000 ->
            throw(timeout)
    end,

    passed.
+
%% Versions are minor-equivalent iff they share major.minor (or are the
%% literal unexpanded %%VSN%% placeholder); differing patch levels are
%% fine, differing shapes or minors are not.
test_version_equivalance() ->
    Expectations = [{true,  "3.0.0",   "3.0.0"},
                    {true,  "3.0.0",   "3.0.1"},
                    {true,  "%%VSN%%", "%%VSN%%"},
                    {false, "3.0.0",   "3.1.0"},
                    {false, "3.0.0",   "3.0"},
                    {false, "3.0.0",   "3.0.0.1"},
                    {false, "3.0.0",   "3.0.foo"}],
    [Expected = rabbit_misc:version_minor_equivalent(VerA, VerB)
     || {Expected, VerA, VerB} <- Expectations],
    passed.
+
%% Run each header-handling sub-test in turn; every one must report
%% 'passed'.
test_rabbit_basic_header_handling() ->
    SubTests = [fun write_table_with_invalid_existing_type_test/0,
                fun invalid_existing_headers_test/0,
                fun disparate_invalid_header_entries_accumulate_separately_test/0,
                fun corrupt_or_invalid_headers_are_overwritten_test/0,
                fun invalid_same_header_entry_accumulation_test/0],
    [passed = SubTest() || SubTest <- SubTests],
    passed.
+
%% Fixtures for the header-handling tests: a sample x-death style table,
%% a sample routing table, and macros that build deliberately ill-typed
%% header entries (longstr where a table is expected) plus the shape in
%% which such entries are expected to reappear under ?INVALID_HEADERS_KEY.
-define(XDEATH_TABLE,
        [{<<"reason">>, longstr, <<"blah">>},
         {<<"queue">>, longstr, <<"foo.bar.baz">>},
         {<<"exchange">>, longstr, <<"my-exchange">>},
         {<<"routing-keys">>, array, []}]).

-define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).

-define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
-define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
-define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
+
%% Prepending a table over an existing ill-typed entry for the same key
%% must park the bad entry under ?INVALID_HEADERS_KEY (checked inside
%% prepend_check/3).
write_table_with_invalid_existing_type_test() ->
    prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]),
    passed.
+
%% After prepending over an invalid entry, the newly-prepended table
%% must be retrievable under its own key as a one-element array.
invalid_existing_headers_test() ->
    Headers =
        prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
    {array, [{table, ?ROUTE_TABLE}]} =
        rabbit_misc:table_lookup(Headers, <<"header2">>),
    passed.
+
%% Invalid entries under different keys must each get their own slot in
%% the invalid-headers table.
disparate_invalid_header_entries_accumulate_separately_test() ->
    BadHeaders = [?BAD_HEADER("header2")],
    Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
    Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
                             [?BAD_HEADER("header1") | Headers]),
    {table, [?FOUND_BAD_HEADER("header1"),
             ?FOUND_BAD_HEADER("header2")]} =
        rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
    passed.
+
%% An ill-typed x-invalid-headers entry is itself treated like any other
%% bad header and folded into the rebuilt invalid-headers table.
corrupt_or_invalid_headers_are_overwritten_test() ->
    Headers0 = [?BAD_HEADER("header1"),
                ?BAD_HEADER("x-invalid-headers")],
    Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
    {table,[?FOUND_BAD_HEADER("header1"),
            ?FOUND_BAD_HEADER("x-invalid-headers")]} =
        rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
    passed.
+
%% Repeated invalid values for the same key accumulate in one array,
%% newest first.
invalid_same_header_entry_accumulation_test() ->
    BadHeader1 = ?BAD_HEADER2("header1", "a"),
    Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
    Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
                             [?BAD_HEADER2("header1", "b") | Headers]),
    {table, InvalidHeaders} =
        rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
    {array, [{longstr,<<"bad header1b">>},
             {longstr,<<"bad header1a">>}]} =
        rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
    passed.
+
%% Prepend HeaderTable under HeaderKey and assert that the displaced
%% (invalid) value previously stored under that key now appears first in
%% the corresponding invalid-headers array. Returns the resulting
%% headers for further assertions by the caller.
prepend_check(HeaderKey, HeaderTable, Headers) ->
    Headers1 = rabbit_basic:prepend_table_header(
                 HeaderKey, HeaderTable, Headers),
    {table, Invalid} =
        rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
    {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
    {array, [{Type, Value} | _]} =
        rabbit_misc:table_lookup(Invalid, HeaderKey),
    Headers1.
+
%% Exhaustive behavioural checks for priority_queue: construction,
%% ordering, joins (including empty, same-priority, negative and
%% infinity priorities). Each probe returns
%% {is_queue, is_empty, len, to_list, drained-items} via
%% test_priority_queue/1 and is matched against the expected tuple.
test_priority_queue() ->

    false = priority_queue:is_queue(not_a_queue),

    %% empty Q
    Q = priority_queue:new(),
    {true, true, 0, [], []} = test_priority_queue(Q),

    %% 1-4 element no-priority Q
    true = lists:all(fun (X) -> X =:= passed end,
                     lists:map(fun test_simple_n_element_queue/1,
                               lists:seq(1, 4))),

    %% 1-element priority Q
    Q1 = priority_queue:in(foo, 1, priority_queue:new()),
    {true, false, 1, [{1, foo}], [foo]} =
        test_priority_queue(Q1),

    %% 2-element same-priority Q
    Q2 = priority_queue:in(bar, 1, Q1),
    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
        test_priority_queue(Q2),

    %% 2-element different-priority Q
    Q3 = priority_queue:in(bar, 2, Q1),
    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
        test_priority_queue(Q3),

    %% 1-element negative priority Q
    Q4 = priority_queue:in(foo, -1, priority_queue:new()),
    {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),

    %% merge 2 * 1-element no-priority Qs
    Q5 = priority_queue:join(priority_queue:in(foo, Q),
                             priority_queue:in(bar, Q)),
    {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
        test_priority_queue(Q5),

    %% merge 1-element no-priority Q with 1-element priority Q
    Q6 = priority_queue:join(priority_queue:in(foo, Q),
                             priority_queue:in(bar, 1, Q)),
    {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
        test_priority_queue(Q6),

    %% merge 1-element priority Q with 1-element no-priority Q
    Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
                             priority_queue:in(bar, Q)),
    {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
        test_priority_queue(Q7),

    %% merge 2 * 1-element same-priority Qs
    Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
                             priority_queue:in(bar, 1, Q)),
    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
        test_priority_queue(Q8),

    %% merge 2 * 1-element different-priority Qs
    Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
                             priority_queue:in(bar, 2, Q)),
    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
        test_priority_queue(Q9),

    %% merge 2 * 1-element different-priority Qs (other way around)
    Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
                              priority_queue:in(foo, 1, Q)),
    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
        test_priority_queue(Q10),

    %% merge 2 * 2-element multi-different-priority Qs
    Q11 = priority_queue:join(Q6, Q5),
    {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
     [bar, foo, foo, bar]} = test_priority_queue(Q11),

    %% and the other way around
    Q12 = priority_queue:join(Q5, Q6),
    {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
     [bar, foo, bar, foo]} = test_priority_queue(Q12),

    %% merge with negative priorities
    Q13 = priority_queue:join(Q4, Q5),
    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
        test_priority_queue(Q13),

    %% and the other way around
    Q14 = priority_queue:join(Q5, Q4),
    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
        test_priority_queue(Q14),

    %% joins with empty queues:
    Q1 = priority_queue:join(Q, Q1),
    Q1 = priority_queue:join(Q1, Q),

    %% insert with priority into non-empty zero-priority queue
    Q15 = priority_queue:in(baz, 1, Q5),
    {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
        test_priority_queue(Q15),

    %% 1-element infinity priority Q
    Q16 = priority_queue:in(foo, infinity, Q),
    {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),

    %% add infinity to 0-priority Q
    Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
        test_priority_queue(Q17),

    %% and the other way around
    Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
        test_priority_queue(Q18),

    %% add infinity to mixed-priority Q
    Q19 = priority_queue:in(qux, infinity, Q3),
    {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
        test_priority_queue(Q19),

    %% merge the above with a negative priority Q
    Q20 = priority_queue:join(Q19, Q4),
    {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
     [qux, bar, foo, foo]} = test_priority_queue(Q20),

    %% merge two infinity priority queues
    Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
                              priority_queue:in(bar, infinity, Q)),
    {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
        test_priority_queue(Q21),

    %% merge two mixed priority with infinity queues
    Q22 = priority_queue:join(Q18, Q20),
    {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
                      {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
        test_priority_queue(Q22),

    passed.
+
%% Push every element of L (left to right) into Q at default priority.
priority_queue_in_all(Q, L) ->
    lists:foldl(fun priority_queue:in/2, Q, L).
+
%% Drain the queue, returning items in the order they come out.
priority_queue_out_all(Q) ->
    case priority_queue:out(Q) of
        {{value, Item}, Rest} -> [Item | priority_queue_out_all(Rest)];
        {empty, _}            -> []
    end.
+
%% Probe a queue, returning {is_queue, is_empty, len, to_list,
%% drained-items} so callers can match the whole tuple at once.
test_priority_queue(Q) ->
    {priority_queue:is_queue(Q),
     priority_queue:is_empty(Q),
     priority_queue:len(Q),
     priority_queue:to_list(Q),
     priority_queue_out_all(Q)}.
+
%% Push 1..N without explicit priorities and check they come back FIFO,
%% listed with the implicit priority 0.
test_simple_n_element_queue(N) ->
    Items = lists:seq(1, N),
    Q = priority_queue_in_all(priority_queue:new(), Items),
    ToListRes = [{0, X} || X <- Items],
    {true, false, N, ToListRes, Items} = test_priority_queue(Q),
    passed.
+
%% Exercise pg_local group membership with two blocked processes and two
%% groups (a, b): duplicate joins accumulate, leaves remove one
%% membership at a time, and leaving a group one is not in is a no-op
%% (note the repeated final leave). Killing both members must drain the
%% groups to empty.
test_pg_local() ->
    [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]],
    check_pg_local(ok, [], []),
    check_pg_local(pg_local:join(a, P), [P], []),
    check_pg_local(pg_local:join(b, P), [P], [P]),
    check_pg_local(pg_local:join(a, P), [P, P], [P]),
    check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
    check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
    check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
    check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
    check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
    check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
    check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
    %% send each member a message so it exits, and wait for the DOWN
    [begin X ! done,
           Ref = erlang:monitor(process, X),
           receive {'DOWN', Ref, process, X, _Info} -> ok end
     end || X <- [P, Q]],
    check_pg_local(ok, [], []),
    passed.
+
%% Assert the preceding pg_local operation returned ok, then sync and
%% compare the memberships of groups a and b (order-insensitive,
%% duplicates significant).
check_pg_local(ok, APids, BPids) ->
    ok = pg_local:sync(),
    [true, true] = [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
                       {Key, Pids} <- [{a, APids}, {b, BPids}]].
+
%% rabbit_misc:unfold/2: a generator returning false stops immediately
%% with an empty list; counting down from 10 while doubling yields
%% [2,4,...,20] with a final accumulator state of 0.
test_unfold() ->
    {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
    List = lists:seq(2,20,2),
    {List, 0} = rabbit_misc:unfold(fun (0) -> false;
                                       (N) -> {true, N*2, N-1}
                                   end, 10),
    passed.
+
%% Round-trip every AMQP field-table type through the generator and
%% parser, checking the exact wire bytes in both directions.
test_table_codec() ->
    %% FIXME this does not test inexact numbers (double and float) yet,
    %% because they won't pass the equality assertions
    Table = [{<<"longstr">>,   longstr,   <<"Here is a long string">>},
             {<<"signedint">>, signedint, 12345},
             {<<"decimal">>,   decimal,   {3, 123456}},
             {<<"timestamp">>, timestamp, 109876543209876},
             {<<"table">>,     table,     [{<<"one">>, signedint, 54321},
                                           {<<"two">>, longstr,
                                            <<"A long string">>}]},
             {<<"byte">>,      byte,      -128},
             {<<"long">>,      long,      1234567890},
             {<<"short">>,     short,     655},
             {<<"bool">>,      bool,      true},
             {<<"binary">>,    binary,    <<"a binary string">>},
             {<<"void">>,      void,      undefined},
             {<<"array">>,     array,     [{signedint, 54321},
                                           {longstr, <<"A long string">>}]}
            ],
    %% hand-assembled expected encoding: <name-len, name, type-tag, value>
    Binary = <<
               7,"longstr",   "S", 21:32, "Here is a long string",
               9,"signedint", "I", 12345:32/signed,
               7,"decimal",   "D", 3, 123456:32,
               9,"timestamp", "T", 109876543209876:64,
               5,"table",     "F", 31:32, % length of table
               3,"one",       "I", 54321:32,
               3,"two",       "S", 13:32, "A long string",
               4,"byte",      "b", -128:8/signed,
               4,"long",      "l", 1234567890:64,
               5,"short",     "s", 655:16,
               4,"bool",      "t", 1,
               6,"binary",    "x", 15:32, "a binary string",
               4,"void",      "V",
               5,"array",     "A", 23:32,
               "I", 54321:32,
               "S", 13:32, "A long string"
             >>,
    Binary = rabbit_binary_generator:generate_table(Table),
    Table  = rabbit_binary_parser:parse_table(Binary),
    passed.
+
+%% Test that content frames don't exceed frame-max
+test_content_framing(FrameMax, BodyBin) ->
+ [Header | Frames] =
+ rabbit_binary_generator:build_simple_content_frames(
+ 1,
+ rabbit_binary_generator:ensure_content_encoded(
+ rabbit_basic:build_content(#'P_basic'{}, BodyBin),
+ rabbit_framing_amqp_0_9_1),
+ FrameMax,
+ rabbit_framing_amqp_0_9_1),
+ %% header is formatted correctly and the size is the total of the
+ %% fragments
+ <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
+ BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
+ BodySize = size(BodyBin),
+ true = lists:all(
+ fun (ContentFrame) ->
+ FrameBinary = list_to_binary(ContentFrame),
+ %% assert
+ <<_TypeAndChannel:3/binary,
+ Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
+ FrameBinary,
+ size(FrameBinary) =< FrameMax
+ end, Frames),
+ passed.
+
%% Drive test_content_framing/2 across the interesting boundary cases.
test_content_framing() ->
    %% no content
    passed = test_content_framing(4096, <<>>),
    %% easily fit in one frame
    passed = test_content_framing(4096, <<"Easy">>),
    %% exactly one frame (empty frame = 8 bytes)
    passed = test_content_framing(11, <<"One">>),
    %% more than one frame
    passed = test_content_framing(11, <<"More than one frame">>),
    passed.
+
%% Run encode/decode/clear operations over content records in various
%% combinations; the only hard guarantees are the assertions embedded in
%% the Ensure* wrappers and that no sequence crashes.
test_content_transcoding() ->
    %% there are no guarantees provided by 'clear' - it's just a hint
    ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
    ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
    EnsureDecoded =
        fun (C0) ->
                C1 = rabbit_binary_parser:ensure_content_decoded(C0),
                true = C1#content.properties =/= none,
                C1
        end,
    EnsureEncoded =
        fun (Protocol) ->
                fun (C0) ->
                        C1 = rabbit_binary_generator:ensure_content_encoded(
                               C0, Protocol),
                        true = C1#content.properties_bin =/= none,
                        C1
                end
        end,
    %% Beyond the assertions in Ensure*, the only testable guarantee
    %% is that the operations should never fail.
    %%
    %% If we were using quickcheck we'd simply stuff all the above
    %% into a generator for sequences of operations. In the absence of
    %% quickcheck we pick particularly interesting sequences that:
    %%
    %% - execute every op twice since they are idempotent
    %% - invoke clear_decoded, clear_encoded, decode and transcode
    %%   with one or both of decoded and encoded content present
    [begin
         sequence_with_content([Op]),
         sequence_with_content([ClearEncoded, Op]),
         sequence_with_content([ClearDecoded, Op])
     end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
                   EnsureEncoded(rabbit_framing_amqp_0_9_1),
                   EnsureEncoded(rabbit_framing_amqp_0_8)]],
    passed.
+
%% Apply each op in Sequence twice in turn (they are expected to be
%% idempotent) to a freshly-encoded empty 0-9-1 content record.
sequence_with_content(Sequence) ->
    lists:foldl(fun (F, V) -> F(F(V)) end,
                rabbit_binary_generator:ensure_content_encoded(
                  rabbit_basic:build_content(#'P_basic'{}, <<>>),
                  rabbit_framing_amqp_0_9_1),
                Sequence).
+
%% End-to-end check of the topic exchange trie: create an exchange, add
%% 28 bindings covering '*'/'#' wildcard combinations (including an
%% argument-carrying binding and a deliberately aliased duplicate for
%% t27), verify routing for a range of keys, remove a subset of the
%% bindings and re-verify, then delete the exchange and check nothing
%% routes any more.
test_topic_matching() ->
    XName = #resource{virtual_host = <<"/">>,
                      kind = exchange,
                      name = <<"test_exchange">>},
    X0 = #exchange{name = XName, type = topic, durable = false,
                   auto_delete = false, arguments = []},
    X = rabbit_exchange_decorator:set(X0),
    %% create
    rabbit_exchange_type_topic:validate(X),
    exchange_op_callback(X, create, []),

    %% add some bindings
    Bindings = [#binding{source = XName,
                         key = list_to_binary(Key),
                         destination = #resource{virtual_host = <<"/">>,
                                                 kind = queue,
                                                 name = list_to_binary(Q)},
                         args = Args} ||
                   {Key, Q, Args} <- [{"a.b.c",         "t1",  []},
                                      {"a.*.c",         "t2",  []},
                                      {"a.#.b",         "t3",  []},
                                      {"a.b.b.c",       "t4",  []},
                                      {"#",             "t5",  []},
                                      {"#.#",           "t6",  []},
                                      {"#.b",           "t7",  []},
                                      {"*.*",           "t8",  []},
                                      {"a.*",           "t9",  []},
                                      {"*.b.c",         "t10", []},
                                      {"a.#",           "t11", []},
                                      {"a.#.#",         "t12", []},
                                      {"b.b.c",         "t13", []},
                                      {"a.b.b",         "t14", []},
                                      {"a.b",           "t15", []},
                                      {"b.c",           "t16", []},
                                      {"",              "t17", []},
                                      {"*.*.*",         "t18", []},
                                      {"vodka.martini", "t19", []},
                                      {"a.b.c",         "t20", []},
                                      {"*.#",           "t21", []},
                                      {"#.*.#",         "t22", []},
                                      {"*.#.#",         "t23", []},
                                      {"#.#.#",         "t24", []},
                                      {"*",             "t25", []},
                                      {"#.b.#",         "t26", []},
                                      {"args-test",     "t27",
                                       [{<<"foo">>, longstr, <<"bar">>}]},
                                      {"args-test",     "t27", %% Note aliasing
                                       [{<<"foo">>, longstr, <<"baz">>}]}]],
    lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
                  Bindings),

    %% test some matches
    test_topic_expect_match(
      X, [{"a.b.c",               ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
                                   "t18", "t20", "t21", "t22", "t23", "t24",
                                   "t26"]},
          {"a.b",                 ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
                                   "t12", "t15", "t21", "t22", "t23", "t24",
                                   "t26"]},
          {"a.b.b",               ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
                                   "t18", "t21", "t22", "t23", "t24", "t26"]},
          {"",                    ["t5", "t6", "t17", "t24"]},
          {"b.c.c",               ["t5", "t6", "t18", "t21", "t22", "t23",
                                   "t24", "t26"]},
          {"a.a.a.a.a",           ["t5", "t6", "t11", "t12", "t21", "t22",
                                   "t23", "t24"]},
          {"vodka.gin",           ["t5", "t6", "t8", "t21", "t22", "t23",
                                   "t24"]},
          {"vodka.martini",       ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
                                   "t24"]},
          {"b.b.c",               ["t5", "t6", "t10", "t13", "t18", "t21",
                                   "t22", "t23", "t24", "t26"]},
          {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
          {"oneword",             ["t5", "t6", "t21", "t22", "t23", "t24",
                                   "t25"]},
          {"args-test",           ["t5", "t6", "t21", "t22", "t23", "t24",
                                   "t25", "t27"]}]),
    %% remove some bindings
    RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
                       lists:nth(11, Bindings), lists:nth(19, Bindings),
                       lists:nth(21, Bindings), lists:nth(28, Bindings)],
    exchange_op_callback(X, remove_bindings, [RemovedBindings]),
    RemainingBindings = ordsets:to_list(
                          ordsets:subtract(ordsets:from_list(Bindings),
                                           ordsets:from_list(RemovedBindings))),

    %% test some matches
    test_topic_expect_match(
      X,
      [{"a.b.c",               ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
                                "t23", "t24", "t26"]},
       {"a.b",                 ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
                                "t22", "t23", "t24", "t26"]},
       {"a.b.b",               ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
                                "t23", "t24", "t26"]},
       {"",                    ["t6", "t17", "t24"]},
       {"b.c.c",               ["t6", "t18", "t22", "t23", "t24", "t26"]},
       {"a.a.a.a.a",           ["t6", "t12", "t22", "t23", "t24"]},
       {"vodka.gin",           ["t6", "t8", "t22", "t23", "t24"]},
       {"vodka.martini",       ["t6", "t8", "t22", "t23", "t24"]},
       {"b.b.c",               ["t6", "t10", "t13", "t18", "t22", "t23",
                                "t24", "t26"]},
       {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
       {"oneword",             ["t6", "t22", "t23", "t24", "t25"]},
       {"args-test",           ["t6", "t22", "t23", "t24", "t25", "t27"]}]),

    %% remove the entire exchange
    exchange_op_callback(X, delete, [RemainingBindings]),
    %% none should match now
    test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
    passed.
+
+%% Invoke an exchange-type callback Fun twice, the way the broker does:
+%% once inside an mnesia transaction (phase 'transaction') and once
+%% outside it (phase 'none'), with X prepended to the argument list.
+exchange_op_callback(X, Fun, Args) ->
+    CallbackArgs = [X | Args],
+    rabbit_misc:execute_mnesia_transaction(
+      fun () -> rabbit_exchange:callback(X, Fun, transaction, CallbackArgs) end),
+    rabbit_exchange:callback(X, Fun, none, CallbackArgs).
+
+%% For each {Key, Expected} pair, route a message with routing key Key
+%% through topic exchange X and assert that the routed destinations are
+%% exactly the queues named in Expected (order-insensitive).
+test_topic_expect_match(X, List) ->
+    [begin
+         RoutingKey = list_to_binary(Key),
+         Msg = rabbit_basic:message(X#exchange.name, RoutingKey,
+                                    #'P_basic'{}, <<>>),
+         Routed = rabbit_exchange_type_topic:route(
+                    X, #delivery{mandatory = false,
+                                 sender    = self(),
+                                 message   = Msg}),
+         Wanted = [#resource{virtual_host = <<"/">>,
+                             kind         = queue,
+                             name         = list_to_binary(QName)}
+                   || QName <- Expected],
+         %% compare as sorted sets: routing order is unspecified
+         true = (lists:usort(Wanted) =:= lists:usort(Routed))
+     end || {Key, Expected} <- List],
+    ok.
+
+%% Starting, stopping and diagnostics via rabbitmqctl actions. Note that
+%% we don't try 'report' while the rabbit app is stopped, that stop_app
+%% and start_app are deliberately issued twice (they must be idempotent),
+%% and that tracing is enabled for the duration of this function.
+test_app_management() ->
+    control_action(wait, [rabbit_mnesia:dir() ++ ".pid"]),
+    [ok = control_action(Command, []) ||
+        Command <- [trace_on,
+                    stop_app, stop_app, status, cluster_status, environment,
+                    start_app, start_app, status, report, cluster_status,
+                    environment,
+                    trace_off]],
+    passed.
+
+%% Exercise the 'rotate_logs' ctl action: plain reopening, rotation with
+%% a suffix, rotation of empty/missing/non-writable files, and rotation
+%% while logging goes to tty or is disabled. Restores the file-based
+%% handlers and application env before returning.
+test_log_management() ->
+ MainLog = rabbit:log_location(kernel),
+ SaslLog = rabbit:log_location(sasl),
+ Suffix = ".1",
+
+ %% prepare basic logs
+ file:delete([MainLog, Suffix]),
+ file:delete([SaslLog, Suffix]),
+
+ %% simple logs reopening
+ ok = control_action(rotate_logs, []),
+ [true, true] = empty_files([MainLog, SaslLog]),
+ ok = test_logs_working(MainLog, SaslLog),
+
+ %% simple log rotation
+ ok = control_action(rotate_logs, [Suffix]),
+ [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
+ [true, true] = empty_files([MainLog, SaslLog]),
+ ok = test_logs_working(MainLog, SaslLog),
+
+ %% reopening logs with log rotation performed first
+ ok = clean_logs([MainLog, SaslLog], Suffix),
+ ok = control_action(rotate_logs, []),
+ ok = file:rename(MainLog, [MainLog, Suffix]),
+ ok = file:rename(SaslLog, [SaslLog, Suffix]),
+ ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]),
+ ok = control_action(rotate_logs, []),
+ ok = test_logs_working(MainLog, SaslLog),
+
+ %% log rotation on empty files (the main log will have a ctl action logged)
+ ok = clean_logs([MainLog, SaslLog], Suffix),
+ ok = control_action(rotate_logs, []),
+ ok = control_action(rotate_logs, [Suffix]),
+ [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
+
+ %% logs with suffix are not writable
+ ok = control_action(rotate_logs, [Suffix]),
+ ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]),
+ ok = control_action(rotate_logs, [Suffix]),
+ ok = test_logs_working(MainLog, SaslLog),
+
+ %% rotate when original log files are not writable
+ ok = make_files_non_writable([MainLog, SaslLog]),
+ ok = control_action(rotate_logs, []),
+
+ %% logging directed to tty (first, remove handlers)
+ ok = delete_log_handlers([rabbit_sasl_report_file_h,
+ rabbit_error_logger_file_h]),
+ ok = clean_logs([MainLog, SaslLog], Suffix),
+ ok = application:set_env(rabbit, sasl_error_logger, tty),
+ ok = application:set_env(rabbit, error_logger, tty),
+ ok = control_action(rotate_logs, []),
+ %% with tty logging no files should have been recreated
+ [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
+
+ %% rotate logs when logging is turned off
+ ok = application:set_env(rabbit, sasl_error_logger, false),
+ ok = application:set_env(rabbit, error_logger, silent),
+ ok = control_action(rotate_logs, []),
+ [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
+
+ %% cleanup
+ ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
+ ok = application:set_env(rabbit, error_logger, {file, MainLog}),
+ ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog},
+ {rabbit_sasl_report_file_h, SaslLog}]),
+ passed.
+
+%% Boot the rabbit app under various (mis)configured logging setups and
+%% check that startup either succeeds or fails with the expected
+%% failure_during_boot reason. Relies on a writable /tmp; restores
+%% handlers, env and permissions as it goes.
+test_log_management_during_startup() ->
+ MainLog = rabbit:log_location(kernel),
+ SaslLog = rabbit:log_location(sasl),
+
+ %% start application with simple tty logging
+ ok = control_action(stop_app, []),
+ ok = application:set_env(rabbit, error_logger, tty),
+ ok = application:set_env(rabbit, sasl_error_logger, tty),
+ ok = add_log_handlers([{error_logger_tty_h, []},
+ {sasl_report_tty_h, []}]),
+ ok = control_action(start_app, []),
+
+ %% start application with tty logging and
+ %% proper handlers not installed
+ ok = control_action(stop_app, []),
+ ok = error_logger:tty(false),
+ ok = delete_log_handlers([sasl_report_tty_h]),
+ ok = case catch control_action(start_app, []) of
+ ok -> exit({got_success_but_expected_failure,
+ log_rotation_tty_no_handlers_test});
+ {badrpc, {'EXIT', {rabbit,failure_during_boot,
+ {error,{cannot_log_to_tty,
+ _, not_installed}}}}} -> ok
+ end,
+
+ %% fix sasl logging
+ ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
+
+ %% start application with logging to non-existing directory
+ TmpLog = "/tmp/rabbit-tests/test.log",
+ delete_file(TmpLog),
+ ok = control_action(stop_app, []),
+ ok = application:set_env(rabbit, error_logger, {file, TmpLog}),
+
+ ok = delete_log_handlers([rabbit_error_logger_file_h]),
+ ok = add_log_handlers([{error_logger_file_h, MainLog}]),
+ %% missing parent directories are created, so this must succeed
+ ok = control_action(start_app, []),
+
+ %% start application with logging to directory with no
+ %% write permissions
+ ok = control_action(stop_app, []),
+ TmpDir = "/tmp/rabbit-tests",
+ ok = set_permissions(TmpDir, 8#00400),
+ ok = delete_log_handlers([rabbit_error_logger_file_h]),
+ ok = add_log_handlers([{error_logger_file_h, MainLog}]),
+ ok = case control_action(start_app, []) of
+ ok -> exit({got_success_but_expected_failure,
+ log_rotation_no_write_permission_dir_test});
+ {badrpc, {'EXIT',
+ {rabbit, failure_during_boot,
+ {error, {cannot_log_to_file, _, _}}}}} -> ok
+ end,
+
+ %% start application with logging to a subdirectory which
+ %% parent directory has no write permissions
+ ok = control_action(stop_app, []),
+ TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log",
+ ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}),
+ ok = add_log_handlers([{error_logger_file_h, MainLog}]),
+ ok = case control_action(start_app, []) of
+ ok -> exit({got_success_but_expected_failure,
+ log_rotatation_parent_dirs_test});
+ {badrpc,
+ {'EXIT', {rabbit,failure_during_boot,
+ {error, {cannot_log_to_file, _,
+ {error,
+ {cannot_create_parent_dirs, _, eacces}}}}}}} -> ok
+ end,
+ ok = set_permissions(TmpDir, 8#00700),
+ ok = set_permissions(TmpLog, 8#00600),
+ ok = delete_file(TmpLog),
+ ok = file:del_dir(TmpDir),
+
+ %% start application with standard error_logger_file_h
+ %% handler not installed
+ ok = control_action(stop_app, []),
+ ok = application:set_env(rabbit, error_logger, {file, MainLog}),
+ ok = control_action(start_app, []),
+
+ %% start application with standard sasl handler not installed
+ %% and rabbit main log handler installed correctly
+ ok = control_action(stop_app, []),
+ ok = delete_log_handlers([rabbit_sasl_report_file_h]),
+ ok = control_action(start_app, []),
+ passed.
+
+%% Unit-test rabbit_misc:parse_arguments/3: global vs command-specific
+%% flags and options, defaulting, duplicate-option precedence, and the
+%% corner cases where an option value "eats" a command or another flag.
+test_arguments_parser() ->
+ GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}],
+ Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}],
+
+ GetOptions =
+ fun (Args) ->
+ rabbit_misc:parse_arguments(Commands1, GlobalOpts1, Args)
+ end,
+
+ check_parse_arguments(no_command, GetOptions, []),
+ check_parse_arguments(no_command, GetOptions, ["foo", "bar"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}},
+ GetOptions, ["command1"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
+ GetOptions, ["command1", "-o1", "blah"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}},
+ GetOptions, ["command1", "-f1"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
+ GetOptions, ["-o1", "blah", "command1"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}},
+ GetOptions, ["-o1", "blah", "command1", "quux"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}},
+ GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]),
+ %% For duplicate flags, the last one counts
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}},
+ GetOptions, ["-o1", "first", "command1", "-o1", "second"]),
+ %% If the flag "eats" the command, the command won't be recognised
+ check_parse_arguments(no_command, GetOptions,
+ ["-o1", "command1", "quux"]),
+ %% If a flag eats another flag, the eaten flag won't be recognised
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}},
+ GetOptions, ["command1", "-o1", "-f1"]),
+
+ %% Now for some command-specific flags...
+ check_parse_arguments(
+ {ok, {command2, [{"-f1", false}, {"-f2", false},
+ {"-o1", "foo"}, {"-o2", "bar"}], []}},
+ GetOptions, ["command2"]),
+
+ check_parse_arguments(
+ {ok, {command2, [{"-f1", false}, {"-f2", true},
+ {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}},
+ GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]),
+
+ passed.
+
+%% Unit-test the mirror node selection (suggested_queue_nodes/5) of the
+%% modules behind the <<"all">>, <<"nodes">> and <<"exactly">> HA
+%% policies. Each case asserts the expected new master, expected slaves,
+%% and how many extra (arbitrarily chosen) slaves are allowed.
+test_dynamic_mirroring() ->
+ %% Just unit tests of the node selection logic, see multi node
+ %% tests for the rest...
+ Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
+ {MNode, SNodes, SSNodes}, All) ->
+ {ok, M} = rabbit_mirror_queue_misc:module(Policy),
+ {NewM, NewSs0} = M:suggested_queue_nodes(
+ Params, MNode, SNodes, SSNodes, All),
+ NewSs1 = lists:sort(NewSs0),
+ case dm_list_match(NewSs, NewSs1, ExtraSs) of
+ ok -> ok;
+ error -> exit({no_match, NewSs, NewSs1, ExtraSs})
+ end
+ end,
+
+ Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]),
+ Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
+ Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]),
+
+ %% policy params name nodes as binaries
+ N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
+
+ %% Add a node
+ Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
+ Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
+ %% Add two nodes and drop one
+ Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
+ %% Don't try to include nodes that are not running
+ Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
+ %% If we can't find any of the nodes listed then just keep the master
+ Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
+ %% And once that's happened, still keep the master even when not listed,
+ %% if nothing is synced
+ Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]),
+ Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]),
+ %% But if something is synced we can lose the master - but make
+ %% sure we pick the new master from the nodes which are synced!
+ Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]),
+ Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]),
+
+ Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]),
+ Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]),
+ Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]),
+ Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]),
+ Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
+ Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
+
+ passed.
+
+%% Does the first list match the second where the second is required
+%% to have exactly Extra superfluous items? Returns ok | error.
+dm_list_match([], [], 0) ->
+    ok;
+dm_list_match([Same | RestExpected], [Same | RestActual], Extra) ->
+    %% heads agree: consume both
+    dm_list_match(RestExpected, RestActual, Extra);
+dm_list_match(Expected, [_Superfluous | RestActual], Extra) when Extra > 0 ->
+    %% heads differ: spend one of the allowed superfluous items
+    dm_list_match(Expected, RestActual, Extra - 1);
+dm_list_match(_Expected, _Actual, _Extra) ->
+    error.
+
+%% Exercise user/vhost/permission ctl actions: expected failures for
+%% unknown users/vhosts, user creation and password changes, tag
+%% management, vhost lifecycle (including deleting a vhost that still
+%% contains a queue), and user deletion.
+test_user_management() ->
+
+ %% lots of stuff that should fail
+ {error, {no_such_user, _}} =
+ control_action(delete_user, ["foo"]),
+ {error, {no_such_user, _}} =
+ control_action(change_password, ["foo", "baz"]),
+ {error, {no_such_vhost, _}} =
+ control_action(delete_vhost, ["/testhost"]),
+ {error, {no_such_user, _}} =
+ control_action(set_permissions, ["foo", ".*", ".*", ".*"]),
+ {error, {no_such_user, _}} =
+ control_action(clear_permissions, ["foo"]),
+ {error, {no_such_user, _}} =
+ control_action(list_user_permissions, ["foo"]),
+ {error, {no_such_vhost, _}} =
+ control_action(list_permissions, [], [{"-p", "/testhost"}]),
+ {error, {invalid_regexp, _, _}} =
+ control_action(set_permissions, ["guest", "+foo", ".*", ".*"]),
+ {error, {no_such_user, _}} =
+ control_action(set_user_tags, ["foo", "bar"]),
+
+ %% user creation
+ ok = control_action(add_user, ["foo", "bar"]),
+ {error, {user_already_exists, _}} =
+ control_action(add_user, ["foo", "bar"]),
+ ok = control_action(clear_password, ["foo"]),
+ ok = control_action(change_password, ["foo", "baz"]),
+
+ %% set tags, then check they round-trip through the internal backend
+ TestTags = fun (Tags) ->
+ Args = ["foo" | [atom_to_list(T) || T <- Tags]],
+ ok = control_action(set_user_tags, Args),
+ {ok, #internal_user{tags = Tags}} =
+ rabbit_auth_backend_internal:lookup_user(<<"foo">>),
+ ok = control_action(list_users, [])
+ end,
+ TestTags([foo, bar, baz]),
+ TestTags([administrator]),
+ TestTags([]),
+
+ %% vhost creation
+ ok = control_action(add_vhost, ["/testhost"]),
+ {error, {vhost_already_exists, _}} =
+ control_action(add_vhost, ["/testhost"]),
+ ok = control_action(list_vhosts, []),
+
+ %% user/vhost mapping (setting permissions is idempotent)
+ ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
+ [{"-p", "/testhost"}]),
+ ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
+ [{"-p", "/testhost"}]),
+ ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
+ [{"-p", "/testhost"}]),
+ ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
+ ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
+ ok = control_action(list_user_permissions, ["foo"]),
+
+ %% user/vhost unmapping (clearing is idempotent too)
+ ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
+ ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
+
+ %% vhost deletion
+ ok = control_action(delete_vhost, ["/testhost"]),
+ {error, {no_such_vhost, _}} =
+ control_action(delete_vhost, ["/testhost"]),
+
+ %% deleting a populated vhost
+ ok = control_action(add_vhost, ["/testhost"]),
+ ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
+ [{"-p", "/testhost"}]),
+ {new, _} = rabbit_amqqueue:declare(
+ rabbit_misc:r(<<"/testhost">>, queue, <<"test">>),
+ true, false, [], none),
+ ok = control_action(delete_vhost, ["/testhost"]),
+
+ %% user deletion
+ ok = control_action(delete_user, ["foo"]),
+ {error, {no_such_user, _}} =
+ control_action(delete_user, ["foo"]),
+
+ passed.
+
+%% Set/clear runtime parameters for the registered 'test' component:
+%% valid JSON values, invalid JSON, the component's validation hook, and
+%% clearing a parameter after its component has been unregistered.
+test_runtime_parameters() ->
+ rabbit_runtime_parameters_test:register(),
+ Good = fun(L) -> ok = control_action(set_parameter, L) end,
+ Bad = fun(L) -> {error_string, _} = control_action(set_parameter, L) end,
+
+ %% Acceptable for bijection
+ Good(["test", "good", "\"ignore\""]),
+ Good(["test", "good", "123"]),
+ Good(["test", "good", "true"]),
+ Good(["test", "good", "false"]),
+ Good(["test", "good", "null"]),
+ Good(["test", "good", "{\"key\": \"value\"}"]),
+
+ %% Invalid json
+ Bad(["test", "good", "atom"]),
+ Bad(["test", "good", "{\"foo\": \"bar\""]),
+ Bad(["test", "good", "{foo: \"bar\"}"]),
+
+ %% Test actual validation hook
+ Good(["test", "maybe", "\"good\""]),
+ Bad(["test", "maybe", "\"bad\""]),
+ Good(["test", "admin", "\"ignore\""]), %% ctl means 'user' -> none
+
+ ok = control_action(list_parameters, []),
+
+ ok = control_action(clear_parameter, ["test", "good"]),
+ ok = control_action(clear_parameter, ["test", "maybe"]),
+ ok = control_action(clear_parameter, ["test", "admin"]),
+ {error_string, _} =
+ control_action(clear_parameter, ["test", "neverexisted"]),
+
+ %% We can delete for a component that no longer exists
+ Good(["test", "good", "\"ignore\""]),
+ rabbit_runtime_parameters_test:unregister(),
+ ok = control_action(clear_parameter, ["test", "good"]),
+ passed.
+
+%% Check that set_policy runs policy definitions through the registered
+%% test validator ('testeven' wants an even-length list, 'testpos'
+%% positive values — per the assertions below).
+test_policy_validation() ->
+ rabbit_runtime_parameters_test:register_policy_validator(),
+ SetPol = fun (Key, Val) ->
+ control_action_opts(
+ ["set_policy", "name", ".*",
+ rabbit_misc:format("{\"~s\":~p}", [Key, Val])])
+ end,
+
+ ok = SetPol("testeven", []),
+ ok = SetPol("testeven", [1, 2]),
+ ok = SetPol("testeven", [1, 2, 3, 4]),
+ ok = SetPol("testpos", [2, 5, 5678]),
+
+ error = SetPol("testpos", [-1, 0, 1]),
+ error = SetPol("testeven", [ 1, 2, 3]),
+
+ ok = control_action(clear_policy, ["name"]),
+ rabbit_runtime_parameters_test:unregister_policy_validator(),
+ passed.
+
+%% Validate the command-line options of set_policy: --priority must be
+%% an integer, --apply-to one of the known targets, and --offline is
+%% rejected here.
+test_policy_opts_validation() ->
+ Set = fun (Extra) -> control_action_opts(
+ ["set_policy", "name", ".*", "{\"ha-mode\":\"all\"}"
+ | Extra]) end,
+ OK = fun (Extra) -> ok = Set(Extra) end,
+ Fail = fun (Extra) -> error = Set(Extra) end,
+
+ OK ([]),
+
+ OK (["--priority", "0"]),
+ OK (["--priority", "3"]),
+ Fail(["--priority", "banana"]),
+ Fail(["--priority"]),
+
+ OK (["--apply-to", "all"]),
+ OK (["--apply-to", "queues"]),
+ Fail(["--apply-to", "bananas"]),
+ Fail(["--apply-to"]),
+
+ OK (["--priority", "3", "--apply-to", "queues"]),
+ Fail(["--priority", "banana", "--apply-to", "queues"]),
+ Fail(["--priority", "3", "--apply-to", "bananas"]),
+
+ Fail(["--offline"]),
+
+ ok = control_action(clear_policy, ["name"]),
+ passed.
+
+%% Validate HA policy definitions: ha-params must match ha-mode
+%% ('nodes' wants a list of node names, 'exactly' an integer) and
+%% ha-params/ha-sync-mode are rejected without an ha-mode.
+test_ha_policy_validation() ->
+ Set = fun (JSON) -> control_action_opts(
+ ["set_policy", "name", ".*", JSON]) end,
+ OK = fun (JSON) -> ok = Set(JSON) end,
+ Fail = fun (JSON) -> error = Set(JSON) end,
+
+ OK ("{\"ha-mode\":\"all\"}"),
+ Fail("{\"ha-mode\":\"made_up\"}"),
+
+ Fail("{\"ha-mode\":\"nodes\"}"),
+ Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"),
+ Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"),
+ OK ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}"),
+ Fail("{\"ha-params\":[\"a\",\"b\"]}"),
+
+ Fail("{\"ha-mode\":\"exactly\"}"),
+ Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"),
+ OK ("{\"ha-mode\":\"exactly\",\"ha-params\":2}"),
+ Fail("{\"ha-params\":2}"),
+
+ OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}"),
+ OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}"),
+ Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"),
+ Fail("{\"ha-sync-mode\":\"manual\"}"),
+ Fail("{\"ha-sync-mode\":\"automatic\"}"),
+
+ ok = control_action(clear_policy, ["name"]),
+ passed.
+
+%% Smoke-test the list_*/status ctl actions against real objects: two
+%% queues with a consumer, a raw TCP connection speaking the AMQP
+%% header, a channel, plus set_vm_memory_high_watermark and 'eval'.
+test_server_status() ->
+ %% create a few things so there is some useful information to list
+ {_Writer, Limiter, Ch} = test_channel(),
+ [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>],
+ {new, Queue = #amqqueue{}} <-
+ [rabbit_amqqueue:declare(
+ rabbit_misc:r(<<"/">>, queue, Name),
+ false, false, [], none)]],
+ ok = rabbit_amqqueue:basic_consume(
+ Q, true, Ch, Limiter, false, 0, <<"ctag">>, true, [], undefined),
+
+ %% list queues
+ ok = info_action(list_queues, rabbit_amqqueue:info_keys(), true),
+
+ %% list exchanges
+ ok = info_action(list_exchanges, rabbit_exchange:info_keys(), true),
+
+ %% list bindings
+ ok = info_action(list_bindings, rabbit_binding:info_keys(), true),
+ %% misc binding listing APIs
+ [_|_] = rabbit_binding:list_for_source(
+ rabbit_misc:r(<<"/">>, exchange, <<"">>)),
+ [_] = rabbit_binding:list_for_destination(
+ rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
+ [_] = rabbit_binding:list_for_source_and_destination(
+ rabbit_misc:r(<<"/">>, exchange, <<"">>),
+ rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
+
+ %% list connections
+ {H, P} = find_listener(),
+ {ok, C} = gen_tcp:connect(H, P, []),
+ gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>),
+ %% NOTE(review): fixed sleep to let the connection register — racy on
+ %% a slow machine
+ timer:sleep(100),
+ ok = info_action(list_connections,
+ rabbit_networking:connection_info_keys(), false),
+ %% close_connection
+ [ConnPid] = rabbit_networking:connections(),
+ ok = control_action(close_connection, [rabbit_misc:pid_to_string(ConnPid),
+ "go away"]),
+
+ %% list channels
+ ok = info_action(list_channels, rabbit_channel:info_keys(), false),
+
+ %% list consumers
+ ok = control_action(list_consumers, []),
+
+ %% set vm memory high watermark
+ HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
+ ok = control_action(set_vm_memory_high_watermark, ["1"]),
+ ok = control_action(set_vm_memory_high_watermark, ["1.0"]),
+ %% this will trigger an alarm
+ ok = control_action(set_vm_memory_high_watermark, ["0.0"]),
+ %% reset
+ ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),
+
+ %% eval
+ {error_string, _} = control_action(eval, ["\""]),
+ {error_string, _} = control_action(eval, ["a("]),
+ ok = control_action(eval, ["a."]),
+
+ %% cleanup
+ [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
+
+ unlink(Ch),
+ ok = rabbit_channel:shutdown(Ch),
+
+ passed.
+
+%% Offer a selection of bad protocol headers (wrong version, wrong
+%% magic); each must be refused as checked by
+%% test_amqp_connection_refusal/1.
+test_amqp_connection_refusal() ->
+    BadHeaders = [<<"AMQP",9,9,9,9>>,
+                  <<"AMQP",0,1,0,0>>,
+                  <<"XXXX",0,0,9,1>>],
+    lists:foreach(
+      fun (Header) -> passed = test_amqp_connection_refusal(Header) end,
+      BadHeaders),
+    passed.
+
+%% Send Header to the local AMQP listener; on refusal the server must
+%% answer with its own supported protocol header <<"AMQP",0,0,9,1>>
+%% within 100ms before the socket is closed.
+test_amqp_connection_refusal(Header) ->
+ {H, P} = find_listener(),
+ {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
+ ok = gen_tcp:send(C, Header),
+ {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
+ ok = gen_tcp:close(C),
+ passed.
+
+%% Return {Host, Port} of the first active AMQP listener on this node;
+%% crashes (badmatch) if there is none.
+find_listener() ->
+ [#listener{host = H, port = P} | _] =
+ [L || L = #listener{node = N, protocol = amqp}
+ <- rabbit_networking:active_listeners(),
+ N =:= node()],
+ {H, P}.
+
+%% Minimal stand-in for a writer process: acknowledges gen_server-style
+%% 'flush' calls, forwards each {send_command, Method} payload to Pid,
+%% and terminates on 'shutdown'.
+test_writer(Pid) ->
+    receive
+        {'$gen_call', From, flush} ->
+            gen_server:reply(From, ok),
+            test_writer(Pid);
+        {send_command, Method} ->
+            Pid ! Method,
+            test_writer(Pid);
+        shutdown ->
+            ok
+    end.
+
+%% Start a rabbit_channel whose writer is a test_writer/1 process that
+%% forwards commands back to us, with a fresh limiter and the current
+%% process standing in for the reader/connection.
+%% Returns {WriterPid, LimiterPid, ChannelPid}.
+test_channel() ->
+ Me = self(),
+ Writer = spawn(fun () -> test_writer(Me) end),
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+ {ok, Ch} = rabbit_channel:start_link(
+ 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1,
+ user(<<"guest">>), <<"/">>, [], Me, Limiter),
+ {Writer, Limiter, Ch}.
+
+%% Open a channel via test_channel/0 and wait for channel.open_ok
+%% (throws after ?TIMEOUT). Returns {WriterPid, ChannelPid}.
+test_spawn() ->
+ {Writer, _Limiter, Ch} = test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ {Writer, Ch}.
+
+%% As test_spawn/0 but on Node, delegating to test_spawn_remote/0 via
+%% RPC (so the channel is not linked to the short-lived RPC process).
+test_spawn(Node) ->
+ rpc:call(Node, ?MODULE, test_spawn_remote, []).
+
+%% Spawn an arbitrary long lived process, so we don't end up linking
+%% the channel to the short-lived process (RPC, here) spun up by the
+%% RPC server.
+%% Returns the helper's {Writer, Ch}; throws after ?TIMEOUT.
+%% NOTE(review): the bare 'receive Res' matches ANY message in this
+%% process's mailbox, not just the helper's report — confirm callers
+%% cannot have other pending messages here.
+test_spawn_remote() ->
+ RPC = self(),
+ spawn(fun () ->
+ {Writer, Ch} = test_spawn(),
+ RPC ! {Writer, Ch},
+ link(Ch),
+ receive
+ _ -> ok
+ end
+ end),
+ receive Res -> Res
+ after ?TIMEOUT -> throw(failed_to_receive_result)
+ end.
+
+%% Build an administrator #user{} record (backed by the internal auth
+%% backend) for opening test channels as Username.
+user(Username) ->
+ #user{username = Username,
+ tags = [administrator],
+ auth_backend = rabbit_auth_backend_internal,
+ impl = #internal_user{username = Username,
+ tags = [administrator]}}.
+
+%% Publisher confirms: declare two durable queues bound to amq.direct,
+%% publish a persistent message with confirms enabled, crash one queue,
+%% and expect a basic.nack (and no stray basic.ack afterwards).
+test_confirms() ->
+ {_Writer, Ch} = test_spawn(),
+ DeclareBindDurableQueue =
+ fun() ->
+ rabbit_channel:do(Ch, #'queue.declare'{durable = true}),
+ receive #'queue.declare_ok'{queue = Q0} ->
+ rabbit_channel:do(Ch, #'queue.bind'{
+ queue = Q0,
+ exchange = <<"amq.direct">>,
+ routing_key = "magic" }),
+ receive #'queue.bind_ok'{} -> Q0
+ after ?TIMEOUT -> throw(failed_to_bind_queue)
+ end
+ after ?TIMEOUT -> throw(failed_to_declare_queue)
+ end
+ end,
+ %% Declare and bind two queues
+ QName1 = DeclareBindDurableQueue(),
+ QName2 = DeclareBindDurableQueue(),
+ %% Get the first one's pid (we'll crash it later)
+ {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
+ QPid1 = Q1#amqqueue.pid,
+ %% Enable confirms
+ rabbit_channel:do(Ch, #'confirm.select'{}),
+ receive
+ #'confirm.select_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_enable_confirms)
+ end,
+ %% Publish a message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
+ routing_key = "magic"
+ },
+ rabbit_basic:build_content(
+ #'P_basic'{delivery_mode = 2}, <<"">>)),
+ %% We must not kill the queue before the channel has processed the
+ %% 'publish'.
+ ok = rabbit_channel:flush(Ch),
+ %% Crash the queue
+ QPid1 ! boom,
+ %% Wait for a nack
+ receive
+ #'basic.nack'{} -> ok;
+ #'basic.ack'{} -> throw(received_ack_instead_of_nack)
+ after ?TIMEOUT-> throw(did_not_receive_nack)
+ end,
+ %% and make sure no ack sneaks in afterwards
+ receive
+ #'basic.ack'{} -> throw(received_ack_when_none_expected)
+ after 1000 -> ok
+ end,
+ %% Cleanup
+ rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}),
+ receive
+ #'queue.delete_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_cleanup_queue)
+ end,
+ unlink(Ch),
+ ok = rabbit_channel:shutdown(Ch),
+
+ passed.
+
+%% gen_server2:with_state/2 must expose the server's internal state;
+%% the file_handle_cache state record's tag is 'fhc_state'.
+test_with_state() ->
+ fhc_state = gen_server2:with_state(file_handle_cache,
+ fun (S) -> element(1, S) end),
+ passed.
+
+%% gen_server2:mcall/1 against a mix of targets — live, already dead,
+%% crashing-on-first-use, never registered (locally and globally), and
+%% on an unreachable node. The good and bad result sets must partition
+%% exactly as expected.
+test_mcall() ->
+ P1 = spawn(fun gs2_test_listener/0),
+ register(foo, P1),
+ global:register_name(gfoo, P1),
+
+ P2 = spawn(fun() -> exit(bang) end),
+ %% ensure P2 is dead (ignore the race setting up the monitor)
+ await_exit(P2),
+
+ P3 = spawn(fun gs2_test_crasher/0),
+
+ %% since P2 crashes almost immediately and P3 after receiving its first
+ %% message, we have to spawn a few more processes to handle the additional
+ %% cases we're interested in here
+ register(baz, spawn(fun gs2_test_crasher/0)),
+ register(bog, spawn(fun gs2_test_crasher/0)),
+ global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
+
+ NoNode = rabbit_nodes:make("nonode"),
+
+ Targets =
+ %% pids
+ [P1, P2, P3]
+ ++
+ %% registered names
+ [foo, bar, baz]
+ ++
+ %% {Name, Node} pairs
+ [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
+ ++
+ %% {global, Name}
+ [{global, gfoo}, {global, gbar}, {global, gbaz}],
+
+ GoodResults = [{D, goodbye} || D <- [P1, foo,
+ {foo, node()},
+ {global, gfoo}]],
+
+ BadResults = [{P2, noproc}, % died before use
+ {P3, boom}, % died on first use
+ {bar, noproc}, % never registered
+ {baz, boom}, % died on first use
+ {{bar, node()}, noproc}, % never registered
+ {{bog, node()}, boom}, % died on first use
+ {{foo, NoNode}, nodedown}, % invalid node
+ {{global, gbar}, noproc}, % never registered globally
+ {{global, gbaz}, boom}], % died on first use
+
+ {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
+ true = lists:sort(Replies) == lists:sort(GoodResults),
+ true = lists:sort(Errors) == lists:sort(BadResults),
+
+ %% cleanup (ignore the race setting up the monitor)
+ P1 ! stop,
+ await_exit(P1),
+ passed.
+
+%% Block until Pid has terminated (for any reason), using a monitor.
+%% Safe to call on an already-dead pid: the monitor fires immediately.
+await_exit(Pid) ->
+    Ref = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', Ref, process, _Object, _Reason} -> ok
+    end.
+
+%% Helper process body: exits with reason 'boom' on its first
+%% gen_server-style 'hello' call (used by test_mcall to exercise the
+%% died-on-first-use error path).
+gs2_test_crasher() ->
+ receive
+ {'$gen_call', _From, hello} -> exit(boom)
+ end.
+
+%% Helper process body: replies 'goodbye' to gen_server-style 'hello'
+%% calls until it receives 'stop'.
+gs2_test_listener() ->
+ receive
+ {'$gen_call', From, hello} ->
+ gen_server2:reply(From, goodbye),
+ gs2_test_listener();
+ stop ->
+ ok
+ end.
+
+%% Relay loop: forward every message this process receives on to Pid,
+%% forever (terminated externally).
+test_statistics_event_receiver(Pid) ->
+    receive
+        Anything ->
+            Pid ! Anything,
+            test_statistics_event_receiver(Pid)
+    end.
+
+%% Flush the channel (so earlier casts have been processed), prompt it
+%% to emit stats, then wait for a channel_stats event whose props
+%% satisfy Matcher.
+test_statistics_receive_event(Ch, Matcher) ->
+ rabbit_channel:flush(Ch),
+ Ch ! emit_stats,
+ test_statistics_receive_event1(Ch, Matcher).
+
+%% Wait (up to ?TIMEOUT) for a channel_stats event whose props satisfy
+%% Matcher; non-matching channel_stats events are skipped. Returns the
+%% matching props.
+test_statistics_receive_event1(Ch, Matcher) ->
+ receive #event{type = channel_stats, props = Props} ->
+ case Matcher(Props) of
+ true -> Props;
+ _ -> test_statistics_receive_event1(Ch, Matcher)
+ end
+ after ?TIMEOUT -> throw(failed_to_receive_event)
+ end.
+
+%% Fine-grained channel statistics: stats start empty, a publish+get is
+%% reflected in the per-queue/per-exchange stats, and deleting the
+%% queue removes the queue-related entries again.
+test_statistics() ->
+ application:set_env(rabbit, collect_statistics, fine),
+
+ %% ATM this just tests the queue / exchange stats in channels. That's
+ %% by far the most complex code though.
+
+ %% Set up a channel and queue
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{}),
+ QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ QRes = rabbit_misc:r(<<"/">>, queue, QName),
+ X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
+
+ rabbit_tests_event_receiver:start(self(), [node()], [channel_stats]),
+
+ %% Check stats empty
+ Event = test_statistics_receive_event(Ch, fun (_) -> true end),
+ [] = proplists:get_value(channel_queue_stats, Event),
+ [] = proplists:get_value(channel_exchange_stats, Event),
+ [] = proplists:get_value(channel_queue_exchange_stats, Event),
+
+ %% Publish and get a message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
+
+ %% Check the stats reflect that
+ Event2 = test_statistics_receive_event(
+ Ch,
+ fun (E) ->
+ length(proplists:get_value(
+ channel_queue_exchange_stats, E)) > 0
+ end),
+ [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats, Event2),
+ [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2),
+ [{{QRes,X},[{publish,1}]}] =
+ proplists:get_value(channel_queue_exchange_stats, Event2),
+
+ %% Check the stats remove stuff on queue deletion
+ rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+ Event3 = test_statistics_receive_event(
+ Ch,
+ fun (E) ->
+ length(proplists:get_value(
+ channel_queue_exchange_stats, E)) == 0
+ end),
+
+ [] = proplists:get_value(channel_queue_stats, Event3),
+ [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3),
+ [] = proplists:get_value(channel_queue_exchange_stats, Event3),
+
+ rabbit_channel:shutdown(Ch),
+ rabbit_tests_event_receiver:stop(),
+ passed.
+
+%% channel_created / queue_created events must be emitted both on
+%% creation and again on rabbit:force_event_refresh/1, for channels on
+%% this node, channels on SecondaryNode, and queues.
+test_refresh_events(SecondaryNode) ->
+ rabbit_tests_event_receiver:start(self(), [node(), SecondaryNode],
+ [channel_created, queue_created]),
+
+ {_Writer, Ch} = test_spawn(),
+ expect_events(pid, Ch, channel_created),
+ rabbit_channel:shutdown(Ch),
+
+ {_Writer2, Ch2} = test_spawn(SecondaryNode),
+ expect_events(pid, Ch2, channel_created),
+ rabbit_channel:shutdown(Ch2),
+
+ {new, #amqqueue{name = QName} = Q} =
+ rabbit_amqqueue:declare(test_queue(), false, false, [], none),
+ expect_events(name, QName, queue_created),
+ rabbit_amqqueue:delete(Q, false, false),
+
+ rabbit_tests_event_receiver:stop(),
+ passed.
+
+%% An event of Type keyed by {Tag, Key} must arrive once on its own and
+%% once more after a forced event refresh.
+expect_events(Tag, Key, Type) ->
+ expect_event(Tag, Key, Type),
+ rabbit:force_event_refresh(make_ref()),
+ expect_event(Tag, Key, Type).
+
+%% Wait (up to ?TIMEOUT) for an event of Type whose props contain Key
+%% under Tag; events of the same Type for other keys are skipped.
+expect_event(Tag, Key, Type) ->
+ receive #event{type = Type, props = Props} ->
+ case pget(Tag, Props) of
+ Key -> ok;
+ _ -> expect_event(Tag, Key, Type)
+ end
+ after ?TIMEOUT -> throw({failed_to_receive_event, Type})
+ end.
+
+%% delegate:invoke_no_result/2 must reach single pids and mixed
+%% local/remote pid lists; each responder sends us 'response', which we
+%% count with await_response/1.
+test_delegates_async(SecondaryNode) ->
+ Self = self(),
+ Sender = fun (Pid) -> Pid ! {invoked, Self} end,
+
+ Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
+
+ ok = delegate:invoke_no_result(spawn(Responder), Sender),
+ ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
+ await_response(2),
+
+ LocalPids = spawn_responders(node(), Responder, 10),
+ RemotePids = spawn_responders(SecondaryNode, Responder, 10),
+ ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender),
+ await_response(20),
+
+ passed.
+
+%% Wrap FMsg as a process body that handles exactly one message and
+%% then finishes; throws Throw (default 'timeout') if no message
+%% arrives within ?TIMEOUT.
+make_responder(FMsg) -> make_responder(FMsg, timeout).
+make_responder(FMsg, Throw) ->
+ fun () ->
+ receive Msg -> FMsg(Msg)
+ after ?TIMEOUT -> throw(Throw)
+ end
+ end.
+
+%% Start Count copies of Responder on Node; return their pids in spawn
+%% order.
+spawn_responders(Node, Responder, Count) ->
+    lists:map(fun (_Seq) -> spawn(Node, Responder) end,
+              lists:seq(1, Count)).
+
+%% Wait for Count 'response' messages, throwing 'timeout' if any fails
+%% to arrive within ?TIMEOUT.
+await_response(0) ->
+    ok;
+await_response(Count) ->
+    receive
+        response -> await_response(Count - 1)
+    after ?TIMEOUT -> throw(timeout)
+    end.
+
+%% Assert that Fun() terminates with an exit exception; if it returns
+%% normally, throw 'exit_not_thrown' instead. Other exception classes
+%% propagate unchanged.
+must_exit(Fun) ->
+    try Fun() of
+        _Result -> throw(exit_not_thrown)
+    catch
+        exit:_Reason -> ok
+    end.
+
+%% delegate:invoke/2 (synchronous): single and batched invocations on
+%% local and remote pids; senders that exit must surface as error
+%% results, and pids on nonexistent nodes must come back as nodedown.
+test_delegates_sync(SecondaryNode) ->
+ Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
+ BadSender = fun (_Pid) -> exit(exception) end,
+
+ Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end),
+
+ BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end, bad_responder_died),
+
+ response = delegate:invoke(spawn(Responder), Sender),
+ response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
+
+ must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
+ must_exit(fun () ->
+ delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
+
+ LocalGoodPids = spawn_responders(node(), Responder, 2),
+ RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
+ LocalBadPids = spawn_responders(node(), BadResponder, 2),
+ RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
+
+ %% all good pids reply, and exactly the pids we invoked are reported
+ {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
+ true = lists:all(fun ({_, response}) -> true end, GoodRes),
+ GoodResPids = [Pid || {Pid, _} <- GoodRes],
+
+ Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
+ Good = lists:usort(GoodResPids),
+
+ %% with a sender that exits, every target shows up as an error
+ {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
+ true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
+ BadResPids = [Pid || {Pid, _} <- BadRes],
+
+ Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
+ Bad = lists:usort(BadResPids),
+
+ %% pids on a nonexistent node all error with nodedown
+ MagicalPids = [rabbit_misc:string_to_pid(Str) ||
+ Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
+ {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
+ true = lists:all(
+ fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
+ BadNodes),
+ BadNodesPids = [Pid || {Pid, _} <- BadNodes],
+
+ Magical = lists:usort(MagicalPids),
+ Magical = lists:usort(BadNodesPids),
+
+ passed.
+
+%% Declare ?CLEANUP_QUEUE_NAME with default properties, restart the
+%% broker, then check a passive re-declare fails with NOT_FOUND --
+%% i.e. the queue is expected not to survive the restart.
+test_queue_cleanup(_SecondaryNode) ->
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
+ receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ rabbit_channel:shutdown(Ch),
+ rabbit:stop(),
+ rabbit:start(),
+ {_Writer2, Ch2} = test_spawn(),
+ rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
+ queue = ?CLEANUP_QUEUE_NAME }),
+ receive
+ #'channel.close'{reply_code = ?NOT_FOUND} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
+ end,
+ rabbit_channel:shutdown(Ch2),
+ passed.
+
+%% On the secondary node: declare a queue and kill its process. Then
+%% check that (a) a re-declare still reports {existing, ...} naming
+%% the dead pid, (b) the pid is indeed dead, and (c) a further
+%% declare yields a fresh, live queue that deletes cleanly.
+test_declare_on_dead_queue(SecondaryNode) ->
+ QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
+ Self = self(),
+ Pid = spawn(SecondaryNode,
+ fun () ->
+ {new, #amqqueue{name = QueueName, pid = QPid}} =
+ rabbit_amqqueue:declare(QueueName, false, false, [],
+ none),
+ exit(QPid, kill),
+ Self ! {self(), killed, QPid}
+ end),
+ receive
+ {Pid, killed, QPid} ->
+ {existing, #amqqueue{name = QueueName,
+ pid = QPid}} =
+ rabbit_amqqueue:declare(QueueName, false, false, [], none),
+ false = rabbit_misc:is_process_alive(QPid),
+ {new, Q} = rabbit_amqqueue:declare(QueueName, false, false, [],
+ none),
+ true = rabbit_misc:is_process_alive(Q#amqqueue.pid),
+ {ok, 0} = rabbit_amqqueue:delete(Q, false, false),
+ passed
+ after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
+ end.
+
+%%---------------------------------------------------------------------
+
+%% Run a rabbitmqctl command in-process. The 2- and 3-argument forms
+%% fill in the local node and/or merge NewOpts over the defaults.
+control_action(Command, Args) ->
+ control_action(Command, node(), Args, default_options()).
+
+control_action(Command, Args, NewOpts) ->
+ control_action(Command, node(), Args,
+ expand_options(default_options(), NewOpts)).
+
+%% NOTE(review): old-style 'catch' means an error inside the action
+%% comes back as an {'EXIT', ...} tuple in Other rather than crashing
+%% here; callers pattern-match on the returned term.
+control_action(Command, Node, Args, Opts) ->
+ case catch rabbit_control_main:action(
+ Command, Node, Args, Opts,
+ fun (Format, Args1) ->
+ io:format(Format ++ " ...~n", Args1)
+ end) of
+ ok ->
+ io:format("done.~n"),
+ ok;
+ Other ->
+ io:format("failed.~n"),
+ Other
+ end.
+
+%% Parse a raw rabbitmqctl argument list and run it against the local
+%% node, collapsing every outcome to ok | error.
+control_action_opts(Raw) ->
+ NodeStr = atom_to_list(node()),
+ case rabbit_control_main:parse_arguments(Raw, NodeStr) of
+ {ok, {Cmd, Opts, Args}} ->
+ case control_action(Cmd, node(), Args, Opts) of
+ ok -> ok;
+ _ -> error
+ end;
+ _ ->
+ error
+ end.
+
+%% Common checks for the list_*/ *_info commands: run with no
+%% arguments (and, when CheckVHost is set, with an explicit vhost),
+%% run with every listed info key, and verify that an unknown key is
+%% rejected with {bad_argument, dummy}.
+info_action(Command, Args, CheckVHost) ->
+ ok = control_action(Command, []),
+ if CheckVHost -> ok = control_action(Command, [], ["-p", "/"]);
+ true -> ok
+ end,
+ ok = control_action(Command, lists:map(fun atom_to_list/1, Args)),
+ {bad_argument, dummy} = control_action(Command, ["dummy"]),
+ ok.
+
+%% Options implied by every control_action in these tests: vhost "/"
+%% and non-quiet output.
+default_options() ->
+ [{"-p", "/"},
+ {"-q", "false"}].
+
+%% Merge the defaults As into the option list Bs, left to right: any
+%% default whose key is not already present is prepended; entries
+%% already in the accumulated list always win.
+expand_options([], Opts) ->
+ Opts;
+expand_options([{Key, _} = Default | Rest], Opts) ->
+ case proplists:is_defined(Key, Opts) of
+ true -> expand_options(Rest, Opts);
+ false -> expand_options(Rest, [Default | Opts])
+ end.
+
+%% Check that parsing As with Fun yields ExpRes, ignoring the order
+%% of the key-value options; crashes with badmatch on a mismatch.
+check_parse_arguments(ExpRes, Fun, As) ->
+ Canonical = fun (no_command) ->
+ no_command;
+ ({ok, {Cmd, Opts, Args}}) ->
+ {ok, {Cmd, lists:sort(Opts), Args}}
+ end,
+ true = Canonical(ExpRes) =:= Canonical(Fun(As)).
+
+%% For each file: true if it exists and is empty, false if non-empty,
+%% or the file:read_file_info/1 error tuple if it is unreadable.
+empty_files(Files) ->
+ [case file:read_file_info(File) of
+ {ok, FInfo} -> FInfo#file_info.size == 0;
+ Error -> Error
+ end || File <- Files].
+
+%% Inverse of empty_files/1: true for non-empty files; error tuples
+%% are passed through unchanged.
+non_empty_files(Files) ->
+ [case EmptyFile of
+ {error, Reason} -> {error, Reason};
+ _ -> not(EmptyFile)
+ end || EmptyFile <- empty_files(Files)].
+
+%% Emit one line via rabbit_log and one via error_logger, then check
+%% that both log files are non-empty. The sleep gives the
+%% asynchronous log handlers time to flush to disk.
+test_logs_working(MainLogFile, SaslLogFile) ->
+ ok = rabbit_log:error("foo bar"),
+ ok = error_logger:error_report(crash_report, [foo, bar]),
+ %% give the error loggers some time to catch up
+ timer:sleep(100),
+ [true, true] = non_empty_files([MainLogFile, SaslLogFile]),
+ ok.
+
+%% chmod-like helper: rewrite the mode bits of Path, preserving the
+%% rest of its #file_info{}; returns the read error if Path cannot be
+%% stat'ed.
+set_permissions(Path, Mode) ->
+ case file:read_file_info(Path) of
+ {ok, FInfo} -> file:write_file_info(
+ Path,
+ FInfo#file_info{mode=Mode});
+ Error -> Error
+ end.
+
+%% Remove each log file and its rotated variant (File ++ Suffix);
+%% already-missing files are tolerated by delete_file/1.
+clean_logs(Files, Suffix) ->
+ lists:foreach(
+ fun (File) ->
+ ok = delete_file(File),
+ ok = delete_file([File, Suffix])
+ end, Files),
+ ok.
+
+%% Exit unless the local node is a RAM node according to
+%% rabbit_mnesia:node_type/0.
+assert_ram_node() ->
+ case rabbit_mnesia:node_type() of
+ disc -> exit('not_ram_node');
+ ram -> ok
+ end.
+
+%% Exit unless the local node is a disc node.
+assert_disc_node() ->
+ case rabbit_mnesia:node_type() of
+ disc -> ok;
+ ram -> exit('not_disc_node')
+ end.
+
+%% Delete File, treating "already absent" (enoent) as success; any
+%% other error tuple from file:delete/1 is passed back to the caller.
+delete_file(File) ->
+ case file:delete(File) of
+ ok -> ok;
+ {error, enoent} -> ok;
+ {error, _} = Error -> Error
+ end.
+
+%% Clear every permission bit (mode 0) on each file.
+make_files_non_writable(Files) ->
+ [ok = file:write_file_info(File, #file_info{mode=0}) ||
+ File <- Files],
+ ok.
+
+%% Install each {Handler, Args} pair as an error_logger report
+%% handler.
+add_log_handlers(Handlers) ->
+ [ok = error_logger:add_report_handler(Handler, Args) ||
+ {Handler, Args} <- Handlers],
+ ok.
+
+%% Remove the handlers again; the '[] =' match asserts each handler
+%% was actually installed and its terminate returned [].
+delete_log_handlers(Handlers) ->
+ [[] = error_logger:delete_report_handler(Handler) ||
+ Handler <- Handlers],
+ ok.
+
+%% Delegated entirely to test_sup; see that module for the actual
+%% delayed-restart assertions.
+test_supervisor_delayed_restart() ->
+ test_sup:test_supervisor_delayed_restart().
+
+%% Stress the file_handle_cache under a tiny handle limit: a copy
+%% with just one spare handle, then a process killed while blocked
+%% waiting for a handle (exercising the fhc's tidy-up of its pending
+%% queue). The original limit is restored at the end.
+test_file_handle_cache() ->
+ %% test copying when there is just one spare handle
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
+ TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+ ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
+ [Src1, Dst1, Src2, Dst2] = Files =
+ [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
+ Content = <<"foo">>,
+ CopyFun = fun (Src, Dst) ->
+ {ok, Hdl} = prim_file:open(Src, [binary, write]),
+ ok = prim_file:write(Hdl, Content),
+ ok = prim_file:sync(Hdl),
+ prim_file:close(Hdl),
+
+ {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
+ {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
+ Size = size(Content),
+ {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
+ ok = file_handle_cache:delete(SrcHdl),
+ ok = file_handle_cache:delete(DstHdl)
+ end,
+ Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
+ filename:join(TmpDir, "file5"),
+ [write], []),
+ receive {next, Pid1} -> Pid1 ! {next, self()} end,
+ file_handle_cache:delete(Hdl),
+ %% This will block and never return, so we
+ %% exercise the fhc tidying up the pending
+ %% queue on the death of a process.
+ ok = CopyFun(Src1, Dst1)
+ end),
+ ok = CopyFun(Src1, Dst1),
+ ok = file_handle_cache:set_limit(2),
+ Pid ! {next, self()},
+ receive {next, Pid} -> ok end,
+ timer:sleep(100),
+ Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
+ timer:sleep(100),
+ erlang:monitor(process, Pid),
+ erlang:monitor(process, Pid1),
+ exit(Pid, kill),
+ exit(Pid1, kill),
+ receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
+ receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
+ [file:delete(File) || File <- Files],
+ ok = file_handle_cache:set_limit(Limit),
+ passed.
+
+%% Run the backing-queue battery, but only when the configured
+%% backing queue module is rabbit_variable_queue. Temporarily shrinks
+%% the msg_store file size limit and the queue_index journal limit so
+%% the tests hit rollover paths quickly, restoring both afterwards.
+test_backing_queue() ->
+ case application:get_env(rabbit, backing_queue_module) of
+ {ok, rabbit_variable_queue} ->
+ {ok, FileSizeLimit} =
+ application:get_env(rabbit, msg_store_file_size_limit),
+ application:set_env(rabbit, msg_store_file_size_limit, 512,
+ infinity),
+ {ok, MaxJournal} =
+ application:get_env(rabbit, queue_index_max_journal_entries),
+ application:set_env(rabbit, queue_index_max_journal_entries, 128,
+ infinity),
+ passed = test_msg_store(),
+ application:set_env(rabbit, msg_store_file_size_limit,
+ FileSizeLimit, infinity),
+ passed = test_queue_index(),
+ passed = test_queue_index_props(),
+ passed = test_variable_queue(),
+ passed = test_variable_queue_delete_msg_store_files_callback(),
+ passed = test_queue_recover(),
+ application:set_env(rabbit, queue_index_max_journal_entries,
+ MaxJournal, infinity),
+ %% We will have restarted the message store, and thus changed
+ %% the order of the children of rabbit_sup. This will cause
+ %% problems if there are subsequent failures - see bug 24262.
+ ok = restart_app(),
+ passed;
+ _ ->
+ passed
+ end.
+
+%% Stop and restart the message store with no client refs and a
+%% trivial recovery fun -- i.e. start it empty.
+restart_msg_store_empty() ->
+ ok = rabbit_variable_queue:stop_msg_store(),
+ ok = rabbit_variable_queue:start_msg_store(
+ undefined, {fun (ok) -> finished end, ok}).
+
+%% Derive a deterministic 16-byte message id from any term by hashing
+%% its external representation with MD5.
+msg_id_bin(X) ->
+ Serialised = term_to_binary(X),
+ erlang:md5(Serialised).
+
+%% Open a msg_store client with no confirm or msg_on_disk callbacks.
+msg_store_client_init(MsgStore, Ref) ->
+ rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
+
+%% Companion process that captures msg_store on-disk confirms.
+%% Idle state: wait for {await, MsgIds, Pid}, then collect {on_disk,
+%% Set} messages until all awaited ids are seen (reply 'arrived'), a
+%% confirm arrives for something not awaited (reply 'surplus'), or
+%% ?TIMEOUT passes with confirms still outstanding (reply 'timeout').
+on_disk_capture() ->
+ receive
+ {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
+ stop -> done
+ end.
+
+%% A non-empty OnDisk list means we saw confirms we never awaited.
+on_disk_capture([_|_], _Awaiting, Pid) ->
+ Pid ! {self(), surplus};
+on_disk_capture(OnDisk, Awaiting, Pid) ->
+ receive
+ {on_disk, MsgIdsS} ->
+ MsgIds = gb_sets:to_list(MsgIdsS),
+ on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
+ Pid);
+ stop ->
+ done
+ %% short 200ms grace when nothing is awaited, so stray confirms
+ %% still get a chance to show up and be reported as surplus
+ after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
+ case Awaiting of
+ [] -> Pid ! {self(), arrived}, on_disk_capture();
+ _ -> Pid ! {self(), timeout}
+ end
+ end.
+
+%% Ask the capture process to wait for confirms of MsgIds; returns ok
+%% on arrival, or the failure atom (surplus | timeout) otherwise.
+on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
+ Pid ! {await, MsgIds, self()},
+ receive
+ {Pid, arrived} -> ok;
+ {Pid, Error} -> Error
+ end.
+
+%% Shut the capture process down and wait for it to die.
+on_disk_stop(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ Pid ! stop,
+ receive {'DOWN', MRef, process, Pid, _Reason} ->
+ ok
+ end.
+
+%% Like msg_store_client_init/2 but wires the client's confirm
+%% callback to a fresh on_disk_capture process; returns
+%% {CapturePid, ClientState}.
+msg_store_client_init_capture(MsgStore, Ref) ->
+ Pid = spawn(fun on_disk_capture/0),
+ {Pid, rabbit_msg_store:client_init(
+ MsgStore, Ref, fun (MsgIds, _ActionTaken) ->
+ Pid ! {on_disk, MsgIds}
+ end, undefined)}.
+
+%% Assert that contains/2 answers Atom (true | false) for every id;
+%% the fold head only matches while the answer stays consistent.
+msg_store_contains(Atom, MsgIds, MSCState) ->
+ Atom = lists:foldl(
+ fun (MsgId, Atom1) when Atom1 =:= Atom ->
+ rabbit_msg_store:contains(MsgId, MSCState) end,
+ Atom, MsgIds).
+
+%% Read every id, asserting each read returns the id itself as the
+%% payload, threading the client state through.
+msg_store_read(MsgIds, MSCState) ->
+ lists:foldl(fun (MsgId, MSCStateM) ->
+ {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
+ MsgId, MSCStateM),
+ MSCStateN
+ end, MSCState, MsgIds).
+
+%% Write each id using the id itself as the payload; asserts each
+%% write returns ok.
+msg_store_write(MsgIds, MSCState) ->
+ ok = lists:foldl(fun (MsgId, ok) ->
+ rabbit_msg_store:write(MsgId, MsgId, MSCState)
+ end, ok, MsgIds).
+
+%% Remove MsgIds through an existing client.
+msg_store_remove(MsgIds, MSCState) ->
+ rabbit_msg_store:remove(MsgIds, MSCState).
+
+%% Remove MsgIds via a throwaway client for {MsgStore, Ref}.
+msg_store_remove(MsgStore, Ref, MsgIds) ->
+ with_msg_store_client(MsgStore, Ref,
+ fun (MSCStateM) ->
+ ok = msg_store_remove(MsgIds, MSCStateM),
+ MSCStateM
+ end).
+
+%% Run Fun with a fresh client for {MsgStore, Ref}, then terminate
+%% the client state Fun returns.
+with_msg_store_client(MsgStore, Ref, Fun) ->
+ rabbit_msg_store:client_terminate(
+ Fun(msg_store_client_init(MsgStore, Ref))).
+
+%% Fold Fun over L with a fresh client, terminating the final state.
+foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
+ rabbit_msg_store:client_terminate(
+ lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end,
+ msg_store_client_init(MsgStore, Ref), L)).
+
+%% End-to-end msg_store coverage: write/contains/read/remove across
+%% two clients (to hit caching and ref-counting), confirm handling,
+%% recovery of alternating messages across a restart, and enough
+%% large messages to force file rollover and GC.
+test_msg_store() ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
+ {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
+ Ref = rabbit_guid:gen(),
+ {Cap, MSCState} = msg_store_client_init_capture(
+ ?PERSISTENT_MSG_STORE, Ref),
+ Ref2 = rabbit_guid:gen(),
+ {Cap2, MSC2State} = msg_store_client_init_capture(
+ ?PERSISTENT_MSG_STORE, Ref2),
+ %% check we don't contain any of the msgs we're about to publish
+ false = msg_store_contains(false, MsgIds, MSCState),
+ %% test confirm logic
+ passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
+ %% check we don't contain any of the msgs we're about to publish
+ false = msg_store_contains(false, MsgIds, MSCState),
+ %% publish the first half
+ ok = msg_store_write(MsgIds1stHalf, MSCState),
+ %% sync on the first half
+ ok = on_disk_await(Cap, MsgIds1stHalf),
+ %% publish the second half
+ ok = msg_store_write(MsgIds2ndHalf, MSCState),
+ %% check they're all in there
+ true = msg_store_contains(true, MsgIds, MSCState),
+ %% publish the latter half twice so we hit the caching and ref
+ %% count code. We need to do this through a 2nd client since a
+ %% single client is not supposed to write the same message more
+ %% than once without first removing it.
+ ok = msg_store_write(MsgIds2ndHalf, MSC2State),
+ %% check they're still all in there
+ true = msg_store_contains(true, MsgIds, MSCState),
+ %% sync on the 2nd half
+ ok = on_disk_await(Cap2, MsgIds2ndHalf),
+ %% cleanup
+ ok = on_disk_stop(Cap2),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
+ ok = on_disk_stop(Cap),
+ %% read them all
+ MSCState1 = msg_store_read(MsgIds, MSCState),
+ %% read them all again - this will hit the cache, not disk
+ MSCState2 = msg_store_read(MsgIds, MSCState1),
+ %% remove them all
+ ok = msg_store_remove(MsgIds, MSCState2),
+ %% check first half doesn't exist
+ false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
+ %% check second half does exist
+ true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
+ %% read the second half again
+ MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
+ %% read the second half again, just for fun (aka code coverage)
+ MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
+ ok = rabbit_msg_store:client_terminate(MSCState4),
+ %% stop and restart, preserving every other msg in 2nd half
+ ok = rabbit_variable_queue:stop_msg_store(),
+ ok = rabbit_variable_queue:start_msg_store(
+ [], {fun ([]) -> finished;
+ ([MsgId|MsgIdsTail])
+ when length(MsgIdsTail) rem 2 == 0 ->
+ {MsgId, 1, MsgIdsTail};
+ ([MsgId|MsgIdsTail]) ->
+ {MsgId, 0, MsgIdsTail}
+ end, MsgIds2ndHalf}),
+ MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ %% check we have the right msgs left
+ lists:foldl(
+ fun (MsgId, Bool) ->
+ not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
+ end, false, MsgIds2ndHalf),
+ ok = rabbit_msg_store:client_terminate(MSCState5),
+ %% restart empty
+ restart_msg_store_empty(),
+ MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ %% check we don't contain any of the msgs
+ false = msg_store_contains(false, MsgIds, MSCState6),
+ %% publish the first half again
+ ok = msg_store_write(MsgIds1stHalf, MSCState6),
+ %% this should force some sort of sync internally otherwise misread
+ ok = rabbit_msg_store:client_terminate(
+ msg_store_read(MsgIds1stHalf, MSCState6)),
+ MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_remove(MsgIds1stHalf, MSCState7),
+ ok = rabbit_msg_store:client_terminate(MSCState7),
+ %% restart empty
+ restart_msg_store_empty(), %% now safe to reuse msg_ids
+ %% push a lot of msgs in... at least 100 files worth
+ {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
+ PayloadSizeBits = 65536,
+ BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
+ MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
+ Payload = << 0:PayloadSizeBits >>,
+ ok = with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MSCStateM) ->
+ [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
+ MsgId <- MsgIdsBig],
+ MSCStateM
+ end),
+ %% now read them to ensure we hit the fast client-side reading
+ ok = foreach_with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MsgId, MSCStateM) ->
+ {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
+ MsgId, MSCStateM),
+ MSCStateN
+ end, MsgIdsBig),
+ %% .., then remove 3s by 1, from the young end first...
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
+ %% .., then remove 3s by 2, from the young end first. This hits
+ %% GC (under 50% good data left, but no empty files. Must GC).
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
+ %% .., then remove 3s by 3, from the young end first. This hits
+ %% GC...
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
+ %% ensure empty
+ ok = with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MSCStateM) ->
+ false = msg_store_contains(false, MsgIdsBig, MSCStateM),
+ MSCStateM
+ end),
+ %%
+ passed = test_msg_store_client_delete_and_terminate(),
+ %% restart empty
+ restart_msg_store_empty(),
+ passed.
+
+%% Check which write/remove interleavings produce on-disk confirms:
+%% each comment below names the sequence performed and what should be
+%% confirmed for it. A write is confirmed exactly once whether or not
+%% the message is subsequently (or first) removed.
+test_msg_store_confirms(MsgIds, Cap, MSCState) ->
+ %% write -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove -> _
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, []),
+ %% write, remove -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% write, remove, write -> confirmed, confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds ++ MsgIds),
+ %% remove, write -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove, write, remove -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% confirmation on timer-based sync
+ passed = test_msg_store_confirm_timer(),
+ passed.
+
+%% Check that a write is confirmed even while the store is kept busy:
+%% msg_store_keep_busy_until_confirm/2 churns writes/removes of a
+%% different id until the confirm callback delivers 'on_disk' for
+%% MsgId.
+test_msg_store_confirm_timer() ->
+ Ref = rabbit_guid:gen(),
+ MsgId = msg_id_bin(1),
+ Self = self(),
+ MSCState = rabbit_msg_store:client_init(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MsgIds, _ActionTaken) ->
+ case gb_sets:is_member(MsgId, MsgIds) of
+ true -> Self ! on_disk;
+ false -> ok
+ end
+ end, undefined),
+ ok = msg_store_write([MsgId], MSCState),
+ ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState),
+ ok = msg_store_remove([MsgId], MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+%% Busy-loop writing and removing MsgIds until the 'on_disk' message
+%% (sent by the watching confirm callback) shows up in our mailbox.
+msg_store_keep_busy_until_confirm(MsgIds, MSCState) ->
+ receive
+ on_disk -> ok
+ after 0 ->
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ msg_store_keep_busy_until_confirm(MsgIds, MSCState)
+ end.
+
+%% Write some messages and then delete-and-terminate the client,
+%% exercising the 'dying client' fast path for writes.
+test_msg_store_client_delete_and_terminate() ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
+ Ref = rabbit_guid:gen(),
+ MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_write(MsgIds, MSCState),
+ %% test the 'dying client' fast path for writes
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+%% Fully-qualified queue resource in the default vhost.
+queue_name(Name) ->
+ rabbit_misc:r(<<"/">>, queue, Name).
+
+%% The queue resource shared by the queue-index and variable-queue
+%% tests below.
+test_queue() ->
+ queue_name(<<"test">>).
+
+%% Recover the test queue's index, answering contains/1 queries from
+%% a temporary persistent-store client; returns the recover result,
+%% which callers match as {MsgCount, Qi}.
+init_test_queue() ->
+ TestQueue = test_queue(),
+ PRef = rabbit_guid:gen(),
+ PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
+ Res = rabbit_queue_index:recover(
+ TestQueue, [], false,
+ fun (MsgId) ->
+ rabbit_msg_store:contains(MsgId, PersistentClient)
+ end,
+ fun nop/1),
+ ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
+ Res.
+
+%% Cleanly terminate the index, bounce the variable-queue subsystem
+%% recovering only the test queue, and re-init its index.
+restart_test_queue(Qi) ->
+ _ = rabbit_queue_index:terminate([], Qi),
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([test_queue()]),
+ init_test_queue().
+
+%% Bounce the subsystem with nothing to recover and wipe the test
+%% queue's index, so it starts out at length 0.
+empty_test_queue() ->
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([]),
+ {0, Qi} = init_test_queue(),
+ _ = rabbit_queue_index:delete_and_terminate(Qi),
+ ok.
+
+%% Run Fun against a freshly-emptied queue index; the index state Fun
+%% returns is deleted-and-terminated afterwards.
+with_empty_test_queue(Fun) ->
+ ok = empty_test_queue(),
+ {0, Qi} = init_test_queue(),
+ rabbit_queue_index:delete_and_terminate(Fun(Qi)).
+
+%% Full broker stop/start cycle.
+restart_app() ->
+ rabbit:stop(),
+ rabbit:start().
+
+%% Publish one freshly-generated msg id per SeqId into both the index
+%% and the matching msg store. Returns {Qi, [{SeqId, MsgId}]} with
+%% the pairs in reverse publish order. The trailing contains/2 call
+%% forces all store writes through before the client is torn down.
+queue_index_publish(SeqIds, Persistent, Qi) ->
+ Ref = rabbit_guid:gen(),
+ MsgStore = case Persistent of
+ true -> ?PERSISTENT_MSG_STORE;
+ false -> ?TRANSIENT_MSG_STORE
+ end,
+ MSCState = msg_store_client_init(MsgStore, Ref),
+ {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
+ lists:foldl(
+ fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
+ MsgId = rabbit_guid:gen(),
+ QiM = rabbit_queue_index:publish(
+ MsgId, SeqId, #message_properties{}, Persistent, QiN),
+ ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
+ {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
+ end, {Qi, []}, SeqIds),
+ %% do this just to force all of the publishes through to the msg_store:
+ true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ {A, B}.
+
+%% Check a read result against the published {SeqId, MsgId} pairs:
+%% each read entry must carry the same Persistent and Delivered flags
+%% (note the nonlinear patterns) and line up one-to-one with the
+%% published list. Returns ok, or 'ko' on the first mismatch.
+verify_read_with_published(_Delivered, _Persistent, [], _) ->
+ ok;
+verify_read_with_published(Delivered, Persistent,
+ [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
+ [{SeqId, MsgId}|Published]) ->
+ verify_read_with_published(Delivered, Persistent, Read, Published);
+verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
+ ko.
+
+%% Publish with an explicit expiry and check the same properties come
+%% back from a read; then bounce the subsystem to leave clean state.
+test_queue_index_props() ->
+ with_empty_test_queue(
+ fun(Qi0) ->
+ MsgId = rabbit_guid:gen(),
+ Props = #message_properties{expiry=12345},
+ Qi1 = rabbit_queue_index:publish(MsgId, 1, Props, true, Qi0),
+ {[{MsgId, 1, Props, _, _}], Qi2} =
+ rabbit_queue_index:read(1, 2, Qi1),
+ Qi2
+ end),
+
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([]),
+
+ passed.
+
+%% Queue index coverage: transient vs persistent recovery lengths,
+%% segment boundary arithmetic, deliver/ack/flush behaviour, auto
+%% deletion of segment files, and combining the journal with segment
+%% files in both directions (cases (a) through (e) below).
+test_queue_index() ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ TwoSegs = SegmentSize + SegmentSize,
+ MostOfASegment = trunc(SegmentSize*0.75),
+ SeqIdsA = lists:seq(0, MostOfASegment-1),
+ SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
+ SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
+ SeqIdsD = lists:seq(0, SegmentSize*4),
+
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
+ {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
+ {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
+ {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
+ ok = verify_read_with_published(false, false, ReadA,
+ lists:reverse(SeqIdsMsgIdsA)),
+ %% should get length back as 0, as all the msgs were transient
+ {0, Qi6} = restart_test_queue(Qi4),
+ {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
+ {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
+ {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
+ {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
+ ok = verify_read_with_published(false, true, ReadB,
+ lists:reverse(SeqIdsMsgIdsB)),
+ %% should get length back as MostOfASegment
+ LenB = length(SeqIdsB),
+ {LenB, Qi12} = restart_test_queue(Qi10),
+ {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
+ Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
+ {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
+ ok = verify_read_with_published(true, true, ReadC,
+ lists:reverse(SeqIdsMsgIdsB)),
+ Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
+ Qi17 = rabbit_queue_index:flush(Qi16),
+ %% Everything will have gone now because #pubs == #acks
+ {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
+ %% should get length back as 0 because all persistent
+ %% msgs have been acked
+ {0, Qi19} = restart_test_queue(Qi18),
+ Qi19
+ end),
+
+ %% These next bits are just to hit the auto deletion of segment files.
+ %% First, partials:
+ %% a) partial pub+del+ack, then move to new segment
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
+ false, Qi4),
+ Qi5
+ end),
+
+ %% b) partial pub+del, then move to new segment, then ack all in old segment
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
+ false, Qi2),
+ Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
+ rabbit_queue_index:flush(Qi4)
+ end),
+
+ %% c) just fill up several segments of all pubs, then +dels, then +acks
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
+ rabbit_queue_index:flush(Qi3)
+ end),
+
+ %% d) get messages in all states to a segment, then flush, then do
+ %% the same again, don't flush and read. This will hit all
+ %% possibilities in combining the segment with the journal.
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
+ {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
+ ok = verify_read_with_published(true, false, ReadD,
+ [Four, Five, Six]),
+ {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
+ ok = verify_read_with_published(false, false, ReadE,
+ [Seven, Eight]),
+ Qi10
+ end),
+
+ %% e) as for (d), but use terminate instead of read, which will
+ %% exercise journal_minus_segment, not segment_plus_journal.
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
+ true, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ {5, Qi4} = restart_test_queue(Qi3),
+ {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {5, Qi8} = restart_test_queue(Qi7),
+ Qi8
+ end),
+
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([]),
+
+ passed.
+
+%% Init a variable queue, mapping Recover :: boolean() onto the
+%% 'new' / 'non_clean_shutdown' init terms; the nop funs stub out the
+%% async callbacks.
+variable_queue_init(Q, Recover) ->
+ rabbit_variable_queue:init(
+ Q, case Recover of
+ true -> non_clean_shutdown;
+ false -> new
+ end, fun nop/2, fun nop/2, fun nop/1).
+
+%% Publish Count messages (defaulting to empty payloads and untouched
+%% properties), waiting for any background shuffling to settle.
+variable_queue_publish(IsPersistent, Count, VQ) ->
+ variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_publish(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+%% Full form: publish sequence numbers Start..Start+Count-1, deriving
+%% each message's properties via PropFun and payload via PayloadFun.
+%% delivery_mode 2 marks a message persistent, 1 transient.
+variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_wait_for_shuffling_end(
+ lists:foldl(
+ fun (N, VQN) ->
+ rabbit_variable_queue:publish(
+ rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{}), false, self(), VQN)
+ end, VQ, lists:seq(Start, Start + Count - 1))).
+
+%% Fetch Count messages, asserting each one's persistence/delivered
+%% flags and the declining queue length; returns {VQ, AckTags} with
+%% the tags in most-recent-first order.
+variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
+ lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
+ Rem = Len - N,
+ {{#basic_message { is_persistent = IsPersistent },
+ IsDelivered, AckTagN}, VQM} =
+ rabbit_variable_queue:fetch(true, VQN),
+ Rem = rabbit_variable_queue:len(VQM),
+ {VQM, [AckTagN | AckTagsAcc]}
+ end, {VQ, []}, lists:seq(1, Count)).
+
+%% Set the ram duration target and wait for any resulting shuffling
+%% (see variable_queue_wait_for_shuffling_end) to settle.
+variable_queue_set_ram_duration_target(Duration, VQ) ->
+ variable_queue_wait_for_shuffling_end(
+ rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
+
+%% Assert that Prop maps to Value in the proplist; badmatch on
+%% mismatch. Returns the value.
+assert_prop(List, Prop, Value) ->
+ Actual = proplists:get_value(Prop, List),
+ Value = Actual.
+
+%% Assert every {Prop, Value} pair in PropVals against the proplist.
+assert_props(List, PropVals) ->
+ lists:map(fun ({Prop, Value}) -> assert_prop(List, Prop, Value) end,
+ PropVals).
+
+%% A pseudo-queue record named after test_queue/0, owned by the
+%% calling process, with the given durability.
+test_amqqueue(Durable) ->
+ (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
+ #amqqueue { durable = Durable }.
+
+%% Run Fun against a brand-new durable variable queue in a separate
+%% linked process (so bump_credit messages from rabbit_msg_store go
+%% there, not to the test process). Asserts the queue starts with
+%% empty q1-q4/delta, and deletes the queue once Fun returns.
+with_fresh_variable_queue(Fun) ->
+ Ref = make_ref(),
+ Me = self(),
+ %% Run in a separate process since rabbit_msg_store will send
+ %% bump_credit messages and we want to ignore them
+ spawn_link(fun() ->
+ ok = empty_test_queue(),
+ VQ = variable_queue_init(test_amqqueue(true), false),
+ S0 = rabbit_variable_queue:status(VQ),
+ assert_props(S0, [{q1, 0}, {q2, 0},
+ {delta,
+ {delta, undefined, 0, undefined}},
+ {q3, 0}, {q4, 0},
+ {len, 0}]),
+ _ = rabbit_variable_queue:delete_and_terminate(
+ shutdown, Fun(VQ)),
+ Me ! Ref
+ end),
+ receive
+ Ref -> ok
+ end,
+ passed.
+
+%% Deliver Count persistent (delivery_mode = 2) messages with
+%% confirms requested through the queue, then block until every seq
+%% number has been confirmed.
+publish_and_confirm(Q, Payload, Count) ->
+ Seqs = lists:seq(1, Count),
+ [begin
+ Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = 2},
+ Payload),
+ Delivery = #delivery{mandatory = false, sender = self(),
+ confirm = true, message = Msg, msg_seq_no = Seq},
+ _QPids = rabbit_amqqueue:deliver([Q], Delivery)
+ end || Seq <- Seqs],
+ wait_for_confirms(gb_sets:from_list(Seqs)).
+
+%% Collect {confirm, SeqNos, _} casts until the unconfirmed set is
+%% drained; exits with timeout_waiting_for_confirm if ?TIMEOUT passes
+%% without a confirm.
+wait_for_confirms(Unconfirmed) ->
+ case gb_sets:is_empty(Unconfirmed) of
+ true -> ok;
+ false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
+ wait_for_confirms(
+ rabbit_misc:gb_sets_difference(
+ Unconfirmed, gb_sets:from_list(Confirmed)))
+ after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
+ end
+ end.
+
+%% Run each variable-queue scenario against its own fresh queue.
+test_variable_queue() ->
+ [passed = with_fresh_variable_queue(F) ||
+ F <- [fun test_variable_queue_dynamic_duration_change/1,
+ fun test_variable_queue_partial_segments_delta_thing/1,
+ fun test_variable_queue_all_the_bits_not_covered_elsewhere1/1,
+ fun test_variable_queue_all_the_bits_not_covered_elsewhere2/1,
+ fun test_drop/1,
+ fun test_variable_queue_fold_msg_on_disk/1,
+ fun test_dropfetchwhile/1,
+ fun test_dropwhile_varying_ram_duration/1,
+ fun test_fetchwhile_varying_ram_duration/1,
+ fun test_variable_queue_ack_limiting/1,
+ fun test_variable_queue_purge/1,
+ fun test_variable_queue_requeue/1,
+ fun test_variable_queue_requeue_ram_beta/1,
+ fun test_variable_queue_fold/1]],
+ passed.
+
+%% Fold over a queue with pending acks at several cut-off points
+%% (including 0 and beyond the depth), checking the fold visits
+%% messages in order, reports pending status correctly, and stops at
+%% the cut.
+test_variable_queue_fold(VQ0) ->
+ {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Count = rabbit_variable_queue:depth(VQ1),
+ Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
+ lists:foldl(fun (Cut, VQ2) ->
+ test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
+ end, VQ1, [0, 1, 2, Count div 2,
+ Count - 1, Count, Count + 1, Count * 2]).
+
+%% One fold pass: accumulate message ints up to (and including) Cut,
+%% stop beyond it, and compare against the sorted expectation.
+test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
+ {Acc, VQ1} = rabbit_variable_queue:fold(
+ fun (M, _, Pending, A) ->
+ MInt = msg2int(M),
+ Pending = lists:member(MInt, PendingMsgs), %% assert
+ case MInt =< Cut of
+ true -> {cont, [MInt | A]};
+ false -> {stop, A}
+ end
+ end, [], VQ0),
+ Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
+ Expected = lists:reverse(Acc), %% assertion
+ VQ1.
+
+%% Recover the integer a test message was built from: payloads are
+%% term_to_binary-encoded ints, with fragments stored in reverse.
+msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
+ binary_to_term(list_to_binary(lists:reverse(P))).
+
+%% Select every {AckTag, N} pair for which N + Rem falls on a
+%% multiple of Interval -- used to carve regular "holes" out of an
+%% ack sequence.
+ack_subset(AckSeqs, Interval, Rem) ->
+ [Pair || {_Ack, N} = Pair <- AckSeqs, (N + Rem) rem Interval == 0].
+
+%% Requeue each ack tag in its own requeue/2 call (rather than as one
+%% batch), threading the queue state through.
+requeue_one_by_one(Acks, VQ) ->
+ lists:foldl(fun (AckTag, VQN) ->
+ {_MsgId, VQM} = rabbit_variable_queue:requeue(
+ [AckTag], VQN),
+ VQM
+ end, VQ, Acks).
+
+%% Create a vq with messages in q1, delta, and q3, and holes (in the
+%% form of pending acks) in the latter two.
+%% Returns {PendingSeqs, RequeuedSeqs, FreshSeqs, VQ}: the message
+%% ints still pending ack (Subset3), those requeued, and the fresh
+%% tail published at the end.
+variable_queue_with_holes(VQ0) ->
+ Interval = 2048, %% should match vq:IO_BATCH_SIZE
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
+ Seq = lists:seq(1, Count),
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(
+ false, 1, Count,
+ fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
+ {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+ Acks = lists:reverse(AcksR),
+ AckSeqs = lists:zip(Acks, Seq),
+ [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
+ [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
+ %% we requeue in three phases in order to exercise requeuing logic
+ %% in various vq states
+ {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
+ Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
+ VQ5 = requeue_one_by_one(Subset1, VQ4),
+ %% by now we have some messages (and holes) in delta
+ VQ6 = requeue_one_by_one(Subset2, VQ5),
+ VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
+ %% add the q1 tail
+ VQ8 = variable_queue_publish(
+ true, Count + 1, Interval,
+ fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
+ %% assertions
+ [false = case V of
+ {delta, _, 0, _} -> true;
+ 0 -> true;
+ _ -> false
+ end || {K, V} <- rabbit_variable_queue:status(VQ8),
+ lists:member(K, [q1, delta, q3])],
+ Depth = Count + Interval,
+ Depth = rabbit_variable_queue:depth(VQ8),
+ Len = Depth - length(Subset3),
+ Len = rabbit_variable_queue:len(VQ8),
+ {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
+
+%% Fetch every remaining message from a queue-with-holes and assert
+%% that each carries the expected redelivered flag: true for requeued
+%% messages, false for fresh ones, and that they arrive in order.
+test_variable_queue_requeue(VQ0) ->
+ {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Msgs =
+ lists:zip(RequeuedMsgs,
+ lists:duplicate(length(RequeuedMsgs), true)) ++
+ lists:zip(FreshMsgs,
+ lists:duplicate(length(FreshMsgs), false)),
+ VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
+ {{M, MRequeued, _}, VQb} =
+ rabbit_variable_queue:fetch(true, VQa),
+ Requeued = MRequeued, %% assertion
+ I = msg2int(M), %% assertion
+ VQb
+ end, VQ1, Msgs),
+ {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
+ VQ3.
+
+%% requeue from ram_pending_ack into q3, move to delta and then empty queue
+%% Publishes and fetches two segments' worth of messages, requeues the
+%% tail of the acks, pushes everything to disk (ram duration target 0),
+%% requeues the rest, then drains and acks the whole queue.
+test_variable_queue_requeue_ram_beta(VQ0) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
+ VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
+ {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
+ VQ6 = requeue_one_by_one(Front, VQ5),
+ {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
+ {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
+ VQ8.
+
+%% Check purge/1 and purge_acks/1 against len/1 and depth/1: purge
+%% removes the 4 unfetched messages but keeps the 6 pending acks in the
+%% depth; requeuing 2 acks restores them to len; purge_acks then drops
+%% the remaining pending acks from the depth.
+test_variable_queue_purge(VQ0) ->
+ LenDepth = fun (VQ) ->
+ {rabbit_variable_queue:len(VQ),
+ rabbit_variable_queue:depth(VQ)}
+ end,
+ VQ1 = variable_queue_publish(false, 10, VQ0),
+ {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
+ {4, VQ3} = rabbit_variable_queue:purge(VQ2),
+ {0, 6} = LenDepth(VQ3),
+ {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
+ {2, 6} = LenDepth(VQ4),
+ VQ5 = rabbit_variable_queue:purge_acks(VQ4),
+ {2, 2} = LenDepth(VQ5),
+ VQ5.
+
+%% Check that with a ram duration target of 0 both messages and
+%% pending acks are pushed out of RAM (ram_msg_count and ram_ack_count
+%% drop to 0) while the queue length is unaffected.
+test_variable_queue_ack_limiting(VQ0) ->
+ %% start by sending in a bunch of messages
+ Len = 1024,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ %% update stats for duration
+ {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+
+ %% fetch half the messages
+ {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
+
+ VQ5 = check_variable_queue_status(VQ4, [{len , Len div 2},
+ {ram_ack_count, Len div 2},
+ {ram_msg_count, Len div 2}]),
+
+ %% ensure all acks go to disk on 0 duration target
+ VQ6 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ5),
+ [{len, Len div 2},
+ {target_ram_count, 0},
+ {ram_msg_count, 0},
+ {ram_ack_count, 0}]),
+
+ VQ6.
+
+%% Exercise drop/2 in both ack modes: dropping with AckRequired = true
+%% must yield a real ack tag (so the message can be requeued), while
+%% AckRequired = false must yield 'undefined' and remove the message.
+test_drop(VQ0) ->
+    %% start by sending a message
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    %% drop message with AckRequired = true
+    {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
+    true = rabbit_variable_queue:is_empty(VQ2),
+    %% fix: previously compared against the misspelled atom 'undefinded',
+    %% which made this assertion vacuous
+    true = AckTag =/= undefined,
+    %% drop again -> empty
+    {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
+    %% requeue
+    {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
+    %% drop message with AckRequired = false
+    {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
+    true = rabbit_variable_queue:is_empty(VQ5),
+    VQ5.
+
+%% Check fetchwhile/4 and dropwhile/2 on messages published with
+%% sequential expiry properties 1..10: the predicate stops at expiry 6,
+%% the first 5 are fetched/dropped, and the remainder arrive in order.
+test_dropfetchwhile(VQ0) ->
+ Count = 10,
+
+ %% add messages with sequential expiry
+ VQ1 = variable_queue_publish(
+ false, 1, Count,
+ fun (N, Props) -> Props#message_properties{expiry = N} end,
+ fun erlang:term_to_binary/1, VQ0),
+
+ %% fetch the first 5 messages
+ {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
+ rabbit_variable_queue:fetchwhile(
+ fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
+ fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
+ {[Msg | MsgAcc], [AckTag | AckAcc]}
+ end, {[], []}, VQ1),
+ true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
+
+ %% requeue them
+ {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
+
+ %% drop the first 5 messages
+ {#message_properties{expiry = 6}, VQ4} =
+ rabbit_variable_queue:dropwhile(
+ fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
+
+ %% fetch 5
+ VQ5 = lists:foldl(fun (N, VQN) ->
+ {{Msg, _, _}, VQM} =
+ rabbit_variable_queue:fetch(false, VQN),
+ true = msg2int(Msg) == N,
+ VQM
+ end, VQ4, lists:seq(6, Count)),
+
+ %% should be empty now
+ true = rabbit_variable_queue:is_empty(VQ5),
+
+ VQ5.
+
+%% Run dropwhile/2 (with an always-false predicate, so nothing is
+%% dropped) once with the message paged out and once with it in RAM.
+test_dropwhile_varying_ram_duration(VQ0) ->
+    Drop = fun (VQIn) ->
+                   {_Props, VQOut} =
+                       rabbit_variable_queue:dropwhile(
+                         fun (_MsgProps) -> false end, VQIn),
+                   VQOut
+           end,
+    test_dropfetchwhile_varying_ram_duration(Drop, VQ0).
+
+%% Run fetchwhile/4 (with an always-false predicate, so nothing is
+%% fetched) once with the message paged out and once with it in RAM.
+test_fetchwhile_varying_ram_duration(VQ0) ->
+    Fetch = fun (VQIn) ->
+                    {_Props, ok, VQOut} =
+                        rabbit_variable_queue:fetchwhile(
+                          fun (_MsgProps) -> false end,
+                          fun (_Msg, _AckTag, Acc) -> Acc end,
+                          ok, VQIn),
+                    VQOut
+            end,
+    test_dropfetchwhile_varying_ram_duration(Fetch, VQ0).
+
+%% Publish one message and apply Fun once with a ram duration target of
+%% 0 (message paged out) and once with a target of infinity (message in
+%% RAM); returns the final queue state.
+test_dropfetchwhile_varying_ram_duration(Fun, VQ) ->
+    VQPagedOut = variable_queue_set_ram_duration_target(
+                   0, variable_queue_publish(false, 1, VQ)),
+    VQAfterFirst = Fun(VQPagedOut),
+    VQInRam = variable_queue_publish(
+                false, 1,
+                variable_queue_set_ram_duration_target(infinity, VQAfterFirst)),
+    Fun(VQInRam).
+
+%% Publish two segments' worth of messages, then repeatedly churn the
+%% queue while stepping the ram duration target through a sequence of
+%% values (fractions of the measured duration, 0, and infinity), and
+%% finally drain and ack everything.
+test_variable_queue_dynamic_duration_change(VQ0) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+
+ %% start by sending in a couple of segments worth
+ Len = 2*SegmentSize,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+ VQ7 = lists:foldl(
+ fun (Duration1, VQ4) ->
+ {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
+ io:format("~p:~n~p~n",
+ [Duration1, rabbit_variable_queue:status(VQ5)]),
+ VQ6 = variable_queue_set_ram_duration_target(
+ Duration1, VQ5),
+ publish_fetch_and_ack(Churn, Len, VQ6)
+ end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
+
+ %% drain
+ {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+
+ VQ10.
+
+%% Churn the queue N times: publish one message, fetch one, assert the
+%% queue length is back to Len, then ack the fetched message.
+publish_fetch_and_ack(0, _Len, VQ0) ->
+ VQ0;
+publish_fetch_and_ack(N, Len, VQ0) ->
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
+ Len = rabbit_variable_queue:len(VQ2),
+ {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
+ publish_fetch_and_ack(N-1, Len, VQ3).
+
+%% Publish one and a half segments' worth of messages and check, via
+%% the queue's status proplist, how they are distributed between q1,
+%% delta and q3 as the ram duration target flips between 0 and infinity
+%% and messages are fetched out.
+test_variable_queue_partial_segments_delta_thing(VQ0) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ HalfSegment = SegmentSize div 2,
+ OneAndAHalfSegment = SegmentSize + HalfSegment,
+ VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
+ {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
+ VQ3 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ2),
+ %% one segment in q3, and half a segment in delta
+ [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment}]),
+ VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+ VQ5 = check_variable_queue_status(
+ variable_queue_publish(true, 1, VQ4),
+ %% one alpha, but it's in the same segment as the deltas
+ [{q1, 1},
+ {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment + 1}]),
+ {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
+ SegmentSize + HalfSegment + 1, VQ5),
+ VQ7 = check_variable_queue_status(
+ VQ6,
+ %% the half segment should now be in q3
+ [{q1, 1},
+ {delta, {delta, undefined, 0, undefined}},
+ {q3, HalfSegment},
+ {len, HalfSegment + 1}]),
+ {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
+ HalfSegment + 1, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
+ %% should be empty now
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+ VQ10.
+
+%% Wait for any in-flight paging to settle, print the queue's status
+%% proplist, and assert the expected Props appear in it; returns the
+%% settled queue state.
+check_variable_queue_status(VQ0, Props) ->
+    VQ = variable_queue_wait_for_shuffling_end(VQ0),
+    Status = rabbit_variable_queue:status(VQ),
+    io:format("~p~n", [Status]),
+    assert_props(Status, Props),
+    VQ.
+
+%% While credit_flow reports this process blocked, consume bump_credit
+%% messages and resume the queue, recursing until unblocked.  Note the
+%% bare receive has no 'after': the test relies on a bump arriving.
+variable_queue_wait_for_shuffling_end(VQ) ->
+ case credit_flow:blocked() of
+ false -> VQ;
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ variable_queue_wait_for_shuffling_end(
+ rabbit_variable_queue:resume(VQ))
+ end
+ end.
+
+%% Mix persistent and transient publishes, page everything to disk,
+%% fetch both batches unacked, terminate, and re-init the queue to
+%% check the persistent messages survive (fetched as redelivered).
+test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ VQ1 = variable_queue_publish(true, Count, VQ0),
+ VQ2 = variable_queue_publish(false, Count, VQ1),
+ VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
+ {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
+ Count + Count, VQ3),
+ {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
+ Count, VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(true), true),
+ {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
+ Count1 = rabbit_variable_queue:len(VQ8),
+ VQ9 = variable_queue_publish(false, 1, VQ8),
+ VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
+ {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
+ {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
+ VQ12.
+
+%% Publish and fetch transient messages with a 0 ram duration target,
+%% requeue, time out, terminate, and re-init: nothing transient should
+%% survive the restart.
+test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) ->
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(false, 4, VQ1),
+ {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
+ {_Guids, VQ4} =
+ rabbit_variable_queue:requeue(AckTags, VQ3),
+ VQ5 = rabbit_variable_queue:timeout(VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(true), true),
+ {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
+ VQ8.
+
+%% Publish and fetch one persistent message, then run ackfold/4 over
+%% its ack tag with a no-op accumulator to exercise folding over a
+%% message whose body lives on disk.
+test_variable_queue_fold_msg_on_disk(VQ0) ->
+    VQ1 = variable_queue_publish(true, 1, VQ0),
+    {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
+    NoOp = fun (_Msg, _AckTag, ok) -> ok end,
+    {ok, VQ3} = rabbit_variable_queue:ackfold(NoOp, ok, VQ2, AckTags),
+    VQ3.
+
+%% Kill a durable queue's process after publishing confirmed messages,
+%% restart the amqqueue subsystem via recover/0, and check that all but
+%% the one message consumed via basic_get survive (redelivered) when
+%% the backing queue is re-initialised directly.
+test_queue_recover() ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ {new, #amqqueue { pid = QPid, name = QName } = Q} =
+ rabbit_amqqueue:declare(test_queue(), true, false, [], none),
+ publish_and_confirm(Q, <<>>, Count),
+
+ exit(QPid, kill),
+ MRef = erlang:monitor(process, QPid),
+ receive {'DOWN', MRef, process, QPid, _Info} -> ok
+ after 10000 -> exit(timeout_waiting_for_queue_death)
+ end,
+ rabbit_amqqueue:stop(),
+ rabbit_amqqueue:start(rabbit_amqqueue:recover()),
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+ rabbit_amqqueue:with_or_die(
+ QName,
+ fun (Q1 = #amqqueue { pid = QPid1 }) ->
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} =
+ rabbit_amqqueue:basic_get(Q1, self(), false, Limiter),
+ exit(QPid1, shutdown),
+ VQ1 = variable_queue_init(Q, true),
+ {{_Msg1, true, _AckTag1}, VQ2} =
+ rabbit_variable_queue:fetch(true, VQ1),
+ CountMinusOne = rabbit_variable_queue:len(VQ2),
+ _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
+ rabbit_amqqueue:internal_delete(QName)
+ end),
+ passed.
+
+%% Publish large (1MB) confirmed messages, page them out, consume one
+%% and purge the rest, then give the message store time to run its
+%% file-deletion (close_fds) callback before deleting the queue.
+test_variable_queue_delete_msg_store_files_callback() ->
+ ok = restart_msg_store_empty(),
+ {new, #amqqueue { pid = QPid, name = QName } = Q} =
+ rabbit_amqqueue:declare(test_queue(), true, false, [], none),
+ Payload = <<0:8388608>>, %% 1MB
+ Count = 30,
+ publish_and_confirm(Q, Payload, Count),
+
+ rabbit_amqqueue:set_ram_duration_target(QPid, 0),
+
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} =
+ rabbit_amqqueue:basic_get(Q, self(), true, Limiter),
+ {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
+
+ %% give the queue a second to receive the close_fds callback msg
+ timer:sleep(1000),
+
+ rabbit_amqqueue:delete(Q, false, false),
+ passed.
+
+%% Verify that the server_properties reported to clients can be
+%% extended and overridden via the rabbit application environment, in
+%% both the simplified {KeyAtom, String} form and the full
+%% {BinaryKey, Type, Value} form; restores the original environment
+%% before returning.
+test_configurable_server_properties() ->
+ %% List of the names of the built-in properties we expect to find
+ BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
+ <<"copyright">>, <<"information">>],
+
+ Protocol = rabbit_framing_amqp_0_9_1,
+
+ %% Verify that the built-in properties are initially present
+ ActualPropNames = [Key || {Key, longstr, _} <-
+ rabbit_reader:server_properties(Protocol)],
+ true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
+ BuiltInPropNames),
+
+ %% Get the initial server properties configured in the environment
+ {ok, ServerProperties} = application:get_env(rabbit, server_properties),
+
+ %% Helper functions
+ ConsProp = fun (X) -> application:set_env(rabbit,
+ server_properties,
+ [X | ServerProperties]) end,
+ IsPropPresent =
+ fun (X) ->
+ lists:member(X, rabbit_reader:server_properties(Protocol))
+ end,
+
+ %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
+ NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
+ ConsProp(NewSimplifiedProperty),
+ %% Do we find hare soup, appropriately formatted in the generated properties?
+ ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
+ longstr,
+ list_to_binary(NewHareVal)},
+ true = IsPropPresent(ExpectedHareImage),
+
+ %% Add a wholly new property of the {BinaryKey, Type, Value} form
+ %% and check for it
+ NewProperty = {<<"new-bin-key">>, signedint, -1},
+ ConsProp(NewProperty),
+ %% Do we find the new property?
+ true = IsPropPresent(NewProperty),
+
+ %% Add a property that clobbers a built-in, and verify correct clobbering
+ {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
+ {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
+ list_to_binary(NewVerVal)},
+ ConsProp(NewVersion),
+ ClobberedServerProps = rabbit_reader:server_properties(Protocol),
+ %% Is the clobbering insert present?
+ true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
+ %% Is the clobbering insert the only thing with the clobbering key?
+ [{BinNewVerKey, longstr, BinNewVerVal}] =
+ [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
+
+ %% Restore the original configuration
+ application:set_env(rabbit, server_properties, ServerProperties),
+ passed.
+
+%% No-op callbacks for use where a fun is required but nothing should
+%% be done.
+nop(_) -> ok.
+nop(_, _) -> ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_tests_event_receiver).
+
+-export([start/3, stop/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("rabbit.hrl").
+
+%% Install this module as a rabbit_event handler on every node in
+%% Nodes, forwarding events of the given Types to Pid; the match
+%% asserts that add_handler returned ok on every node.
+start(Pid, Nodes, Types) ->
+    ExpectedOks = lists:duplicate(length(Nodes), ok),
+    {ExpectedOks, _BadNodes} =
+        rpc:multicall(Nodes, gen_event, add_handler,
+                      [rabbit_event, ?MODULE, [Pid, Types]]).
+
+%% Remove this handler from the local rabbit_event manager.  Note this
+%% only affects the local node, unlike start/3 which is cluster-wide.
+stop() ->
+ gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+%% gen_event callback: state is {ForwardPid, WatchedEventTypes}.
+init([Pid, Types]) ->
+ {ok, {Pid, Types}}.
+
+%% gen_event callback: no calls are expected; reply not_understood.
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+%% Forward the event to the registered pid if its type is one of the
+%% watched types; otherwise discard it.
+handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
+    Watched = lists:member(Type, Types),
+    case Watched of
+        true  -> Pid ! Event;
+        false -> ok
+    end,
+    {ok, State}.
+
+%% gen_event callback: ignore stray messages.
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%% gen_event callback: nothing to clean up.
+terminate(_Arg, _State) ->
+ ok.
+
+%% gen_event callback: no state migration needed.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_trace).
+
+-export([init/1, enabled/1, tap_in/2, tap_out/2, start/1, stop/1]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-define(TRACE_VHOSTS, trace_vhosts).
+-define(XNAME, <<"amq.rabbitmq.trace">>).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(state() :: rabbit_types:exchange() | 'none').
+
+-spec(init/1 :: (rabbit_types:vhost()) -> state()).
+-spec(enabled/1 :: (rabbit_types:vhost()) -> boolean()).
+-spec(tap_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok').
+-spec(tap_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok').
+
+-spec(start/1 :: (rabbit_types:vhost()) -> 'ok').
+-spec(stop/1 :: (rabbit_types:vhost()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Build the per-channel tracing state for VHost: the trace exchange
+%% (which must already exist) when tracing is enabled, 'none' otherwise.
+init(VHost) ->
+    case enabled(VHost) of
+        true ->
+            XName = rabbit_misc:r(VHost, exchange, ?XNAME),
+            {ok, TraceX} = rabbit_exchange:lookup(XName),
+            TraceX;
+        false ->
+            none
+    end.
+
+%% True when VHost appears in the rabbit application's list of traced
+%% vhosts (maintained by start/1 and stop/1 below).
+enabled(VHost) ->
+    {ok, TracedVHosts} = application:get_env(rabbit, ?TRACE_VHOSTS),
+    lists:member(VHost, TracedVHosts).
+
+%% Publish a "publish.<ExchangeName>" trace message for an inbound
+%% message; no-op when tracing is disabled (state 'none').
+tap_in(_Msg, none) -> ok;
+tap_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, TraceX) ->
+ trace(TraceX, Msg, <<"publish">>, XName, []).
+
+%% Publish a "deliver.<QueueName>" trace message for an outbound
+%% message, recording the redelivered flag as a 0/1 signedint header;
+%% no-op when tracing is disabled (state 'none').
+tap_out(_Msg, none) -> ok;
+tap_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceX) ->
+ RedeliveredNum = case Redelivered of true -> 1; false -> 0 end,
+ trace(TraceX, Msg, <<"deliver">>, QName,
+ [{<<"redelivered">>, signedint, RedeliveredNum}]).
+
+%%----------------------------------------------------------------------------
+
+%% Enable tracing for VHost (added to the traced-vhosts list,
+%% de-duplicated via the '--' before cons).
+start(VHost) ->
+ rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]),
+ update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end).
+
+%% Disable tracing for VHost (removed from the traced-vhosts list).
+stop(VHost) ->
+ rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]),
+ update_config(fun (VHosts) -> VHosts -- [VHost] end).
+
+%% Apply Fun to the traced-vhosts list in the application environment
+%% and tell local channels to refresh their cached tracing state.
+update_config(Fun) ->
+ {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS),
+ VHosts = Fun(VHosts0),
+ application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
+ rabbit_channel:refresh_config_local(),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% Publish Msg to the trace exchange with routing key
+%% "<RKPrefix>.<RKSuffix>" and a header table describing the message.
+%% The first clause suppresses tracing of messages published to the
+%% trace exchange itself, which would otherwise loop forever.
+trace(#exchange{name = Name}, #basic_message{exchange_name = Name},
+ _RKPrefix, _RKSuffix, _Extra) ->
+ ok;
+trace(X, Msg = #basic_message{content = #content{payload_fragments_rev = PFR}},
+ RKPrefix, RKSuffix, Extra) ->
+ {ok, _} = rabbit_basic:publish(
+ X, <<RKPrefix/binary, ".", RKSuffix/binary>>,
+ #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, PFR),
+ ok.
+
+%% Build the AMQP header table attached to trace messages: exchange
+%% name, routing keys, the message's defined basic properties (folded
+%% out of the #'P_basic' record positionally, starting at element 2 to
+%% skip the record tag), and the local node name.
+msg_to_table(#basic_message{exchange_name = #resource{name = XName},
+ routing_keys = RoutingKeys,
+ content = Content}) ->
+ #content{properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ {PropsTable, _Ix} =
+ lists:foldl(fun (K, {L, Ix}) ->
+ V = element(Ix, Props),
+ NewL = case V of
+ undefined -> L;
+ _ -> [{a2b(K), type(V), V} | L]
+ end,
+ {NewL, Ix + 1}
+ end, {[], 2}, record_info(fields, 'P_basic')),
+ [{<<"exchange_name">>, longstr, XName},
+ {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]},
+ {<<"properties">>, table, PropsTable},
+ {<<"node">>, longstr, a2b(node())}].
+
+%% Atom to binary (latin1, matching the old atom_to_list/list_to_binary
+%% round trip byte for byte).
+a2b(A) -> atom_to_binary(A, latin1).
+
+%% Map a basic-property value to its AMQP field-table type tag:
+%% lists (sub-tables) -> table, integers -> signedint, everything
+%% else -> longstr.  The integer/list guards are disjoint, so clause
+%% order is immaterial.
+type(V) when is_integer(V) -> signedint;
+type(V) when is_list(V)    -> table;
+type(_Other)               -> longstr.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_types).
+
+-include("rabbit.hrl").
+
+-ifdef(use_specs).
+
+-export_type([maybe/1, info/0, infos/0, info_key/0, info_keys/0,
+ message/0, msg_id/0, basic_message/0,
+ delivery/0, content/0, decoded_content/0, undecoded_content/0,
+ unencoded_content/0, encoded_content/0, message_properties/0,
+ vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0,
+ binding/0, binding_source/0, binding_destination/0,
+ amqqueue/0, exchange/0,
+ connection/0, protocol/0, user/0, internal_user/0,
+ username/0, password/0, password_hash/0,
+ ok/1, error/1, ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0,
+ channel_exit/0, connection_exit/0, mfargs/0, proc_name/0,
+ proc_type_and_name/0]).
+
+-type(maybe(T) :: T | 'none').
+-type(vhost() :: binary()).
+-type(ctag() :: binary()).
+
+%% TODO: make this more precise by tying specific class_ids to
+%% specific properties
+-type(undecoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: 'none',
+ properties_bin :: binary(),
+ payload_fragments_rev :: [binary()]} |
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: rabbit_framing:amqp_property_record(),
+ properties_bin :: 'none',
+ payload_fragments_rev :: [binary()]}).
+-type(unencoded_content() :: undecoded_content()).
+-type(decoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: rabbit_framing:amqp_property_record(),
+ properties_bin :: maybe(binary()),
+ payload_fragments_rev :: [binary()]}).
+-type(encoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: maybe(rabbit_framing:amqp_property_record()),
+ properties_bin :: binary(),
+ payload_fragments_rev :: [binary()]}).
+-type(content() :: undecoded_content() | decoded_content()).
+-type(msg_id() :: rabbit_guid:guid()).
+-type(basic_message() ::
+ #basic_message{exchange_name :: rabbit_exchange:name(),
+ routing_keys :: [rabbit_router:routing_key()],
+ content :: content(),
+ id :: msg_id(),
+ is_persistent :: boolean()}).
+-type(message() :: basic_message()).
+-type(delivery() ::
+ #delivery{mandatory :: boolean(),
+ sender :: pid(),
+ message :: message()}).
+-type(message_properties() ::
+ #message_properties{expiry :: pos_integer() | 'undefined',
+ needs_confirming :: boolean()}).
+
+-type(info_key() :: atom()).
+-type(info_keys() :: [info_key()]).
+
+-type(info() :: {info_key(), any()}).
+-type(infos() :: [info()]).
+
+-type(amqp_error() ::
+ #amqp_error{name :: rabbit_framing:amqp_exception(),
+ explanation :: string(),
+ method :: rabbit_framing:amqp_method_name()}).
+
+-type(r(Kind) ::
+ r2(vhost(), Kind)).
+-type(r2(VirtualHost, Kind) ::
+ r3(VirtualHost, Kind, rabbit_misc:resource_name())).
+-type(r3(VirtualHost, Kind, Name) ::
+ #resource{virtual_host :: VirtualHost,
+ kind :: Kind,
+ name :: Name}).
+
+-type(listener() ::
+ #listener{node :: node(),
+ protocol :: atom(),
+ host :: rabbit_networking:hostname(),
+ port :: rabbit_networking:ip_port()}).
+
+-type(binding_source() :: rabbit_exchange:name()).
+-type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()).
+
+-type(binding() ::
+ #binding{source :: rabbit_exchange:name(),
+ destination :: binding_destination(),
+ key :: rabbit_binding:key(),
+ args :: rabbit_framing:amqp_table()}).
+
+-type(amqqueue() ::
+ #amqqueue{name :: rabbit_amqqueue:name(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ exclusive_owner :: rabbit_types:maybe(pid()),
+ arguments :: rabbit_framing:amqp_table(),
+ pid :: rabbit_types:maybe(pid()),
+ slave_pids :: [pid()]}).
+
+-type(exchange() ::
+ #exchange{name :: rabbit_exchange:name(),
+ type :: rabbit_exchange:type(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ arguments :: rabbit_framing:amqp_table()}).
+
+-type(connection() :: pid()).
+
+-type(protocol() :: rabbit_framing:protocol()).
+
+-type(user() ::
+ #user{username :: username(),
+ tags :: [atom()],
+ auth_backend :: atom(),
+ impl :: any()}).
+
+-type(internal_user() ::
+ #internal_user{username :: username(),
+ password_hash :: password_hash(),
+ tags :: [atom()]}).
+
+-type(username() :: binary()).
+-type(password() :: binary()).
+-type(password_hash() :: binary()).
+
+-type(ok(A) :: {'ok', A}).
+-type(error(A) :: {'error', A}).
+-type(ok_or_error(A) :: 'ok' | error(A)).
+-type(ok_or_error2(A, B) :: ok(A) | error(B)).
+-type(ok_pid_or_error() :: ok_or_error2(pid(), any())).
+
+-type(channel_exit() :: no_return()).
+-type(connection_exit() :: no_return()).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-type(proc_name() :: term()).
+-type(proc_type_and_name() :: {atom(), proc_name()}).
+
+-endif. % use_specs
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_upgrade).
+
+-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]).
+
+-include("rabbit.hrl").
+
+-define(VERSION_FILENAME, "schema_version").
+-define(LOCK_FILENAME, "schema_upgrade_lock").
+
+%% -------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(maybe_upgrade_mnesia/0 :: () -> 'ok').
+-spec(maybe_upgrade_local/0 :: () -> 'ok' |
+ 'version_not_available' |
+ 'starting_from_scratch').
+
+-endif.
+
+%% -------------------------------------------------------------------
+
+%% The upgrade logic is quite involved, due to the existence of
+%% clusters.
+%%
+%% Firstly, we have two different types of upgrades to do: Mnesia and
+%% everything else. Mnesia upgrades must only be done by one node in
+%% the cluster (we treat a non-clustered node as a single-node
+%% cluster). This is the primary upgrader. The other upgrades need to
+%% be done by all nodes.
+%%
+%% The primary upgrader has to start first (and do its Mnesia
+%% upgrades). Secondary upgraders need to reset their Mnesia database
+%% and then rejoin the cluster. They can't do the Mnesia upgrades as
+%% well and then merge databases since the cookie for each table will
+%% end up different and the merge will fail.
+%%
+%% This in turn means that we need to determine whether we are the
+%% primary or secondary upgrader *before* Mnesia comes up. If we
+%% didn't then the secondary upgrader would try to start Mnesia, and
+%% either hang waiting for a node which is not yet up, or fail since
+%% its schema differs from the other nodes in the cluster.
+%%
+%% Also, the primary upgrader needs to start Mnesia to do its
+%% upgrades, but needs to forcibly load tables rather than wait for
+%% them (in case it was not the last node to shut down, in which case
+%% it would wait forever).
+%%
+%% This in turn means that maybe_upgrade_mnesia/0 has to be patched
+%% into the boot process by prelaunch before the mnesia application is
+%% started. By the time Mnesia is started the upgrades have happened
+%% (on the primary), or Mnesia has been reset (on the secondary) and
+%% rabbit_mnesia:init_db_unchecked/2 can then make the node rejoin the cluster
+%% in the normal way.
+%%
+%% The non-mnesia upgrades are then triggered by
+%% rabbit_mnesia:init_db_unchecked/2. Of course, it's possible for a given
+%% upgrade process to only require Mnesia upgrades, or only require
+%% non-Mnesia upgrades. In the latter case no Mnesia resets and
+%% reclusterings occur.
+%%
+%% The primary upgrader needs to be a disc node. Ideally we would like
+%% it to be the last disc node to shut down (since otherwise there's a
+%% risk of data loss). On each node we therefore record the disc nodes
+%% that were still running when we shut down. A disc node that knows
+%% other nodes were up when it shut down, or a ram node, will refuse
+%% to be the primary upgrader, and will thus not start when upgrades
+%% are needed.
+%%
+%% However, this is racy if several nodes are shut down at once. Since
+%% rabbit records the running nodes, and shuts down before mnesia, the
+%% race manifests as all disc nodes thinking they are not the primary
+%% upgrader. Therefore the user can remove the record of the last disc
+%% node to shut down to get things going again. This may lose any
+%% mnesia changes that happened after the node chosen as the primary
+%% upgrader was shut down.
+
+%% -------------------------------------------------------------------
+
+%% Take a backup of the Mnesia dir unless one already exists; a
+%% leftover lock file means a previous upgrade died mid-flight, in
+%% which case we refuse to proceed.
+ensure_backup_taken() ->
+    case filelib:is_file(lock_filename()) of
+        true ->
+            throw({error, previous_upgrade_failed});
+        false ->
+            case filelib:is_dir(backup_dir()) of
+                true  -> ok;
+                false -> ok = take_backup()
+            end
+    end.
+
+%% Copy the Mnesia dir to the backup location, logging on success and
+%% throwing on failure.
+take_backup() ->
+ BackupDir = backup_dir(),
+ case rabbit_mnesia:copy_db(BackupDir) of
+ ok -> info("upgrades: Mnesia dir backed up to ~p~n",
+ [BackupDir]);
+ {error, E} -> throw({could_not_back_up_mnesia_dir, E})
+ end.
+
+%% Delete the backup dir if present; a no-op when no backup exists.
+ensure_backup_removed() ->
+    case filelib:is_dir(backup_dir()) of
+        false -> ok;
+        true  -> ok = remove_backup()
+    end.
+
+%% Recursively delete the backup dir, logging once done.
+remove_backup() ->
+ ok = rabbit_file:recursive_delete([backup_dir()]),
+ info("upgrades: Mnesia backup removed~n", []).
+
+%% Entry point called by prelaunch before Mnesia starts (see the design
+%% notes above).  Decides whether Mnesia upgrades are needed and, if
+%% so, backs up the database and runs them as primary or secondary
+%% depending on this node's position in the cluster.
+maybe_upgrade_mnesia() ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ case rabbit_version:upgrades_required(mnesia) of
+ {error, starting_from_scratch} ->
+ ok;
+ {error, version_not_available} ->
+ case AllNodes of
+ [] -> die("Cluster upgrade needed but upgrading from "
+ "< 2.1.1.~nUnfortunately you will need to "
+ "rebuild the cluster.", []);
+ _ -> ok
+ end;
+ {error, _} = Err ->
+ throw(Err);
+ {ok, []} ->
+ ok;
+ {ok, Upgrades} ->
+ ensure_backup_taken(),
+ ok = case upgrade_mode(AllNodes) of
+ primary -> primary_upgrade(Upgrades, AllNodes);
+ secondary -> secondary_upgrade(AllNodes)
+ end
+ end.
+
+%% Decide whether this node is the primary or a secondary upgrader.
+%% If no cluster node is running we may only be primary when we are a
+%% disc node and were the last disc node to shut down; otherwise we are
+%% secondary, provided the running node agrees on the desired schema
+%% version.  All failure cases go through die/2.
+upgrade_mode(AllNodes) ->
+ case nodes_running(AllNodes) of
+ [] ->
+ AfterUs = rabbit_mnesia:cluster_nodes(running) -- [node()],
+ case {node_type_legacy(), AfterUs} of
+ {disc, []} ->
+ primary;
+ {disc, _} ->
+ Filename = rabbit_node_monitor:running_nodes_filename(),
+ die("Cluster upgrade needed but other disc nodes shut "
+ "down after this one.~nPlease first start the last "
+ "disc node to shut down.~n~nNote: if several disc "
+ "nodes were shut down simultaneously they may "
+ "all~nshow this message. In which case, remove "
+ "the lock file on one of them and~nstart that node. "
+ "The lock file on this node is:~n~n ~s ", [Filename]);
+ {ram, _} ->
+ die("Cluster upgrade needed but this is a ram node.~n"
+ "Please first start the last disc node to shut down.",
+ [])
+ end;
+ [Another|_] ->
+ MyVersion = rabbit_version:desired_for_scope(mnesia),
+ ErrFun = fun (ClusterVersion) ->
+ %% The other node(s) are running an
+ %% unexpected version.
+ die("Cluster upgrade needed but other nodes are "
+ "running ~p~nand I want ~p",
+ [ClusterVersion, MyVersion])
+ end,
+ case rpc:call(Another, rabbit_version, desired_for_scope,
+ [mnesia]) of
+ {badrpc, {'EXIT', {undef, _}}} -> ErrFun(unknown_old_version);
+ {badrpc, Reason} -> ErrFun({unknown, Reason});
+ CV -> case rabbit_version:matches(
+ MyVersion, CV) of
+ true -> secondary;
+ false -> ErrFun(CV)
+ end
+ end
+ end.
+
%% Report a fatal upgrade error and terminate the VM (or throw, if
%% the user has set halt_on_upgrade_failure = false).
%%
%% We don't throw or exit straight away since that gets thrown
%% straight out into do_boot, generating an erl_crash.dump
%% and displaying any error message in a confusing way.
die(Msg, Args) ->
    error_logger:error_msg(Msg, Args),
    Str = rabbit_misc:format(
            "~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args),
    %% Print the already-formatted string verbatim. Passing Str as a
    %% format string (io:format(Str)) would re-interpret any '~'
    %% characters that the formatted arguments happen to contain,
    %% crashing or garbling the message.
    io:format("~s", [Str]),
    error_logger:logfile(close),
    case application:get_env(rabbit, halt_on_upgrade_failure) of
        {ok, false} -> throw({upgrade_error, Str});
        _           -> halt(1) %% i.e. true or undefined
    end.
+
%% Run the mnesia upgrades on this node. Before applying them we
%% force-load all tables and, if we are clustered, detach every other
%% node's schema copy ("break the cluster") so they will re-sync from
%% us as secondaries.
primary_upgrade(Upgrades, Nodes) ->
    Others = Nodes -- [node()],
    Prep = fun () ->
                   rabbit_table:force_load(),
                   case Others of
                       [] ->
                           ok;
                       _ ->
                           info("mnesia upgrades: Breaking cluster~n", []),
                           lists:foreach(
                             fun (Node) ->
                                     {atomic, ok} =
                                         mnesia:del_table_copy(schema, Node)
                             end, Others)
                   end
           end,
    ok = apply_upgrades(mnesia, Upgrades, Prep),
    ok.
+
%% Upgrade by discarding our local schema and re-syncing the database
%% from the (already upgraded) primary node.
secondary_upgrade(AllNodes) ->
    %% must do this before we wipe out schema
    NodeType = node_type_legacy(),
    rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
                          cannot_delete_schema),
    rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
    ok = rabbit_mnesia:init_db_unchecked(AllNodes, NodeType),
    ok = rabbit_version:record_desired_for_scope(mnesia),
    ok.
+
%% Return the subset of Nodes on which rabbit reports itself running.
nodes_running(Nodes) ->
    [Node || Node <- Nodes, rabbit:is_running(Node)].
+
+%% -------------------------------------------------------------------
+
%% Entry point for local-scope (non-mnesia) upgrades. Returns ok, or
%% an atom explaining why nothing was done; throws on other errors.
%% The backup taken for the mnesia phase is only removed here, once
%% the local phase has also completed.
maybe_upgrade_local() ->
    case rabbit_version:upgrades_required(local) of
        {error, version_not_available} -> version_not_available;
        {error, starting_from_scratch} -> starting_from_scratch;
        {error, _} = Err               -> throw(Err);
        {ok, []}                       -> ensure_backup_removed(),
                                          ok;
        {ok, Upgrades}                 -> mnesia:stop(),
                                          ensure_backup_taken(),
                                          ok = apply_upgrades(local, Upgrades,
                                                              fun () -> ok end),
                                          ensure_backup_removed(),
                                          ok
    end.
+
+%% -------------------------------------------------------------------
+
%% Run Fun (a preparation step) and then each {Module, Function}
%% upgrade in order, holding the upgrade lock file throughout. The
%% lock file is only deleted on success, so a crash leaves it behind
%% for ensure_backup_taken/0 to detect on the next boot.
apply_upgrades(Scope, Upgrades, Fun) ->
    ok = rabbit_file:lock_file(lock_filename()),
    info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
    rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
    Fun(),
    lists:foreach(fun (Upgrade) -> apply_upgrade(Scope, Upgrade) end,
                  Upgrades),
    info("~s upgrades: All upgrades applied successfully~n", [Scope]),
    ok = rabbit_version:record_desired_for_scope(Scope),
    ok = file:delete(lock_filename()).
+
%% Apply a single upgrade step, given as a {Module, Function} pair.
apply_upgrade(Scope, {M, F}) ->
    info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]),
    ok = M:F().
+
+%% -------------------------------------------------------------------
+
%% Path helpers: the lock file lives inside the mnesia dir, the
%% backup directory alongside it.
dir() -> rabbit_mnesia:dir().

lock_filename() -> lock_filename(dir()).
lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME).
backup_dir() -> dir() ++ "-upgrade-backup".
+
%% Infer whether this node was a disc or ram node by looking for a
%% disc copy of a table on disk.
node_type_legacy() ->
    %% This is pretty ugly but we can't start Mnesia and ask it (will
    %% hang), we can't look at the config file (may not include us
    %% even if we're a disc node). We also can't use
    %% rabbit_mnesia:node_type/0 because that will give false
    %% positives on Rabbit up to 2.5.1.
    case filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")) of
        true  -> disc;
        false -> ram
    end.
+
%% Log an informational message.
%% NB: we cannot use rabbit_log here since it may not have been
%% started yet
info(Msg, Args) -> error_logger:info_msg(Msg, Args).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_upgrade_functions).
+
+%% If you are tempted to add include("rabbit.hrl"). here, don't. Using record
+%% defs here leads to pain later.
+
+-compile([export_all]).
+
+-rabbit_upgrade({remove_user_scope, mnesia, []}).
+-rabbit_upgrade({hash_passwords, mnesia, []}).
+-rabbit_upgrade({add_ip_to_listener, mnesia, []}).
+-rabbit_upgrade({internal_exchanges, mnesia, []}).
+-rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}).
+-rabbit_upgrade({topic_trie, mnesia, []}).
+-rabbit_upgrade({semi_durable_route, mnesia, []}).
+-rabbit_upgrade({exchange_event_serial, mnesia, []}).
+-rabbit_upgrade({trace_exchanges, mnesia, [internal_exchanges]}).
+-rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}).
+-rabbit_upgrade({ha_mirrors, mnesia, []}).
+-rabbit_upgrade({gm, mnesia, []}).
+-rabbit_upgrade({exchange_scratch, mnesia, [trace_exchanges]}).
+-rabbit_upgrade({mirrored_supervisor, mnesia, []}).
+-rabbit_upgrade({topic_trie_node, mnesia, []}).
+-rabbit_upgrade({runtime_parameters, mnesia, []}).
+-rabbit_upgrade({exchange_scratches, mnesia, [exchange_scratch]}).
+-rabbit_upgrade({policy, mnesia,
+ [exchange_scratches, ha_mirrors]}).
+-rabbit_upgrade({sync_slave_pids, mnesia, [policy]}).
+-rabbit_upgrade({no_mirror_nodes, mnesia, [sync_slave_pids]}).
+-rabbit_upgrade({gm_pids, mnesia, [no_mirror_nodes]}).
+-rabbit_upgrade({exchange_decorators, mnesia, [policy]}).
+-rabbit_upgrade({policy_apply_to, mnesia, [runtime_parameters]}).
+-rabbit_upgrade({queue_decorators, mnesia, [gm_pids]}).
+-rabbit_upgrade({internal_system_x, mnesia, [exchange_decorators]}).
+-rabbit_upgrade({cluster_name, mnesia, [runtime_parameters]}).
+
+%% -------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(remove_user_scope/0 :: () -> 'ok').
+-spec(hash_passwords/0 :: () -> 'ok').
+-spec(add_ip_to_listener/0 :: () -> 'ok').
+-spec(internal_exchanges/0 :: () -> 'ok').
+-spec(user_to_internal_user/0 :: () -> 'ok').
+-spec(topic_trie/0 :: () -> 'ok').
+-spec(semi_durable_route/0 :: () -> 'ok').
+-spec(exchange_event_serial/0 :: () -> 'ok').
+-spec(trace_exchanges/0 :: () -> 'ok').
+-spec(user_admin_to_tags/0 :: () -> 'ok').
+-spec(ha_mirrors/0 :: () -> 'ok').
+-spec(gm/0 :: () -> 'ok').
+-spec(exchange_scratch/0 :: () -> 'ok').
+-spec(mirrored_supervisor/0 :: () -> 'ok').
+-spec(topic_trie_node/0 :: () -> 'ok').
+-spec(runtime_parameters/0 :: () -> 'ok').
+-spec(policy/0 :: () -> 'ok').
+-spec(sync_slave_pids/0 :: () -> 'ok').
+-spec(no_mirror_nodes/0 :: () -> 'ok').
+-spec(gm_pids/0 :: () -> 'ok').
+-spec(exchange_decorators/0 :: () -> 'ok').
+-spec(policy_apply_to/0 :: () -> 'ok').
+-spec(queue_decorators/0 :: () -> 'ok').
+-spec(internal_system_x/0 :: () -> 'ok').
+
+-endif.
+
+%%--------------------------------------------------------------------
+
+%% It's a bad idea to use records or record_info here, even for the
+%% destination form. Because in the future, the destination form of
+%% your current transform may not match the record any more, and it
+%% would be messy to have to go back and fix old transforms at that
+%% point.
+
%% Drop the obsolete 'scope' field from every stored permission.
remove_user_scope() ->
    DropScope =
        fun ({user_permission, UserVHost,
              {permission, _Scope, Configure, Write, Read}}) ->
                {user_permission, UserVHost,
                 {permission, Configure, Write, Read}}
        end,
    transform(rabbit_user_permission, DropScope, [user_vhost, permission]).
+
%% Replace stored plaintext passwords with their hashed form.
hash_passwords() ->
    HashFun =
        fun ({user, Name, PlainPassword, IsAdmin}) ->
                Hashed = rabbit_auth_backend_internal:hash_password(
                           PlainPassword),
                {user, Name, Hashed, IsAdmin}
        end,
    transform(rabbit_user, HashFun, [username, password_hash, is_admin]).
+
%% Insert an ip_address field (wildcard {0,0,0,0}) into listener rows.
add_ip_to_listener() ->
    AddIp =
        fun ({listener, Node, Protocol, Host, Port}) ->
                {listener, Node, Protocol, Host, {0,0,0,0}, Port}
        end,
    transform(rabbit_listener, AddIp,
              [node, protocol, host, ip_address, port]).
+
%% Add the 'internal' flag (defaulting to false) to every exchange in
%% both the transient and durable exchange tables.
internal_exchanges() ->
    AddInternalFun =
        fun ({exchange, Name, Type, Durable, AutoDelete, Args}) ->
                {exchange, Name, Type, Durable, AutoDelete, false, Args}
        end,
    Fields = [name, type, durable, auto_delete, internal, arguments],
    lists:foreach(
      fun (Table) -> ok = transform(Table, AddInternalFun, Fields) end,
      [rabbit_exchange, rabbit_durable_exchange]),
    ok.
+
%% Rename the 'user' record to 'internal_user'; fields are unchanged.
user_to_internal_user() ->
    Rename =
        fun ({user, Username, PasswordHash, IsAdmin}) ->
                {internal_user, Username, PasswordHash, IsAdmin}
        end,
    transform(rabbit_user, Rename,
              [username, password_hash, is_admin], internal_user).
+
%% Create the two tables backing the topic exchange trie.
topic_trie() ->
    ok = create(rabbit_topic_trie_edge,
                [{record_name, topic_trie_edge},
                 {attributes, [trie_edge, node_id]},
                 {type, ordered_set}]),
    ok = create(rabbit_topic_trie_binding,
                [{record_name, topic_trie_binding},
                 {attributes, [trie_binding, value]},
                 {type, ordered_set}]).
+
%% Create the rabbit_semi_durable_route table (stores 'route' records).
semi_durable_route() ->
    Def = [{record_name, route},
           {attributes, [binding, value]}],
    create(rabbit_semi_durable_route, Def).
+
%% Create the rabbit_exchange_serial table.
exchange_event_serial() ->
    Def = [{record_name, exchange_serial},
           {attributes, [name, next]}],
    create(rabbit_exchange_serial, Def).
+
%% Declare the amq.rabbitmq.trace topic exchange in every vhost.
trace_exchanges() ->
    lists:foreach(
      fun (VHost) ->
              XName = rabbit_misc:r(VHost, exchange, <<"amq.rabbitmq.trace">>),
              declare_exchange(XName, topic)
      end, rabbit_vhost:list()),
    ok.
+
%% Replace the boolean is_admin field with a list of tags:
%% administrators get [administrator], everyone else [management].
user_admin_to_tags() ->
    SetTags =
        fun ({internal_user, Name, Hash, true}) ->
                {internal_user, Name, Hash, [administrator]};
            ({internal_user, Name, Hash, false}) ->
                {internal_user, Name, Hash, [management]}
        end,
    transform(rabbit_user, SetTags,
              [username, password_hash, tags], internal_user).
+
%% Extend queue records with slave_pids ([]) and mirror_nodes
%% (undefined) fields in both queue tables.
ha_mirrors() ->
    AddMirrorPidsFun =
        fun ({amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid}) ->
                {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid,
                 [], undefined}
        end,
    Fields = [name, durable, auto_delete, exclusive_owner, arguments,
              pid, slave_pids, mirror_nodes],
    lists:foreach(
      fun (Table) -> ok = transform(Table, AddMirrorPidsFun, Fields) end,
      [rabbit_queue, rabbit_durable_queue]),
    ok.
+
%% Create the gm_group table.
gm() ->
    Def = [{record_name, gm_group},
           {attributes, [name, version, members]}],
    create(gm_group, Def).
+
%% Add a 'scratch' field (initially undefined) to every exchange in
%% both exchange tables.
exchange_scratch() ->
    ok = exchange_scratch(rabbit_exchange),
    ok = exchange_scratch(rabbit_durable_exchange).

exchange_scratch(Table) ->
    AddScratch =
        fun ({exchange, Name, Type, Dur, AutoDel, Int, Args}) ->
                {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
        end,
    transform(Table, AddScratch,
              [name, type, durable, auto_delete, internal, arguments,
               scratch]).
+
%% Create the mirrored_sup_childspec table.
mirrored_supervisor() ->
    Def = [{record_name, mirrored_sup_childspec},
           {attributes, [key, mirroring_pid, childspec]}],
    create(mirrored_sup_childspec, Def).
+
%% Create the rabbit_topic_trie_node table.
topic_trie_node() ->
    Def = [{record_name, topic_trie_node},
           {attributes, [trie_node, edge_count, binding_count]},
           {type, ordered_set}],
    create(rabbit_topic_trie_node, Def).
+
%% Create the rabbit_runtime_parameters table, with a disc copy on
%% this node.
runtime_parameters() ->
    Def = [{record_name, runtime_parameters},
           {attributes, [key, value]},
           {disc_copies, [node()]}],
    create(rabbit_runtime_parameters, Def).
+
%% Turn the single federation 'scratch' field into a 'scratches'
%% orddict keyed by plugin name.
exchange_scratches() ->
    ok = exchange_scratches(rabbit_exchange),
    ok = exchange_scratches(rabbit_durable_exchange).

exchange_scratches(Table) ->
    Migrate =
        fun ({exchange, Name, Type = <<"x-federation">>, Dur, AutoDel, Int,
              Args, Scratch}) ->
                Scratches = orddict:store(federation, Scratch, orddict:new()),
                {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches};
            %% We assert here that nothing else uses the scratch mechanism ATM
            ({exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}) ->
                {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
        end,
    transform(Table, Migrate,
              [name, type, durable, auto_delete, internal, arguments,
               scratches]).
+
%% Add a 'policy' field to all exchange and queue records, in both
%% the transient and durable tables.
policy() ->
    ok = exchange_policy(rabbit_exchange),
    ok = exchange_policy(rabbit_durable_exchange),
    ok = queue_policy(rabbit_queue),
    ok = queue_policy(rabbit_durable_queue).
+
%% Append a policy field (initially undefined) to each exchange row.
exchange_policy(Table) ->
    AddPolicy =
        fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches}) ->
                {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
                 undefined}
        end,
    transform(Table, AddPolicy,
              [name, type, durable, auto_delete, internal, arguments,
               scratches, policy]).
+
%% Append a policy field (initially undefined) to each queue row.
queue_policy(Table) ->
    AddPolicy =
        fun ({amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids,
              MNodes}) ->
                {amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids,
                 MNodes, undefined}
        end,
    transform(Table, AddPolicy,
              [name, durable, auto_delete, exclusive_owner, arguments, pid,
               slave_pids, mirror_nodes, policy]).
+
%% Insert a sync_slave_pids field (initially []) into queue records.
sync_slave_pids() ->
    AddSyncSlavesFun =
        fun ({amqqueue, N, D, AD, Excl, Args, Pid, SPids, MNodes, Pol}) ->
                {amqqueue, N, D, AD, Excl, Args, Pid, SPids, [], MNodes, Pol}
        end,
    Fields = [name, durable, auto_delete, exclusive_owner, arguments,
              pid, slave_pids, sync_slave_pids, mirror_nodes, policy],
    lists:foreach(
      fun (T) -> ok = transform(T, AddSyncSlavesFun, Fields) end,
      [rabbit_queue, rabbit_durable_queue]),
    ok.
+
%% Remove the mirror_nodes field from queue records.
no_mirror_nodes() ->
    RemoveMirrorNodesFun =
        fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, _MNodes, Pol}) ->
                {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}
        end,
    Fields = [name, durable, auto_delete, exclusive_owner, arguments,
              pid, slave_pids, sync_slave_pids, policy],
    lists:foreach(
      fun (T) -> ok = transform(T, RemoveMirrorNodesFun, Fields) end,
      [rabbit_queue, rabbit_durable_queue]),
    ok.
+
%% Append a gm_pids field (initially []) to queue records.
gm_pids() ->
    AddGMPidsFun =
        fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}) ->
                {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol, []}
        end,
    Fields = [name, durable, auto_delete, exclusive_owner, arguments,
              pid, slave_pids, sync_slave_pids, policy, gm_pids],
    lists:foreach(
      fun (T) -> ok = transform(T, AddGMPidsFun, Fields) end,
      [rabbit_queue, rabbit_durable_queue]),
    ok.
+
%% Append a decorators field (initially {[], []}) to exchange records
%% in both exchange tables.
exchange_decorators() ->
    ok = exchange_decorators(rabbit_exchange),
    ok = exchange_decorators(rabbit_durable_exchange).

exchange_decorators(Table) ->
    AddDecorators =
        fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
              Policy}) ->
                {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
                 Policy, {[], []}}
        end,
    transform(Table, AddDecorators,
              [name, type, durable, auto_delete, internal, arguments,
               scratches, policy, decorators]).
+
%% Give every stored policy an explicit <<"apply-to">> key, derived
%% from its definition via apply_to/1, then invalidate any cached
%% policy state. Non-policy parameters pass through unchanged.
policy_apply_to() ->
    transform(
      rabbit_runtime_parameters,
      fun ({runtime_parameters, Key = {_VHost, <<"policy">>, _Name}, Props}) ->
              Definition = proplists:get_value(<<"definition">>, Props),
              ApplyTo = apply_to(Definition),
              {runtime_parameters, Key, [{<<"apply-to">>, ApplyTo} | Props]};
          ({runtime_parameters, Key, Props}) ->
              {runtime_parameters, Key, Props}
      end,
      [key, value]),
    rabbit_policy:invalidate(),
    ok.
+
%% Infer what a legacy policy definition should apply to: a
%% federation-only definition applies to exchanges, an HA-only one to
%% queues, and anything else (neither or both) to everything.
apply_to(Def) ->
    Fed = proplists:get_value(<<"federation-upstream-set">>, Def),
    HA  = proplists:get_value(<<"ha-mode">>, Def),
    case {Fed, HA} of
        {undefined, undefined} -> <<"all">>;
        {_,         undefined} -> <<"exchanges">>;
        {undefined, _}         -> <<"queues">>;
        {_,         _}         -> <<"all">>
    end.
+
%% Append a decorators field (initially []) to queue records in both
%% queue tables.
queue_decorators() ->
    ok = queue_decorators(rabbit_queue),
    ok = queue_decorators(rabbit_durable_queue).

queue_decorators(Table) ->
    AddDecorators =
        fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
              Pid, SlavePids, SyncSlavePids, Policy, GmPids}) ->
                {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner,
                 Arguments, Pid, SlavePids, SyncSlavePids, Policy, GmPids, []}
        end,
    transform(Table, AddDecorators,
              [name, durable, auto_delete, exclusive_owner, arguments, pid,
               slave_pids, sync_slave_pids, policy, gm_pids, decorators]).
+
%% Mark the amq.rabbitmq.* system exchanges as internal; all other
%% durable exchanges are left untouched.
internal_system_x() ->
    MarkInternal =
        fun ({exchange, Name = {resource, _, _, <<"amq.rabbitmq.", _/binary>>},
              Type, Dur, AutoDel, _Int, Args, Scratches, Policy,
              Decorators}) ->
                {exchange, Name, Type, Dur, AutoDel, true, Args, Scratches,
                 Policy, Decorators};
            (X) ->
                X
        end,
    transform(rabbit_durable_exchange, MarkInternal,
              [name, type, durable, auto_delete, internal, arguments,
               scratches, policy, decorators]).
+
%% Convert any federation local-nodename parameters into the new
%% cluster_name runtime parameter, inside a single mnesia transaction.
cluster_name() ->
    {atomic, ok} = mnesia:transaction(fun () -> cluster_name_tx() end),
    ok.
+
%% Transactional body of cluster_name/0: find every federation
%% local-nodename parameter, store the first one's value under the
%% cluster_name key, warn if there were several, and delete them all.
cluster_name_tx() ->
    %% mnesia:transform_table/4 does not let us delete records
    T = rabbit_runtime_parameters,
    mnesia:write_lock_table(T),
    Ks = [K || {_VHost, <<"federation">>, <<"local-nodename">>} = K
                   <- mnesia:all_keys(T)],
    case Ks of
        []     -> ok;
        [K|Tl] -> [{runtime_parameters, _K, Name}] = mnesia:read(T, K, write),
                  R = {runtime_parameters, cluster_name, Name},
                  mnesia:write(T, R, write),
                  %% More than one candidate: we picked the first
                  %% arbitrarily, so tell the user which one won.
                  case Tl of
                      [] -> ok;
                      _  -> {VHost, _, _} = K,
                            error_logger:warning_msg(
                              "Multiple local-nodenames found, picking '~s' "
                              "from '~s' for cluster name~n", [Name, VHost])
                  end
    end,
    [mnesia:delete(T, K, write) || K <- Ks],
    ok.
+
+%%--------------------------------------------------------------------
+
%% Wait for TableName to become available, then rewrite every record
%% with Fun, declaring FieldList as the new attribute list.
transform(TableName, Fun, FieldList) ->
    rabbit_table:wait([TableName]),
    {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList),
    ok.
+
%% As transform/3, but additionally renames the table's record to
%% NewRecordName.
transform(TableName, Fun, FieldList, NewRecordName) ->
    rabbit_table:wait([TableName]),
    {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList,
                                          NewRecordName),
    ok.
+
%% Create a new mnesia table, crashing (badmatch) if creation fails.
create(Tab, TabDef) ->
    {atomic, ok} = mnesia:create_table(Tab, TabDef),
    ok.
+
%% Dumb replacement for rabbit_exchange:declare that does not require
%% the exchange type registry or worker pool to be running by dint of
%% not validating anything and assuming the exchange type does not
%% require serialisation.
%% NB: this assumes the pre-exchange-scratch-space format
declare_exchange(XName, Type) ->
    %% Fields: durable = true, auto_delete = false, internal = false,
    %% arguments = [].
    X = {exchange, XName, Type, true, false, false, []},
    ok = mnesia:dirty_write(rabbit_durable_exchange, X).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_variable_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, purge/1, purge_acks/1,
+ publish/5, publish_delivered/4, discard/3, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ status/1, invoke/3, is_duplicate/2, multiple_routing_keys/0]).
+
+-export([start/1, stop/0]).
+
+%% exported for testing only
+-export([start_msg_store/2, stop_msg_store/0, init/5]).
+
+%%----------------------------------------------------------------------------
+%% Definitions:
+
+%% alpha: this is a message where both the message itself, and its
+%% position within the queue are held in RAM
+%%
+%% beta: this is a message where the message itself is only held on
+%% disk, but its position within the queue is held in RAM.
+%%
+%% gamma: this is a message where the message itself is only held on
+%% disk, but its position is both in RAM and on disk.
+%%
+%% delta: this is a collection of messages, represented by a single
+%% term, where the messages and their position are only held on
+%% disk.
+%%
+%% Note that for persistent messages, the message and its position
+%% within the queue are always held on disk, *in addition* to being in
+%% one of the above classifications.
+%%
+%% Also note that within this code, the term gamma seldom
+%% appears. It's frequently the case that gammas are defined by betas
+%% that have had their queue position recorded on disk.
+%%
+%% In general, messages move q1 -> q2 -> delta -> q3 -> q4, though
+%% many of these steps are frequently skipped. q1 and q4 only hold
+%% alphas, q2 and q3 hold both betas and gammas. When a message
+%% arrives, its classification is determined. It is then added to the
+%% rightmost appropriate queue.
+%%
+%% If a new message is determined to be a beta or gamma, q1 is
+%% empty. If a new message is determined to be a delta, q1 and q2 are
+%% empty (and actually q4 too).
+%%
+%% When removing messages from a queue, if q4 is empty then q3 is read
+%% directly. If q3 becomes empty then the next segment's worth of
+%% messages from delta are read into q3, reducing the size of
+%% delta. If the queue is non empty, either q4 or q3 contain
+%% entries. It is never permitted for delta to hold all the messages
+%% in the queue.
+%%
+%% The duration indicated to us by the memory_monitor is used to
+%% calculate, given our current ingress and egress rates, how many
+%% messages we should hold in RAM (i.e. as alphas). We track the
+%% ingress and egress rates for both messages and pending acks and
+%% rates for both are considered when calculating the number of
+%% messages to hold in RAM. When we need to push alphas to betas or
+%% betas to gammas, we favour writing out messages that are further
+%% from the head of the queue. This minimises writes to disk, as the
+%% messages closer to the tail of the queue stay in the queue for
+%% longer, thus do not need to be replaced as quickly by sending other
+%% messages to disk.
+%%
+%% Whilst messages are pushed to disk and forgotten from RAM as soon
+%% as requested by a new setting of the queue RAM duration, the
+%% inverse is not true: we only load messages back into RAM as
+%% demanded as the queue is read from. Thus only publishes to the
+%% queue will take up available spare capacity.
+%%
+%% When we report our duration to the memory monitor, we calculate
+%% average ingress and egress rates over the last two samples, and
+%% then calculate our duration based on the sum of the ingress and
+%% egress rates. More than two samples could be used, but it's a
+%% balance between responding quickly enough to changes in
+%% producers/consumers versus ignoring temporary blips. The problem
+%% with temporary blips is that with just a few queues, they can have
+%% substantial impact on the calculation of the average duration and
+%% hence cause unnecessary I/O. Another alternative is to increase the
+%% amqqueue_process:RAM_DURATION_UPDATE_PERIOD to beyond 5
+%% seconds. However, that then runs the risk of being too slow to
+%% inform the memory monitor of changes. Thus a 5 second interval,
+%% plus a rolling average over the last two samples seems to work
+%% well in practice.
+%%
+%% The sum of the ingress and egress rates is used because the egress
+%% rate alone is not sufficient. Adding in the ingress rate means that
+%% queues which are being flooded by messages are given more memory,
+%% resulting in them being able to process the messages faster (by
+%% doing less I/O, or at least deferring it) and thus helping keep
+%% their mailboxes empty and thus the queue as a whole is more
+%% responsive. If such a queue also has fast but previously idle
+%% consumers, the consumer can then start to be driven as fast as it
+%% can go, whereas if only egress rate was being used, the incoming
+%% messages may have to be written to disk and then read back in,
+%% resulting in the hard disk being a bottleneck in driving the
+%% consumers. Generally, we want to give Rabbit every chance of
+%% getting rid of messages as fast as possible and remaining
+%% responsive, and using only the egress rate impacts that goal.
+%%
+%% Once the queue has more alphas than the target_ram_count, the
+%% surplus must be converted to betas, if not gammas, if not rolled
+%% into delta. The conditions under which these transitions occur
+%% reflect the conflicting goals of minimising RAM cost per msg, and
+%% minimising CPU cost per msg. Once the msg has become a beta, its
+%% payload is no longer in RAM, thus a read from the msg_store must
+%% occur before the msg can be delivered, but the RAM cost of a beta
+%% is the same as a gamma, so converting a beta to gamma will not free
+%% up any further RAM. To reduce the RAM cost further, the gamma must
+%% be rolled into delta. Whilst recovering a beta or a gamma to an
+%% alpha requires only one disk read (from the msg_store), recovering
+%% a msg from within delta will require two reads (queue_index and
+%% then msg_store). But delta has a near-0 per-msg RAM cost. So the
+%% conflict is between using delta more, which will free up more
+%% memory, but require additional CPU and disk ops, versus using delta
+%% less and gammas and betas more, which will cost more memory, but
+%% require fewer disk ops and less CPU overhead.
+%%
+%% In the case of a persistent msg published to a durable queue, the
+%% msg is immediately written to the msg_store and queue_index. If
+%% then additionally converted from an alpha, it'll immediately go to
+%% a gamma (as it's already in queue_index), and cannot exist as a
+%% beta. Thus a durable queue with a mixture of persistent and
+%% transient msgs in it which has more messages than permitted by the
+%% target_ram_count may contain an interspersed mixture of betas and
+%% gammas in q2 and q3.
+%%
+%% There is then a ratio that controls how many betas and gammas there
+%% can be. This is based on the target_ram_count and thus expresses
+%% the fact that as the number of permitted alphas in the queue falls,
+%% so should the number of betas and gammas fall (i.e. delta
+%% grows). If q2 and q3 contain more than the permitted number of
+%% betas and gammas, then the surplus are forcibly converted to gammas
+%% (as necessary) and then rolled into delta. The ratio is that
+%% delta/(betas+gammas+delta) equals
+%% (betas+gammas+delta)/(target_ram_count+betas+gammas+delta). I.e. as
+%% the target_ram_count shrinks to 0, so must betas and gammas.
+%%
+%% The conversion of betas to gammas is done in batches of at least
+%% ?IO_BATCH_SIZE. This value should not be too small, otherwise the
+%% frequent operations on the queues of q2 and q3 will not be
+%% effectively amortised (switching the direction of queue access
+%% defeats amortisation). Note that there is a natural upper bound due
+%% to credit_flow limits on the alpha to beta conversion.
+%%
+%% The conversion from alphas to betas is chunked due to the
+%% credit_flow limits of the msg_store. This further smooths the
+%% effects of changes to the target_ram_count and ensures the queue
+%% remains responsive even when there is a large amount of IO work to
+%% do. The 'resume' callback is utilised to ensure that conversions
+%% are done as promptly as possible whilst ensuring the queue remains
+%% responsive.
+%%
+%% In the queue we keep track of both messages that are pending
+%% delivery and messages that are pending acks. In the event of a
+%% queue purge, we only need to load qi segments if the queue has
+%% elements in deltas (i.e. it came under significant memory
+%% pressure). In the event of a queue deletion, in addition to the
+%% preceding, by keeping track of pending acks in RAM, we do not need
+%% to search through qi segments looking for messages that are yet to
+%% be acknowledged.
+%%
+%% Pending acks are recorded in memory by storing the message itself.
+%% If the message has been sent to disk, we do not store the message
+%% content. During memory reduction, pending acks containing message
+%% content have that content removed and the corresponding messages
+%% are pushed out to disk.
+%%
+%% Messages from pending acks are returned to q4, q3 and delta during
+%% requeue, based on the limits of seq_id contained in each. Requeued
+%% messages retain their original seq_id, maintaining order
+%% when requeued.
+%%
+%% The order in which alphas are pushed to betas and pending acks
+%% are pushed to disk is determined dynamically. We always prefer to
+%% push messages for the source (alphas or acks) that is growing the
+%% fastest (with growth measured as avg. ingress - avg. egress).
+%%
+%% Notes on Clean Shutdown
+%% (This documents behaviour in variable_queue, queue_index and
+%% msg_store.)
+%%
+%% In order to try to achieve as fast a start-up as possible, if a
+%% clean shutdown occurs, we try to save out state to disk to reduce
+%% work on startup. In the msg_store this takes the form of the
+%% index_module's state, plus the file_summary ets table, and client
+%% refs. In the VQ, this takes the form of the count of persistent
+%% messages in the queue and references into the msg_stores. The
+%% queue_index adds to these terms the details of its segments and
+%% stores the terms in the queue directory.
+%%
+%% Two message stores are used. One is created for persistent messages
+%% to durable queues that must survive restarts, and the other is used
+%% for all other messages that just happen to need to be written to
+%% disk. On start up we can therefore nuke the transient message
+%% store, and be sure that the messages in the persistent store are
+%% all that we need.
+%%
+%% The references to the msg_stores are there so that the msg_store
+%% knows to only trust its saved state if all of the queues it was
+%% previously talking to come up cleanly. Likewise, the queues
+%% themselves (esp queue_index) skip work in init if all the queues
+%% and msg_store were shut down cleanly. This gives both good speed
+%% improvements and also robustness so that if anything possibly went
+%% wrong in shutdown (or there was subsequent manual tampering), all
+%% messages and queues that can be recovered are recovered, safely.
+%%
+%% To delete transient messages lazily, the variable_queue, on
+%% startup, stores the next_seq_id reported by the queue_index as the
+%% transient_threshold. From that point on, whenever it's reading a
+%% message off disk via the queue_index, if the seq_id is below this
+%% threshold and the message is transient then it drops the message
+%% (the message itself won't exist on disk because it would have been
+%% stored in the transient msg_store which would have had its saved
+%% state nuked on startup). This avoids the expensive operation of
+%% scanning the entire queue on startup in order to delete transient
+%% messages that were only pushed to disk to save memory.
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+ { q1,
+ q2,
+ delta,
+ q3,
+ q4,
+ next_seq_id,
+ ram_pending_ack,
+ disk_pending_ack,
+ index_state,
+ msg_store_clients,
+ durable,
+ transient_threshold,
+
+ len,
+ persistent_count,
+
+ target_ram_count,
+ ram_msg_count,
+ ram_msg_count_prev,
+ ram_ack_count_prev,
+ out_counter,
+ in_counter,
+ rates,
+ msgs_on_disk,
+ msg_indices_on_disk,
+ unconfirmed,
+ confirmed,
+ ack_out_counter,
+ ack_in_counter
+ }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+ { seq_id,
+ msg_id,
+ msg,
+ is_persistent,
+ is_delivered,
+ msg_on_disk,
+ index_on_disk,
+ msg_props
+ }).
+
+-record(delta,
+ { start_seq_id, %% start_seq_id is inclusive
+ count,
+ end_seq_id %% end_seq_id is exclusive
+ }).
+
+%% When we discover that we should write some indices to disk for some
+%% betas, the IO_BATCH_SIZE sets the number of betas that we must be
+%% due to write indices for before we do any work at all.
+-define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+-define(QUEUE, lqueue).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({multiple_routing_keys, local, []}).
+
+-ifdef(use_specs).
+
+-type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
+-type(seq_id() :: non_neg_integer()).
+
+-type(rates() :: #rates { in :: float(),
+ out :: float(),
+ ack_in :: float(),
+ ack_out :: float(),
+ timestamp :: timestamp()}).
+
+-type(delta() :: #delta { start_seq_id :: non_neg_integer(),
+ count :: non_neg_integer(),
+ end_seq_id :: non_neg_integer() }).
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type(ack() :: seq_id()).
+%% Field-by-field spec for #vqstate. Note: ram_ack_count_prev was
+%% missing from this spec although the record declares it and both
+%% init/6 (initialises to 0) and ram_duration/1 (uses it in
+%% arithmetic) treat it as a non-negative integer; it is included
+%% here for completeness.
+-type(state() :: #vqstate {
+ q1 :: ?QUEUE:?QUEUE(),
+ q2 :: ?QUEUE:?QUEUE(),
+ delta :: delta(),
+ q3 :: ?QUEUE:?QUEUE(),
+ q4 :: ?QUEUE:?QUEUE(),
+ next_seq_id :: seq_id(),
+ ram_pending_ack :: gb_tree(),
+ disk_pending_ack :: gb_tree(),
+ index_state :: any(),
+ msg_store_clients :: 'undefined' | {{any(), binary()},
+ {any(), binary()}},
+ durable :: boolean(),
+ transient_threshold :: non_neg_integer(),
+
+ len :: non_neg_integer(),
+ persistent_count :: non_neg_integer(),
+
+ target_ram_count :: non_neg_integer() | 'infinity',
+ ram_msg_count :: non_neg_integer(),
+ ram_msg_count_prev :: non_neg_integer(),
+ ram_ack_count_prev :: non_neg_integer(),
+ out_counter :: non_neg_integer(),
+ in_counter :: non_neg_integer(),
+ rates :: rates(),
+ msgs_on_disk :: gb_set(),
+ msg_indices_on_disk :: gb_set(),
+ unconfirmed :: gb_set(),
+ confirmed :: gb_set(),
+ ack_out_counter :: non_neg_integer(),
+ ack_in_counter :: non_neg_integer() }).
+%% Duplicated from rabbit_backing_queue
+-spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
+
+-spec(multiple_routing_keys/0 :: () -> 'ok').
+
+-endif.
+
+-define(BLANK_DELTA, #delta { start_seq_id = undefined,
+ count = 0,
+ end_seq_id = undefined }).
+-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
+ count = 0,
+ end_seq_id = Z }).
+
+-define(MICROS_PER_SECOND, 1000000.0).
+
+%% We're sampling every 5s for RAM duration; a half life that is of
+%% the same order of magnitude is probably about right.
+-define(RATE_AVG_HALF_LIFE, 5.0).
+
+%% We will recalculate the #rates{} every time we get asked for our
+%% RAM duration, or every N messages published, whichever is
+%% sooner. We do this since the priority calculations in
+%% rabbit_amqqueue_process need fairly fresh rates.
+-define(MSGS_PER_RATE_CALC, 100).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Recover the queue index for all durable queues, then start the
+%% two shared message stores, seeding the persistent one with the
+%% client refs found in the queues' recovery terms.
+start(DurableQueues) ->
+ {AllTerms, StartFunState} = rabbit_queue_index:start(DurableQueues),
+ start_msg_store(
+ %% the begin/end block is a comprehension filter that also
+ %% binds Ref: keep only refs from cleanly-shut-down queues
+ %% that saved a persistent_ref
+ [Ref || Terms <- AllTerms,
+ Terms /= non_clean_shutdown,
+ begin
+ Ref = proplists:get_value(persistent_ref, Terms),
+ Ref =/= undefined
+ end],
+ StartFunState),
+ {ok, AllTerms}.
+
+%% Stop the message stores and the queue index (reverse of start/1).
+stop() ->
+ ok = stop_msg_store(),
+ ok = rabbit_queue_index:stop().
+
+%% The transient store starts with no refs and a no-op recovery fun
+%% (its saved state is discarded at startup); the persistent store
+%% recovers against the supplied client refs.
+start_msg_store(Refs, StartFunState) ->
+ ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store,
+ [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(),
+ undefined, {fun (ok) -> finished end, ok}]),
+ ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store,
+ [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(),
+ Refs, StartFunState]).
+
+stop_msg_store() ->
+ ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE),
+ ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE).
+
+%% init/3: attach the default "written to disk" callbacks (used for
+%% publisher confirms) and delegate to init/5.
+init(Queue, Recover, AsyncCallback) ->
+ init(Queue, Recover, AsyncCallback,
+ fun (MsgIds, ActionTaken) ->
+ msgs_written_to_disk(AsyncCallback, MsgIds, ActionTaken)
+ end,
+ fun (MsgIds) -> msg_indices_written_to_disk(AsyncCallback, MsgIds) end).
+
+%% Brand-new queue: fresh index, zero delta count; only durable
+%% queues get a persistent msg_store client.
+init(#amqqueue { name = QueueName, durable = IsDurable }, new,
+ AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) ->
+ IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun),
+ init(IsDurable, IndexState, 0, [],
+ case IsDurable of
+ true -> msg_store_client_init(?PERSISTENT_MSG_STORE,
+ MsgOnDiskFun, AsyncCallback);
+ false -> undefined
+ end,
+ msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback));
+
+%% Recovered durable queue: re-attach to the persistent store under
+%% the ref saved at shutdown (or a fresh one), and let the queue
+%% index recover, checking survivors against the store's contents.
+init(#amqqueue { name = QueueName, durable = true }, Terms,
+ AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) ->
+ {PRef, RecoveryTerms} = process_recovery_terms(Terms),
+ PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
+ MsgOnDiskFun, AsyncCallback),
+ TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE,
+ undefined, AsyncCallback),
+ {DeltaCount, IndexState} =
+ rabbit_queue_index:recover(
+ QueueName, RecoveryTerms,
+ rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE),
+ fun (MsgId) ->
+ rabbit_msg_store:contains(MsgId, PersistentClient)
+ end,
+ MsgIdxOnDiskFun),
+ init(true, IndexState, DeltaCount, RecoveryTerms,
+ PersistentClient, TransientClient).
+
+%% Returns {PersistentRef, TermsForIndexRecovery}: a fresh ref (and
+%% no usable terms) after an unclean shutdown or when no ref was
+%% saved.
+process_recovery_terms(Terms=non_clean_shutdown) ->
+ {rabbit_guid:gen(), Terms};
+process_recovery_terms(Terms) ->
+ case proplists:get_value(persistent_ref, Terms) of
+ undefined -> {rabbit_guid:gen(), []};
+ PRef -> {PRef, Terms}
+ end.
+
+%% Graceful shutdown: flush pending acks ('true' keeps persistent
+%% entries), terminate the persistent store client and delete the
+%% transient one, then hand the persistence terms (client ref and
+%% persistent count) to the queue index for the next recovery.
+terminate(_Reason, State) ->
+ State1 = #vqstate { persistent_count = PCount,
+ index_state = IndexState,
+ msg_store_clients = {MSCStateP, MSCStateT} } =
+ purge_pending_ack(true, State),
+ PRef = case MSCStateP of
+ undefined -> undefined;
+ _ -> ok = rabbit_msg_store:client_terminate(MSCStateP),
+ rabbit_msg_store:client_ref(MSCStateP)
+ end,
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+ Terms = [{persistent_ref, PRef}, {persistent_count, PCount}],
+ a(State1 #vqstate { index_state = rabbit_queue_index:terminate(
+ Terms, IndexState),
+ msg_store_clients = undefined }).
+
+%% the only difference between purge and delete is that delete also
+%% needs to delete everything that's been delivered and not ack'd.
+delete_and_terminate(_Reason, State) ->
+ %% TODO: there is no need to interact with qi at all - which we do
+ %% as part of 'purge' and 'purge_pending_ack', other than
+ %% deleting it.
+ {_PurgeCount, State1} = purge(State),
+ State2 = #vqstate { index_state = IndexState,
+ msg_store_clients = {MSCStateP, MSCStateT} } =
+ purge_pending_ack(false, State1),
+ IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
+ case MSCStateP of
+ undefined -> ok;
+ _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
+ end,
+ rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+ a(State2 #vqstate { index_state = IndexState1,
+ msg_store_clients = undefined }).
+
+%% Drop every message in the queue proper (q4 first, then the betas
+%% and deltas, then q1), removing bodies from the stores and entries
+%% from the index; pending acks are left alone. Returns
+%% {PurgedLength, State}.
+purge(State = #vqstate { q4 = Q4,
+ index_state = IndexState,
+ msg_store_clients = MSCState,
+ len = Len,
+ persistent_count = PCount }) ->
+ %% TODO: when there are no pending acks, which is a common case,
+ %% we could simply wipe the qi instead of issuing delivers and
+ %% acks for all the messages.
+ {LensByStore, IndexState1} = remove_queue_entries(
+ fun ?QUEUE:foldl/3, Q4,
+ orddict:new(), IndexState, MSCState),
+ {LensByStore1, State1 = #vqstate { q1 = Q1,
+ index_state = IndexState2,
+ msg_store_clients = MSCState1 }} =
+ purge_betas_and_deltas(LensByStore,
+ State #vqstate { q4 = ?QUEUE:new(),
+ index_state = IndexState1 }),
+ {LensByStore2, IndexState3} = remove_queue_entries(
+ fun ?QUEUE:foldl/3, Q1,
+ LensByStore1, IndexState2, MSCState1),
+ PCount1 = PCount - find_persistent_count(LensByStore2),
+ {Len, a(State1 #vqstate { q1 = ?QUEUE:new(),
+ index_state = IndexState3,
+ len = 0,
+ ram_msg_count = 0,
+ persistent_count = PCount1 })}.
+
+%% Drop all pending acks; the 'false' means persistent entries are
+%% purged too.
+purge_acks(State) -> a(purge_pending_ack(false, State)).
+
+%% Enqueue a message at the tail. The message counts as persistent
+%% only when both it and the queue are durable. New messages may
+%% only join q4 while q3 is empty (see the invariants asserted in
+%% a/1); otherwise they are appended to q1.
+publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+ next_seq_id = SeqId,
+ len = Len,
+ in_counter = InCount,
+ persistent_count = PCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps),
+ {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
+ State2 = case ?QUEUE:is_empty(Q3) of
+ false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
+ true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
+ end,
+ InCount1 = InCount + 1,
+ PCount1 = PCount + one_if(IsPersistent1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = inc_ram_msg_count(State2 #vqstate { next_seq_id = SeqId + 1,
+ len = Len + 1,
+ in_counter = InCount1,
+ persistent_count = PCount1,
+ unconfirmed = UC1 }),
+ a(reduce_memory_use(maybe_update_rates(State3))).
+
+%% Publish a message that is already considered delivered: it skips
+%% the queues entirely and goes straight into the pending-ack
+%% records; its seq id is returned as the ack tag.
+publish_delivered(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, State = #vqstate { next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ persistent_count = PCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps),
+ {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ PCount1 = PCount + one_if(IsPersistent1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ persistent_count = PCount1,
+ unconfirmed = UC1 },
+ {SeqId, a(reduce_memory_use(maybe_update_rates(State3)))}.
+
+%% This backing queue does nothing special with discarded messages.
+discard(_MsgId, _ChPid, State) -> State.
+
+%% Hand back (and reset) the set of msg ids whose publisher confirms
+%% are ready to be sent.
+drain_confirmed(State = #vqstate { confirmed = C }) ->
+ case gb_sets:is_empty(C) of
+ true -> {[], State}; %% common case
+ false -> {gb_sets:to_list(C), State #vqstate {
+ confirmed = gb_sets:new() }}
+ end.
+
+%% Drop messages from the head while Pred is true of their
+%% properties; returns the properties of the first message kept, or
+%% 'undefined' if the queue emptied. Dropped messages need no acks.
+dropwhile(Pred, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {undefined, a(State1)};
+ {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+ case Pred(MsgProps) of
+ true -> {_, State2} = remove(false, MsgStatus, State1),
+ dropwhile(Pred, State2);
+ false -> {MsgProps, a(in_r(MsgStatus, State1))}
+ end
+ end.
+
+%% Like dropwhile/2, but each matching message is read in and folded
+%% through Fun with its ack tag (acks are required here).
+fetchwhile(Pred, Fun, Acc, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {undefined, Acc, a(State1)};
+ {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+ case Pred(MsgProps) of
+ true -> {Msg, State2} = read_msg(MsgStatus, State1),
+ {AckTag, State3} = remove(true, MsgStatus, State2),
+ fetchwhile(Pred, Fun, Fun(Msg, AckTag, Acc), State3);
+ false -> {MsgProps, Acc, a(in_r(MsgStatus, State1))}
+ end
+ end.
+
+%% Pop the head message, reading its body from the message store if
+%% needed; AckRequired controls whether it becomes a pending ack.
+fetch(AckRequired, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {empty, a(State1)};
+ {{value, MsgStatus}, State1} ->
+ %% it is possible that the message wasn't read from disk
+ %% at this point, so read it in.
+ {Msg, State2} = read_msg(MsgStatus, State1),
+ {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
+ {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
+ end.
+
+%% As fetch/2 but without reading the body: returns just the msg id
+%% and ack tag.
+drop(AckRequired, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {empty, a(State1)};
+ {{value, MsgStatus}, State1} ->
+ {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
+ {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
+ end.
+
+%% Acknowledge the given ack tags (seq ids): drop the pending-ack
+%% entries, ack any index entries that made it to disk, and remove
+%% the message bodies from their stores. Returns the acked msg ids.
+ack([], State) ->
+ {[], State};
+%% optimisation: this head is essentially a partial evaluation of the
+%% general case below, for the single-ack case.
+ack([SeqId], State) ->
+ {#msg_status { msg_id = MsgId,
+ is_persistent = IsPersistent,
+ msg_on_disk = MsgOnDisk,
+ index_on_disk = IndexOnDisk },
+ State1 = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState,
+ persistent_count = PCount,
+ ack_out_counter = AckOutCount }} =
+ remove_pending_ack(SeqId, State),
+ IndexState1 = case IndexOnDisk of
+ true -> rabbit_queue_index:ack([SeqId], IndexState);
+ false -> IndexState
+ end,
+ case MsgOnDisk of
+ true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+ false -> ok
+ end,
+ PCount1 = PCount - one_if(IsPersistent),
+ {[MsgId],
+ a(State1 #vqstate { index_state = IndexState1,
+ persistent_count = PCount1,
+ ack_out_counter = AckOutCount + 1 })};
+ack(AckTags, State) ->
+ {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
+ State1 = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState,
+ persistent_count = PCount,
+ ack_out_counter = AckOutCount }} =
+ lists:foldl(
+ fun (SeqId, {Acc, State2}) ->
+ {MsgStatus, State3} = remove_pending_ack(SeqId, State2),
+ {accumulate_ack(MsgStatus, Acc), State3}
+ end, {accumulate_ack_init(), State}, AckTags),
+ IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+ %% batch the store removals by persistence class
+ [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
+ || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
+ PCount1 = PCount - find_persistent_count(sum_msg_ids_by_store_to_len(
+ orddict:new(), MsgIdsByStore)),
+ {lists:reverse(AllMsgIds),
+ a(State1 #vqstate { index_state = IndexState1,
+ persistent_count = PCount1,
+ ack_out_counter = AckOutCount + length(AckTags) })}.
+
+%% Return acked messages to the queue: the sorted ack tags are
+%% partitioned by seq-id range via queue_merge/delta_merge back into
+%% q4 (as alphas), q3 (as betas) and delta respectively.
+requeue(AckTags, #vqstate { delta = Delta,
+ q3 = Q3,
+ q4 = Q4,
+ in_counter = InCounter,
+ len = Len } = State) ->
+ {SeqIds, Q4a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q4, [],
+ beta_limit(Q3),
+ fun publish_alpha/2, State),
+ {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds,
+ delta_limit(Delta),
+ fun publish_beta/2, State1),
+ {Delta1, MsgIds2, State3} = delta_merge(SeqIds1, Delta, MsgIds1,
+ State2),
+ MsgCount = length(MsgIds2),
+ {MsgIds2, a(reduce_memory_use(
+ maybe_update_rates(
+ State3 #vqstate { delta = Delta1,
+ q3 = Q3a,
+ q4 = Q4a,
+ in_counter = InCounter + MsgCount,
+ len = Len + MsgCount })))}.
+
+%% Fold MsgFun over the pending-ack messages named by AckTags,
+%% reading each body in from disk if necessary.
+ackfold(MsgFun, Acc, State, AckTags) ->
+ {AccN, StateN} =
+ lists:foldl(fun(SeqId, {Acc0, State0}) ->
+ MsgStatus = lookup_pending_ack(SeqId, State0),
+ {Msg, State1} = read_msg(MsgStatus, State0),
+ {MsgFun(Msg, SeqId, Acc0), State1}
+ end, {Acc, State}, AckTags),
+ {AccN, a(StateN)}.
+
+%% Fold Fun over the whole queue contents plus both pending-ack
+%% sets, by merging an iterator per source (index messages, disk
+%% acks, RAM acks).
+fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
+ {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
+ [msg_iterator(State),
+ disk_ack_iterator(State),
+ ram_ack_iterator(State)]),
+ ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
+
+%% Number of messages in the queue proper (excludes pending acks).
+len(#vqstate { len = Len }) -> Len.
+
+is_empty(State) -> 0 == len(State).
+
+%% Queue length plus the number of messages awaiting acknowledgement.
+depth(State = #vqstate { ram_pending_ack = RPA, disk_pending_ack = DPA }) ->
+ len(State) + gb_trees:size(RPA) + gb_trees:size(DPA).
+
+%% Convert a RAM duration target (seconds, or 'infinity') into a
+%% target RAM message count at the current combined msg+ack rates;
+%% when the target shrank, reduce memory use right away.
+set_ram_duration_target(
+ DurationTarget, State = #vqstate {
+ rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate },
+ target_ram_count = TargetRamCount }) ->
+ Rate =
+ AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate,
+ TargetRamCount1 =
+ case DurationTarget of
+ infinity -> infinity;
+ _ -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec
+ end,
+ State1 = State #vqstate { target_ram_count = TargetRamCount1 },
+ a(case TargetRamCount1 == infinity orelse
+ (TargetRamCount =/= infinity andalso
+ TargetRamCount1 >= TargetRamCount) of
+ true -> State1;
+ false -> reduce_memory_use(State1)
+ end).
+
+%% Recompute the rates once enough messages have passed through
+%% since the last calculation (?MSGS_PER_RATE_CALC), keeping them
+%% fresh for the priority calculations in rabbit_amqqueue_process.
+maybe_update_rates(State = #vqstate{ in_counter = InCount,
+ out_counter = OutCount })
+ when InCount + OutCount > ?MSGS_PER_RATE_CALC ->
+ update_rates(State);
+maybe_update_rates(State) ->
+ State.
+
+%% Fold the current in/out/ack counters into exponentially-smoothed
+%% per-second rates and reset the counters.
+%% NOTE(review): erlang:now/0 is deprecated on modern OTP; migrating
+%% it would also require changing timer:now_diff/2 in update_rate/4
+%% (e.g. to erlang:monotonic_time/1) -- confirm before touching.
+update_rates(State = #vqstate{ in_counter = InCount,
+ out_counter = OutCount,
+ ack_in_counter = AckInCount,
+ ack_out_counter = AckOutCount,
+ rates = #rates{ in = InRate,
+ out = OutRate,
+ ack_in = AckInRate,
+ ack_out = AckOutRate,
+ timestamp = TS }}) ->
+ Now = erlang:now(),
+
+ Rates = #rates { in = update_rate(Now, TS, InCount, InRate),
+ out = update_rate(Now, TS, OutCount, OutRate),
+ ack_in = update_rate(Now, TS, AckInCount, AckInRate),
+ ack_out = update_rate(Now, TS, AckOutCount, AckOutRate),
+ timestamp = Now },
+
+ State#vqstate{ in_counter = 0,
+ out_counter = 0,
+ ack_in_counter = 0,
+ ack_out_counter = 0,
+ rates = Rates }.
+
+%% Events-per-second over the elapsed interval, smoothed with a
+%% moving average of half-life ?RATE_AVG_HALF_LIFE seconds.
+update_rate(Now, TS, Count, Rate) ->
+ Time = timer:now_diff(Now, TS) / ?MICROS_PER_SECOND,
+ rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE, Count / Time, Rate).
+
+%% Estimate how many seconds the messages and acks currently in RAM
+%% would last at the present average rates ('infinity' when all four
+%% rates are negligible). Refreshes the rates as a side effect.
+ram_duration(State) ->
+ State1 = #vqstate { rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate },
+ ram_msg_count = RamMsgCount,
+ ram_msg_count_prev = RamMsgCountPrev,
+ ram_pending_ack = RPA,
+ ram_ack_count_prev = RamAckCountPrev } =
+ update_rates(State),
+
+ RamAckCount = gb_trees:size(RPA),
+
+ Duration = %% msgs+acks / (msgs+acks/sec) == sec
+ case lists:all(fun (X) -> X < 0.01 end,
+ [AvgEgressRate, AvgIngressRate,
+ AvgAckEgressRate, AvgAckIngressRate]) of
+ true -> infinity;
+ false -> (RamMsgCountPrev + RamMsgCount +
+ RamAckCount + RamAckCountPrev) /
+ (4 * (AvgEgressRate + AvgIngressRate +
+ AvgAckEgressRate + AvgAckIngressRate))
+ end,
+
+ {Duration, State1}.
+
+%% Map the queue index's sync requirement onto the backing-queue
+%% timeout protocol: 'timed' when confirms are waiting, 'idle' for
+%% other outstanding work, 'false' when nothing needs doing.
+needs_timeout(#vqstate { index_state = IndexState }) ->
+ case rabbit_queue_index:needs_sync(IndexState) of
+ confirms -> timed;
+ other -> idle;
+ false -> false
+ end.
+
+%% Sync the queue index.
+timeout(State = #vqstate { index_state = IndexState }) ->
+ State #vqstate { index_state = rabbit_queue_index:sync(IndexState) }.
+
+%% Flush the queue index before hibernating.
+handle_pre_hibernate(State = #vqstate { index_state = IndexState }) ->
+ State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }.
+
+%% Take the opportunity to push messages to disk.
+resume(State) -> a(reduce_memory_use(State)).
+
+%% Current smoothed publish (in) and delivery (out) rates.
+msg_rates(#vqstate { rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate } }) ->
+ {AvgIngressRate, AvgEgressRate}.
+
+%% Proplist snapshot of queue lengths, counters and rates for status
+%% reporting.
+status(#vqstate {
+ q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ len = Len,
+ ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ target_ram_count = TargetRamCount,
+ ram_msg_count = RamMsgCount,
+ next_seq_id = NextSeqId,
+ persistent_count = PersistentCount,
+ rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate }}) ->
+
+ [ {q1 , ?QUEUE:len(Q1)},
+ {q2 , ?QUEUE:len(Q2)},
+ {delta , Delta},
+ {q3 , ?QUEUE:len(Q3)},
+ {q4 , ?QUEUE:len(Q4)},
+ {len , Len},
+ {pending_acks , gb_trees:size(RPA) + gb_trees:size(DPA)},
+ {target_ram_count , TargetRamCount},
+ {ram_msg_count , RamMsgCount},
+ {ram_ack_count , gb_trees:size(RPA)},
+ {next_seq_id , NextSeqId},
+ {persistent_count , PersistentCount},
+ {avg_ingress_rate , AvgIngressRate},
+ {avg_egress_rate , AvgEgressRate},
+ {avg_ack_ingress_rate, AvgAckIngressRate},
+ {avg_ack_egress_rate , AvgAckEgressRate} ].
+
+%% Run a function injected via the async-callback mechanism (see
+%% msg_store_close_fds_fun/1); ignore invocations for other modules.
+invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
+invoke( _, _, State) -> State.
+
+%% This backing queue performs no duplicate detection.
+is_duplicate(_Msg, State) -> {false, State}.
+
+%%----------------------------------------------------------------------------
+%% Minor helpers
+%%----------------------------------------------------------------------------
+
+%% Assert the inter-queue invariants and return State unchanged:
+%% - q1 may only be non-empty when q3 is non-empty
+%% - q2 may only be non-empty when delta is non-empty
+%% - a non-empty delta requires a non-empty q3
+%% - the queue is empty exactly when q3 and q4 are both empty
+%% plus basic sanity on the counters.
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ len = Len,
+ persistent_count = PersistentCount,
+ ram_msg_count = RamMsgCount }) ->
+ E1 = ?QUEUE:is_empty(Q1),
+ E2 = ?QUEUE:is_empty(Q2),
+ ED = Delta#delta.count == 0,
+ E3 = ?QUEUE:is_empty(Q3),
+ E4 = ?QUEUE:is_empty(Q4),
+ LZ = Len == 0,
+
+ true = E1 or not E3,
+ true = E2 or not ED,
+ true = ED or not E3,
+ true = LZ == (E3 and E4),
+
+ true = Len >= 0,
+ true = PersistentCount >= 0,
+ true = RamMsgCount >= 0,
+ true = RamMsgCount =< Len,
+
+ State.
+
+%% Assert that a delta is well-formed: its count fits between the
+%% (inclusive) start and (exclusive) end seq ids.
+d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
+ when Start + Count =< End ->
+ Delta.
+
+%% Assert per-message invariants: persistent implies index on disk;
+%% index on disk implies body on disk; and a body absent from RAM
+%% must be on disk.
+m(MsgStatus = #msg_status { msg = Msg,
+ is_persistent = IsPersistent,
+ msg_on_disk = MsgOnDisk,
+ index_on_disk = IndexOnDisk }) ->
+ true = (not IsPersistent) or IndexOnDisk,
+ true = (not IndexOnDisk) or MsgOnDisk,
+ true = (Msg =/= undefined) or MsgOnDisk,
+
+ MsgStatus.
+
+%% Map a boolean onto an integer increment: true -> 1, false -> 0.
+one_if(Bool) ->
+ case Bool of
+ true -> 1;
+ false -> 0
+ end.
+
+%% Conditionally cons: prepend E onto L only when the flag is true.
+cons_if(Cond, E, L) ->
+ case Cond of
+ true -> [E | L];
+ false -> L
+ end.
+
+%% Add Val to the set only when the first argument is true (used to
+%% collect msg ids that need publisher confirms).
+gb_sets_maybe_insert(Insert, Val, Set) ->
+ case Insert of
+ false -> Set;
+ true -> gb_sets:add(Val, Set)
+ end.
+
+%% Build the #msg_status for a freshly published message: body in
+%% RAM, nothing written to disk yet.
+msg_status(IsPersistent, IsDelivered, SeqId,
+ Msg = #basic_message {id = MsgId}, MsgProps) ->
+ #msg_status{seq_id = SeqId,
+ msg_id = MsgId,
+ msg = Msg,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_on_disk = false,
+ index_on_disk = false,
+ msg_props = MsgProps}.
+
+%% Build a #msg_status from a queue index entry: index and body are
+%% on disk, body not held in RAM.
+beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+ #msg_status{seq_id = SeqId,
+ msg_id = MsgId,
+ msg = undefined,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_on_disk = true,
+ index_on_disk = true,
+ msg_props = MsgProps}.
+
+%% Drop the in-RAM copy of the message body.
+trim_msg_status(MsgStatus) -> MsgStatus #msg_status { msg = undefined }.
+
+%% Run Fun against the relevant (persistent or transient) msg_store
+%% client, threading the updated client back into the pair.
+with_msg_store_state({MSCStateP, MSCStateT}, IsPersistent, Fun) ->
+ case IsPersistent of
+ true -> {Result, MSCStateP1} = Fun(MSCStateP),
+ {Result, {MSCStateP1, MSCStateT}};
+ false -> {Result, MSCStateT1} = Fun(MSCStateT),
+ {Result, {MSCStateP, MSCStateT1}}
+ end.
+
+%% As with_msg_store_state/3, but for operations that do not modify
+%% the client state; re-matching against the already-bound MSCState
+%% asserts that the state really did come back unchanged.
+with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
+ Wrapped = fun (MSCState1) -> {Fun(MSCState1), MSCState1} end,
+ {Result, MSCState} = with_msg_store_state(MSCState, IsPersistent, Wrapped),
+ Result.
+
+%% Open a msg_store client under a freshly generated ref.
+msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) ->
+ msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
+ Callback).
+
+%% Open a msg_store client under the given ref; the close-fds fun is
+%% delivered back into this module asynchronously via invoke/3.
+msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) ->
+ CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
+ rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun,
+ fun () -> Callback(?MODULE, CloseFDsFun) end).
+
+%% Write a message body to the appropriate store (flow-controlled).
+msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
+ with_immutable_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) ->
+ rabbit_msg_store:write_flow(MsgId, Msg, MSCState1)
+ end).
+
+%% Read a message body back, returning the possibly-updated client
+%% state alongside the result.
+msg_store_read(MSCState, IsPersistent, MsgId) ->
+ with_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) ->
+ rabbit_msg_store:read(MsgId, MSCState1)
+ end).
+
+%% Remove MsgIds from the relevant message store; removal does not
+%% alter the client state, hence the immutable wrapper.
+msg_store_remove(MSCState, IsPersistent, MsgIds) ->
+ Remove = fun (MSCState1) ->
+ rabbit_msg_store:remove(MsgIds, MSCState1)
+ end,
+ with_immutable_msg_store_state(MSCState, IsPersistent, Remove).
+
+%% Ask the chosen store to close whatever file descriptors it has
+%% flagged for closing.
+msg_store_close_fds(MSCState, IsPersistent) ->
+ with_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end).
+
+%% Closure for the async callback/invoke mechanism: closes flagged
+%% fds and stores the updated client state back into #vqstate.
+msg_store_close_fds_fun(IsPersistent) ->
+ fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) ->
+ {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent),
+ State #vqstate { msg_store_clients = MSCState1 }
+ end.
+
+%% Record a 'deliver' for SeqId in the queue index when the flag is
+%% set; otherwise return the index state untouched.
+maybe_write_delivered(IsDelivered, SeqId, IndexState) ->
+ case IsDelivered of
+ false -> IndexState;
+ true -> rabbit_queue_index:deliver([SeqId], IndexState)
+ end.
+
+%% Turn a list of queue index entries into a queue of beta
+%% msg_statuses. Transient entries below the transient threshold
+%% are dropped for good (deliver+ack written to the index); entries
+%% currently pending ack (in either ack tree) are skipped.
+betas_from_index_entries(List, TransientThreshold, RPA, DPA, IndexState) ->
+ {Filtered, Delivers, Acks} =
+ lists:foldr(
+ fun ({_MsgId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
+ {Filtered1, Delivers1, Acks1} = Acc) ->
+ case SeqId < TransientThreshold andalso not IsPersistent of
+ true -> {Filtered1,
+ cons_if(not IsDelivered, SeqId, Delivers1),
+ [SeqId | Acks1]};
+ false -> case (gb_trees:is_defined(SeqId, RPA) orelse
+ gb_trees:is_defined(SeqId, DPA)) of
+ false -> {?QUEUE:in_r(m(beta_msg_status(M)),
+ Filtered1),
+ Delivers1, Acks1};
+ true -> Acc
+ end
+ end
+ end, {?QUEUE:new(), [], []}, List),
+ {Filtered, rabbit_queue_index:ack(
+ Acks, rabbit_queue_index:deliver(Delivers, IndexState))}.
+
+%% Grow a delta to cover one more seq id, extending whichever bound
+%% is needed (or just bumping the count when the id falls inside the
+%% current span).
+expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) ->
+ d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 });
+expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
+ count = Count } = Delta)
+ when SeqId < StartSeqId ->
+ d(Delta #delta { start_seq_id = SeqId, count = Count + 1 });
+expand_delta(SeqId, #delta { count = Count,
+ end_seq_id = EndSeqId } = Delta)
+ when SeqId >= EndSeqId ->
+ d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1 });
+expand_delta(_SeqId, #delta { count = Count } = Delta) ->
+ d(Delta #delta { count = Count + 1 }).
+
+%%----------------------------------------------------------------------------
+%% Internal major helpers for Public API
+%%----------------------------------------------------------------------------
+
+%% Common initialisation tail. The index bounds give the span of
+%% seq ids still on disk; a persistent_count saved at clean shutdown
+%% overrides the index's own count. Everything on disk starts out
+%% in delta and is paged in via maybe_deltas_to_betas/1. NextSeqId
+%% doubles as the transient threshold: transient messages below it
+%% predate this startup and are discarded when read (see the module
+%% header).
+init(IsDurable, IndexState, DeltaCount, Terms,
+ PersistentClient, TransientClient) ->
+ {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
+
+ DeltaCount1 =
+ case Terms of
+ non_clean_shutdown -> DeltaCount;
+ _ -> proplists:get_value(persistent_count,
+ Terms, DeltaCount)
+ end,
+ Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
+ true -> ?BLANK_DELTA;
+ false -> d(#delta { start_seq_id = LowSeqId,
+ count = DeltaCount1,
+ end_seq_id = NextSeqId })
+ end,
+ Now = now(),
+ State = #vqstate {
+ q1 = ?QUEUE:new(),
+ q2 = ?QUEUE:new(),
+ delta = Delta,
+ q3 = ?QUEUE:new(),
+ q4 = ?QUEUE:new(),
+ next_seq_id = NextSeqId,
+ ram_pending_ack = gb_trees:empty(),
+ disk_pending_ack = gb_trees:empty(),
+ index_state = IndexState1,
+ msg_store_clients = {PersistentClient, TransientClient},
+ durable = IsDurable,
+ transient_threshold = NextSeqId,
+
+ len = DeltaCount1,
+ persistent_count = DeltaCount1,
+
+ target_ram_count = infinity,
+ ram_msg_count = 0,
+ ram_msg_count_prev = 0,
+ ram_ack_count_prev = 0,
+ out_counter = 0,
+ in_counter = 0,
+ rates = blank_rates(Now),
+ msgs_on_disk = gb_sets:new(),
+ msg_indices_on_disk = gb_sets:new(),
+ unconfirmed = gb_sets:new(),
+ confirmed = gb_sets:new(),
+ ack_out_counter = 0,
+ ack_in_counter = 0 },
+ a(maybe_deltas_to_betas(State)).
+
+%% All-zero rates stamped with the given time.
+blank_rates(Now) ->
+ #rates { in = 0.0,
+ out = 0.0,
+ ack_in = 0.0,
+ ack_out = 0.0,
+ timestamp = Now}.
+
+%% Put a message back at the head of the queue. A status whose body
+%% is not in RAM can go back onto q3 -- unless q4 is non-empty, in
+%% which case it must sit ahead of q4's messages, so the body is
+%% read back in first.
+in_r(MsgStatus = #msg_status { msg = undefined },
+ State = #vqstate { q3 = Q3, q4 = Q4 }) ->
+ case ?QUEUE:is_empty(Q4) of
+ true -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
+ false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
+ read_msg(MsgStatus, State),
+ inc_ram_msg_count(
+ State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus#msg_status {
+ msg = Msg }, Q4a) })
+ end;
+in_r(MsgStatus, State = #vqstate { q4 = Q4 }) ->
+ State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) }.
+
+%% Take the head message: q4 first, falling back to fetch_from_q3/1.
+queue_out(State = #vqstate { q4 = Q4 }) ->
+ case ?QUEUE:out(Q4) of
+ {empty, _Q4} ->
+ case fetch_from_q3(State) of
+ {empty, _State1} = Result -> Result;
+ {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+ end;
+ {{value, MsgStatus}, Q4a} ->
+ {{value, MsgStatus}, State #vqstate { q4 = Q4a }}
+ end.
+
+%% Fetch a message body, reading it from the message store when it
+%% is not already held in RAM.
+read_msg(#msg_status{msg = undefined,
+ msg_id = MsgId,
+ is_persistent = IsPersistent}, State) ->
+ read_msg(MsgId, IsPersistent, State);
+read_msg(#msg_status{msg = Msg}, State) ->
+ {Msg, State}.
+
+read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState}) ->
+ {{ok, Msg = #basic_message {}}, MSCState1} =
+ msg_store_read(MSCState, IsPersistent, MsgId),
+ {Msg, State #vqstate {msg_store_clients = MSCState1}}.
+
+%% Bump the count of message bodies held in RAM.
+inc_ram_msg_count(State = #vqstate{ram_msg_count = RamMsgCount}) ->
+ State#vqstate{ram_msg_count = RamMsgCount + 1}.
+
+%% Remove the message described by MsgStatus from the queue head:
+%% write a 'deliver' to the index if needed, delete from the store
+%% and index when no ack will follow, otherwise record a pending ack
+%% keyed on the seq id (which doubles as the ack tag).
+remove(AckRequired, MsgStatus = #msg_status {
+ seq_id = SeqId,
+ msg_id = MsgId,
+ msg = Msg,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_on_disk = MsgOnDisk,
+ index_on_disk = IndexOnDisk },
+ State = #vqstate {ram_msg_count = RamMsgCount,
+ out_counter = OutCount,
+ index_state = IndexState,
+ msg_store_clients = MSCState,
+ len = Len,
+ persistent_count = PCount}) ->
+ %% 1. Mark it delivered if necessary
+ IndexState1 = maybe_write_delivered(
+ IndexOnDisk andalso not IsDelivered,
+ SeqId, IndexState),
+
+ %% 2. Remove from msg_store and queue index, if necessary
+ Rem = fun () ->
+ ok = msg_store_remove(MSCState, IsPersistent, [MsgId])
+ end,
+ Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end,
+ IndexState2 = case {AckRequired, MsgOnDisk, IndexOnDisk} of
+ {false, true, false} -> Rem(), IndexState1;
+ {false, true, true} -> Rem(), Ack();
+ _ -> IndexState1
+ end,
+
+ %% 3. If an ack is required, add something sensible to PA
+ {AckTag, State1} = case AckRequired of
+ true -> StateN = record_pending_ack(
+ MsgStatus #msg_status {
+ is_delivered = true }, State),
+ {SeqId, StateN};
+ false -> {undefined, State}
+ end,
+
+ PCount1 = PCount - one_if(IsPersistent andalso not AckRequired),
+ RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined),
+
+ {AckTag, maybe_update_rates(
+ State1 #vqstate {ram_msg_count = RamMsgCount1,
+ out_counter = OutCount + 1,
+ index_state = IndexState2,
+ len = Len - 1,
+ persistent_count = PCount1})}.
+
+%% Repeatedly drain q3 (paging deltas in between rounds via
+%% maybe_deltas_to_betas/1) until betas and deltas are gone,
+%% accumulating per-store removal counts.
+purge_betas_and_deltas(LensByStore,
+ State = #vqstate { q3 = Q3,
+ index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ case ?QUEUE:is_empty(Q3) of
+ true -> {LensByStore, State};
+ false -> {LensByStore1, IndexState1} =
+ remove_queue_entries(fun ?QUEUE:foldl/3, Q3,
+ LensByStore, IndexState, MSCState),
+ purge_betas_and_deltas(LensByStore1,
+ maybe_deltas_to_betas(
+ State #vqstate {
+ q3 = ?QUEUE:new(),
+ index_state = IndexState1 }))
+ end.
+
+%% Remove every entry in queue Q: batch-delete bodies from their
+%% stores and write all outstanding delivers and acks to the index.
+remove_queue_entries(Fold, Q, LensByStore, IndexState, MSCState) ->
+ {MsgIdsByStore, Delivers, Acks} =
+ Fold(fun remove_queue_entries1/2, {orddict:new(), [], []}, Q),
+ ok = orddict:fold(fun (IsPersistent, MsgIds, ok) ->
+ msg_store_remove(MSCState, IsPersistent, MsgIds)
+ end, ok, MsgIdsByStore),
+ {sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore),
+ rabbit_queue_index:ack(Acks,
+ rabbit_queue_index:deliver(Delivers, IndexState))}.
+
+%% Per-entry accumulator for remove_queue_entries/5: collect msg ids
+%% by persistence class, plus the seq ids needing deliver and ack
+%% records in the index.
+remove_queue_entries1(
+ #msg_status { msg_id = MsgId, seq_id = SeqId,
+ is_delivered = IsDelivered, msg_on_disk = MsgOnDisk,
+ index_on_disk = IndexOnDisk, is_persistent = IsPersistent },
+ {MsgIdsByStore, Delivers, Acks}) ->
+ {case MsgOnDisk of
+ true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
+ false -> MsgIdsByStore
+ end,
+ cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+ cons_if(IndexOnDisk, SeqId, Acks)}.
+
+%% Fold per-store msg-id lists into LensByStore, adding each list's
+%% length to the counter kept under the same is-persistent key.
+sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) ->
+ lists:foldl(
+ fun ({IsPersistent, MsgIds}, Acc) ->
+ orddict:update_counter(IsPersistent, length(MsgIds), Acc)
+ end, LensByStore, orddict:to_list(MsgIdsByStore)).
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for publishing
+%%----------------------------------------------------------------------------
+
+maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
+ msg_on_disk = true }, _MSCState) ->
+ MsgStatus;
+maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
+ msg = Msg, msg_id = MsgId,
+ is_persistent = IsPersistent }, MSCState)
+ when Force orelse IsPersistent ->
+ Msg1 = Msg #basic_message {
+ %% don't persist any recoverable decoded properties
+ content = rabbit_binary_parser:clear_decoded_content(
+ Msg #basic_message.content)},
+ ok = msg_store_write(MSCState, IsPersistent, MsgId, Msg1),
+ MsgStatus #msg_status { msg_on_disk = true };
+maybe_write_msg_to_disk(_Force, MsgStatus, _MSCState) ->
+ MsgStatus.
+
+%% Write the queue-index entry for a message if needed. In all on-disk
+%% cases the message body must already be on disk (asserted), since the
+%% index must never reference a message the store does not have.
+%% Returns {MsgStatus, IndexState}.
+maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
+ index_on_disk = true }, IndexState) ->
+ true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION
+ {MsgStatus, IndexState};
+maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
+ msg_id = MsgId,
+ seq_id = SeqId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_props = MsgProps}, IndexState)
+ when Force orelse IsPersistent ->
+ true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION
+ %% publish to the index, then record delivery there too if the
+ %% message has already been delivered
+ IndexState1 = rabbit_queue_index:publish(
+ MsgId, SeqId, MsgProps, IsPersistent, IndexState),
+ {MsgStatus #msg_status { index_on_disk = true },
+ maybe_write_delivered(IsDelivered, SeqId, IndexState1)};
+maybe_write_index_to_disk(_Force, MsgStatus, IndexState) ->
+ {MsgStatus, IndexState}.
+
+%% Convenience wrapper: write message body then index entry, each
+%% subject to its own Force flag, threading the index state back into
+%% the vqstate. Returns {MsgStatus, State}.
+maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus,
+ State = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ MsgStatus1 = maybe_write_msg_to_disk(ForceMsg, MsgStatus, MSCState),
+ {MsgStatus2, IndexState1} =
+ maybe_write_index_to_disk(ForceIndex, MsgStatus1, IndexState),
+ {MsgStatus2, State #vqstate { index_state = IndexState1 }}.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for acks
+%%----------------------------------------------------------------------------
+
+%% Track a delivered-but-unacked message. If the message body is still
+%% in RAM it goes into ram_pending_ack (RPA), otherwise (msg =
+%% undefined) into disk_pending_ack (DPA). Also bumps ack_in_counter.
+record_pending_ack(#msg_status { seq_id = SeqId, msg = Msg } = MsgStatus,
+ State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ ack_in_counter = AckInCount}) ->
+ {RPA1, DPA1} =
+ case Msg of
+ undefined -> {RPA, gb_trees:insert(SeqId, MsgStatus, DPA)};
+ _ -> {gb_trees:insert(SeqId, MsgStatus, RPA), DPA}
+ end,
+ State #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1,
+ ack_in_counter = AckInCount + 1}.
+
+%% Fetch (without removing) the pending-ack msg_status for SeqId:
+%% try the RAM tree first, fall back to the disk tree (crashes via
+%% gb_trees:get/2 if the SeqId is in neither, which is intended).
+lookup_pending_ack(SeqId, #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA }) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> V;
+ none -> gb_trees:get(SeqId, DPA)
+ end.
+
+%% Remove and return the pending-ack msg_status for SeqId from
+%% whichever tree (RPA or DPA) currently holds it.
+%% Returns {MsgStatus, State}.
+remove_pending_ack(SeqId, State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA }) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
+ {V, State #vqstate { ram_pending_ack = RPA1 }};
+ none -> DPA1 = gb_trees:delete(SeqId, DPA),
+ {gb_trees:get(SeqId, DPA),
+ State #vqstate { disk_pending_ack = DPA1 }}
+ end.
+
+%% Drop all pending acks, cleaning up on-disk state. Both ack trees are
+%% folded through accumulate_ack/2 and then emptied. If KeepPersistent
+%% is true only transient (false-keyed) messages are removed from the
+%% message store and the index is left alone; otherwise all index
+%% entries are acked and both stores are purged of the acked messages.
+purge_pending_ack(KeepPersistent,
+ State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
+ {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
+ rabbit_misc:gb_trees_fold(
+ F, rabbit_misc:gb_trees_fold(F, accumulate_ack_init(), RPA), DPA),
+ State1 = State #vqstate { ram_pending_ack = gb_trees:empty(),
+ disk_pending_ack = gb_trees:empty() },
+
+ case KeepPersistent of
+ true -> case orddict:find(false, MsgIdsByStore) of
+ error -> State1;
+ {ok, MsgIds} -> ok = msg_store_remove(MSCState, false,
+ MsgIds),
+ State1
+ end;
+ false -> IndexState1 =
+ rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+ [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
+ || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
+ State1 #vqstate { index_state = IndexState1 }
+ end.
+
+%% Initial accumulator for accumulate_ack/2:
+%% {IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds}.
+accumulate_ack_init() -> {[], orddict:new(), []}.
+
+%% Fold one msg_status into the ack accumulator: collect its SeqId if
+%% its index entry is on disk, cons its MsgId under the right store
+%% (keyed by IsPersistent) if its body is on disk, and always collect
+%% the MsgId into the complete list.
+accumulate_ack(#msg_status { seq_id = SeqId,
+ msg_id = MsgId,
+ is_persistent = IsPersistent,
+ msg_on_disk = MsgOnDisk,
+ index_on_disk = IndexOnDisk },
+ {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
+ {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
+ case MsgOnDisk of
+ true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
+ false -> MsgIdsByStore
+ end,
+ [MsgId | AllMsgIds]}.
+
+%% Extract the persistent (key 'true') count from a LensByStore
+%% orddict, defaulting to 0 when absent.
+find_persistent_count(LensByStore) ->
+ case orddict:find(true, LensByStore) of
+ error -> 0;
+ {ok, Len} -> Len
+ end.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for confirms (aka publisher acks)
+%%----------------------------------------------------------------------------
+
+%% Mark MsgIdSet as confirmed: drop the ids from the msgs-on-disk,
+%% indices-on-disk and unconfirmed tracking sets and add them to the
+%% confirmed set.
+record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC,
+ confirmed = C }) ->
+ State #vqstate {
+ msgs_on_disk = rabbit_misc:gb_sets_difference(MOD, MsgIdSet),
+ msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet),
+ unconfirmed = rabbit_misc:gb_sets_difference(UC, MsgIdSet),
+ confirmed = gb_sets:union(C, MsgIdSet) }.
+
+%% Message-store write callback. 'ignored' (store did not need to write)
+%% confirms the whole set immediately. 'written' records the ids as
+%% on disk and confirms only those whose index entry is also already on
+%% disk (the intersection with MIOD) - a confirm needs both halves.
+msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
+ Callback(?MODULE,
+ fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
+msgs_written_to_disk(Callback, MsgIdSet, written) ->
+ Callback(?MODULE,
+ fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC }) ->
+ Confirmed = gb_sets:intersection(UC, MsgIdSet),
+ record_confirms(gb_sets:intersection(MsgIdSet, MIOD),
+ State #vqstate {
+ msgs_on_disk =
+ gb_sets:union(MOD, Confirmed) })
+ end).
+
+%% Queue-index write callback: mirror image of msgs_written_to_disk/3 -
+%% record the ids' indices as on disk and confirm those whose message
+%% bodies are already on disk (the intersection with MOD).
+msg_indices_written_to_disk(Callback, MsgIdSet) ->
+ Callback(?MODULE,
+ fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC }) ->
+ Confirmed = gb_sets:intersection(UC, MsgIdSet),
+ record_confirms(gb_sets:intersection(MsgIdSet, MOD),
+ State #vqstate {
+ msg_indices_on_disk =
+ gb_sets:union(MIOD, Confirmed) })
+ end).
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for requeue
+%%----------------------------------------------------------------------------
+
+%% Requeue a message as an alpha (fully in RAM): read the body back
+%% from the store if it is not in RAM, and bump the RAM message count.
+publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
+ {Msg, State1} = read_msg(MsgStatus, State),
+ {MsgStatus#msg_status { msg = Msg }, inc_ram_msg_count(State1)};
+publish_alpha(MsgStatus, State) ->
+ {MsgStatus, inc_ram_msg_count(State)}.
+
+%% Requeue a message as a beta (body on disk only): force the body to
+%% disk and trim the in-RAM msg_status down.
+publish_beta(MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State),
+ {m(trim_msg_status(MsgStatus1)), State1}.
+
+%% Rebuild queue, inserting sequence ids to maintain ordering
+queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
+ queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
+ Limit, PubFun, State).
+
+%% Merge requeued SeqIds (ascending) back into queue Q, keeping the
+%% result ordered by seq_id. Front accumulates the merged prefix.
+%% Stops once SeqId reaches Limit (or Limit is undefined => no limit);
+%% leftover SeqIds are returned for the caller (e.g. to merge into
+%% delta). Returns {RemainingSeqIds, MergedQ, MsgIds, State}.
+queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
+ Limit, PubFun, State)
+ when Limit == undefined orelse SeqId < Limit ->
+ case ?QUEUE:out(Q) of
+ {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
+ when SeqIdQ < SeqId ->
+ %% enqueue from the remaining queue
+ queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
+ Limit, PubFun, State);
+ {_, _Q1} ->
+ %% enqueue from the remaining list of sequence ids
+ {MsgStatus, State1} = msg_from_pending_ack(SeqId, State),
+ {#msg_status { msg_id = MsgId } = MsgStatus1, State2} =
+ PubFun(MsgStatus, State1),
+ queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds],
+ Limit, PubFun, State2)
+ end;
+queue_merge(SeqIds, Q, Front, MsgIds,
+ _Limit, _PubFun, State) ->
+ {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.
+
+%% Merge requeued SeqIds into delta: each message is pulled out of
+%% pending-ack, forced to disk (body and index) and accounted for by
+%% expanding the delta to cover its seq_id.
+delta_merge([], Delta, MsgIds, State) ->
+ {Delta, MsgIds, State};
+delta_merge(SeqIds, Delta, MsgIds, State) ->
+ lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0}) ->
+ {#msg_status { msg_id = MsgId } = MsgStatus, State1} =
+ msg_from_pending_ack(SeqId, State0),
+ {_MsgStatus, State2} =
+ maybe_write_to_disk(true, true, MsgStatus, State1),
+ {expand_delta(SeqId, Delta0), [MsgId | MsgIds0], State2}
+ end, {Delta, MsgIds, State}, SeqIds).
+
+%% Mostly opposite of record_pending_ack/2
+%% Pull SeqId out of the pending-ack trees for requeueing, clearing
+%% needs_confirming (a requeued message must not trigger a confirm).
+msg_from_pending_ack(SeqId, State) ->
+ {#msg_status { msg_props = MsgProps } = MsgStatus, State1} =
+ remove_pending_ack(SeqId, State),
+ {MsgStatus #msg_status {
+ msg_props = MsgProps #message_properties { needs_confirming = false } },
+ State1}.
+
+%% Lowest seq_id held in a beta queue, or 'undefined' if it is empty.
+beta_limit(Q) ->
+ case ?QUEUE:peek(Q) of
+ {value, #msg_status { seq_id = SeqId }} -> SeqId;
+ empty -> undefined
+ end.
+
+%% First seq_id covered by delta, or 'undefined' for an empty delta.
+delta_limit(?BLANK_DELTA_PATTERN(_X)) -> undefined;
+delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.
+
+%%----------------------------------------------------------------------------
+%% Iterator
+%%----------------------------------------------------------------------------
+
+%% Iterator over RAM pending acks, tagged so next/2 can dispatch.
+ram_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}.
+
+%% Iterator over on-disk pending acks.
+disk_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
+
+%% Iterator over queued messages, visiting q4, q3, delta, q2, q1.
+msg_iterator(State) -> istate(start, State).
+
+%% State machine driving msg_iterator/1: given the stage just finished,
+%% return the next {Stage, Queue/Delta, State} triple, or 'done'.
+istate(start, State) -> {q4, State#vqstate.q4, State};
+istate(q4, State) -> {q3, State#vqstate.q3, State};
+istate(q3, State) -> {delta, State#vqstate.delta, State};
+istate(delta, State) -> {q2, State#vqstate.q2, State};
+istate(q2, State) -> {q1, State#vqstate.q1, State};
+istate(q1, _State) -> done.
+
+%% Advance an iterator built by the *_iterator functions above.
+%% Returns {value, MsgStatus, Unacked, NextIterator, IndexState} or
+%% {empty, IndexState}. Ack iterators yield Unacked = true; queue and
+%% delta iterators yield false.
+next({ack, It}, IndexState) ->
+ case gb_trees:next(It) of
+ none -> {empty, IndexState};
+ {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
+ {value, MsgStatus, true, Next, IndexState}
+ end;
+next(done, IndexState) -> {empty, IndexState};
+%% delta exhausted (start == end): move on to the next stage
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqId}, State}, IndexState) ->
+ next(istate(delta, State), IndexState);
+%% read the next index segment of delta and iterate over its entries
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqIdEnd} = Delta, State}, IndexState) ->
+ SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId),
+ SeqId1 = lists:min([SeqIdB, SeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState),
+ next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
+next({delta, Delta, [], State}, IndexState) ->
+ next({delta, Delta, State}, IndexState);
+%% skip index entries that are currently pending ack - they are
+%% surfaced by the ack iterators instead
+next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
+ case (gb_trees:is_defined(SeqId, State#vqstate.ram_pending_ack) orelse
+ gb_trees:is_defined(SeqId, State#vqstate.disk_pending_ack)) of
+ false -> Next = {delta, Delta, Rest, State},
+ {value, beta_msg_status(M), false, Next, IndexState};
+ true -> next({delta, Delta, Rest, State}, IndexState)
+ end;
+%% plain in-RAM queue stages (q1..q4)
+next({Key, Q, State}, IndexState) ->
+ case ?QUEUE:out(Q) of
+ {empty, _Q} -> next(istate(Key, State), IndexState);
+ {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
+ {value, MsgStatus, false, Next, IndexState}
+ end.
+
+%% Step iterator It once; if it yields a value, push the
+%% {MsgStatus, Unacked, NextIterator} triple onto the live-iterator
+%% list, otherwise drop the iterator.
+inext(It, {Its, IndexState}) ->
+ case next(It, IndexState) of
+ {empty, IndexState1} ->
+ {Its, IndexState1};
+ {value, MsgStatus1, Unacked, It1, IndexState1} ->
+ {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
+ end.
+
+%% Fold Fun over the merged iterators in ascending seq_id order (the
+%% sort below picks the smallest head each round). Fun returns
+%% {stop, Acc} to terminate early or {cont, Acc} to continue, in which
+%% case the consumed iterator is re-advanced via inext/2.
+ifold(_Fun, Acc, [], State) ->
+ {Acc, State};
+ifold(Fun, Acc, Its, State) ->
+ [{MsgStatus, Unacked, It} | Rest] =
+ lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
+ {#msg_status{seq_id = SeqId2}, _, _}) ->
+ SeqId1 =< SeqId2
+ end, Its),
+ {Msg, State1} = read_msg(MsgStatus, State),
+ case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
+ {stop, Acc1} ->
+ {Acc1, State};
+ {cont, Acc1} ->
+ {Its1, IndexState1} = inext(It, {Rest, State1#vqstate.index_state}),
+ ifold(Fun, Acc1, Its1, State1#vqstate{index_state = IndexState1})
+ end.
+
+%%----------------------------------------------------------------------------
+%% Phase changes
+%%----------------------------------------------------------------------------
+
+%% Push state out of RAM until within target_ram_count. First shrinks
+%% RAM usage (pending acks and alphas - order chosen by which rate is
+%% growing faster), then, if the beta count still exceeds what is
+%% permitted by at least ?IO_BATCH_SIZE, pushes betas into delta.
+%% No-op when there is no RAM target.
+reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
+ State;
+reduce_memory_use(State = #vqstate {
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount,
+ rates = #rates { in = AvgIngress,
+ out = AvgEgress,
+ ack_in = AvgAckIngress,
+ ack_out = AvgAckEgress } }) ->
+
+ State1 = #vqstate { q2 = Q2, q3 = Q3 } =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> State;
+ %% Reduce memory of pending acks and alphas. The order is
+ %% determined based on which is growing faster. Whichever
+ %% comes second may very well get a quota of 0 if the
+ %% first manages to push out the max number of messages.
+ S1 -> Funs = case ((AvgAckIngress - AvgAckEgress) >
+ (AvgIngress - AvgEgress)) of
+ true -> [fun limit_ram_acks/2,
+ fun push_alphas_to_betas/2];
+ false -> [fun push_alphas_to_betas/2,
+ fun limit_ram_acks/2]
+ end,
+ {_, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) ->
+ ReduceFun(QuotaN, StateN)
+ end, {S1, State}, Funs),
+ State2
+ end,
+
+ case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
+ permitted_beta_count(State1)) of
+ S2 when S2 >= ?IO_BATCH_SIZE ->
+ %% There is an implicit, but subtle, upper bound here. We
+ %% may shuffle a lot of messages from Q2/3 into delta, but
+ %% the number of these that require any disk operation,
+ %% namely index writing, i.e. messages that are genuine
+ %% betas and not gammas, is bounded by the credit_flow
+ %% limiting of the alpha->beta conversion above.
+ push_betas_to_deltas(S2, State1);
+ _ ->
+ State1
+ end.
+
+%% Move up to Quota pending acks from the RAM tree to the disk tree,
+%% taking the largest seq_ids first (those furthest from redelivery),
+%% writing each message body to disk and trimming the msg_status.
+limit_ram_acks(0, State) ->
+ {0, State};
+limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA }) ->
+ case gb_trees:is_empty(RPA) of
+ true ->
+ {Quota, State};
+ false ->
+ {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
+ {MsgStatus1, State1} =
+ maybe_write_to_disk(true, false, MsgStatus, State),
+ DPA1 = gb_trees:insert(SeqId, m(trim_msg_status(MsgStatus1)), DPA),
+ limit_ram_acks(Quota - 1,
+ State1 #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1 })
+ end.
+
+%% How many betas may be held: unlimited for an empty queue; at most
+%% one index segment's worth (capped by q3's length) when no RAM is
+%% permitted; otherwise a function of the beta+delta population and the
+%% RAM target, never below one segment boundary.
+permitted_beta_count(#vqstate { len = 0 }) ->
+ infinity;
+permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
+ lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
+permitted_beta_count(#vqstate { q1 = Q1,
+ q4 = Q4,
+ target_ram_count = TargetRamCount,
+ len = Len }) ->
+ BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4),
+ lists:max([rabbit_queue_index:next_segment_boundary(0),
+ BetaDelta - ((BetaDelta * BetaDelta) div
+ (BetaDelta + TargetRamCount))]).
+
+%% Excess of Current over Permitted (0 if within the permitted bound).
+chunk_size(Current, Permitted)
+ when Permitted =:= infinity orelse Permitted >= Current ->
+ 0;
+chunk_size(Current, Permitted) ->
+ Current - Permitted.
+
+%% Take the next message from q3, repairing the queue invariants
+%% afterwards: if q3 just became empty and delta is empty too, q2/q4
+%% must be empty as well, so q1 becomes the new q4; if delta is
+%% non-empty, pull the next segment of it into betas.
+%% Returns {empty, State} or {loaded, {MsgStatus, State}}.
+fetch_from_q3(State = #vqstate { q1 = Q1,
+ q2 = Q2,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3,
+ q4 = Q4 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} ->
+ {empty, State};
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of
+ {true, true} ->
+ %% q3 is now empty, it wasn't before;
+ %% delta is still empty. So q2 must be
+ %% empty, and we know q4 is empty
+ %% otherwise we wouldn't be loading from
+ %% q3. As such, we can just set q4 to Q1.
+ true = ?QUEUE:is_empty(Q2), %% ASSERTION
+ true = ?QUEUE:is_empty(Q4), %% ASSERTION
+ State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 };
+ {true, false} ->
+ maybe_deltas_to_betas(State1);
+ {false, _} ->
+ %% q3 still isn't empty, we've not
+ %% touched delta, so the invariants
+ %% between q1, q2, delta and q3 are
+ %% maintained
+ State1
+ end,
+ {loaded, {MsgStatus, State2}}
+ end.
+
+%% Load one index segment of delta into betas (appended to q3),
+%% filtering out entries below the transient threshold and those
+%% pending ack (via betas_from_index_entries/5). If the whole segment
+%% was filtered out, recurse onto the next segment. If delta is now
+%% fully consumed, q2 is joined onto q3 and delta is blanked.
+maybe_deltas_to_betas(State = #vqstate { delta = ?BLANK_DELTA_PATTERN(X) }) ->
+ State;
+maybe_deltas_to_betas(State = #vqstate {
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3,
+ index_state = IndexState,
+ ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ transient_threshold = TransientThreshold }) ->
+ #delta { start_seq_id = DeltaSeqId,
+ count = DeltaCount,
+ end_seq_id = DeltaSeqIdEnd } = Delta,
+ DeltaSeqId1 =
+ lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId),
+ DeltaSeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
+ IndexState),
+ {Q3a, IndexState2} = betas_from_index_entries(List, TransientThreshold,
+ RPA, DPA, IndexState1),
+ State1 = State #vqstate { index_state = IndexState2 },
+ case ?QUEUE:len(Q3a) of
+ 0 ->
+ %% we ignored every message in the segment due to it being
+ %% transient and below the threshold
+ maybe_deltas_to_betas(
+ State1 #vqstate {
+ delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
+ Q3aLen ->
+ Q3b = ?QUEUE:join(Q3, Q3a),
+ case DeltaCount - Q3aLen of
+ 0 ->
+ %% delta is now empty, but it wasn't before, so
+ %% can now join q2 onto q3
+ State1 #vqstate { q2 = ?QUEUE:new(),
+ delta = ?BLANK_DELTA,
+ q3 = ?QUEUE:join(Q3b, Q2) };
+ N when N > 0 ->
+ Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
+ count = N,
+ end_seq_id = DeltaSeqIdEnd }),
+ State1 #vqstate { delta = Delta1,
+ q3 = Q3b }
+ end
+ end.
+
+%% Convert up to Quota alphas into betas: first from the head of q1
+%% (to q3 when delta is empty, else to q2), then from the tail of q4
+%% (to the front of q3). Returns the remaining quota and new state.
+push_alphas_to_betas(Quota, State) ->
+ {Quota1, State1} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out/1,
+ fun (MsgStatus, Q1a,
+ State0 = #vqstate { q3 = Q3, delta = #delta { count = 0 } }) ->
+ State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) };
+ (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) ->
+ State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) }
+ end, Quota, State #vqstate.q1, State),
+ {Quota2, State2} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out_r/1,
+ fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) ->
+ State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a }
+ end, Quota1, State1 #vqstate.q4, State1),
+ {Quota2, State2}.
+
+%% Worker for push_alphas_to_betas/2. Generator pops the next alpha
+%% from Q; Consumer stores the trimmed beta into the right queue.
+%% Stops when the quota runs out, the RAM target is satisfied, the
+%% source queue is empty, or credit_flow is blocked.
+push_alphas_to_betas(_Generator, _Consumer, Quota, _Q,
+ State = #vqstate { ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount })
+ when Quota =:= 0 orelse
+ TargetRamCount =:= infinity orelse
+ TargetRamCount >= RamMsgCount ->
+ {Quota, State};
+push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
+ case credit_flow:blocked() of
+ true -> {Quota, State};
+ false -> case Generator(Q) of
+ {empty, _Q} ->
+ {Quota, State};
+ {{value, MsgStatus}, Qa} ->
+ %% force the body to disk (assert it made it)
+ %% and drop the in-RAM copy
+ {MsgStatus1 = #msg_status { msg_on_disk = true },
+ State1 = #vqstate { ram_msg_count = RamMsgCount }} =
+ maybe_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ State2 = Consumer(MsgStatus2, Qa,
+ State1 #vqstate {
+ ram_msg_count = RamMsgCount - 1 }),
+ push_alphas_to_betas(Generator, Consumer, Quota - 1,
+ Qa, State2)
+ end
+ end.
+
+%% Convert up to Quota betas into delta entries: from the tail of q3
+%% (but never below q3's first segment boundary, so the head segment
+%% stays loaded) and then from the whole of q2.
+push_betas_to_deltas(Quota, State = #vqstate { q2 = Q2,
+ delta = Delta,
+ q3 = Q3,
+ index_state = IndexState }) ->
+ PushState = {Quota, Delta, IndexState},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun rabbit_queue_index:next_segment_boundary/1,
+ Q3, PushState),
+ {Q2a, PushState2} = push_betas_to_deltas(
+ fun ?QUEUE:out/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q2, PushState1),
+ {_, Delta1, IndexState1} = PushState2,
+ State #vqstate { q2 = Q2a,
+ delta = Delta1,
+ q3 = Q3a,
+ index_state = IndexState1 }.
+
+%% Per-queue wrapper: compute the seq_id limit from the queue's head
+%% via LimitFun and bail out early if the whole queue lies below it
+%% (nothing may be pushed); otherwise hand over to the pushing loop.
+push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
+ case ?QUEUE:is_empty(Q) of
+ true ->
+ {Q, PushState};
+ false ->
+ {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q),
+ {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q),
+ Limit = LimitFun(MinSeqId),
+ case MaxSeqId < Limit of
+ true -> {Q, PushState};
+ false -> push_betas_to_deltas1(Generator, Limit, Q, PushState)
+ end
+ end.
+
+%% Pushing loop: pop entries with Generator while quota remains and
+%% the popped seq_id is still >= Limit, forcing each index entry to
+%% disk and expanding delta to cover it.
+push_betas_to_deltas1(_Generator, _Limit, Q,
+ {0, _Delta, _IndexState} = PushState) ->
+ {Q, PushState};
+push_betas_to_deltas1(Generator, Limit, Q,
+ {Quota, Delta, IndexState} = PushState) ->
+ case Generator(Q) of
+ {empty, _Q} ->
+ {Q, PushState};
+ {{value, #msg_status { seq_id = SeqId }}, _Qa}
+ when SeqId < Limit ->
+ {Q, PushState};
+ {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
+ {#msg_status { index_on_disk = true }, IndexState1} =
+ maybe_write_index_to_disk(true, MsgStatus, IndexState),
+ Delta1 = expand_delta(SeqId, Delta),
+ push_betas_to_deltas1(Generator, Limit, Qa,
+ {Quota - 1, Delta1, IndexState1})
+ end.
+
+%%----------------------------------------------------------------------------
+%% Upgrading
+%%----------------------------------------------------------------------------
+
+%% Upgrade step: rewrite every stored basic_message so the single
+%% routing key becomes a list of routing keys.
+multiple_routing_keys() ->
+ transform_storage(
+ fun ({basic_message, ExchangeName, Routing_Key, Content,
+ MsgId, Persistent}) ->
+ {ok, {basic_message, ExchangeName, [Routing_Key], Content,
+ MsgId, Persistent}};
+ (_) -> {error, corrupt_message}
+ end),
+ ok.
+
+
+%% Assumes message store is not running
+%% Apply TransformFun to both the persistent and transient stores.
+transform_storage(TransformFun) ->
+ transform_store(?PERSISTENT_MSG_STORE, TransformFun),
+ transform_store(?TRANSIENT_MSG_STORE, TransformFun).
+
+%% Force-recover a store's directory, then transform every message in
+%% it with TransformFun.
+transform_store(Store, TransformFun) ->
+ rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
+ rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_version).
+
+-export([recorded/0, matches/2, desired/0, desired_for_scope/1,
+ record_desired/0, record_desired_for_scope/1,
+ upgrades_required/1]).
+
+%% -------------------------------------------------------------------
+-ifdef(use_specs).
+
+-export_type([scope/0, step/0]).
+
+-type(scope() :: atom()).
+-type(scope_version() :: [atom()]).
+-type(step() :: {atom(), atom()}).
+
+-type(version() :: [atom()]).
+
+-spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())).
+-spec(matches/2 :: ([A], [A]) -> boolean()).
+-spec(desired/0 :: () -> version()).
+-spec(desired_for_scope/1 :: (scope()) -> scope_version()).
+-spec(record_desired/0 :: () -> 'ok').
+-spec(record_desired_for_scope/1 ::
+ (scope()) -> rabbit_types:ok_or_error(any())).
+-spec(upgrades_required/1 ::
+ (scope()) -> rabbit_types:ok_or_error2([step()], any())).
+
+-endif.
+%% -------------------------------------------------------------------
+
+-define(VERSION_FILENAME, "schema_version").
+-define(SCOPES, [mnesia, local]).
+
+%% -------------------------------------------------------------------
+
+%% Read the recorded schema version (a list of step names) from the
+%% schema_version file. Returns {ok, Version} | {error, Reason}.
+recorded() -> case rabbit_file:read_term_file(schema_filename()) of
+ {ok, [V]} -> {ok, V};
+ {error, _} = Err -> Err
+ end.
+
+%% Persist a version term to the schema_version file.
+record(V) -> ok = rabbit_file:write_term_file(schema_filename(), [V]).
+
+%% Recorded version filtered down to one scope; a scope with no
+%% recorded steps yields the empty list.
+recorded_for_scope(Scope) ->
+ case recorded() of
+ {error, _} = Err ->
+ Err;
+ {ok, Version} ->
+ {ok, case lists:keysearch(Scope, 1, categorise_by_scope(Version)) of
+ false -> [];
+ {value, {Scope, SV1}} -> SV1
+ end}
+ end.
+
+%% Replace one scope's portion of the recorded version, flattening the
+%% per-scope lists back into a single list of step names for record/1.
+record_for_scope(Scope, ScopeVersion) ->
+ case recorded() of
+ {error, _} = Err ->
+ Err;
+ {ok, Version} ->
+ Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version),
+ {Scope, ScopeVersion}),
+ ok = record([Name || {_Scope, Names} <- Version1, Name <- Names])
+ end.
+
+%% -------------------------------------------------------------------
+
+%% Two versions match when they contain the same set of step names,
+%% irrespective of order or duplicates.
+matches(VerA, VerB) ->
+ lists:usort(VerA) =:= lists:usort(VerB).
+
+%% -------------------------------------------------------------------
+
+%% Desired version: the upgrade-graph heads of every scope, flattened.
+desired() -> [Name || Scope <- ?SCOPES, Name <- desired_for_scope(Scope)].
+
+%% Heads of the upgrade graph for one scope.
+desired_for_scope(Scope) -> with_upgrade_graph(fun heads/1, Scope).
+
+%% Persist the full desired version.
+record_desired() -> record(desired()).
+
+%% Persist the desired version for a single scope.
+record_desired_for_scope(Scope) ->
+ record_for_scope(Scope, desired_for_scope(Scope)).
+
+%% Compute the upgrade steps needed to bring Scope up to date.
+%% A missing version file means either a brand-new node (no guid file
+%% either) or a pre-versioning node whose version is unknowable.
+%% Recorded heads not present in the graph mean the data was written
+%% by a future version.
+upgrades_required(Scope) ->
+ case recorded_for_scope(Scope) of
+ {error, enoent} ->
+ case filelib:is_file(rabbit_guid:filename()) of
+ false -> {error, starting_from_scratch};
+ true -> {error, version_not_available}
+ end;
+ {ok, CurrentHeads} ->
+ with_upgrade_graph(
+ fun (G) ->
+ case unknown_heads(CurrentHeads, G) of
+ [] -> {ok, upgrades_to_apply(CurrentHeads, G)};
+ Unknown -> {error, {future_upgrades_found, Unknown}}
+ end
+ end, Scope)
+ end.
+
+%% -------------------------------------------------------------------
+
+%% Build the acyclic upgrade-step digraph for Scope from all modules'
+%% rabbit_upgrade attributes, apply Fun to it, and always delete the
+%% graph afterwards (digraphs are ETS-backed, so they leak otherwise).
+%% Graph-construction failures are rethrown as descriptive errors.
+with_upgrade_graph(Fun, Scope) ->
+ case rabbit_misc:build_acyclic_graph(
+ fun (Module, Steps) -> vertices(Module, Steps, Scope) end,
+ fun (Module, Steps) -> edges(Module, Steps, Scope) end,
+ rabbit_misc:all_module_attributes(rabbit_upgrade)) of
+ {ok, G} -> try
+ Fun(G)
+ after
+ true = digraph:delete(G)
+ end;
+ {error, {vertex, duplicate, StepName}} ->
+ throw({error, {duplicate_upgrade_step, StepName}});
+ {error, {edge, {bad_vertex, StepName}, _From, _To}} ->
+ throw({error, {dependency_on_unknown_upgrade_step, StepName}});
+ {error, {edge, {bad_edge, StepNames}, _From, _To}} ->
+ throw({error, {cycle_in_upgrade_steps, StepNames}})
+ end.
+
+%% One vertex {StepName, {Module, StepName}} per step in this scope.
+vertices(Module, Steps, Scope0) ->
+ [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps,
+ Scope0 == Scope1].
+
+%% One edge per requirement of each step in this scope, pointing from
+%% the required step to the step that requires it.
+edges(_Module, Steps, Scope0) ->
+ [{Require, StepName} || {StepName, Scope1, Requires} <- Steps,
+ Require <- Requires,
+ Scope0 == Scope1].
+%% Recorded heads that do not exist as vertices in the graph - i.e.
+%% steps applied by a future version of the software.
+unknown_heads(Heads, G) ->
+ [H || H <- Heads, digraph:vertex(G, H) =:= false].
+
+upgrades_to_apply(Heads, G) ->
+ %% Take all the vertices which can reach the known heads. That's
+ %% everything we've already applied. Subtract that from all
+ %% vertices: that's what we have to apply.
+ Unsorted = sets:to_list(
+ sets:subtract(
+ sets:from_list(digraph:vertices(G)),
+ sets:from_list(digraph_utils:reaching(Heads, G)))),
+ %% Form a subgraph from that list and find a topological ordering
+ %% so we can invoke them in order.
+ [element(2, digraph:vertex(G, StepName)) ||
+ StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))].
+
+%% Heads of the graph: vertices with no outgoing edges (no step
+%% depends on them), sorted for stable comparison.
+heads(G) ->
+ lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]).
+
+%% -------------------------------------------------------------------
+
+%% Group a flat list of applied step names by their declared scope,
+%% looked up from the modules' rabbit_upgrade attributes.
+%% Returns [{Scope, [StepName]}].
+categorise_by_scope(Version) when is_list(Version) ->
+ Categorised =
+ [{Scope, Name} || {_Module, Attributes} <-
+ rabbit_misc:all_module_attributes(rabbit_upgrade),
+ {Name, Scope, _Requires} <- Attributes,
+ lists:member(Name, Version)],
+ orddict:to_list(
+ lists:foldl(fun ({Scope, Name}, CatVersion) ->
+ rabbit_misc:orddict_cons(Scope, Name, CatVersion)
+ end, orddict:new(), Categorised)).
+
+%% The mnesia directory holds the schema_version file.
+dir() -> rabbit_mnesia:dir().
+
+schema_filename() -> filename:join(dir(), ?VERSION_FILENAME).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_vhost).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export([add/1, delete/1, exists/1, list/0, with/2, assert/1]).
+-export([info/1, info/2, info_all/0, info_all/1]).
+
+-ifdef(use_specs).
+
+-spec(add/1 :: (rabbit_types:vhost()) -> 'ok').
+-spec(delete/1 :: (rabbit_types:vhost()) -> 'ok').
+-spec(exists/1 :: (rabbit_types:vhost()) -> boolean()).
+-spec(list/0 :: () -> [rabbit_types:vhost()]).
+-spec(with/2 :: (rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A).
+-spec(assert/1 :: (rabbit_types:vhost()) -> 'ok').
+
+-spec(info/1 :: (rabbit_types:vhost()) -> rabbit_types:infos()).
+-spec(info/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
+ -> rabbit_types:infos()).
+-spec(info_all/0 :: () -> [rabbit_types:infos()]).
+-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS, [name, tracing]).
+
+%% Create a vhost. The mnesia transaction inserts the #vhost record
+%% (aborting if it already exists); the post-commit fun (second arg
+%% of execute_mnesia_transaction/2, given ok and whether the record
+%% pre-existed) declares the standard exchanges for a fresh vhost.
+%% Emits a vhost_created event.
+add(VHostPath) ->
+ rabbit_log:info("Adding vhost '~s'~n", [VHostPath]),
+ R = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_vhost, VHostPath}) of
+ [] -> ok = mnesia:write(rabbit_vhost,
+ #vhost{virtual_host = VHostPath},
+ write);
+ [_] -> mnesia:abort({vhost_already_exists, VHostPath})
+ end
+ end,
+ fun (ok, true) ->
+ ok;
+ (ok, false) ->
+ [rabbit_exchange:declare(
+ rabbit_misc:r(VHostPath, exchange, Name),
+ Type, true, false, Internal, []) ||
+ {Name, Type, Internal} <-
+ [{<<"">>, direct, false},
+ {<<"amq.direct">>, direct, false},
+ {<<"amq.topic">>, topic, false},
+ %% per 0-9-1 pdf
+ {<<"amq.match">>, headers, false},
+ %% per 0-9-1 xml
+ {<<"amq.headers">>, headers, false},
+ {<<"amq.fanout">>, fanout, false},
+ {<<"amq.rabbitmq.trace">>, topic, true}]],
+ ok
+ end),
+ rabbit_event:notify(vhost_created, info(VHostPath)),
+ R.
+
+%% Delete a vhost: remove its queues and exchanges (outside any tx -
+%% see FIXME below), then delete the vhost record transactionally;
+%% internal_delete/1 returns deferred cleanup funs which are run last.
+delete(VHostPath) ->
+ %% FIXME: We are forced to delete the queues and exchanges outside
+ %% the TX below. Queue deletion involves sending messages to the queue
+ %% process, which in turn results in further mnesia actions and
+ %% eventually the termination of that process. Exchange deletion causes
+ %% notifications which must be sent outside the TX
+ rabbit_log:info("Deleting vhost '~s'~n", [VHostPath]),
+ QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false) end,
+ [assert_benign(rabbit_amqqueue:with(Name, QDelFun)) ||
+ #amqqueue{name = Name} <- rabbit_amqqueue:list(VHostPath)],
+ [assert_benign(rabbit_exchange:delete(Name, false)) ||
+ #exchange{name = Name} <- rabbit_exchange:list(VHostPath)],
+ Funs = rabbit_misc:execute_mnesia_transaction(
+ with(VHostPath, fun () -> internal_delete(VHostPath) end)),
+ ok = rabbit_event:notify(vhost_deleted, [{name, VHostPath}]),
+ [ok = Fun() || Fun <- Funs],
+ ok.
+
+%% Tolerate the benign outcomes of a best-effort delete; anything
+%% else crashes (no catch-all clause by design).
+assert_benign(ok) -> ok;
+assert_benign({ok, _}) -> ok;
+assert_benign({error, not_found}) -> ok;
+assert_benign({error, {absent, Q}}) ->
+ %% We have a durable queue on a down node. Removing the mnesia
+ %% entries here is safe. If/when the down node restarts, it will
+ %% clear out the on-disk storage of the queue.
+ case rabbit_amqqueue:internal_delete(Q#amqqueue.name) of
+ ok -> ok;
+ {error, not_found} -> ok
+ end.
+
+%% Transactional part of vhost deletion: clear all user permissions,
+%% runtime parameters and policies for the vhost, then delete the
+%% vhost record. Returns the deferred post-tx funs produced by the
+%% parameter and policy deletions.
+internal_delete(VHostPath) ->
+ [ok = rabbit_auth_backend_internal:clear_permissions(
+ proplists:get_value(user, Info), VHostPath)
+ || Info <- rabbit_auth_backend_internal:list_vhost_permissions(VHostPath)],
+ Fs1 = [rabbit_runtime_parameters:clear(VHostPath,
+ proplists:get_value(component, Info),
+ proplists:get_value(name, Info))
+ || Info <- rabbit_runtime_parameters:list(VHostPath)],
+ Fs2 = [rabbit_policy:delete(VHostPath, proplists:get_value(name, Info))
+ || Info <- rabbit_policy:list(VHostPath)],
+ ok = mnesia:delete({rabbit_vhost, VHostPath}),
+ Fs1 ++ Fs2.
+
+%% Whether the vhost exists (dirty read; no transaction required).
+exists(VHostPath) ->
+ mnesia:dirty_read({rabbit_vhost, VHostPath}) /= [].
+
+%% All vhost names.
+list() ->
+ mnesia:dirty_all_keys(rabbit_vhost).
+
+%% Wrap Thunk so that, inside an mnesia tx, it only runs if the vhost
+%% exists, aborting with no_such_vhost otherwise.
+with(VHostPath, Thunk) ->
+ fun () ->
+ case mnesia:read({rabbit_vhost, VHostPath}) of
+ [] ->
+ mnesia:abort({no_such_vhost, VHostPath});
+ [_V] ->
+ Thunk()
+ end
+ end.
+
+%% Like with/2 but outside an Mnesia tx
+assert(VHostPath) -> case exists(VHostPath) of
+ true -> ok;
+ false -> throw({error, {no_such_vhost, VHostPath}})
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% Build an info proplist for the requested items.
+infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
+
+%% Per-item info accessors; unknown items are an error by design.
+i(name, VHost) -> VHost;
+i(tracing, VHost) -> rabbit_trace:enabled(VHost);
+i(Item, _) -> throw({bad_argument, Item}).
+
+info(VHost) -> infos(?INFO_KEYS, VHost).
+info(VHost, Items) -> infos(Items, VHost).
+
+info_all() -> info_all(?INFO_KEYS).
+info_all(Items) -> [info(VHost, Items) || VHost <- list()].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_vm).
+
+-export([memory/0]).
+
+-define(MAGIC_PLUGINS, ["mochiweb", "webmachine", "cowboy", "sockjs",
+ "rfc4627_jsonrpc"]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(memory/0 :: () -> rabbit_types:infos()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Like erlang:memory(), but with awareness of rabbit-y things
+memory() ->
+ ConnProcs = [rabbit_tcp_client_sup, ssl_connection_sup, amqp_sup],
+ QProcs = [rabbit_amqqueue_sup, rabbit_mirror_queue_slave_sup],
+ MsgIndexProcs = [msg_store_transient, msg_store_persistent],
+ MgmtDbProcs = [rabbit_mgmt_sup_sup],
+ PluginProcs = plugin_sups(),
+
+ All = [ConnProcs, QProcs, MsgIndexProcs, MgmtDbProcs, PluginProcs],
+
+ {Sums, _Other} = sum_processes(lists:append(All), [memory]),
+
+ [Conns, Qs, MsgIndexProc, MgmtDbProc, Plugins] =
+ [aggregate_memory(Names, Sums) || Names <- All],
+
+ Mnesia = mnesia_memory(),
+ MsgIndexETS = ets_memory(rabbit_msg_store_ets_index),
+ MgmtDbETS = ets_memory(rabbit_mgmt_db),
+
+ [{total, Total},
+ {processes, Processes},
+ {ets, ETS},
+ {atom, Atom},
+ {binary, Bin},
+ {code, Code},
+ {system, System}] =
+ erlang:memory([total, processes, ets, atom, binary, code, system]),
+
+ OtherProc = Processes - Conns - Qs - MsgIndexProc - Plugins - MgmtDbProc,
+
+ [{total, Total},
+ {connection_procs, Conns},
+ {queue_procs, Qs},
+ {plugins, Plugins},
+ {other_proc, lists:max([0, OtherProc])}, %% [1]
+ {mnesia, Mnesia},
+ {mgmt_db, MgmtDbETS + MgmtDbProc},
+ {msg_index, MsgIndexETS + MsgIndexProc},
+ {other_ets, ETS - Mnesia - MsgIndexETS - MgmtDbETS},
+ {binary, Bin},
+ {code, Code},
+ {atom, Atom},
+ {other_system, System - ETS - Atom - Bin - Code}].
+
+%% [1] - erlang:memory(processes) can be less than the sum of its
+%% parts. Rather than display something nonsensical, just silence any
+%% claims about negative memory. See
+%% http://erlang.org/pipermail/erlang-questions/2012-September/069320.html
+
+%%----------------------------------------------------------------------------
+
%% Total bytes used by all Mnesia tables, or 0 if Mnesia is not running.
mnesia_memory() ->
    case mnesia:system_info(is_running) of
        yes ->
            TableWords = [mnesia:table_info(Tab, memory) ||
                             Tab <- mnesia:system_info(tables)],
            lists:sum([bytes(Words) || Words <- TableWords]);
        _ ->
            0
    end.
+
%% Total bytes used by all ETS tables registered under the given name.
ets_memory(Name) ->
    Sizes = [ets:info(Tab, memory) || Tab <- ets:all(),
                                      ets:info(Tab, name) =:= Name],
    lists:sum([bytes(Size) || Size <- Sizes]).
+
%% Convert a size in machine words (as reported by mnesia/ets) to bytes.
bytes(Words) ->
    WordSize = erlang:system_info(wordsize),
    Words * WordSize.
+
%% Names/pids of the top-level supervisors of all running plugin
%% applications.
plugin_sups() ->
    PluginApps = [App || {App, _, _} <- rabbit_misc:which_applications(),
                         is_plugin(atom_to_list(App))],
    lists:append([plugin_sup(App) || App <- PluginApps]).
+
%% Locate the root supervisor of application App via its application
%% master; returns [] if the app has no master or no child process.
plugin_sup(App) ->
    case application_controller:get_master(App) of
        undefined -> [];
        Master    -> master_child_name(Master)
    end.

%% Resolve the application master's child to a registered name (or pid).
master_child_name(Master) ->
    case application_master:get_child(Master) of
        {Pid, _} when is_pid(Pid) -> [process_name(Pid)];
        Pid when is_pid(Pid)      -> [process_name(Pid)];
        _                         -> []
    end.
+
%% Registered name of Pid if it has one, otherwise the pid itself.
process_name(Pid) ->
    Info = process_info(Pid, registered_name),
    case Info of
        {registered_name, Name} -> Name;
        _                       -> Pid
    end.
+
%% Is the named application a plugin? Anything prefixed "rabbitmq_"
%% counts, as does the fixed list of known third-party plugin deps.
is_plugin(App) ->
    case App of
        "rabbitmq_" ++ _ -> true;
        _                -> lists:member(App, ?MAGIC_PLUGINS)
    end.
+
%% Sum the accumulated 'memory' values of the given names.
aggregate_memory(Names, Sums) ->
    lists:foldl(fun (Name, Total) ->
                        Total + extract_memory(Name, Sums)
                end, 0, Names).
+
%% Fetch the accumulated 'memory' info item for Name from the summary
%% produced by sum_processes/2,3. A missing name or item causes a
%% badmatch, which is intentional: every name queried was seeded into
%% the accumulator. Uses lists:keyfind/3 (the modern BIF) rather than
%% the obsolete lists:keysearch/3; the crash-on-miss semantics are
%% unchanged (keyfind returns 'false', which fails the tuple match).
extract_memory(Name, Sums) ->
    {_, Accs} = lists:keyfind(Name, 1, Sums),
    {memory, V} = lists:keyfind(memory, 1, Accs),
    V.
+
+%%----------------------------------------------------------------------------
+
+%% NB: this code is non-rabbit specific.
+
+-ifdef(use_specs).
+-type(process() :: pid() | atom()).
+-type(info_key() :: atom()).
+-type(info_value() :: any()).
+-type(info_item() :: {info_key(), info_value()}).
+-type(accumulate() :: fun ((info_key(), info_value(), info_value()) ->
+ info_value())).
+-spec(sum_processes/2 :: ([process()], [info_key()]) ->
+ {[{process(), [info_item()]}], [info_item()]}).
+-spec(sum_processes/3 :: ([process()], accumulate(), [info_item()]) ->
+ {[{process(), [info_item()]}], [info_item()]}).
+-endif.
+
%% Convenience wrapper: sum the given numeric info Items over all
%% processes, starting each accumulator at 0 and combining by addition.
sum_processes(Names, Items) ->
    Acc0 = [{Item, 0} || Item <- Items],
    Add = fun (_Key, X, Y) -> X + Y end,
    sum_processes(Names, Add, Acc0).
+
+%% summarize the process_info of all processes based on their
+%% '$ancestor' hierarchy, recorded in their process dictionary.
+%%
+%% The function takes
+%%
+%% 1) a list of names/pids of processes that are accumulation points
+%% in the hierarchy.
+%%
+%% 2) a function that aggregates individual info items - taking the
+%% info item key, value and accumulated value as the input and
+%% producing a new accumulated value.
+%%
+%% 3) a list of info item key / initial accumulator value pairs.
+%%
+%% The process_info of a process is accumulated at the nearest of its
+%% ancestors that is mentioned in the first argument, or, if no such
+%% ancestor exists or the ancestor information is absent, in a special
+%% 'other' bucket.
+%%
+%% The result is a pair consisting of
+%%
+%% 1) a k/v list, containing for each of the accumulation names/pids a
+%% list of info items, containing the accumulated data, and
+%%
+%% 2) the 'other' bucket - a list of info items containing the
+%% accumulated data of all processes with no matching ancestors
+%%
+%% Note that this function operates on names as well as pids, but
+%% these must match whatever is contained in the '$ancestor' process
+%% dictionary entry. Generally that means for all registered processes
+%% the name should be used.
sum_processes(Names, Fun, Acc0) ->
    Items = [Item || {Item, _Val0} <- Acc0],
    Acc0Dict = orddict:from_list(Acc0),
    %% one accumulator dict per accumulation name, all starting at Acc0
    NameAccs0 = orddict:from_list([{Name, Acc0Dict} || Name <- Names]),
    {NameAccs, OtherAcc} =
        lists:foldl(
          fun (Pid, Acc) ->
                  InfoItems = [registered_name, dictionary | Items],
                  case process_info(Pid, InfoItems) of
                      undefined ->
                          %% process died between processes() and here;
                          %% skip it
                          Acc;
                      [{registered_name, RegName}, {dictionary, D} | Vals] ->
                          %% see docs for process_info/2 for the
                          %% special handling of 'registered_name'
                          %% info items
                          Extra = case RegName of
                                      [] -> [];
                                      N -> [N]
                                  end,
                          accumulate(find_ancestor(Extra, D, Names), Fun,
                                     orddict:from_list(Vals), Acc)
                  end
          end, {NameAccs0, Acc0Dict}, processes()),
    %% these conversions aren't strictly necessary; we do them simply
    %% for the sake of encapsulating the representation.
    {[{Name, orddict:to_list(Accs)} ||
         {Name, Accs} <- orddict:to_list(NameAccs)],
     orddict:to_list(OtherAcc)}.
+
%% Find the nearest ancestor of a process that appears in Names,
%% searching the process's own name(s) (Extra) first and then its
%% '$ancestors' chain from the process dictionary D. Returns the
%% matching name/pid, or 'undefined' if none matches.
%%
%% Uses lists:keyfind/3 instead of the obsolete lists:keysearch/3, and
%% lists:dropwhile/2 instead of lists:splitwith/2 since the prefix of
%% non-matching ancestors was discarded anyway.
find_ancestor(Extra, D, Names) ->
    Ancestors = case lists:keyfind('$ancestors', 1, D) of
                    {_, Ancs} -> Ancs;
                    false     -> []
                end,
    case lists:dropwhile(fun (A) -> not lists:member(A, Names) end,
                         Extra ++ Ancestors) of
        []         -> undefined;
        [Name | _] -> Name
    end.
+
%% Merge a process's info values into the accumulator belonging to its
%% matched ancestor Name, or into the 'other' bucket when no ancestor
%% matched (Name =:= undefined).
accumulate(undefined, Fun, ValsDict, {NameAccs, OtherAcc}) ->
    Merged = orddict:merge(Fun, ValsDict, OtherAcc),
    {NameAccs, Merged};
accumulate(Name, Fun, ValsDict, {NameAccs, OtherAcc}) ->
    MergeInto = fun (Acc) -> orddict:merge(Fun, ValsDict, Acc) end,
    NameAccs1 = orddict:update(Name, MergeInto, NameAccs),
    {NameAccs1, OtherAcc}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_writer).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([start/6, start_link/6, start/7, start_link/7]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-export([send_command/2, send_command/3,
+ send_command_sync/2, send_command_sync/3,
+ send_command_and_notify/4, send_command_and_notify/5,
+ send_command_flow/2, send_command_flow/3,
+ flush/1]).
+-export([internal_send_command/4, internal_send_command/6]).
+
+%% internal
+-export([enter_mainloop/2, mainloop/2, mainloop1/2]).
+
+-record(wstate, {sock, channel, frame_max, protocol, reader,
+ stats_timer, pending}).
+
+-define(HIBERNATE_AFTER, 5000).
+
+%%---------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start/6 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name())
+ -> rabbit_types:ok(pid())).
+-spec(start_link/6 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name())
+ -> rabbit_types:ok(pid())).
+-spec(start/7 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean())
+ -> rabbit_types:ok(pid())).
+-spec(start_link/7 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean())
+ -> rabbit_types:ok(pid())).
+
+-spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
+-spec(system_continue/3 :: (_,_,#wstate{}) -> any()).
+-spec(system_terminate/4 :: (_,_,_,_) -> none()).
+
+-spec(send_command/2 ::
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(send_command/3 ::
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok').
+-spec(send_command_sync/2 ::
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(send_command_sync/3 ::
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok').
+-spec(send_command_and_notify/4 ::
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
+ -> 'ok').
+-spec(send_command_and_notify/5 ::
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:content())
+ -> 'ok').
+-spec(send_command_flow/2 ::
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
+-spec(send_command_flow/3 ::
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok').
+-spec(flush/1 :: (pid()) -> 'ok').
+-spec(internal_send_command/4 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:protocol())
+ -> 'ok').
+-spec(internal_send_command/6 ::
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol())
+ -> 'ok').
+
+-endif.
+
+%%---------------------------------------------------------------------------
+
%% Start an unlinked/linked writer with stats collection disabled.
start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
    start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).

start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
    start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).

%% Start a writer process. ReaderWantsStats selects whether the stats
%% timer starts enabled or disabled. Identity is only used to name the
%% process for diagnostics (see enter_mainloop/2).
start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
      ReaderWantsStats) ->
    State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
                          ReaderWantsStats),
    {ok, proc_lib:spawn(?MODULE, enter_mainloop, [Identity, State])}.

start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
           ReaderWantsStats) ->
    State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
                          ReaderWantsStats),
    {ok, proc_lib:spawn_link(?MODULE, enter_mainloop, [Identity, State])}.
+
%% Build the initial #wstate{}, with its stats timer initialised as
%% enabled or disabled depending on whether the reader wants stats.
initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
    InitTimer = case ReaderWantsStats of
                    true  -> fun rabbit_event:init_stats_timer/2;
                    false -> fun rabbit_event:init_disabled_stats_timer/2
                end,
    State = #wstate{sock      = Sock,
                    channel   = Channel,
                    frame_max = FrameMax,
                    protocol  = Protocol,
                    reader    = ReaderPid,
                    pending   = []},
    InitTimer(State, #wstate.stats_timer).
+
%% sys callbacks, letting this raw proc_lib process take part in the
%% system message protocol (suspend/resume/code change). Note that the
%% 'parent' passed by sys is the reader pid.
system_continue(Parent, Deb, State) ->
    mainloop(Deb, State#wstate{reader = Parent}).

system_terminate(Reason, _Parent, _Deb, _State) ->
    exit(Reason).

system_code_change(Misc, _Module, _OldVsn, _Extra) ->
    {ok, Misc}.
+
%% Entry point of the spawned writer process: set up sys debugging,
%% record the process name (Identity) for diagnostics, and loop.
enter_mainloop(Identity, State) ->
    Deb = sys:debug_options([]),
    ?store_proc_name(Identity),
    mainloop(Deb, State).
+
%% Run the loop, converting any exit (e.g. a send failure) into a
%% {channel_exit, ...} notification to the reader, which owns
%% connection-level error handling; the writer itself then returns
%% normally.
mainloop(Deb, State) ->
    try
        mainloop1(Deb, State)
    catch
        exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
                      ReaderPid ! {channel_exit, Channel, Error}
    end,
    done.
+
%% With nothing pending we can hibernate when idle; with pending output
%% we poll ('after 0') so that an empty mailbox triggers a flush
%% immediately rather than letting data sit in the buffer.
mainloop1(Deb, State = #wstate{pending = []}) ->
    receive
        Message -> {Deb1, State1} = handle_message(Deb, Message, State),
                   ?MODULE:mainloop1(Deb1, State1)
    after ?HIBERNATE_AFTER ->
            erlang:hibernate(?MODULE, mainloop, [Deb, State])
    end;
mainloop1(Deb, State) ->
    receive
        Message -> {Deb1, State1} = handle_message(Deb, Message, State),
                   ?MODULE:mainloop1(Deb1, State1)
    after 0 ->
            ?MODULE:mainloop1(Deb, internal_flush(State))
    end.
+
%% Dispatch a system message to sys; everything else is handled by
%% handle_message/2 below, which returns the new state.
handle_message(Deb, {system, From, Req}, State = #wstate{reader = Parent}) ->
    sys:handle_system_msg(Req, From, Parent, ?MODULE, Deb, State);
handle_message(Deb, Message, State) ->
    {Deb, handle_message(Message, State)}.

handle_message({send_command, MethodRecord}, State) ->
    internal_send_command_async(MethodRecord, State);
handle_message({send_command, MethodRecord, Content}, State) ->
    internal_send_command_async(MethodRecord, Content, State);
handle_message({send_command_flow, MethodRecord, Sender}, State) ->
    %% ack the credit granted by send_command_flow/2,3
    credit_flow:ack(Sender),
    internal_send_command_async(MethodRecord, State);
handle_message({send_command_flow, MethodRecord, Content, Sender}, State) ->
    credit_flow:ack(Sender),
    internal_send_command_async(MethodRecord, Content, State);
handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
    %% sync sends flush before replying, so the caller knows the data
    %% has reached the port
    State1 = internal_flush(
               internal_send_command_async(MethodRecord, State)),
    gen_server:reply(From, ok),
    State1;
handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
               State) ->
    State1 = internal_flush(
               internal_send_command_async(MethodRecord, Content, State)),
    gen_server:reply(From, ok),
    State1;
handle_message({'$gen_call', From, flush}, State) ->
    State1 = internal_flush(State),
    gen_server:reply(From, ok),
    State1;
handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
    State1 = internal_send_command_async(MethodRecord, State),
    rabbit_amqqueue:notify_sent(QPid, ChPid),
    State1;
handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
               State) ->
    State1 = internal_send_command_async(MethodRecord, Content, State),
    rabbit_amqqueue:notify_sent(QPid, ChPid),
    State1;
handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
    rabbit_amqqueue:notify_sent_queue_down(QPid),
    State;
handle_message({inet_reply, _, ok}, State) ->
    %% async result of port_cmd/2: send succeeded, (re)arm stats timer
    rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
handle_message({inet_reply, _, Status}, _State) ->
    %% async result of port_cmd/2: send failed
    exit({writer, send_failed, Status});
handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
    ReaderPid ! ensure_stats,
    rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
handle_message(Message, _State) ->
    exit({writer, message_not_understood, Message}).
+
+%%---------------------------------------------------------------------------
+
%% Public API. W is the writer pid. The plain send_command* functions
%% are fire-and-forget; the _sync variants and flush/1 perform a
%% gen_server-style call and wait for 'ok'.

send_command(W, MethodRecord) ->
    W ! {send_command, MethodRecord},
    ok.

send_command(W, MethodRecord, Content) ->
    W ! {send_command, MethodRecord, Content},
    ok.

%% As send_command/2,3 but under credit_flow control; the writer acks
%% the credit when it processes the message.
send_command_flow(W, MethodRecord) ->
    credit_flow:send(W),
    W ! {send_command_flow, MethodRecord, self()},
    ok.

send_command_flow(W, MethodRecord, Content) ->
    credit_flow:send(W),
    W ! {send_command_flow, MethodRecord, Content, self()},
    ok.

send_command_sync(W, MethodRecord) ->
    call(W, {send_command_sync, MethodRecord}).

send_command_sync(W, MethodRecord, Content) ->
    call(W, {send_command_sync, MethodRecord, Content}).

%% As send_command/2,3 but the writer additionally notifies queue Q
%% that the message was sent on behalf of channel ChPid.
send_command_and_notify(W, Q, ChPid, MethodRecord) ->
    W ! {send_command_and_notify, Q, ChPid, MethodRecord},
    ok.

send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
    W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
    ok.

%% Synchronously flush any pending output.
flush(W) -> call(W, flush).
+
+%%---------------------------------------------------------------------------
+
%% Synchronous call to the writer. Uses gen:call with the '$gen_call'
%% tag so the writer can reply via gen_server:reply/2.
call(Pid, Msg) ->
    {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
    Res.
+
+%%---------------------------------------------------------------------------
+
%% Encode a content-less method into a single wire frame.
assemble_frame(Channel, MethodRecord, Protocol) ->
    rabbit_binary_generator:build_simple_method_frame(
      Channel, MethodRecord, Protocol).

%% Encode a method-with-content into a method frame followed by the
%% content frames, split according to FrameMax.
assemble_frames(Channel, MethodRecord, Content, FrameMax, Protocol) ->
    MethodName = rabbit_misc:method_record_type(MethodRecord),
    true = Protocol:method_has_content(MethodName), % assertion
    MethodFrame = rabbit_binary_generator:build_simple_method_frame(
                    Channel, MethodRecord, Protocol),
    ContentFrames = rabbit_binary_generator:build_simple_content_frames(
                      Channel, Content, FrameMax, Protocol),
    [MethodFrame | ContentFrames].
+
%% Send Data on Sock synchronously, converting send errors into a
%% thrown {inet_error, _} via rabbit_misc:throw_on_error/2.
tcp_send(Sock, Data) ->
    SendFun = fun () -> rabbit_net:send(Sock, Data) end,
    rabbit_misc:throw_on_error(inet_error, SendFun).
+
%% Synchronously send a content-less method frame on Sock.
internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
    ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).

%% As above for a method with content: frames are sent one at a time;
%% if a send yields a non-ok result the remaining frames are skipped
%% and that result is threaded through (then asserted against 'ok').
%% Note tcp_send/2 normally throws on error, so the non-ok path is a
%% belt-and-braces fallback.
internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
                      Protocol) ->
    ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
                         (_Frame, Other) -> Other
                     end, ok, assemble_frames(Channel, MethodRecord,
                                              Content, FrameMax, Protocol)).
+
%% Queue a method frame for asynchronous sending. 'pending' is kept in
%% reverse order (internal_flush/1 reverses it before sending); the
%% write is flushed early if the buffered size crosses the threshold.
internal_send_command_async(MethodRecord,
                            State = #wstate{channel = Channel,
                                            protocol = Protocol,
                                            pending = Pending}) ->
    Frame = assemble_frame(Channel, MethodRecord, Protocol),
    maybe_flush(State#wstate{pending = [Frame | Pending]}).

%% As above for a method with content; the frame list is prepended as
%% a single (nested) iolist element.
internal_send_command_async(MethodRecord, Content,
                            State = #wstate{channel = Channel,
                                            frame_max = FrameMax,
                                            protocol = Protocol,
                                            pending = Pending}) ->
    Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
                             Protocol),
    maybe_flush(State#wstate{pending = [Frames | Pending]}).
+
+%% This magic number is the tcp-over-ethernet MSS (1460) minus the
+%% minimum size of an AMQP basic.deliver method frame (24) plus basic
+%% content header (22). The idea is that we want to flush just before
+%% exceeding the MSS.
+-define(FLUSH_THRESHOLD, 1414).
+
%% Flush the pending iolist if it has reached the flush threshold,
%% otherwise leave the state untouched.
maybe_flush(State = #wstate{pending = Pending}) ->
    PendingSize = iolist_size(Pending),
    if PendingSize >= ?FLUSH_THRESHOLD -> internal_flush(State);
       true                            -> State
    end.
+
%% Write all pending data (accumulated in reverse order) to the socket
%% and clear the buffer; a no-op when nothing is pending.
internal_flush(State = #wstate{sock = Sock, pending = Pending}) ->
    case Pending of
        [] -> State;
        _  -> ok = port_cmd(Sock, lists:reverse(Pending)),
              State#wstate{pending = []}
    end.
+
+%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
+%% Status} to obtain the result. That is bad when it is called from
+%% the writer since it requires scanning of the writers possibly quite
+%% large message queue.
+%%
+%% So instead we lift the code from prim_inet:send/2, which is what
+%% gen_tcp:send/2 calls, do the first half here and then just process
+%% the result code in handle_message/2 as and when it arrives.
+%%
+%% This means we may end up happily sending data down a closed/broken
+%% socket, but that's ok since a) data in the buffers will be lost in
+%% any case (so qualitatively we are no worse off than if we used
+%% gen_tcp:send/2), and b) we do detect the changed socket status
+%% eventually, i.e. when we get round to handling the result code.
+%%
+%% Also note that the port has bounded buffers and port_command blocks
+%% when these are full. So the fact that we process the result
+%% asynchronously does not impact flow control.
%% Issue an asynchronous send on the socket's port (see the comment
%% above); the send result arrives later as an {inet_reply, _, _}
%% message handled in handle_message/2. A synchronous failure from
%% port_command (e.g. closed port) exits immediately.
port_cmd(Sock, Data) ->
    true = try rabbit_net:port_command(Sock, Data)
           catch error:Error -> exit({writer, send_failed, Error})
           end,
    ok.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% Invoke callbacks on startup and termination.
+%%
+%% Simply hook this process into a supervision hierarchy, to have the
+%% callbacks invoked at a precise point during the establishment and
+%% teardown of that hierarchy, respectively.
+%%
+%% Or launch the process independently, and link to it, to have the
+%% callbacks invoked on startup and when the linked process
+%% terminates, respectively.
+
+-module(supervised_lifecycle).
+
+-behavior(gen_server).
+
+-export([start_link/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/3 :: (atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% Start the lifecycle process registered locally as Name. StartMFA is
%% invoked during init, StopMFA on termination.
start_link(Name, StartMFA, StopMFA) ->
    gen_server:start_link({local, Name}, ?MODULE, [StartMFA, StopMFA], []).
+
+%%----------------------------------------------------------------------------
+
init([{M, F, A}, StopMFA]) ->
    %% trap exits so terminate/2 (and hence StopMFA) runs when the
    %% supervisor shuts us down or a linked process dies
    process_flag(trap_exit, true),
    %% run the startup callback; its return value is ignored
    apply(M, F, A),
    %% keep the stop MFA as our state for terminate/2
    {ok, StopMFA}.
+
%% No calls/casts/infos are expected. Note handle_call returns
%% 'noreply', so an unexpected gen_server:call will leave its caller
%% waiting (until its timeout) rather than crash this process.
handle_call(_Request, _From, State) -> {noreply, State}.

handle_cast(_Msg, State) -> {noreply, State}.

handle_info(_Info, State) -> {noreply, State}.
+
%% Run the stop callback stashed in our state by init/1.
terminate(_Reason, {M, F, A}) ->
    apply(M, F, A),
    ok.

code_change(_OldVsn, State, _Extra) -> {ok, State}.
--- /dev/null
+%% This file is a copy of supervisor.erl from the R16B Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) the module name is supervisor2
+%%
+%% 2) a find_child/2 utility function has been added
+%%
+%% 3) Added an 'intrinsic' restart type. Like the transient type, this
+%% type means the child should only be restarted if the child exits
+%% abnormally. Unlike the transient type, if the child exits
+%% normally, the supervisor itself also exits normally. If the
+%% child is a supervisor and it exits normally (i.e. with reason of
+%% 'shutdown') then the child's parent also exits normally.
+%%
+%% 4) child specifications can contain, as the restart type, a tuple
+%% {permanent, Delay} | {transient, Delay} | {intrinsic, Delay}
+%% where Delay >= 0 (see point (3) above for intrinsic). The delay,
+%% in seconds, indicates what should happen if a child, upon being
+%% restarted, exceeds the MaxT and MaxR parameters. Thus, if a
+%% child exits, it is restarted as normal. If it exits sufficiently
+%% quickly and often to exceed the boundaries set by the MaxT and
+%% MaxR parameters, and a Delay is specified, then rather than
+%% stopping the supervisor, the supervisor instead continues and
+%% tries to start up the child again, Delay seconds later.
+%%
+%% Note that if a child is delay-restarted this will reset the
+%% count of restarts towards MaxR and MaxT. This matters if MaxT >
+%% Delay, since otherwise we would fail to restart after the delay.
+%%
+%% Sometimes, you may wish for a transient or intrinsic child to
+%% exit abnormally so that it gets restarted, but still log
+%% nothing. gen_server will log any exit reason other than
+%% 'normal', 'shutdown' or {'shutdown', _}. Thus the exit reason of
+%% {'shutdown', 'restart'} is interpreted to mean you wish the
+%% child to be restarted according to the delay parameters, but
+%% gen_server will not log the error. Thus from gen_server's
+%% perspective it's a normal exit, whilst from supervisor's
+%% perspective, it's an abnormal exit.
+%%
+%% 5) normal, and {shutdown, _} exit reasons are all treated the same
+%% (i.e. are regarded as normal exits)
+%%
+%% All modifications are (C) 2010-2013 GoPivotal, Inc.
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(supervisor2).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/2, start_link/3,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1,
+ find_child/2, check_childspecs/1]).
+
+%% Internal exports
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+-export([try_again_restart/3]).
+
+%%--------------------------------------------------------------------------
+-ifdef(use_specs).
+-export_type([child_spec/0, startchild_ret/0, strategy/0, sup_name/0]).
+-endif.
+%%--------------------------------------------------------------------------
+
+-ifdef(use_specs).
+-type child() :: 'undefined' | pid().
+-type child_id() :: term().
+-type mfargs() :: {M :: module(), F :: atom(), A :: [term()] | undefined}.
+-type modules() :: [module()] | 'dynamic'.
+-type delay() :: non_neg_integer().
+-type restart() :: 'permanent' | 'transient' | 'temporary' | 'intrinsic' | {'permanent', delay()} | {'transient', delay()} | {'intrinsic', delay()}.
+-type shutdown() :: 'brutal_kill' | timeout().
+-type worker() :: 'worker' | 'supervisor'.
+-type sup_name() :: {'local', Name :: atom()} | {'global', Name :: atom()}.
+-type sup_ref() :: (Name :: atom())
+ | {Name :: atom(), Node :: node()}
+ | {'global', Name :: atom()}
+ | pid().
+-type child_spec() :: {Id :: child_id(),
+ StartFunc :: mfargs(),
+ Restart :: restart(),
+ Shutdown :: shutdown(),
+ Type :: worker(),
+ Modules :: modules()}.
+
+-type strategy() :: 'one_for_all' | 'one_for_one'
+ | 'rest_for_one' | 'simple_one_for_one'.
+-endif.
+
+%%--------------------------------------------------------------------------
+
+-ifdef(use_specs).
+-record(child, {% pid is undefined when child is not running
+ pid = undefined :: child() | {restarting,pid()} | [pid()],
+ name :: child_id(),
+ mfargs :: mfargs(),
+ restart_type :: restart(),
+ shutdown :: shutdown(),
+ child_type :: worker(),
+ modules = [] :: modules()}).
+-type child_rec() :: #child{}.
+-else.
+-record(child, {
+ pid = undefined,
+ name,
+ mfargs,
+ restart_type,
+ shutdown,
+ child_type,
+ modules = []}).
+-endif.
+
+-define(DICT, dict).
+-define(SETS, sets).
+-define(SET, set).
+
+-ifdef(use_specs).
+-record(state, {name,
+ strategy :: strategy(),
+ children = [] :: [child_rec()],
+ dynamics :: ?DICT() | ?SET(),
+ intensity :: non_neg_integer(),
+ period :: pos_integer(),
+ restarts = [],
+ module,
+ args}).
+-type state() :: #state{}.
+-else.
+-record(state, {name,
+ strategy,
+ children = [],
+ dynamics,
+ intensity,
+ period,
+ restarts = [],
+ module,
+ args}).
+-endif.
+
+-define(is_simple(State), State#state.strategy =:= simple_one_for_one).
+-define(is_permanent(R), ((R =:= permanent) orelse
+ (is_tuple(R) andalso
+ tuple_size(R) == 2 andalso
+ element(1, R) =:= permanent))).
+-define(is_explicit_restart(R),
+ R == {shutdown, restart}).
+
+-ifdef(use_specs).
+-callback init(Args :: term()) ->
+ {ok, {{RestartStrategy :: strategy(),
+ MaxR :: non_neg_integer(),
+ MaxT :: non_neg_integer()},
+ [ChildSpec :: child_spec()]}}
+ | ignore.
+-endif.
+-define(restarting(_Pid_), {restarting,_Pid_}).
+
+%%% ---------------------------------------------------
+%%% This is a general process supervisor built upon gen_server.erl.
+%%% Servers/processes should/could also be built using gen_server.erl.
+%%% SupName = {local, atom()} | {global, atom()}.
+%%% ---------------------------------------------------
+-ifdef(use_specs).
+-type startlink_err() :: {'already_started', pid()}
+ | {'shutdown', term()}
+ | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+-spec start_link(Module, Args) -> startlink_ret() when
+ Module :: module(),
+ Args :: term().
+
+-endif.
%% Start an anonymous supervisor2 with callback module Mod.
start_link(Mod, Args) ->
    gen_server:start_link(?MODULE, {self, Mod, Args}, []).

-ifdef(use_specs).
-spec start_link(SupName, Module, Args) -> startlink_ret() when
      SupName :: sup_name(),
      Module :: module(),
      Args :: term().
-endif.
%% Start a registered supervisor2.
start_link(SupName, Mod, Args) ->
    gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []).
+
+%%% ---------------------------------------------------
+%%% Interface functions.
+%%% ---------------------------------------------------
+-ifdef(use_specs).
+-type startchild_err() :: 'already_present'
+ | {'already_started', Child :: child()} | term().
+-type startchild_ret() :: {'ok', Child :: child()}
+ | {'ok', Child :: child(), Info :: term()}
+ | {'error', startchild_err()}.
+
+-spec start_child(SupRef, ChildSpec) -> startchild_ret() when
+ SupRef :: sup_ref(),
+ ChildSpec :: child_spec() | (List :: [term()]).
+-endif.
%% Dynamically add a child, per OTP supervisor:start_child/2.
start_child(Supervisor, ChildSpec) ->
    call(Supervisor, {start_child, ChildSpec}).

-ifdef(use_specs).
-spec restart_child(SupRef, Id) -> Result when
      SupRef :: sup_ref(),
      Id :: child_id(),
      Result :: {'ok', Child :: child()}
              | {'ok', Child :: child(), Info :: term()}
              | {'error', Error},
      Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one' |
               term().
-endif.
%% Restart a terminated child identified by its child-spec id.
restart_child(Supervisor, Name) ->
    call(Supervisor, {restart_child, Name}).

-ifdef(use_specs).
-spec delete_child(SupRef, Id) -> Result when
      SupRef :: sup_ref(),
      Id :: child_id(),
      Result :: 'ok' | {'error', Error},
      Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one'.
-endif.
%% Remove a (terminated) child spec from the supervisor.
delete_child(Supervisor, Name) ->
    call(Supervisor, {delete_child, Name}).
+
+%%-----------------------------------------------------------------
+%% Func: terminate_child/2
+%% Returns: ok | {error, Reason}
+%% Note that the child is *always* terminated in some
+%% way (maybe killed).
+%%-----------------------------------------------------------------
+-ifdef(use_specs).
+-spec terminate_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: pid() | child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'not_found' | 'simple_one_for_one'.
+-endif.
terminate_child(Supervisor, Name) ->
    call(Supervisor, {terminate_child, Name}).

-ifdef(use_specs).
-spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
      SupRef :: sup_ref(),
      Id :: child_id() | undefined,
      Child :: child() | 'restarting',
      Type :: worker(),
      Modules :: modules().
-endif.
which_children(Supervisor) ->
    call(Supervisor, which_children).

-ifdef(use_specs).
-spec count_children(SupRef) -> PropListOfCounts when
      SupRef :: sup_ref(),
      PropListOfCounts :: [Count],
      Count :: {specs, ChildSpecCount :: non_neg_integer()}
             | {active, ActiveProcessCount :: non_neg_integer()}
             | {supervisors, ChildSupervisorCount :: non_neg_integer()}
             |{workers, ChildWorkerCount :: non_neg_integer()}.
-endif.
count_children(Supervisor) ->
    call(Supervisor, count_children).

-ifdef(use_specs).
-spec find_child(Supervisor, Name) -> [pid()] when
      Supervisor :: sup_ref(),
      Name :: child_id().
-endif.
%% supervisor2 addition: pids of all children whose child-spec id
%% matches Name.
find_child(Supervisor, Name) ->
    [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor),
            Name1 =:= Name].

call(Supervisor, Req) ->
    gen_server:call(Supervisor, Req, infinity).
+
+-ifdef(use_specs).
+-spec check_childspecs(ChildSpecs) -> Result when
+ ChildSpecs :: [child_spec()],
+ Result :: 'ok' | {'error', Error :: term()}.
+-endif.
+check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
+ case check_startspec(ChildSpecs) of
+ {ok, _} -> ok;
+ Error -> {error, Error}
+ end;
+check_childspecs(X) -> {error, {badarg, X}}.
+
%%%-----------------------------------------------------------------
%%% Called by timer:apply_after from restart/2
-ifdef(use_specs).
-spec try_again_restart(SupRef, Child, Reason) -> ok when
      SupRef :: sup_ref(),
      Child :: child_id() | pid(),
      Reason :: term().
-endif.
%% Internal asynchronous retry of a failed restart attempt; handled by
%% the {try_again_restart, _, _} handle_cast clauses.
try_again_restart(Supervisor, Child, Reason) ->
    cast(Supervisor, {try_again_restart, Child, Reason}).
+
%% Fire-and-forget request to the supervisor process.
cast(Supervisor, Req) ->
    gen_server:cast(Supervisor, Req).
+
%%% ---------------------------------------------------
%%%
%%% Initialize the supervisor.
%%%
%%% ---------------------------------------------------
-ifdef(use_specs).
-type init_sup_name() :: sup_name() | 'self'.

-type stop_rsn() :: {'shutdown', term()}
                  | {'bad_return', {module(),'init', term()}}
                  | {'bad_start_spec', term()}
                  | {'start_spec', term()}
                  | {'supervisor_data', term()}.

-spec init({init_sup_name(), module(), [term()]}) ->
        {'ok', state()} | 'ignore' | {'stop', stop_rsn()}.
-endif.
%% gen_server init/1 callback.  Traps exits so child 'EXIT's arrive as
%% messages, asks the callback module for its flags and start spec,
%% then builds the initial state and starts the declared children.
init({SupName, Mod, Args}) ->
    process_flag(trap_exit, true),
    case Mod:init(Args) of
        {ok, {SupFlags, StartSpec}} ->
            case init_state(SupName, SupFlags, Mod, Args) of
                {ok, State} when ?is_simple(State) ->
                    %% simple_one_for_one: keep the single template spec
                    init_dynamic(State, StartSpec);
                {ok, State} ->
                    %% start all statically declared children now
                    init_children(State, StartSpec);
                Error ->
                    {stop, {supervisor_data, Error}}
            end;
        ignore ->
            ignore;
        Error ->
            {stop, {bad_return, {Mod, init, Error}}}
    end.
+
%% Validate StartSpec and launch every declared child in order.  If any
%% child fails to start, the already-started ones are terminated and
%% supervisor initialisation is aborted.
init_children(State, StartSpec) ->
    SupName = State#state.name,
    case check_startspec(StartSpec) of
        {ok, Children} ->
            case start_children(Children, SupName) of
                {ok, Started} ->
                    {ok, State#state{children = Started}};
                {error, Started, Reason} ->
                    terminate_children(Started, SupName),
                    {stop, {shutdown, Reason}}
            end;
        Error ->
            {stop, {start_spec, Error}}
    end.
+
%% simple_one_for_one supervisors take exactly one child spec, used as
%% the template for all dynamically started children.
init_dynamic(State, [StartSpec]) ->
    case check_startspec([StartSpec]) of
        {ok, Children} -> {ok, State#state{children = Children}};
        Error          -> {stop, {start_spec, Error}}
    end;
init_dynamic(_State, StartSpec) ->
    %% anything other than a single spec is a configuration error
    {stop, {bad_start_spec, StartSpec}}.
+
%%-----------------------------------------------------------------
%% Func: start_children/2
%% Args: Children = [child_rec()] in start order
%%       SupName = {local, atom()} | {global, atom()} | {pid(), Mod}
%% Purpose: Start all children.  The new list contains #child's
%%          with pids.
%% Returns: {ok, NChildren} | {error, NChildren, Reason}
%%          NChildren = [child_rec()] in termination order (reversed
%%          start order)
%%-----------------------------------------------------------------
start_children(Children, SupName) ->
    start_children(Children, [], SupName).

start_children([Child | Rest], Acc, SupName) ->
    case do_start_child(SupName, Child) of
        {ok, undefined} when Child#child.restart_type =:= temporary ->
            %% temporary child whose start returned 'ignore': drop it
            start_children(Rest, Acc, SupName);
        {ok, Pid} ->
            start_children(Rest, [Child#child{pid = Pid} | Acc], SupName);
        {ok, Pid, _Extra} ->
            start_children(Rest, [Child#child{pid = Pid} | Acc], SupName);
        {error, Reason} ->
            report_error(start_error, Reason, Child, SupName),
            %% hand back every child (started or not) so the caller can
            %% terminate the started ones
            {error, lists:reverse(Rest) ++ [Child | Acc],
             {failed_to_start_child, Child#child.name, Reason}}
    end;
start_children([], Acc, _SupName) ->
    {ok, Acc}.
+
%% Invoke a child's start MFA and normalise the outcome, emitting a
%% progress report on success.  The old-style 'catch' deliberately
%% converts any exception or malformed return into {error, _} so a
%% crashing start function cannot take the supervisor down.
do_start_child(SupName, Child) ->
    #child{mfargs = {M, F, Args}} = Child,
    case catch apply(M, F, Args) of
        {ok, Pid} when is_pid(Pid) ->
            NChild = Child#child{pid = Pid},
            report_progress(NChild, SupName),
            {ok, Pid};
        {ok, Pid, Extra} when is_pid(Pid) ->
            NChild = Child#child{pid = Pid},
            report_progress(NChild, SupName),
            {ok, Pid, Extra};
        ignore ->
            %% child declined to start; treated as started with no pid
            {ok, undefined};
        {error, What} -> {error, What};
        What -> {error, What}
    end.
+
%% As do_start_child/2 but without the progress report; used when
%% starting dynamic (simple_one_for_one) children.
do_start_child_i(M, F, A) ->
    case catch apply(M, F, A) of
        {ok, Pid} when is_pid(Pid) ->
            {ok, Pid};
        {ok, Pid, Extra} when is_pid(Pid) ->
            {ok, Pid, Extra};
        ignore ->
            {ok, undefined};
        {error, Error} ->
            {error, Error};
        What ->
            %% exception or malformed return, normalised to an error
            {error, What}
    end.
+
%%% ---------------------------------------------------
%%%
%%% Callback functions.
%%%
%%% ---------------------------------------------------
-ifdef(use_specs).
-type call() :: 'which_children' | 'count_children' | {_, _}. % XXX: refine
-spec handle_call(call(), term(), state()) -> {'reply', term(), state()}.
-endif.
%% simple_one_for_one start_child: start a dynamic child from the single
%% template spec, appending the caller-supplied extra arguments.
handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) ->
    Child = hd(State#state.children),
    #child{mfargs = {M, F, A}} = Child,
    Args = A ++ EArgs,
    case do_start_child_i(M, F, Args) of
        {ok, undefined} when Child#child.restart_type =:= temporary ->
            %% 'ignore' from a temporary child: nothing to record
            {reply, {ok, undefined}, State};
        {ok, Pid} ->
            NState = save_dynamic_child(Child#child.restart_type, Pid, Args, State),
            {reply, {ok, Pid}, NState};
        {ok, Pid, Extra} ->
            NState = save_dynamic_child(Child#child.restart_type, Pid, Args, State),
            {reply, {ok, Pid, Extra}, NState};
        What ->
            {reply, What, State}
    end;

%% terminate_child for simple_one_for_one can only be done with pid
handle_call({terminate_child, Name}, _From, State) when not is_pid(Name),
                                                        ?is_simple(State) ->
    {reply, {error, simple_one_for_one}, State};

handle_call({terminate_child, Name}, _From, State) ->
    case get_child(Name, State, ?is_simple(State)) of
        {value, Child} ->
            case do_terminate(Child, State#state.name) of
                %% temporary (or dynamic) children are removed entirely;
                %% others are kept in the list with pid = undefined
                #child{restart_type=RT} when RT=:=temporary; ?is_simple(State) ->
                    {reply, ok, state_del_child(Child, State)};
                NChild ->
                    {reply, ok, replace_child(NChild, State)}
            end;
        false ->
            {reply, {error, not_found}, State}
    end;

%%% The requests delete_child and restart_child are invalid for
%%% simple_one_for_one supervisors.
handle_call({_Req, _Data}, _From, State) when ?is_simple(State) ->
    {reply, {error, simple_one_for_one}, State};

%% non-simple start_child: validate the spec and add it to the state
handle_call({start_child, ChildSpec}, _From, State) ->
    case check_childspec(ChildSpec) of
        {ok, Child} ->
            {Resp, NState} = handle_start_child(Child, State),
            {reply, Resp, NState};
        What ->
            {reply, {error, What}, State}
    end;

%% restart_child: only valid for a known child that is not running and
%% not already queued for a delayed restart
handle_call({restart_child, Name}, _From, State) ->
    case get_child(Name, State) of
        {value, Child} when Child#child.pid =:= undefined ->
            case do_start_child(State#state.name, Child) of
                {ok, Pid} ->
                    NState = replace_child(Child#child{pid = Pid}, State),
                    {reply, {ok, Pid}, NState};
                {ok, Pid, Extra} ->
                    NState = replace_child(Child#child{pid = Pid}, State),
                    {reply, {ok, Pid, Extra}, NState};
                Error ->
                    {reply, Error, State}
            end;
        {value, #child{pid=?restarting(_)}} ->
            {reply, {error, restarting}, State};
        {value, _} ->
            {reply, {error, running}, State};
        _ ->
            {reply, {error, not_found}, State}
    end;

%% delete_child: only valid for a known child that is not running and
%% not awaiting restart
handle_call({delete_child, Name}, _From, State) ->
    case get_child(Name, State) of
        {value, Child} when Child#child.pid =:= undefined ->
            NState = remove_child(Child, State),
            {reply, ok, NState};
        {value, #child{pid=?restarting(_)}} ->
            {reply, {error, restarting}, State};
        {value, _} ->
            {reply, {error, running}, State};
        _ ->
            {reply, {error, not_found}, State}
    end;

%% simple_one_for_one + temporary children: pids live in a set
handle_call(which_children, _From, #state{children = [#child{restart_type = temporary,
                                                             child_type = CT,
                                                             modules = Mods}]} =
                State) when ?is_simple(State) ->
    Reply = lists:map(fun(Pid) -> {undefined, Pid, CT, Mods} end,
                      ?SETS:to_list(dynamics_db(temporary, State#state.dynamics))),
    {reply, Reply, State};

%% simple_one_for_one + restartable children: pids (or ?restarting
%% markers) are the keys of the dynamics dict
handle_call(which_children, _From, #state{children = [#child{restart_type = RType,
                                                             child_type = CT,
                                                             modules = Mods}]} =
                State) when ?is_simple(State) ->
    Reply = lists:map(fun({?restarting(_),_}) -> {undefined,restarting,CT,Mods};
                         ({Pid, _}) -> {undefined, Pid, CT, Mods} end,
                      ?DICT:to_list(dynamics_db(RType, State#state.dynamics))),
    {reply, Reply, State};

%% ordinary supervisor: one entry per child in the children list
handle_call(which_children, _From, State) ->
    Resp =
        lists:map(fun(#child{pid = ?restarting(_), name = Name,
                             child_type = ChildType, modules = Mods}) ->
                          {Name, restarting, ChildType, Mods};
                     (#child{pid = Pid, name = Name,
                             child_type = ChildType, modules = Mods}) ->
                          {Name, Pid, ChildType, Mods}
                  end,
                  State#state.children),
    {reply, Resp, State};


%% simple_one_for_one + temporary children: count live pids in the set
handle_call(count_children, _From, #state{children = [#child{restart_type = temporary,
                                                             child_type = CT}]} = State)
  when ?is_simple(State) ->
    {Active, Count} =
        ?SETS:fold(fun(Pid, {Alive, Tot}) ->
                           case is_pid(Pid) andalso is_process_alive(Pid) of
                               true -> {Alive + 1, Tot + 1};
                               false ->
                                   {Alive, Tot + 1}
                           end
                   end, {0, 0}, dynamics_db(temporary, State#state.dynamics)),
    Reply = case CT of
                supervisor -> [{specs, 1}, {active, Active},
                               {supervisors, Count}, {workers, 0}];
                worker -> [{specs, 1}, {active, Active},
                           {supervisors, 0}, {workers, Count}]
            end,
    {reply, Reply, State};

%% simple_one_for_one + restartable children: count live keys of the dict
handle_call(count_children, _From, #state{children = [#child{restart_type = RType,
                                                             child_type = CT}]} = State)
  when ?is_simple(State) ->
    {Active, Count} =
        ?DICT:fold(fun(Pid, _Val, {Alive, Tot}) ->
                           case is_pid(Pid) andalso is_process_alive(Pid) of
                               true ->
                                   {Alive + 1, Tot + 1};
                               false ->
                                   {Alive, Tot + 1}
                           end
                   end, {0, 0}, dynamics_db(RType, State#state.dynamics)),
    Reply = case CT of
                supervisor -> [{specs, 1}, {active, Active},
                               {supervisors, Count}, {workers, 0}];
                worker -> [{specs, 1}, {active, Active},
                           {supervisors, 0}, {workers, Count}]
            end,
    {reply, Reply, State};

handle_call(count_children, _From, State) ->
    %% Specs and children are together on the children list...
    {Specs, Active, Supers, Workers} =
        lists:foldl(fun(Child, Counts) ->
                            count_child(Child, Counts)
                    end, {0,0,0,0}, State#state.children),

    %% Reformat counts to a property list.
    Reply = [{specs, Specs}, {active, Active},
             {supervisors, Supers}, {workers, Workers}],
    {reply, Reply, State}.
+
+
%% Fold one child into the {Specs, Active, Supers, Workers} counters:
%% every child counts as a spec and as a worker or supervisor; only
%% children with a live local pid count as active.
count_child(#child{pid = Pid, child_type = ChildType},
            {Specs, Active, Supers, Workers}) ->
    NewActive = case is_pid(Pid) andalso is_process_alive(Pid) of
                    true  -> Active + 1;
                    false -> Active
                end,
    case ChildType of
        worker     -> {Specs + 1, NewActive, Supers, Workers + 1};
        supervisor -> {Specs + 1, NewActive, Supers + 1, Workers}
    end.
+
+
+%%% If a restart attempt failed, this message is sent via
+%%% timer:apply_after(0,...) in order to give gen_server the chance to
+%%% check it's inbox before trying again.
+-ifdef(use_specs).
+-spec handle_cast({try_again_restart, child_id() | pid(), term()}, state()) ->
+ {'noreply', state()} | {stop, shutdown, state()}.
+-endif.
+handle_cast({try_again_restart,Pid,Reason}, #state{children=[Child]}=State)
+ when ?is_simple(State) ->
+ RT = Child#child.restart_type,
+ RPid = restarting(Pid),
+ case dynamic_child_args(RPid, dynamics_db(RT, State#state.dynamics)) of
+ {ok, Args} ->
+ {M, F, _} = Child#child.mfargs,
+ NChild = Child#child{pid = RPid, mfargs = {M, F, Args}},
+ try_restart(Child#child.restart_type, Reason, NChild, State);
+ error ->
+ {noreply, State}
+ end;
+
+handle_cast({try_again_restart,Name,Reason}, State) ->
+ %% we still support >= R12-B3 in which lists:keyfind/3 doesn't exist
+ case lists:keysearch(Name,#child.name,State#state.children) of
+ {value, Child = #child{pid=?restarting(_), restart_type=RestartType}} ->
+ try_restart(RestartType, Reason, Child, State);
+ _ ->
+ {noreply,State}
+ end.
+
%%
%% Take care of terminated children.
%%
-ifdef(use_specs).
-spec handle_info(term(), state()) ->
        {'noreply', state()} | {'stop', 'shutdown', state()}.
-endif.
%% A linked child exited (we trap exits, see init/1): apply the restart
%% policy, possibly shutting the supervisor down.
handle_info({'EXIT', Pid, Reason}, State) ->
    case restart_child(Pid, Reason, State) of
        {ok, State1} ->
            {noreply, State1};
        {shutdown, State1} ->
            {stop, shutdown, State1}
    end;

%% A delayed restart timer (see do_restart_delay/4) fired.
handle_info({delayed_restart, {RestartType, Reason, Child}}, State)
  when ?is_simple(State) ->
    try_restart(RestartType, Reason, Child, State#state{restarts = []}); %% [1]
handle_info({delayed_restart, {RestartType, Reason, Child}}, State) ->
    %% only retry if the child spec still exists
    case get_child(Child#child.name, State) of
        {value, Child1} ->
            try_restart(RestartType, Reason, Child1,
                        State#state{restarts = []}); %% [1]
        _What ->
            {noreply, State}
    end;
%% [1] When we receive a delayed_restart message we want to reset the
%% restarts field since otherwise the MaxT might not have elapsed and
%% we would just delay again and again. Since a common use of the
%% delayed restart feature is for MaxR = 1, MaxT = some huge number
%% (so that we don't end up bouncing around in non-delayed restarts)
%% this is important.

%% Drain anything unexpected so the mailbox cannot accumulate.
handle_info(Msg, State) ->
    error_logger:error_msg("Supervisor received unexpected message: ~p~n",
                           [Msg]),
    {noreply, State}.
+
%%
%% Terminate this server.
%%
-ifdef(use_specs).
-spec terminate(term(), state()) -> 'ok'.
-endif.
%% For simple_one_for_one the (possibly very many) dynamic children go
%% through the bulk shutdown path; otherwise the static children are
%% shut down in termination order.
terminate(_Reason, #state{children=[Child]} = State) when ?is_simple(State) ->
    terminate_dynamic_children(Child, dynamics_db(Child#child.restart_type,
                                                  State#state.dynamics),
                               State#state.name);
terminate(_Reason, State) ->
    terminate_children(State#state.children, State#state.name).
+
%%
%% Change code for the supervisor.
%% Call the new call-back module and fetch the new start specification.
%% Combine the new spec. with the old. If the new start spec. is
%% not valid the code change will not succeed.
%% Use the old Args as argument to Module:init/1.
%% NOTE: This requires that the init function of the call-back module
%% does not have any side effects.
%%
-ifdef(use_specs).
-spec code_change(term(), state(), term()) ->
        {'ok', state()} | {'error', term()}.
-endif.
code_change(_, State, _) ->
    case (State#state.module):init(State#state.args) of
        {ok, {SupFlags, StartSpec}} ->
            %% the 'catch' also turns exceptions thrown by the valid*
            %% checks inside check_flags/1 into an error reply
            case catch check_flags(SupFlags) of
                ok ->
                    {Strategy, MaxIntensity, Period} = SupFlags,
                    update_childspec(State#state{strategy = Strategy,
                                                 intensity = MaxIntensity,
                                                 period = Period},
                                     StartSpec);
                Error ->
                    {error, Error}
            end;
        ignore ->
            {ok, State};
        Error ->
            Error
    end.
+
%% Validate supervisor flags; failures in the valid* checks propagate
%% as exceptions to the (catch ...) in code_change/3.
check_flags({Strategy, MaxIntensity, Period}) ->
    validStrategy(Strategy),
    validIntensity(MaxIntensity),
    validPeriod(Period),
    ok;
check_flags(What) ->
    {bad_flags, What}.
+
%% Merge a new start spec into the state during code_change.  For
%% simple_one_for_one only a single (template) spec is legal; otherwise
%% old children are matched by name so running pids are retained (see
%% update_childspec1/3).
update_childspec(State, StartSpec) when ?is_simple(State) ->
    case check_startspec(StartSpec) of
        {ok, [Child]} -> {ok, State#state{children = [Child]}};
        Error         -> {error, Error}
    end;
update_childspec(State, StartSpec) ->
    case check_startspec(StartSpec) of
        {ok, Children} ->
            OldChildren = State#state.children, % In reverse start order !
            MergedChildren = update_childspec1(OldChildren, Children, []),
            {ok, State#state{children = MergedChildren}};
        Error ->
            {error, Error}
    end.
+
%% Walk the old children: any old child present in the new spec keeps
%% its pid (via update_chsp/2); any child no longer mentioned is kept
%% unchanged.
update_childspec1([OldChild | OldRest], NewChildren, KeepOld) ->
    case update_chsp(OldChild, NewChildren) of
        {ok, UpdatedChildren} ->
            update_childspec1(OldRest, UpdatedChildren, KeepOld);
        false ->
            update_childspec1(OldRest, NewChildren, [OldChild | KeepOld])
    end;
update_childspec1([], NewChildren, KeepOld) ->
    %% Return them in (kept) reverse start order.
    lists:reverse(NewChildren ++ KeepOld).
+
%% Copy OldCh's pid onto the same-named child in Children.  Whether a
%% replacement happened is detected by comparing the mapped list with
%% the original: an identical result means OldCh's name was not found.
update_chsp(OldCh, Children) ->
    case lists:map(fun(Ch) when OldCh#child.name =:= Ch#child.name ->
                           Ch#child{pid = OldCh#child.pid};
                      (Ch) ->
                           Ch
                   end,
                   Children) of
        Children ->
            false; % OldCh not found in new spec.
        NewC ->
            {ok, NewC}
    end.
+
%%% ---------------------------------------------------
%%% Start a new child.
%%% ---------------------------------------------------

%% Start a freshly checked child spec unless a child with the same name
%% already exists.  Returns {Reply, NState} where Reply mirrors
%% supervisor:start_child/2's documented return values.
handle_start_child(Child, State) ->
    case get_child(Child#child.name, State) of
        {value, #child{pid = OldPid}} when is_pid(OldPid) ->
            {{error, {already_started, OldPid}}, State};
        {value, _Existing} ->
            {{error, already_present}, State};
        false ->
            case do_start_child(State#state.name, Child) of
                {ok, undefined} when Child#child.restart_type =:= temporary ->
                    %% 'ignore' from a temporary child: don't record it
                    {{ok, undefined}, State};
                {ok, Pid} ->
                    {{ok, Pid}, save_child(Child#child{pid = Pid}, State)};
                {ok, Pid, Extra} ->
                    {{ok, Pid, Extra}, save_child(Child#child{pid = Pid}, State)};
                {error, What} ->
                    {{error, {What, Child}}, State}
            end
    end.
+
%%% ---------------------------------------------------
%%% Restart. A process has terminated.
%%% Returns: {ok, state()} | {shutdown, state()}
%%% ---------------------------------------------------

%% simple_one_for_one: look the pid up in the dynamics store and rebuild
%% the template spec with the stored start args; unknown pids are
%% ignored.
restart_child(Pid, Reason, #state{children = [Child]} = State) when ?is_simple(State) ->
    RestartType = Child#child.restart_type,
    case dynamic_child_args(Pid, dynamics_db(RestartType, State#state.dynamics)) of
        {ok, Args} ->
            {M, F, _} = Child#child.mfargs,
            NChild = Child#child{pid = Pid, mfargs = {M, F, Args}},
            do_restart(RestartType, Reason, NChild, State);
        error ->
            {ok, State}
    end;

restart_child(Pid, Reason, State) ->
    Children = State#state.children,
    %% we still support >= R12-B3 in which lists:keyfind/3 doesn't exist
    case lists:keysearch(Pid, #child.pid, Children) of
        {value, #child{restart_type = RestartType} = Child} ->
            do_restart(RestartType, Reason, Child, State);
        false ->
            %% not one of ours (e.g. an already-removed temporary child)
            {ok, State}
    end.
+
%% Apply the restart policy and shape the outcome as a gen_server
%% callback result (no error report here -- that was already issued on
%% the first attempt).
try_restart(RestartType, Reason, Child, State) ->
    case handle_restart(RestartType, Reason, Child, State) of
        {ok, NState}       -> {noreply, NState};
        {shutdown, NState} -> {stop, shutdown, NState}
    end.
+
%% Report the child's termination (when the policy warrants it) and
%% then apply the restart policy.
do_restart(RestartType, Reason, Child, State) ->
    maybe_report_error(RestartType, Reason, Child, State),
    handle_restart(RestartType, Reason, Child, State).
+
%% Permanent children always have their termination reported; for any
%% other restart type only abnormal exits are reported.
maybe_report_error(permanent, Reason, Child, State) ->
    report_child_termination(Reason, Child, State);
maybe_report_error({permanent, _}, Reason, Child, State) ->
    report_child_termination(Reason, Child, State);
maybe_report_error(_Type, Reason, Child, State) ->
    case is_abnormal_termination(Reason) of
        true -> report_child_termination(Reason, Child, State);
        false -> ok
    end.
+
%% Emit a child_terminated supervisor error report.
report_child_termination(Reason, Child, State) ->
    report_error(child_terminated, Reason, Child, State#state.name).
+
%% Dispatch on the child's restart type.  Plain atoms restart (or not)
%% immediately; {Type, Delay} tuples defer a failed restart via
%% do_restart_delay/4 once restart intensity is exhausted.
handle_restart(permanent, _Reason, Child, State) ->
    %% permanent: always restart, regardless of exit reason
    restart(Child, State);
handle_restart(transient, Reason, Child, State) ->
    %% transient: restart only on abnormal (or explicit) termination
    restart_if_explicit_or_abnormal(fun restart/2,
                                    fun delete_child_and_continue/2,
                                    Reason, Child, State);
handle_restart(intrinsic, Reason, Child, State) ->
    %% intrinsic: like transient, but a normal exit also stops the
    %% supervisor itself
    restart_if_explicit_or_abnormal(fun restart/2,
                                    fun delete_child_and_stop/2,
                                    Reason, Child, State);
handle_restart(temporary, _Reason, Child, State) ->
    %% temporary: never restart
    delete_child_and_continue(Child, State);
handle_restart({permanent, _Delay}=Restart, Reason, Child, State) ->
    do_restart_delay(Restart, Reason, Child, State);
handle_restart({transient, _Delay}=Restart, Reason, Child, State) ->
    restart_if_explicit_or_abnormal(defer_to_restart_delay(Restart, Reason),
                                    fun delete_child_and_continue/2,
                                    Reason, Child, State);
handle_restart({intrinsic, _Delay}=Restart, Reason, Child, State) ->
    restart_if_explicit_or_abnormal(defer_to_restart_delay(Restart, Reason),
                                    fun delete_child_and_stop/2,
                                    Reason, Child, State).
+
%% Run RestartHow/2 when the exit reason warrants a restart (an explicit
%% restart request or an abnormal termination); otherwise Otherwise/2.
restart_if_explicit_or_abnormal(RestartHow, Otherwise, Reason, Child, State) ->
    ShouldRestart = ?is_explicit_restart(Reason)
        orelse is_abnormal_termination(Reason),
    case ShouldRestart of
        true  -> RestartHow(Child, State);
        false -> Otherwise(Child, State)
    end.
+
%% Curried do_restart_delay/4, used as the restart action in
%% restart_if_explicit_or_abnormal/5 for {Type, Delay} restart types.
defer_to_restart_delay(Restart, Reason) ->
    fun(Child, State) -> do_restart_delay(Restart, Reason, Child, State) end.
+
%% Drop the child from the state and keep the supervisor running.
delete_child_and_continue(Child, State) ->
    {ok, state_del_child(Child, State)}.

%% Drop the child and shut the supervisor down (intrinsic children).
delete_child_and_stop(Child, State) ->
    {shutdown, state_del_child(Child, State)}.
+
%% An exit is "normal" (i.e. not worth restarting/reporting) when the
%% reason is normal, shutdown or {shutdown, _}; anything else is
%% abnormal.
is_abnormal_termination(Reason) ->
    case Reason of
        normal        -> false;
        shutdown      -> false;
        {shutdown, _} -> false;
        _Other        -> true
    end.
+
%% {Type, Delay} policies: try an immediate restart while within the
%% restart intensity; once intensity is exhausted, schedule a
%% delayed_restart message Delay seconds from now (trunc(Delay*1000)
%% ms) instead of dying.
do_restart_delay({RestartType, Delay}, Reason, Child, State) ->
    case add_restart(State) of
        {ok, NState} ->
            maybe_restart(NState#state.strategy, Child, NState);
        {terminate, _NState} ->
            %% we've reached the max restart intensity, but the
            %% add_restart will have added to the restarts
            %% field. Given we don't want to die here, we need to go
            %% back to the old restarts field otherwise we'll never
            %% attempt to restart later, which is why we ignore
            %% NState for this clause.
            _TRef = erlang:send_after(trunc(Delay*1000), self(),
                                      {delayed_restart,
                                       {{RestartType, Delay}, Reason, Child}}),
            {ok, state_del_child(Child, State)}
    end.
+
%% Count this restart against the intensity window, then restart per
%% the supervisor's strategy -- or report and give up (shutting the
%% supervisor down) if the intensity was exceeded.
restart(Child, State) ->
    case add_restart(State) of
        {ok, NState} ->
            maybe_restart(NState#state.strategy, Child, NState);
        {terminate, NState} ->
            report_error(shutdown, reached_max_restart_intensity,
                         Child, State#state.name),
            {shutdown, remove_child(Child, NState)}
    end.
+
%% Run the strategy-specific restart.  A {try_again, _, _} outcome is
%% re-queued asynchronously (timer:apply_after(0, ...) ->
%% try_again_restart/3) so the gen_server loop regains control between
%% attempts.
maybe_restart(Strategy, Child, State) ->
    case restart(Strategy, Child, State) of
        {try_again, Reason, NState2} ->
            %% Leaving control back to gen_server before
            %% trying again. This way other incoming requests
            %% for the supervisor can be handled - e.g. a
            %% shutdown request for the supervisor or the
            %% child.
            Id = if ?is_simple(State) -> Child#child.pid;
                    true -> Child#child.name
                 end,
            timer:apply_after(0,?MODULE,try_again_restart,[self(),Id,Reason]),
            {ok,NState2};
        Other ->
            Other
    end.
+
%% Strategy-specific restart of one terminated child.
%% Returns {ok, NState} | {try_again, Reason, NState}.
restart(simple_one_for_one, Child, State) ->
    #child{pid = OldPid, mfargs = {M, F, A}} = Child,
    %% drop the dead pid from the dynamics dict before re-starting
    Dynamics = ?DICT:erase(OldPid, dynamics_db(Child#child.restart_type,
                                               State#state.dynamics)),
    case do_start_child_i(M, F, A) of
        {ok, Pid} ->
            NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)},
            {ok, NState};
        {ok, Pid, _Extra} ->
            NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)},
            {ok, NState};
        {error, Error} ->
            %% keep the args under a ?restarting key so a later retry
            %% can find them again
            NState = State#state{dynamics = ?DICT:store(restarting(OldPid), A,
                                                        Dynamics)},
            report_error(start_error, Error, Child, State#state.name),
            {try_again, Error, NState}
    end;
restart(one_for_one, Child, State) ->
    OldPid = Child#child.pid,
    case do_start_child(State#state.name, Child) of
        {ok, Pid} ->
            NState = replace_child(Child#child{pid = Pid}, State),
            {ok, NState};
        {ok, Pid, _Extra} ->
            NState = replace_child(Child#child{pid = Pid}, State),
            {ok, NState};
        {error, Reason} ->
            NState = replace_child(Child#child{pid = restarting(OldPid)}, State),
            report_error(start_error, Reason, Child, State#state.name),
            {try_again, Reason, NState}
    end;
restart(rest_for_one, Child, State) ->
    %% terminate and restart the failed child and everything started
    %% after it (children list is in reverse start order)
    {ChAfter, ChBefore} = split_child(Child#child.pid, State#state.children),
    ChAfter2 = terminate_children(ChAfter, State#state.name),
    case start_children(ChAfter2, State#state.name) of
        {ok, ChAfter3} ->
            {ok, State#state{children = ChAfter3 ++ ChBefore}};
        {error, ChAfter3, Reason} ->
            NChild = Child#child{pid=restarting(Child#child.pid)},
            NState = State#state{children = ChAfter3 ++ ChBefore},
            {try_again, Reason, replace_child(NChild,NState)}
    end;
restart(one_for_all, Child, State) ->
    %% terminate and restart every child
    Children1 = del_child(Child#child.pid, State#state.children),
    Children2 = terminate_children(Children1, State#state.name),
    case start_children(Children2, State#state.name) of
        {ok, NChs} ->
            {ok, State#state{children = NChs}};
        {error, NChs, Reason} ->
            NChild = Child#child{pid=restarting(Child#child.pid)},
            NState = State#state{children = NChs},
            {try_again, Reason, replace_child(NChild,NState)}
    end.
+
%% Wrap a live pid in the ?restarting marker; values that are not pids
%% (including already-wrapped ones) pass through unchanged.
restarting(P) ->
    case is_pid(P) of
        true  -> ?restarting(P);
        false -> P
    end.
+
%%-----------------------------------------------------------------
%% Func: terminate_children/2
%% Args: Children = [child_rec()] in termination order
%%       SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
%% Returns: NChildren = [child_rec()] in
%%          startup order (reversed termination order)
%%-----------------------------------------------------------------
terminate_children(Children, SupName) ->
    terminate_children(Children, SupName, []).

%% Temporary children are shut down like all the others, but they are
%% excluded from the returned list since they must never be restarted
%% (many callers use this function simply to clear everything).
terminate_children([#child{restart_type = temporary} = Child | Rest],
                   SupName, Acc) ->
    do_terminate(Child, SupName),
    terminate_children(Rest, SupName, Acc);
terminate_children([Child | Rest], SupName, Acc) ->
    Stopped = do_terminate(Child, SupName),
    terminate_children(Rest, SupName, [Stopped | Acc]);
terminate_children([], _SupName, Acc) ->
    Acc.
+
%% Shut one child down (if it has a live pid) and clear its pid field.
%% A 'normal' exit only counts as an error for permanent children.
do_terminate(Child, SupName) when is_pid(Child#child.pid) ->
    case shutdown(Child#child.pid, Child#child.shutdown) of
        ok ->
            ok;
        {error, normal} when not ?is_permanent(Child#child.restart_type) ->
            ok;
        {error, OtherReason} ->
            report_error(shutdown_error, OtherReason, Child, SupName)
    end,
    Child#child{pid = undefined};
do_terminate(Child, _SupName) ->
    %% nothing running (pid is undefined or a ?restarting marker)
    Child#child{pid = undefined}.
+
%%-----------------------------------------------------------------
%% Shuts down a child. We must check the EXIT value
%% of the child, because it might have died with another reason than
%% the wanted. In that case we want to report the error. We put a
%% monitor on the child and check for the 'DOWN' message instead of
%% checking for the 'EXIT' message, because if we check the 'EXIT'
%% message a "naughty" child, who does unlink(Sup), could hang the
%% supervisor.
%% Returns: ok | {error, OtherReason} (this should be reported)
%%-----------------------------------------------------------------
shutdown(Pid, brutal_kill) ->
    case monitor_child(Pid) of
        ok ->
            exit(Pid, kill),
            receive
                {'DOWN', _MRef, process, Pid, killed} ->
                    ok;
                {'DOWN', _MRef, process, Pid, OtherReason} ->
                    {error, OtherReason}
            end;
        {error, Reason} ->
            {error, Reason}
    end;
shutdown(Pid, Time) ->
    case monitor_child(Pid) of
        ok ->
            exit(Pid, shutdown), %% Try to shutdown gracefully
            receive
                {'DOWN', _MRef, process, Pid, shutdown} ->
                    ok;
                {'DOWN', _MRef, process, Pid, OtherReason} ->
                    {error, OtherReason}
            after Time ->
                    exit(Pid, kill), %% Force termination.
                    receive
                        {'DOWN', _MRef, process, Pid, OtherReason} ->
                            {error, OtherReason}
                    end
            end;
        {error, Reason} ->
            {error, Reason}
    end.
+
%% Help function to shutdown/2 switches from link to monitor approach
monitor_child(Pid) ->

    %% Do the monitor operation first so that if the child dies
    %% before the monitoring is done causing a 'DOWN'-message with
    %% reason noproc, we will get the real reason in the 'EXIT'-message
    %% unless a naughty child has already done unlink...
    erlang:monitor(process, Pid),
    unlink(Pid),

    receive
        %% If the child dies before the unlink we must empty
        %% the mail-box of the 'EXIT'-message and the 'DOWN'-message.
        {'EXIT', Pid, Reason} ->
            receive
                {'DOWN', _, process, Pid, _} ->
                    {error, Reason}
            end
    after 0 ->
            %% If a naughty child did unlink and the child dies before
            %% monitor the result will be that shutdown/2 receives a
            %% 'DOWN'-message with reason noproc.
            %% If the child should die after the unlink there
            %% will be a 'DOWN'-message with a correct reason
            %% that will be handled in shutdown/2.
            ok
    end.
+
+
%%-----------------------------------------------------------------
%% Func: terminate_dynamic_children/3
%% Args: Child    = child_rec()
%%       Dynamics = ?DICT() | ?SET()
%%       SupName  = {local, atom()} | {global, atom()} | {pid(),Mod}
%% Returns: ok
%%
%%
%% Shutdown all dynamic children. This happens when the supervisor is
%% stopped. Because the supervisor can have millions of dynamic children, we
%% can have a significant overhead here.
%%-----------------------------------------------------------------
terminate_dynamic_children(Child, Dynamics, SupName) ->
    {Pids, EStack0} = monitor_dynamic_children(Child, Dynamics),
    Sz = ?SETS:size(Pids),
    EStack = case Child#child.shutdown of
                 brutal_kill ->
                     ?SETS:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
                     wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
                 infinity ->
                     ?SETS:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
                     wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
                 Time ->
                     %% graceful shutdown with a single shared timer; on
                     %% expiry the stragglers get killed (see
                     %% wait_dynamic_children/5)
                     ?SETS:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
                     TRef = erlang:start_timer(Time, self(), kill),
                     wait_dynamic_children(Child, Pids, Sz, TRef, EStack0)
             end,
    %% Unroll stacked errors and report them
    ?DICT:fold(fun(Reason, Ls, _) ->
                       report_error(shutdown_error, Reason,
                                    Child#child{pid=Ls}, SupName)
               end, ok, EStack).
+
+
%% Monitor every dynamic child prior to bulk shutdown, accumulating
%% {PidsToWaitFor, ErrorsByReason}.  Children that already exited
%% normally are dropped; other early exits are stacked as errors.
monitor_dynamic_children(#child{restart_type=temporary}, Dynamics) ->
    %% temporary children live in a set of pids
    ?SETS:fold(fun(P, {Pids, EStack}) ->
                       case monitor_child(P) of
                           ok ->
                               {?SETS:add_element(P, Pids), EStack};
                           {error, normal} ->
                               {Pids, EStack};
                           {error, Reason} ->
                               {Pids, ?DICT:append(Reason, P, EStack)}
                       end
               end, {?SETS:new(), ?DICT:new()}, Dynamics);
monitor_dynamic_children(#child{restart_type=RType}, Dynamics) ->
    %% restartable children live in a dict keyed by pid (or by a
    %% ?restarting marker, which has no process to monitor and is
    %% skipped)
    ?DICT:fold(fun(P, _, {Pids, EStack}) when is_pid(P) ->
                       case monitor_child(P) of
                           ok ->
                               {?SETS:add_element(P, Pids), EStack};
                           {error, normal} when not ?is_permanent(RType) ->
                               {Pids, EStack};
                           {error, Reason} ->
                               {Pids, ?DICT:append(Reason, P, EStack)}
                       end;
                  (?restarting(_), _, {Pids, EStack}) ->
                       {Pids, EStack}
               end, {?SETS:new(), ?DICT:new()}, Dynamics).
+
%% Collect 'DOWN' messages for Sz monitored children, stacking
%% unexpected exit reasons; a shared kill-timer (TRef) hard-kills any
%% stragglers when a graceful shutdown times out.
wait_dynamic_children(_Child, _Pids, 0, undefined, EStack) ->
    EStack;
wait_dynamic_children(_Child, _Pids, 0, TRef, EStack) ->
    %% If the timer has expired before its cancellation, we must empty the
    %% mail-box of the 'timeout'-message.
    erlang:cancel_timer(TRef),
    receive
        {timeout, TRef, kill} ->
            EStack
    after 0 ->
            EStack
    end;
wait_dynamic_children(#child{shutdown=brutal_kill} = Child, Pids, Sz,
                      TRef, EStack) ->
    receive
        {'DOWN', _MRef, process, Pid, killed} ->
            wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, Reason} ->
            %% died with some reason other than our kill: record it
            wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
                                  TRef, ?DICT:append(Reason, Pid, EStack))
    end;
wait_dynamic_children(#child{restart_type=RType} = Child, Pids, Sz,
                      TRef, EStack) ->
    receive
        {'DOWN', _MRef, process, Pid, shutdown} ->
            wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, normal} when not ?is_permanent(RType) ->
            wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, Reason} ->
            wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
                                  TRef, ?DICT:append(Reason, Pid, EStack));

        {timeout, TRef, kill} ->
            ?SETS:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
            %% NOTE(review): Sz is decremented here although no 'DOWN'
            %% was consumed, so one final 'DOWN' may be left unreceived
            %% -- confirm against upstream OTP supervisor.erl.
            wait_dynamic_children(Child, Pids, Sz-1, undefined, EStack)
    end.
+
%%-----------------------------------------------------------------
%% Child/State manipulating functions.
%%-----------------------------------------------------------------

%% Note we do not want to save the parameter list for temporary processes as
%% they will not be restarted, and hence we do not need this information.
%% Especially for dynamic children to simple_one_for_one supervisors
%% it could become very costly as it is not uncommon to spawn
%% very many such processes.
save_child(#child{restart_type = temporary,
                  mfargs = {M, F, _}} = Child, #state{children = Children} = State) ->
    State#state{children = [Child#child{mfargs = {M, F, undefined}} | Children]};
save_child(Child, #state{children = Children} = State) ->
    State#state{children = [Child | Children]}.
+
%% Record a dynamically started child: temporary children only need
%% their pid (set); restartable ones also need their start args (dict).
save_dynamic_child(temporary, Pid, _, #state{dynamics = Dynamics} = State) ->
    State#state{dynamics = ?SETS:add_element(Pid, dynamics_db(temporary, Dynamics))};
save_dynamic_child(RestartType, Pid, Args, #state{dynamics = Dynamics} = State) ->
    State#state{dynamics = ?DICT:store(Pid, Args, dynamics_db(RestartType, Dynamics))}.
+
%% Lazily create (or pass through) the dynamic-children store:
%% temporary children use a set of pids (no args kept), everything
%% else a dict of Pid -> Args.
dynamics_db(RestartType, undefined) ->
    case RestartType of
        temporary -> ?SETS:new();
        _         -> ?DICT:new()
    end;
dynamics_db(_RestartType, Dynamics) ->
    Dynamics.
+
%% Fetch the start args recorded for a dynamic child.  A set-backed
%% store means the children are temporary, for which no args are kept.
dynamic_child_args(Pid, Dynamics) ->
    IsTemporaryStore = ?SETS:is_set(Dynamics),
    case IsTemporaryStore of
        true  -> {ok, undefined};
        false -> ?DICT:find(Pid, Dynamics)
    end.
+
%% Remove one child from the state: from the dynamics store for
%% simple_one_for_one supervisors, from the children list otherwise.
state_del_child(#child{pid = Pid, restart_type = temporary}, State) when ?is_simple(State) ->
    NDynamics = ?SETS:del_element(Pid, dynamics_db(temporary, State#state.dynamics)),
    State#state{dynamics = NDynamics};
state_del_child(#child{pid = Pid, restart_type = RType}, State) when ?is_simple(State) ->
    NDynamics = ?DICT:erase(Pid, dynamics_db(RType, State#state.dynamics)),
    State#state{dynamics = NDynamics};
state_del_child(Child, State) ->
    NChildren = del_child(Child#child.name, State#state.children),
    State#state{children = NChildren}.
+
+del_child(Name, [Ch=#child{pid = ?restarting(_)}|_]=Chs) when Ch#child.name =:= Name ->
+ Chs;
+del_child(Name, [Ch|Chs]) when Ch#child.name =:= Name, Ch#child.restart_type =:= temporary ->
+ Chs;
+del_child(Name, [Ch|Chs]) when Ch#child.name =:= Name ->
+ [Ch#child{pid = undefined} | Chs];
+del_child(Pid, [Ch|Chs]) when Ch#child.pid =:= Pid, Ch#child.restart_type =:= temporary ->
+ Chs;
+del_child(Pid, [Ch|Chs]) when Ch#child.pid =:= Pid ->
+ [Ch#child{pid = undefined} | Chs];
+del_child(Name, [Ch|Chs]) ->
+ [Ch|del_child(Name, Chs)];
+del_child(_, []) ->
+ [].
+
+%% Chs = [S4, S3, Ch, S1, S0]
+%% Ret: {[S4, S3, Ch], [S1, S0]}
+%% Split the children list at the child matching Name (or Pid); the
+%% matched child is included, with its pid cleared, at the end of the
+%% first half. Both halves come back in their original order.
+split_child(Name, Chs) ->
+ split_child(Name, Chs, []).
+
+split_child(Name, [Ch|Chs], After) when Ch#child.name =:= Name ->
+ {lists:reverse([Ch#child{pid = undefined} | After]), Chs};
+split_child(Pid, [Ch|Chs], After) when Ch#child.pid =:= Pid ->
+ {lists:reverse([Ch#child{pid = undefined} | After]), Chs};
+split_child(Name, [Ch|Chs], After) ->
+ split_child(Name, Chs, [Ch | After]);
+split_child(_, [], After) ->
+ {lists:reverse(After), []}.
+
+%% Find a child by name; with AllowPid set, a pid argument is resolved
+%% through the dynamics store of a simple_one_for_one supervisor.
+get_child(Name, State) ->
+ get_child(Name, State, false).
+get_child(Pid, State, AllowPid) when AllowPid, is_pid(Pid) ->
+ get_dynamic_child(Pid, State);
+get_child(Name, State, _) ->
+ lists:keysearch(Name, #child.name, State#state.children).
+
+%% Resolve a pid to the single child spec of a simple_one_for_one
+%% supervisor. Tries the live pid, then its ?restarting marker; if
+%% neither is known, a dead pid is reported as the bare spec (so the
+%% caller sees the child as not running), while a live-but-unknown pid
+%% yields 'false' (not one of ours).
+get_dynamic_child(Pid, #state{children=[Child], dynamics=Dynamics}) ->
+ DynamicsDb = dynamics_db(Child#child.restart_type, Dynamics),
+ case is_dynamic_pid(Pid, DynamicsDb) of
+ true ->
+ {value, Child#child{pid=Pid}};
+ false ->
+ RPid = restarting(Pid),
+ case is_dynamic_pid(RPid, DynamicsDb) of
+ true ->
+ {value, Child#child{pid=RPid}};
+ false ->
+ case erlang:is_process_alive(Pid) of
+ true -> false;
+ false -> {value, Child}
+ end
+ end
+ end.
+
+%% Membership test that works on either store representation
+%% (set of pids for temporary children, dict keyed by pid otherwise).
+is_dynamic_pid(Pid, Dynamics) ->
+ case ?SETS:is_set(Dynamics) of
+ true ->
+ ?SETS:is_element(Pid, Dynamics);
+ false ->
+ ?DICT:is_key(Pid, Dynamics)
+ end.
+
+%% Swap in an updated child record, matched by name, preserving the
+%% position of the child in the children list.
+replace_child(Child, State) ->
+ Chs = do_replace_child(Child, State#state.children),
+ State#state{children = Chs}.
+
+%% NOTE(review): no [] clause — callers are expected to only pass a
+%% child that is known to be present; an absent name would crash here.
+do_replace_child(Child, [Ch|Chs]) when Ch#child.name =:= Child#child.name ->
+ [Child | Chs];
+do_replace_child(Child, [Ch|Chs]) ->
+ [Ch|do_replace_child(Child, Chs)].
+
+%% Drop a child (by name) from the children list entirely.
+remove_child(Child, State) ->
+ Chs = lists:keydelete(Child#child.name, #child.name, State#state.children),
+ State#state{children = Chs}.
+
+%%-----------------------------------------------------------------
+%% Func: init_state/4
+%% Args: SupName = {local, atom()} | {global, atom()} | self
+%% Type = {Strategy, MaxIntensity, Period}
+%% Strategy = one_for_one | one_for_all | simple_one_for_one |
+%% rest_for_one
+%% MaxIntensity = integer() >= 0
+%% Period = integer() > 0
+%% Mod :== atom()
+%% Args :== term()
+%% Purpose: Check that Type is of correct type (!)
+%% Returns: {ok, state()} | Error
+%%-----------------------------------------------------------------
+%% The valid* checks below signal bad input by throwing; the catch
+%% turns such a throw into an error return value for the caller.
+init_state(SupName, Type, Mod, Args) ->
+ case catch init_state1(SupName, Type, Mod, Args) of
+ {ok, State} ->
+ {ok, State};
+ Error ->
+ Error
+ end.
+
+init_state1(SupName, {Strategy, MaxIntensity, Period}, Mod, Args) ->
+ validStrategy(Strategy),
+ validIntensity(MaxIntensity),
+ validPeriod(Period),
+ {ok, #state{name = supname(SupName,Mod),
+ strategy = Strategy,
+ intensity = MaxIntensity,
+ period = Period,
+ module = Mod,
+ args = Args}};
+init_state1(_SupName, Type, _, _) ->
+ {invalid_type, Type}.
+
+validStrategy(simple_one_for_one) -> true;
+validStrategy(one_for_one) -> true;
+validStrategy(one_for_all) -> true;
+validStrategy(rest_for_one) -> true;
+validStrategy(What) -> throw({invalid_strategy, What}).
+
+validIntensity(Max) when is_integer(Max),
+ Max >= 0 -> true;
+validIntensity(What) -> throw({invalid_intensity, What}).
+
+validPeriod(Period) when is_integer(Period),
+ Period > 0 -> true;
+validPeriod(What) -> throw({invalid_period, What}).
+
+%% An anonymous supervisor is identified by its own pid plus callback
+%% module; a registered one by the name it was given.
+supname(self, Mod) -> {self(), Mod};
+supname(N, _) -> N.
+
+%%% ------------------------------------------------------
+%%% Check that the children start specification is valid.
+%%% Shall be a six (6) tuple
+%%% {Name, Func, RestartType, Shutdown, ChildType, Modules}
+%%% where Name is an atom
+%%% Func is {Mod, Fun, Args} == {atom(), atom(), list()}
+%%% RestartType is permanent | temporary | transient |
+%%% intrinsic | {permanent, Delay} |
+%%% {transient, Delay} | {intrinsic, Delay}
+%% where Delay >= 0
+%%% Shutdown = integer() > 0 | infinity | brutal_kill
+%%% ChildType = supervisor | worker
+%%% Modules = [atom()] | dynamic
+%%% Returns: {ok, [child_rec()]} | Error
+%%% ------------------------------------------------------
+
+%% Validate a list of child start specs, rejecting duplicate names;
+%% returns {ok, [child_rec()]} with the original ordering preserved.
+check_startspec(Children) -> check_startspec(Children, []).
+
+check_startspec([ChildSpec|T], Res) ->
+ case check_childspec(ChildSpec) of
+ {ok, Child} ->
+ case lists:keymember(Child#child.name, #child.name, Res) of
+ true -> {duplicate_child_name, Child#child.name};
+ false -> check_startspec(T, [Child | Res])
+ end;
+ Error -> Error
+ end;
+check_startspec([], Res) ->
+ {ok, lists:reverse(Res)}.
+
+%% The catch converts throws from the valid* checks below into the
+%% thrown error term, which becomes this function's return value.
+check_childspec({Name, Func, RestartType, Shutdown, ChildType, Mods}) ->
+ catch check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods);
+check_childspec(X) -> {invalid_child_spec, X}.
+
+check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods) ->
+ validName(Name),
+ validFunc(Func),
+ validRestartType(RestartType),
+ validChildType(ChildType),
+ validShutdown(Shutdown, ChildType),
+ validMods(Mods),
+ {ok, #child{name = Name, mfargs = Func, restart_type = RestartType,
+ shutdown = Shutdown, child_type = ChildType, modules = Mods}}.
+
+validChildType(supervisor) -> true;
+validChildType(worker) -> true;
+validChildType(What) -> throw({invalid_child_type, What}).
+
+%% Any term is an acceptable child name.
+validName(_Name) -> true.
+
+validFunc({M, F, A}) when is_atom(M),
+ is_atom(F),
+ is_list(A) -> true;
+validFunc(Func) -> throw({invalid_mfa, Func}).
+
+%% The {RestartType, Delay} forms are supervisor2 extensions to the
+%% stock OTP supervisor restart types.
+validRestartType(permanent) -> true;
+validRestartType(temporary) -> true;
+validRestartType(transient) -> true;
+validRestartType(intrinsic) -> true;
+validRestartType({permanent, Delay}) -> validDelay(Delay);
+validRestartType({intrinsic, Delay}) -> validDelay(Delay);
+validRestartType({transient, Delay}) -> validDelay(Delay);
+validRestartType(RestartType) -> throw({invalid_restart_type,
+ RestartType}).
+
+validDelay(Delay) when is_number(Delay),
+ Delay >= 0 -> true;
+validDelay(What) -> throw({invalid_delay, What}).
+
+validShutdown(Shutdown, _)
+ when is_integer(Shutdown), Shutdown > 0 -> true;
+validShutdown(infinity, _) -> true;
+validShutdown(brutal_kill, _) -> true;
+validShutdown(Shutdown, _) -> throw({invalid_shutdown, Shutdown}).
+
+validMods(dynamic) -> true;
+validMods(Mods) when is_list(Mods) ->
+ lists:foreach(fun(Mod) ->
+ if
+ is_atom(Mod) -> ok;
+ true -> throw({invalid_module, Mod})
+ end
+ end,
+ Mods);
+validMods(Mods) -> throw({invalid_modules, Mods}).
+
+%%% ------------------------------------------------------
+%%% Add a new restart and calculate if the max restart
+%%% intensity has been reached (in that case the supervisor
+%%% shall terminate).
+%%% All restarts accured inside the period amount of seconds
+%%% are kept in the #state.restarts list.
+%%% Returns: {ok, State'} | {terminate, State'}
+%%% ------------------------------------------------------
+
+%% Register a new restart timestamp and decide whether the maximum
+%% restart intensity (more than I restarts within P seconds) has been
+%% exceeded, in which case the supervisor must terminate.
+add_restart(State) ->
+ I = State#state.intensity,
+ P = State#state.period,
+ R = State#state.restarts,
+ %% os:timestamp/0 replaces the deprecated erlang:now/0; it returns
+ %% the same {MegaSecs, Secs, MicroSecs} shape that difference/2
+ %% expects, without now()'s global-uniqueness serialisation cost.
+ Now = os:timestamp(),
+ R1 = add_restart([Now|R], Now, P),
+ State1 = State#state{restarts = R1},
+ case length(R1) of
+ CurI when CurI =< I ->
+ {ok, State1};
+ _ ->
+ {terminate, State1}
+ end.
+
+%% Keep only the restarts that fall inside the period window. The
+%% list is ordered newest-first, so the scan stops (dropping the rest)
+%% at the first stale entry.
+add_restart([R|Restarts], Now, Period) ->
+ case inPeriod(R, Now, Period) of
+ true ->
+ [R|add_restart(Restarts, Now, Period)];
+ _ ->
+ []
+ end;
+add_restart([], _, _) ->
+ [].
+
+%% True when Time lies within Period seconds of Now.
+inPeriod(Time, Now, Period) ->
+ case difference(Time, Now) of
+ T when T > Period ->
+ false;
+ _ ->
+ true
+ end.
+
+%%
+%% Time = {MegaSecs, Secs, MicroSecs} (NOTE: MicroSecs is ignored)
+%% Calculate the time elapsed in seconds between two timestamps.
+%% If MegaSecs is equal just subtract Secs.
+%% Else calculate the Mega difference and add the Secs difference,
+%% note that Secs difference can be negative, e.g.
+%% {827, 999999, 676} diff {828, 1, 653753} == > 2 secs.
+%%
+%% NOTE(review): if the clock ever runs backwards across a mega-second
+%% boundary (CurM < TimeM), the second clause yields just CurS - TimeS;
+%% inPeriod/3 then treats the (possibly negative) result as in-period.
+difference({TimeM, TimeS, _}, {CurM, CurS, _}) when CurM > TimeM ->
+ ((CurM - TimeM) * 1000000) + (CurS - TimeS);
+difference({_, TimeS, _}, {_, CurS, _}) ->
+ CurS - TimeS.
+
+%%% ------------------------------------------------------
+%%% Error and progress reporting.
+%%% ------------------------------------------------------
+
+%% Emit a standard supervisor_report through the error logger,
+%% identifying the supervisor, the error context, the exit reason and
+%% the offending child.
+report_error(Error, Reason, Child, SupName) ->
+ ErrorMsg = [{supervisor, SupName},
+ {errorContext, Error},
+ {reason, Reason},
+ {offender, extract_child(Child)}],
+ error_logger:error_report(supervisor_report, ErrorMsg).
+
+
+%% Render a child record as a proplist for reporting. The first clause
+%% handles a pid field holding a list of pids (dynamic children being
+%% reported collectively) and reports only their count.
+extract_child(Child) when is_list(Child#child.pid) ->
+ [{nb_children, length(Child#child.pid)},
+ {name, Child#child.name},
+ {mfargs, Child#child.mfargs},
+ {restart_type, Child#child.restart_type},
+ {shutdown, Child#child.shutdown},
+ {child_type, Child#child.child_type}];
+extract_child(Child) ->
+ [{pid, Child#child.pid},
+ {name, Child#child.name},
+ {mfargs, Child#child.mfargs},
+ {restart_type, Child#child.restart_type},
+ {shutdown, Child#child.shutdown},
+ {child_type, Child#child.child_type}].
+
+%% Emit a progress report after a child has been started successfully.
+report_progress(Child, SupName) ->
+ Progress = [{supervisor, SupName},
+ {started, extract_child(Child)}],
+ error_logger:info_report(progress, Progress).
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(supervisor2_tests).
+-behaviour(supervisor2).
+
+-export([test_all/0, start_link/0]).
+-export([init/1]).
+
+%% Entry point: exercise supervisor shutdown both with children that
+%% obey the stop signal and with children that ignore it (forcing the
+%% shutdown timeout to fire).
+test_all() ->
+ ok = check_shutdown(stop, 200, 200, 2000),
+ ok = check_shutdown(ignored, 1, 2, 2000).
+
+%% For each iteration: start ChildCount dynamic children under the
+%% registered test supervisor, send each of them SigStop, then
+%% terminate and restart the intermediate test_sup child. The test
+%% supervisor is expected to go down with reason 'shutdown'; any other
+%% reason fails that iteration and short-circuits the remaining ones.
+check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
+ {ok, Sup} = supervisor2:start_link(?MODULE, [SupTimeout]),
+ Res = lists:foldl(
+ fun (I, ok) ->
+ TestSupPid = erlang:whereis(?MODULE),
+ ChildPids =
+ [begin
+ {ok, ChildPid} =
+ supervisor2:start_child(TestSupPid, []),
+ ChildPid
+ end || _ <- lists:seq(1, ChildCount)],
+ MRef = erlang:monitor(process, TestSupPid),
+ [P ! SigStop || P <- ChildPids],
+ ok = supervisor2:terminate_child(Sup, test_sup),
+ {ok, _} = supervisor2:restart_child(Sup, test_sup),
+ receive
+ {'DOWN', MRef, process, TestSupPid, shutdown} ->
+ ok;
+ {'DOWN', MRef, process, TestSupPid, Reason} ->
+ {error, {I, Reason}}
+ end;
+ (_, R) ->
+ R
+ end, ok, lists:seq(1, Iterations)),
+ unlink(Sup),
+ exit(Sup, shutdown),
+ Res.
+
+%% Stub worker: traps exits and terminates only on the atom 'stop',
+%% so any other message (e.g. 'ignored') leaves it running and forces
+%% the supervisor's shutdown timeout to be exercised.
+start_link() ->
+ Pid = spawn_link(fun () ->
+ process_flag(trap_exit, true),
+ receive stop -> ok end
+ end),
+ {ok, Pid}.
+
+%% supervisor2 callbacks: the top-level supervisor owns one transient
+%% child supervisor (shutdown timeout = Timeout) registered as
+%% ?MODULE; that child is a simple_one_for_one pool of temporary
+%% stub workers started via start_link/0 above.
+init([Timeout]) ->
+ {ok, {{one_for_one, 0, 1},
+ [{test_sup, {supervisor2, start_link,
+ [{local, ?MODULE}, ?MODULE, []]},
+ transient, Timeout, supervisor, [?MODULE]}]}};
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{test_worker, {?MODULE, start_link, []},
+ temporary, 1000, worker, [?MODULE]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(tcp_acceptor).
+
+-behaviour(gen_server).
+
+-export([start_link/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {callback, sock, ref}).
+
+%%--------------------------------------------------------------------
+
+start_link(Callback, LSock) ->
+ gen_server:start_link(?MODULE, {Callback, LSock}, []).
+
+%%--------------------------------------------------------------------
+
+%% Kick off the first asynchronous accept from a cast to self() so
+%% that init/1 returns immediately.
+init({Callback, LSock}) ->
+ gen_server:cast(self(), accept),
+ {ok, #state{callback=Callback, sock=LSock}}.
+
+%% NOTE(review): no reply is ever sent, so any gen_server:call to this
+%% process times out — presumably intentional, as there is no call API.
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+%% Obtain a file-handle-cache slot before the first accept; each
+%% accepted connection re-obtains one (see handle_info below).
+handle_cast(accept, State) ->
+ ok = file_handle_cache:obtain(),
+ accept(State);
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%% Completion of an asynchronous accept started by accept/1: register
+%% the new socket, tune it, hand it to the callback MFA, and queue the
+%% next accept.
+handle_info({inet_async, LSock, Ref, {ok, Sock}},
+ State = #state{callback={M,F,A}, sock=LSock, ref=Ref}) ->
+
+ %% patch up the socket so it looks like one we got from
+ %% gen_tcp:accept/1
+ {ok, Mod} = inet_db:lookup_socket(LSock),
+ inet_db:register_socket(Sock, Mod),
+
+ %% handle
+ case tune_buffer_size(Sock) of
+ ok -> file_handle_cache:transfer(
+ apply(M, F, A ++ [Sock])),
+ ok = file_handle_cache:obtain();
+ {error, enotconn} -> catch port_close(Sock);
+ {error, Err} -> {ok, {IPAddress, Port}} = inet:sockname(LSock),
+ error_logger:error_msg(
+ "failed to tune buffer size of "
+ "connection accepted on ~s:~p - ~s~n",
+ [rabbit_misc:ntoab(IPAddress), Port,
+ rabbit_misc:format_inet_error(Err)]),
+ catch port_close(Sock)
+ end,
+
+ %% accept more
+ accept(State);
+
+%% Failure of an asynchronous accept: a closed listener is a normal
+%% stop; an aborted connection just retries; anything else is fatal.
+handle_info({inet_async, LSock, Ref, {error, Reason}},
+ State=#state{sock=LSock, ref=Ref}) ->
+ case Reason of
+ closed -> {stop, normal, State}; %% listening socket closed
+ econnaborted -> accept(State); %% client sent RST before we accepted
+ _ -> {stop, {accept_failed, Reason}, State}
+ end;
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+
+%% Start another asynchronous accept on the listening socket; the
+%% completion arrives as an {inet_async, ...} message matched on the
+%% returned Ref in handle_info above.
+accept(State = #state{sock=LSock}) ->
+ case prim_inet:async_accept(LSock, -1) of
+ {ok, Ref} -> {noreply, State#state{ref=Ref}};
+ Error -> {stop, {cannot_accept, Error}, State}
+ end.
+
+%% Set the user-level 'buffer' to the largest of sndbuf/recbuf/buffer
+%% so userland buffering keeps up with the kernel socket buffers.
+tune_buffer_size(Sock) ->
+ case inet:getopts(Sock, [sndbuf, recbuf, buffer]) of
+ {ok, BufSizes} -> BufSz = lists:max([Sz || {_Opt, Sz} <- BufSizes]),
+ inet:setopts(Sock, [{buffer, BufSz}]);
+ Error -> Error
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(tcp_acceptor_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/2]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-spec(start_link/2 :: (atom(), mfargs()) -> rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link(Name, Callback) ->
+ supervisor:start_link({local,Name}, ?MODULE, Callback).
+
+%% A simple_one_for_one pool of tcp_acceptor workers; each is started
+%% with the shared Callback plus the listening socket supplied by the
+%% caller of supervisor:start_child/2.
+init(Callback) ->
+ {ok, {{simple_one_for_one, 10, 10},
+ [{tcp_acceptor, {tcp_acceptor, start_link, [Callback]},
+ transient, brutal_kill, worker, [tcp_acceptor]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(tcp_listener).
+
+-behaviour(gen_server).
+
+-export([start_link/8]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {sock, on_startup, on_shutdown, label}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-spec(start_link/8 ::
+ (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
+ integer(), atom(), mfargs(), mfargs(), string()) ->
+ rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%--------------------------------------------------------------------
+
+%% Start the listener gen_server; all parameters are forwarded
+%% unchanged to init/1 below.
+start_link(IPAddress, Port, SocketOpts,
+ ConcurrentAcceptorCount, AcceptorSup,
+ OnStartup, OnShutdown, Label) ->
+ gen_server:start_link(
+ ?MODULE, {IPAddress, Port, SocketOpts,
+ ConcurrentAcceptorCount, AcceptorSup,
+ OnStartup, OnShutdown, Label}, []).
+
+%%--------------------------------------------------------------------
+
+%% Open the listening socket, spawn ConcurrentAcceptorCount acceptors
+%% under AcceptorSup (each sharing the socket), then run the OnStartup
+%% MFA with the address and port appended. Failure to listen stops the
+%% server with {cannot_listen, ...}.
+init({IPAddress, Port, SocketOpts,
+ ConcurrentAcceptorCount, AcceptorSup,
+ {M,F,A} = OnStartup, OnShutdown, Label}) ->
+ %% Trap exits so terminate/2 runs and can close the socket and
+ %% invoke the OnShutdown callback.
+ process_flag(trap_exit, true),
+ case gen_tcp:listen(Port, SocketOpts ++ [{ip, IPAddress},
+ {active, false}]) of
+ {ok, LSock} ->
+ lists:foreach(fun (_) ->
+ {ok, _APid} = supervisor:start_child(
+ AcceptorSup, [LSock])
+ end,
+ lists:duplicate(ConcurrentAcceptorCount, dummy)),
+ {ok, {LIPAddress, LPort}} = inet:sockname(LSock),
+ error_logger:info_msg(
+ "started ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(LIPAddress), LPort]),
+ apply(M, F, A ++ [IPAddress, Port]),
+ {ok, #state{sock = LSock,
+ on_startup = OnStartup, on_shutdown = OnShutdown,
+ label = Label}};
+ {error, Reason} ->
+ error_logger:error_msg(
+ "failed to start ~s on ~s:~p - ~p (~s)~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port,
+ Reason, inet:format_error(Reason)]),
+ {stop, {cannot_listen, IPAddress, Port, Reason}}
+ end.
+
+%% No call/cast/info API; unexpected messages are ignored.
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% Close the listening socket and run the OnShutdown MFA with the
+%% listener's address and port appended (mirroring OnStartup in init/1).
+terminate(_Reason, #state{sock=LSock, on_shutdown = {M,F,A}, label=Label}) ->
+ {ok, {IPAddress, Port}} = inet:sockname(LSock),
+ gen_tcp:close(LSock),
+ error_logger:info_msg("stopped ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port]),
+ apply(M, F, A ++ [IPAddress, Port]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(tcp_listener_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/7, start_link/8]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-spec(start_link/7 ::
+ (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
+ mfargs(), mfargs(), mfargs(), string()) ->
+ rabbit_types:ok_pid_or_error()).
+-spec(start_link/8 ::
+ (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
+ mfargs(), mfargs(), mfargs(), integer(), string()) ->
+ rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% Convenience wrapper: default to a single concurrent acceptor.
+start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
+ AcceptCallback, Label) ->
+ start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
+ AcceptCallback, 1, Label).
+
+start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
+ AcceptCallback, ConcurrentAcceptorCount, Label) ->
+ supervisor:start_link(
+ ?MODULE, {IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
+ AcceptCallback, ConcurrentAcceptorCount, Label}).
+
+%% one_for_all pair: the acceptor pool supervisor and the listener
+%% that feeds it; if either dies, both are restarted together.
+init({IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
+ AcceptCallback, ConcurrentAcceptorCount, Label}) ->
+ %% This is gross. The tcp_listener needs to know about the
+ %% tcp_acceptor_sup, and the only way I can think of accomplishing
+ %% that without jumping through hoops is to register the
+ %% tcp_acceptor_sup.
+ Name = rabbit_misc:tcp_name(tcp_acceptor_sup, IPAddress, Port),
+ {ok, {{one_for_all, 10, 10},
+ [{tcp_acceptor_sup, {tcp_acceptor_sup, start_link,
+ [Name, AcceptCallback]},
+ transient, infinity, supervisor, [tcp_acceptor_sup]},
+ {tcp_listener, {tcp_listener, start_link,
+ [IPAddress, Port, SocketOpts,
+ ConcurrentAcceptorCount, Name,
+ OnStartup, OnShutdown, Label]},
+ transient, 16#ffffffff, worker, [tcp_listener]}]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(test_sup).
+
+-behaviour(supervisor2).
+
+-export([test_supervisor_delayed_restart/0,
+ init/1, start_child/0]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(test_supervisor_delayed_restart/0 :: () -> 'passed').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Run the delayed-restart scenario under both a simple_one_for_one
+%% and a one_for_one supervisor.
+test_supervisor_delayed_restart() ->
+ passed = with_sup(simple_one_for_one,
+ fun (SupPid) ->
+ {ok, _ChildPid} =
+ supervisor2:start_child(SupPid, []),
+ test_supervisor_delayed_restart(SupPid)
+ end),
+ passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1).
+
+%% With intensity 1/1s and restart type {permanent, 1}: the first kill
+%% is restarted immediately; the second (within the period) exhausts
+%% the intensity, so supervisor2 delays the restart by 1 second — the
+%% ping times out inside that window and succeeds after it.
+test_supervisor_delayed_restart(SupPid) ->
+ ok = ping_child(SupPid),
+ ok = exit_child(SupPid),
+ timer:sleep(100),
+ ok = ping_child(SupPid),
+ ok = exit_child(SupPid),
+ timer:sleep(100),
+ timeout = ping_child(SupPid),
+ timer:sleep(1010),
+ ok = ping_child(SupPid),
+ passed.
+
+%% Run Fun against a fresh supervisor, then shut the supervisor down
+%% (unlinking first so its exit does not take the test down with it).
+with_sup(RestartStrategy, Fun) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]),
+ Res = Fun(SupPid),
+ unlink(SupPid),
+ exit(SupPid, shutdown),
+ Res.
+
+%% supervisor2 callback: one child with the supervisor2-specific
+%% {permanent, 1} restart type (restart after a 1 second delay once
+%% the 1-restart-per-second intensity is exhausted).
+init([RestartStrategy]) ->
+ {ok, {{RestartStrategy, 1, 1},
+ [{test, {test_sup, start_child, []}, {permanent, 1},
+ 16#ffffffff, worker, [test_sup]}]}}.
+
+%% Child start function: a worker that answers pings (see run_child/0).
+start_child() ->
+ {ok, proc_lib:spawn_link(fun run_child/0)}.
+
+%% Ping the supervisor's child; 'timeout' if no pong arrives within 1s
+%% (i.e. the child is currently dead or awaiting delayed restart).
+ping_child(SupPid) ->
+ Ref = make_ref(),
+ with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end),
+ receive {pong, Ref} -> ok
+ after 1000 -> timeout
+ end.
+
+%% Kill the child abnormally so the supervisor's restart logic fires.
+exit_child(SupPid) ->
+ with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end),
+ ok.
+
+%% Apply Fun to the single child's pid, if the child is running; a
+%% dead ('undefined') or absent child is silently ignored.
+with_child_pid(SupPid, Fun) ->
+ case supervisor2:which_children(SupPid) of
+ [{_Id, undefined, worker, [test_sup]}] -> ok;
+ [{_Id, ChildPid, worker, [test_sup]}] -> Fun(ChildPid);
+ [] -> ok
+ end.
+
+%% Worker loop: reply to each {ping, Ref, Pid} with {pong, Ref}.
+run_child() ->
+ receive {ping, Ref, Pid} -> Pid ! {pong, Ref},
+ run_child()
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(truncate).
+
+-define(ELLIPSIS_LENGTH, 3).
+
+-record(params, {content, struct, content_dec, struct_dec}).
+
+-export([log_event/2, term/2]).
+%% exported for testing
+-export([test/0]).
+
+%% Truncate the payload of an error_logger event so huge terms do not
+%% swamp the log; events of unknown shape pass through untouched.
+log_event({Type, GL, {Pid, Format, Args}}, Params)
+ when Type =:= error orelse
+ Type =:= info_msg orelse
+ Type =:= warning_msg ->
+ {Type, GL, {Pid, Format, [term(T, Params) || T <- Args]}};
+log_event({Type, GL, {Pid, ReportType, Report}}, Params)
+ when Type =:= error_report orelse
+ Type =:= info_report orelse
+ Type =:= warning_report ->
+ {Type, GL, {Pid, ReportType, report(Report, Params)}};
+log_event(Event, _Params) ->
+ Event.
+
+%% Truncate a report: unwrap a doubly-nested single item, truncate the
+%% values of {K, V} pairs (keeping keys intact), or truncate any other
+%% term directly.
+report([[Thing]], Params) -> report([Thing], Params);
+report(List, Params) when is_list(List) -> [case Item of
+ {K, V} -> {K, term(V, Params)};
+ _ -> term(Item, Params)
+ end || Item <- List];
+report(Other, Params) -> term(Other, Params).
+
+%% Truncate Thing only if its estimated size exceeds Max bytes (see
+%% term_limit/2); otherwise return it unchanged. The budget tuple is
+%% {Content, Struct, ContentDec, StructDec}: leaf-content budget,
+%% structural (element-count) budget, and their per-level decrements.
+term(Thing, {Max, {Content, Struct, ContentDec, StructDec}}) ->
+ case term_limit(Thing, Max) of
+ true -> term(Thing, true, #params{content = Content,
+ struct = Struct,
+ content_dec = ContentDec,
+ struct_dec = StructDec});
+ false -> Thing
+ end.
+
+%% Truncate one term: binaries/bitstrings are cut to the content
+%% budget with a "..." suffix; printable strings likewise (only when
+%% AllowPrintable — list tails must not be treated as strings); other
+%% lists and tuples are shrunk element-wise; everything else passes
+%% through unchanged.
+term(Bin, _AllowPrintable, #params{content = N})
+ when (is_binary(Bin) orelse is_bitstring(Bin))
+ andalso size(Bin) > N - ?ELLIPSIS_LENGTH ->
+ Suffix = without_ellipsis(N),
+ <<Head:Suffix/binary, _/bitstring>> = Bin,
+ <<Head/binary, <<"...">>/binary>>;
+term(L, AllowPrintable, #params{struct = N} = Params) when is_list(L) ->
+ case AllowPrintable andalso io_lib:printable_list(L) of
+ true -> N2 = without_ellipsis(N),
+ case length(L) > N2 of
+ true -> string:left(L, N2) ++ "...";
+ false -> L
+ end;
+ false -> shrink_list(L, Params)
+ end;
+term(T, _AllowPrintable, Params) when is_tuple(T) ->
+ list_to_tuple(shrink_list(tuple_to_list(T), Params));
+term(T, _, _) ->
+ T.
+
+without_ellipsis(N) -> erlang:max(N - ?ELLIPSIS_LENGTH, 0).
+
+%% Shrink a list (or a tuple converted to a list) element-wise. When
+%% the structural budget is exhausted the remainder collapses to the
+%% '...' marker. Each head is truncated with reduced budgets; the tail
+%% is processed via term/3 with AllowPrintable=false so it is never
+%% mistaken for a printable string (and improper tails are handled).
+shrink_list(_, #params{struct = N}) when N =< 0 ->
+ ['...'];
+shrink_list([], _) ->
+ [];
+shrink_list([H|T], #params{content = Content,
+ struct = Struct,
+ content_dec = ContentDec,
+ struct_dec = StructDec} = Params) ->
+ [term(H, true, Params#params{content = Content - ContentDec,
+ struct = Struct - StructDec})
+ | term(T, false, Params#params{struct = Struct - 1})].
+
+%%----------------------------------------------------------------------------
+
+%% We don't use erts_debug:flat_size/1 because that ignores binary
+%% sizes. This is all going to be rather approximate though, these
+%% sizes are probably not very "fair" but we are just trying to see if
+%% we reach a fairly arbitrary limit anyway though.
+%% True when Thing's approximate footprint exceeds Max bytes, using
+%% the estimate computed by term_size/3.
+term_limit(Thing, Max) ->
+    WordSize = erlang:system_info(wordsize),
+    limit_exceeded =:= term_size(Thing, Max, WordSize).
+
+%% Estimate Thing's size, deducting from the byte budget M as we walk
+%% the term; returns the remaining budget or 'limit_exceeded'. W is the
+%% VM word size in bytes. Bitstrings cost their byte size, atoms and
+%% numbers two words, each cons cell two words, and anything else is
+%% priced via erts_debug:flat_size/1.
+term_size(B, M, _W) when is_bitstring(B) -> lim(M, size(B));
+term_size(A, M, W) when is_atom(A) -> lim(M, 2 * W);
+term_size(N, M, W) when is_number(N) -> lim(M, 2 * W);
+term_size(T, M, W) when is_tuple(T) -> tuple_term_size(
+ T, M, 1, tuple_size(T), W);
+term_size([], M, _W) ->
+ M;
+term_size([H|T], M, W) ->
+ case term_size(H, M, W) of
+ limit_exceeded -> limit_exceeded;
+ M2 -> lim(term_size(T, M2, W), 2 * W)
+ end;
+term_size(X, M, W) ->
+ lim(M, erts_debug:flat_size(X) * W).
+
+%% Deduct Cost from the remaining Budget. Once the budget no longer
+%% strictly exceeds the cost — or has already collapsed to the
+%% 'limit_exceeded' marker — return 'limit_exceeded', which then
+%% propagates through all further lim/2 calls.
+lim(Budget, Cost) ->
+    case is_number(Budget) andalso Budget > Cost of
+        true  -> Budget - Cost;
+        false -> limit_exceeded
+    end.
+
+%% Walk tuple elements I..S, charging each element's size plus two
+%% words of tuple overhead; short-circuits as soon as the budget is
+%% exhausted.
+tuple_term_size(_T, limit_exceeded, _I, _S, _W) ->
+ limit_exceeded;
+tuple_term_size(_T, M, I, S, _W) when I > S ->
+ M;
+tuple_term_size(T, M, I, S, W) ->
+ tuple_term_size(T, lim(term_size(element(I, T), M, W), 2 * W), I + 1, S, W).
+
+%%----------------------------------------------------------------------------
+
+%% Self-test entry point: exact small-term checks, budget arithmetic
+%% checks, and size-reduction checks on large real-world-ish terms.
+test() ->
+ test_short_examples_exactly(),
+ test_term_limit(),
+ test_large_examples_for_size(),
+ ok.
+
+%% Each pair asserts both the truncated form (tiny Max forces
+%% truncation) and the identity form (huge Max leaves terms intact).
+test_short_examples_exactly() ->
+ F = fun (Term, Exp) ->
+ Exp = term(Term, {1, {10, 10, 5, 5}}),
+ Term = term(Term, {100000, {10, 10, 5, 5}})
+ end,
+ FSmall = fun (Term, Exp) ->
+ Exp = term(Term, {1, {2, 2, 2, 2}}),
+ Term = term(Term, {100000, {2, 2, 2, 2}})
+ end,
+ F([], []),
+ F("h", "h"),
+ F("hello world", "hello w..."),
+ F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]),
+ F([a|b], [a|b]),
+ F(<<"hello">>, <<"hello">>),
+ F([<<"hello world">>], [<<"he...">>]),
+ F(<<1:1>>, <<1:1>>),
+ F(<<1:81>>, <<0:56, "...">>),
+ F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}),
+ FSmall({a,30,40,40,40,40}, {a,30,'...'}),
+ FSmall([a,30,40,40,40,40], [a,30,'...']),
+ P = spawn(fun() -> receive die -> ok end end),
+ F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]),
+ P ! die,
+ R = make_ref(),
+ F([R], [R]),
+ ok.
+
+%% Check the exact budget arithmetic of term_size/3; the branching on
+%% remaining budget accounts for 32- vs 64-bit word sizes.
+test_term_limit() ->
+ W = erlang:system_info(wordsize),
+ S = <<"abc">>,
+ 1 = term_size(S, 4, W),
+ limit_exceeded = term_size(S, 3, W),
+ case 100 - term_size([S, S], 100, W) of
+ 22 -> ok; %% 32 bit
+ 38 -> ok %% 64 bit
+ end,
+ case 100 - term_size([S, [S]], 100, W) of
+ 30 -> ok; %% ditto
+ 54 -> ok
+ end,
+ limit_exceeded = term_size([S, S], 6, W),
+ ok.
+
+%% Shrinking a >5MB external representation must land under 500KB.
+test_large_examples_for_size() ->
+ %% Real world values
+ Shrink = fun(Term) -> term(Term, {1, {1000, 100, 50, 5}}) end,
+ TestSize = fun(Term) ->
+ true = 5000000 < size(term_to_binary(Term)),
+ true = 500000 > size(term_to_binary(Shrink(Term)))
+ end,
+ TestSize(lists:seq(1, 5000000)),
+ TestSize(recursive_list(1000, 10)),
+ TestSize(recursive_list(5000, 20)),
+ TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])),
+ TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])),
+ ok.
+
+%% Build a nested list fixture: depth N, shrinking element count per
+%% level, bottoming out in a flat integer sequence.
+recursive_list(S, 0) -> lists:seq(1, S);
+recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)].
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+%% In practice Erlang shouldn't be allowed to grow to more than a half
+%% of available memory. The pessimistic scenario is when the Erlang VM
+%% has a single process that's consuming all memory. In such a case,
+%% during garbage collection, Erlang tries to allocate a huge chunk of
+%% contiguous memory, which can result in a crash or heavy swapping.
+%%
+%% This module tries to warn Rabbit before such situations occur, so
+%% that it has a higher chance to avoid running out of memory.
+
+-module(vm_memory_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/1, start_link/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([get_total_memory/0, get_vm_limit/0,
+ get_check_interval/0, set_check_interval/1,
+ get_vm_memory_high_watermark/0, set_vm_memory_high_watermark/1,
+ get_memory_limit/0]).
+
+%% for tests
+-export([parse_line_linux/1]).
+
+
+-define(SERVER, ?MODULE).
+-define(DEFAULT_MEMORY_CHECK_INTERVAL, 1000).
+-define(ONE_MB, 1048576).
+
+%% For an unknown OS, we assume that we have 1GB of memory. It'll be
+%% wrong. Scale by vm_memory_high_watermark in configuration to get a
+%% sensible value.
+-define(MEMORY_SIZE_FOR_UNKNOWN_OS, 1073741824).
+
+%% gen_server state:
+%%   total_memory    - total (or assumed, for unknown OSes) RAM in bytes
+%%   memory_limit    - alarm threshold in bytes (fraction * usable memory)
+%%   memory_fraction - the configured high-watermark fraction
+%%   timeout         - check interval in milliseconds
+%%   timer           - timer:send_interval ref driving 'update' ticks
+%%   alarmed         - boolean: is the memory alarm currently raised?
+%%   alarm_funs      - {SetFun, ClearFun} callbacks for the alarm
+-record(state, {total_memory,
+ memory_limit,
+ memory_fraction,
+ timeout,
+ timer,
+ alarmed,
+ alarm_funs
+ }).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()).
+-spec(start_link/3 :: (float(), fun ((any()) -> 'ok'),
+ fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()).
+-spec(get_total_memory/0 :: () -> (non_neg_integer() | 'unknown')).
+-spec(get_vm_limit/0 :: () -> non_neg_integer()).
+-spec(get_check_interval/0 :: () -> non_neg_integer()).
+-spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok').
+-spec(get_vm_memory_high_watermark/0 :: () -> float()).
+-spec(set_vm_memory_high_watermark/1 :: (float()) -> 'ok').
+-spec(get_memory_limit/0 :: () -> non_neg_integer()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Total system memory in bytes for the current OS, or 'unknown' if
+%% detection fails; failures are logged together with the stacktrace
+%% rather than crashing the caller.
+get_total_memory() ->
+ try
+ get_total_memory(os:type())
+ catch _:Error ->
+ rabbit_log:warning(
+ "Failed to get total system memory: ~n~p~n~p~n",
+ [Error, erlang:get_stacktrace()]),
+ unknown
+ end.
+
+%% Address-space limit for the current OS (see get_vm_limit/1 below).
+get_vm_limit() -> get_vm_limit(os:type()).
+
+%% Current interval, in milliseconds, between memory checks.
+get_check_interval() ->
+ gen_server:call(?MODULE, get_check_interval, infinity).
+
+%% Set the interval, in milliseconds, between memory checks. The
+%% server replaces its periodic timer with one at the new interval.
+%% (The argument was previously misnamed 'Fraction'; it is a time
+%% interval, not a memory fraction.)
+set_check_interval(Interval) ->
+ gen_server:call(?MODULE, {set_check_interval, Interval}, infinity).
+
+%% The configured high-watermark fraction (a float).
+get_vm_memory_high_watermark() ->
+ gen_server:call(?MODULE, get_vm_memory_high_watermark, infinity).
+
+%% Set a new high-watermark fraction; the byte limit is recomputed and
+%% an immediate check is performed by the server.
+set_vm_memory_high_watermark(Fraction) ->
+ gen_server:call(?MODULE, {set_vm_memory_high_watermark, Fraction},
+ infinity).
+
+%% The current alarm threshold in bytes.
+get_memory_limit() ->
+ gen_server:call(?MODULE, get_memory_limit, infinity).
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+%% Start the monitor using alarm_handler's set/clear as the alarm
+%% callbacks.
+start_link(MemFraction) ->
+ start_link(MemFraction,
+ fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
+
+%% Start the monitor with explicit alarm set/clear callbacks.
+start_link(MemFraction, AlarmSet, AlarmClear) ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE,
+ [MemFraction, {AlarmSet, AlarmClear}], []).
+
+init([MemFraction, AlarmFuns]) ->
+ %% Start the periodic 'update' tick, then compute the initial limit;
+ %% set_mem_limits/2 also performs the first check via internal_update.
+ TRef = start_timer(?DEFAULT_MEMORY_CHECK_INTERVAL),
+ State = #state { timeout = ?DEFAULT_MEMORY_CHECK_INTERVAL,
+ timer = TRef,
+ alarmed = false,
+ alarm_funs = AlarmFuns },
+ {ok, set_mem_limits(State, MemFraction)}.
+
+handle_call(get_vm_memory_high_watermark, _From, State) ->
+ {reply, State#state.memory_fraction, State};
+
+handle_call({set_vm_memory_high_watermark, MemFraction}, _From, State) ->
+ %% Recomputes the byte limit and runs an immediate check.
+ {reply, ok, set_mem_limits(State, MemFraction)};
+
+handle_call(get_check_interval, _From, State) ->
+ {reply, State#state.timeout, State};
+
+handle_call({set_check_interval, Timeout}, _From, State) ->
+ %% Cancel the old periodic timer before installing one at the new
+ %% interval, so only a single 'update' tick source is ever active.
+ {ok, cancel} = timer:cancel(State#state.timer),
+ {reply, ok, State#state{timeout = Timeout, timer = start_timer(Timeout)}};
+
+handle_call(get_memory_limit, _From, State) ->
+ {reply, State#state.memory_limit, State};
+
+handle_call(Request, _From, State) ->
+ %% This clause used to return {noreply, State}, which left the caller
+ %% of gen_server:call/3 blocked until its timeout — and every call in
+ %% this module uses 'infinity', i.e. blocked forever. Reply with an
+ %% error instead so unexpected calls fail fast at the call site.
+ {reply, {error, {unknown_call, Request}}, State}.
+
+%% No casts are part of this server's protocol; ignore any that arrive.
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+%% 'update' ticks arrive from the timer:send_interval timer started in
+%% start_timer/1; each one triggers a memory check.
+handle_info(update, State) ->
+ {noreply, internal_update(State)};
+
+%% Drain any other stray messages.
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% Nothing to clean up: the interval timer dies with the process.
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Server Internals
+%%----------------------------------------------------------------------------
+
+%% (Re)compute the memory limit as MemFraction of usable memory
+%% (total memory capped by the VM address-space limit), log the
+%% decision, and run an immediate check so the alarm state reflects
+%% the new limit straight away.
+set_mem_limits(State, MemFraction) ->
+ TotalMemory =
+ case get_total_memory() of
+ unknown ->
+ %% Warn only the first time through (both fields still
+ %% undefined), not on every subsequent watermark change.
+ case State of
+ #state { total_memory = undefined,
+ memory_limit = undefined } ->
+ error_logger:warning_msg(
+ "Unknown total memory size for your OS ~p. "
+ "Assuming memory size is ~pMB.~n",
+ [os:type(),
+ trunc(?MEMORY_SIZE_FOR_UNKNOWN_OS/?ONE_MB)]);
+ _ ->
+ ok
+ end,
+ ?MEMORY_SIZE_FOR_UNKNOWN_OS;
+ M -> M
+ end,
+ UsableMemory =
+ case get_vm_limit() of
+ Limit when Limit < TotalMemory ->
+ error_logger:warning_msg(
+ "Only ~pMB of ~pMB memory usable due to "
+ "limited address space.~n"
+ "Crashes due to memory exhaustion are possible - see~n"
+ "http://www.rabbitmq.com/memory.html#address-space~n",
+ [trunc(V/?ONE_MB) || V <- [Limit, TotalMemory]]),
+ Limit;
+ _ ->
+ TotalMemory
+ end,
+ MemLim = trunc(MemFraction * UsableMemory),
+ error_logger:info_msg("Memory limit set to ~pMB of ~pMB total.~n",
+ [trunc(MemLim/?ONE_MB), trunc(TotalMemory/?ONE_MB)]),
+ internal_update(State #state { total_memory = TotalMemory,
+ memory_limit = MemLim,
+ memory_fraction = MemFraction}).
+
+%% Compare the VM's current total memory usage against the limit and
+%% raise / clear the resource_limit alarm only on the false->true /
+%% true->false transitions (no repeated alarms while the state holds).
+internal_update(State = #state { memory_limit = MemLimit,
+ alarmed = Alarmed,
+ alarm_funs = {AlarmSet, AlarmClear} }) ->
+ MemUsed = erlang:memory(total),
+ NewAlarmed = MemUsed > MemLimit,
+ case {Alarmed, NewAlarmed} of
+ {false, true} -> emit_update_info(set, MemUsed, MemLimit),
+ AlarmSet({{resource_limit, memory, node()}, []});
+ {true, false} -> emit_update_info(clear, MemUsed, MemLimit),
+ AlarmClear({resource_limit, memory, node()});
+ _ -> ok
+ end,
+ State #state {alarmed = NewAlarmed}.
+
+%% Log an alarm transition ('set' or 'clear') with the numbers behind it.
+emit_update_info(AlarmState, MemUsed, MemLimit) ->
+ error_logger:info_msg(
+ "vm_memory_high_watermark ~p. Memory used:~p allowed:~p~n",
+ [AlarmState, MemUsed, MemLimit]).
+
+%% Start the periodic timer that sends 'update' to this process every
+%% Timeout milliseconds.
+start_timer(Timeout) ->
+ {ok, TRef} = timer:send_interval(Timeout, update),
+ TRef.
+
+%% Upper bound on the address space usable by the VM, keyed on OS and
+%% word size.
+%% According to http://msdn.microsoft.com/en-us/library/aa366778(VS.85).aspx
+%% Windows has 2GB and 8TB of address space for 32 and 64 bit accordingly.
+get_vm_limit({win32,_OSname}) ->
+ case erlang:system_info(wordsize) of
+ 4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
+ 8 -> 8*1024*1024*1024*1024 %% 8 TB for 64 bits 2^42
+ end;
+
+%% On a 32-bit machine, if you're using more than 2 gigs of RAM you're
+%% in big trouble anyway.
+get_vm_limit(_OsType) ->
+ case erlang:system_info(wordsize) of
+ 4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
+ 8 -> 256*1024*1024*1024*1024 %% 256 TB for 64 bits 2^48
+ %%http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
+ end.
+
+%%----------------------------------------------------------------------------
+%% Internal Helpers
+%%----------------------------------------------------------------------------
+%% Run a shell command, first checking that its executable exists on
+%% PATH; throws {command_not_found, Exec} otherwise (os:cmd/1 alone
+%% would just return the shell's error text as data).
+%% NOTE(review): callers in this module only pass fixed command
+%% strings, so shell interpolation via os:cmd/1 is not a concern here.
+cmd(Command) ->
+ Exec = hd(string:tokens(Command, " ")),
+ case os:find_executable(Exec) of
+ false -> throw({command_not_found, Exec});
+ _ -> os:cmd(Command)
+ end.
+
+%% get_total_memory(OS) -> Total
+%% Per-OS detection of total memory in bytes. Each clause shells out to
+%% (or reads) an OS-specific source and parses it; unknown OSes yield
+%% the atom 'unknown'.
+%% Windows and Freebsd code based on: memsup:get_memory_usage/1
+%% Original code was part of OTP and released under "Erlang Public License".
+
+%% OS X: sum the page categories reported by vm_stat, times page size.
+get_total_memory({unix,darwin}) ->
+ File = cmd("/usr/bin/vm_stat"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_mach/1, Lines)),
+ [PageSize, Inactive, Active, Free, Wired] =
+ [dict:fetch(Key, Dict) ||
+ Key <- [page_size, 'Pages inactive', 'Pages active', 'Pages free',
+ 'Pages wired down']],
+ PageSize * (Inactive + Active + Free + Wired);
+
+get_total_memory({unix,freebsd}) ->
+ PageSize = sysctl("vm.stats.vm.v_page_size"),
+ PageCount = sysctl("vm.stats.vm.v_page_count"),
+ PageCount * PageSize;
+
+get_total_memory({unix,openbsd}) ->
+ sysctl("hw.usermem");
+
+%% Windows: second field of os_mon_sysinfo's memory line is total
+%% physical memory.
+get_total_memory({win32,_OSname}) ->
+ [Result|_] = os_mon_sysinfo:get_mem_info(),
+ {ok, [_MemLoad, TotPhys, _AvailPhys, _TotPage, _AvailPage, _TotV, _AvailV],
+ _RestStr} =
+ io_lib:fread("~d~d~d~d~d~d~d", Result),
+ TotPhys;
+
+get_total_memory({unix, linux}) ->
+ File = read_proc_file("/proc/meminfo"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_linux/1, Lines)),
+ dict:fetch('MemTotal', Dict);
+
+get_total_memory({unix, sunos}) ->
+ File = cmd("/usr/sbin/prtconf"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_sunos/1, Lines)),
+ dict:fetch('Memory size', Dict);
+
+%% AIX: vmstat -v reports "memory pages"; 4096 is assumed as the page
+%% size — TODO confirm for all AIX configurations.
+get_total_memory({unix, aix}) ->
+ File = cmd("/usr/bin/vmstat -v"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_aix/1, Lines)),
+ dict:fetch('memory pages', Dict) * 4096;
+
+get_total_memory(_OsType) ->
+ unknown.
+
+%% A line looks like "Foo bar: 123456."
+%% A line looks like "Foo bar: 123456."
+%% Returns {AtomKey, IntValue}; the vm_stat banner line is special-cased
+%% to extract the page size.
+parse_line_mach(Line) ->
+ [Name, RHS | _Rest] = string:tokens(Line, ":"),
+ case Name of
+ "Mach Virtual Memory Statistics" ->
+ ["(page", "size", "of", PageSize, "bytes)"] =
+ string:tokens(RHS, " "),
+ {page_size, list_to_integer(PageSize)};
+ _ ->
+ [Value | _Rest1] = string:tokens(RHS, " ."),
+ {list_to_atom(Name), list_to_integer(Value)}
+ end.
+
+%% A line looks like "MemTotal: 502968 kB"
+%% or (with broken OS/modules) "Readahead 123456 kB"
+%% A line looks like "MemTotal: 502968 kB"
+%% or (with broken OS/modules) "Readahead 123456 kB"
+%% Returns {AtomKey, Bytes}. Only "kB" or unit-less values are handled;
+%% any other unit makes the final case fail with case_clause — TODO
+%% confirm that is the intended behaviour for malformed meminfo lines.
+parse_line_linux(Line) ->
+ {Name, Value, UnitRest} =
+ case string:tokens(Line, ":") of
+ %% no colon in the line
+ [S] ->
+ [K, RHS] = re:split(S, "\s", [{parts, 2}, {return, list}]),
+ [V | Unit] = string:tokens(RHS, " "),
+ {K, V, Unit};
+ [K, RHS | _Rest] ->
+ [V | Unit] = string:tokens(RHS, " "),
+ {K, V, Unit}
+ end,
+ Value1 = case UnitRest of
+ [] -> list_to_integer(Value); %% no units
+ ["kB"] -> list_to_integer(Value) * 1024
+ end,
+ {list_to_atom(Name), Value1}.
+
+%% A line looks like "Memory size: 1024 Megabytes"
+%% A line looks like "Memory size: 1024 Megabytes"
+%% Returns {AtomKey, Bytes} for known units; for unknown units the raw
+%% string pieces are returned unconverted, and colon-less lines map to
+%% {AtomKey, none}.
+parse_line_sunos(Line) ->
+ case string:tokens(Line, ":") of
+ [Name, RHS | _Rest] ->
+ [Value1 | UnitsRest] = string:tokens(RHS, " "),
+ Value2 = case UnitsRest of
+ ["Gigabytes"] ->
+ list_to_integer(Value1) * ?ONE_MB * 1024;
+ ["Megabytes"] ->
+ list_to_integer(Value1) * ?ONE_MB;
+ ["Kilobytes"] ->
+ list_to_integer(Value1) * 1024;
+ _ ->
+ Value1 ++ UnitsRest %% no known units
+ end,
+ {list_to_atom(Name), Value2};
+ [Name] -> {list_to_atom(Name), none}
+ end.
+
+%% Lines look like " 12345 memory pages"
+%% or " 80.1 maxpin percentage"
+%% Lines look like " 12345 memory pages"
+%% or " 80.1 maxpin percentage"
+%% Returns {AtomKey, Int}; fractional values are truncated to integers.
+parse_line_aix(Line) ->
+ [Value | NameWords] = string:tokens(Line, " "),
+ Name = string:join(NameWords, " "),
+ {list_to_atom(Name),
+ case lists:member($., Value) of
+ true -> trunc(list_to_float(Value));
+ false -> list_to_integer(Value)
+ end}.
+
+%% Read a single integer sysctl value, stripping the trailing newline.
+sysctl(Def) ->
+ list_to_integer(cmd("/sbin/sysctl -n " ++ Def) -- "\n").
+
+%% file:read_file does not work on files in /proc as it seems to get
+%% the size of the file first and then read that many bytes. But files
+%% in /proc always have length 0, we just have to read until we get
+%% eof.
+%% file:read_file does not work on files in /proc as it seems to get
+%% the size of the file first and then read that many bytes. But files
+%% in /proc always have length 0, we just have to read until we get
+%% eof.
+read_proc_file(File) ->
+ {ok, IoDevice} = file:open(File, [read, raw]),
+ Res = read_proc_file(IoDevice, []),
+ file:close(IoDevice),
+ lists:flatten(lists:reverse(Res)).
+
+-define(BUFFER_SIZE, 1024).
+%% Accumulate chunks (in reverse order) until eof.
+read_proc_file(IoDevice, Acc) ->
+ case file:read(IoDevice, ?BUFFER_SIZE) of
+ {ok, Res} -> read_proc_file(IoDevice, [Res | Acc]);
+ eof -> Acc
+ end.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(vm_memory_monitor_tests).
+
+-export([all_tests/0]).
+
+%% ---------------------------------------------------------------------------
+%% Tests
+%% ---------------------------------------------------------------------------
+
+%% Each case is {InputLine, ExpectedResult}; a parse result that does
+%% not match its expectation crashes with badmatch, failing the test.
+all_tests() ->
+ Cases = [{"MemTotal: 0 kB", {'MemTotal', 0}},
+ {"MemTotal: 502968 kB ", {'MemTotal', 515039232}},
+ {"MemFree: 178232 kB", {'MemFree', 182509568}},
+ {"MemTotal: 50296888", {'MemTotal', 50296888}},
+ {"MemTotal 502968 kB", {'MemTotal', 515039232}},
+ {"MemTotal 50296866 ", {'MemTotal', 50296866}}],
+ [Expected = vm_memory_monitor:parse_line_linux(Line)
+ || {Line, Expected} <- Cases],
+ passed.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(worker_pool).
+
+%% Generic worker pool manager.
+%%
+%% Supports nested submission of jobs (nested jobs always run
+%% immediately in current worker process).
+%%
+%% Possible future enhancements:
+%%
+%% 1. Allow priorities (basically, change the pending queue to a
+%% priority_queue).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, submit/1, submit_async/1, ready/1, idle/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(submit/1 :: (fun (() -> A) | mfargs()) -> A).
+-spec(submit_async/1 :: (fun (() -> any()) | mfargs()) -> 'ok').
+-spec(ready/1 :: (pid()) -> 'ok').
+-spec(idle/1 :: (pid()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(SERVER, ?MODULE).
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+-record(state, { available, pending }).
+
+%%----------------------------------------------------------------------------
+
+%% Start the (singleton, locally registered) pool manager.
+start_link() -> gen_server2:start_link({local, ?SERVER}, ?MODULE, [],
+ [{timeout, infinity}]).
+
+%% Execute Fun synchronously and return its result. When called from
+%% inside a pool worker (detected via the 'worker_pool_worker' process
+%% dictionary flag set in worker_pool_worker:init/1), the job runs
+%% inline in the current process — this is the nested-submission
+%% support described in the module comment. Otherwise a free worker is
+%% reserved via the manager and the job submitted to it.
+submit(Fun) ->
+ case get(worker_pool_worker) of
+ true -> worker_pool_worker:run(Fun);
+ _ -> Pid = gen_server2:call(?SERVER, {next_free, self()}, infinity),
+ worker_pool_worker:submit(Pid, Fun)
+ end.
+
+%% Fire-and-forget submission: the result of Fun is discarded.
+submit_async(Fun) -> gen_server2:cast(?SERVER, {run_async, Fun}).
+
+%% Called by a new worker to register with (and be monitored by) the pool.
+ready(WPid) -> gen_server2:cast(?SERVER, {ready, WPid}).
+
+%% Called by a worker when it has finished a job and is free again.
+idle(WPid) -> gen_server2:cast(?SERVER, {idle, WPid}).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ %% available: ordset of idle worker pids; pending: FIFO queue of
+ %% work/requests that arrived while no worker was free.
+ {ok, #state { pending = queue:new(), available = ordsets:new() }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% No free worker: queue the request without replying — the caller
+%% stays blocked in gen_server2:call until a worker goes idle (see the
+%% {idle, _} clause of handle_cast, which replies late via
+%% gen_server2:reply/2). An empty ordset is the empty list, hence the
+%% 'available = []' match.
+handle_call({next_free, CPid}, From, State = #state { available = [],
+ pending = Pending }) ->
+ {noreply, State#state{pending = queue:in({next_free, From, CPid}, Pending)},
+ hibernate};
+%% A worker is free: tell it which client its next job comes from and
+%% hand its pid back to the caller.
+handle_call({next_free, CPid}, _From, State = #state { available =
+ [WPid | Avail1] }) ->
+ worker_pool_worker:next_job_from(WPid, CPid),
+ {reply, WPid, State #state { available = Avail1 }, hibernate};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+%% A new worker registers: monitor it (so we can drop it from the pool
+%% on death — see handle_info) and treat it as idle.
+handle_cast({ready, WPid}, State) ->
+ erlang:monitor(process, WPid),
+ handle_cast({idle, WPid}, State);
+
+%% A worker became free: give it queued work if any, otherwise return
+%% it to the available set.
+handle_cast({idle, WPid}, State = #state { available = Avail,
+ pending = Pending }) ->
+ {noreply,
+ case queue:out(Pending) of
+ {empty, _Pending} ->
+ State #state { available = ordsets:add_element(WPid, Avail) };
+ {{value, {next_free, From, CPid}}, Pending1} ->
+ %% Complete a parked submit/1 call: pair the worker with the
+ %% client, then reply to the still-waiting caller.
+ worker_pool_worker:next_job_from(WPid, CPid),
+ gen_server2:reply(From, WPid),
+ State #state { pending = Pending1 };
+ {{value, {run_async, Fun}}, Pending1} ->
+ worker_pool_worker:submit_async(WPid, Fun),
+ State #state { pending = Pending1 }
+ end, hibernate};
+
+%% Async job with no free worker: queue it.
+handle_cast({run_async, Fun}, State = #state { available = [],
+ pending = Pending }) ->
+ {noreply, State #state { pending = queue:in({run_async, Fun}, Pending)},
+ hibernate};
+handle_cast({run_async, Fun}, State = #state { available = [WPid | Avail1] }) ->
+ worker_pool_worker:submit_async(WPid, Fun),
+ {noreply, State #state { available = Avail1 }, hibernate};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+%% A monitored worker died: remove it from the available set. (Its
+%% supervisor is expected to restart it, after which it re-registers
+%% via ready/1 — TODO confirm against worker_pool_sup's child specs.)
+handle_info({'DOWN', _MRef, process, WPid, _Reason},
+ State = #state { available = Avail }) ->
+ {noreply, State #state { available = ordsets:del_element(WPid, Avail) },
+ hibernate};
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Return value is ignored by the gen_server machinery; returning State
+%% (rather than the conventional 'ok') is harmless.
+terminate(_Reason, State) ->
+ State.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(worker_pool_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_link/1]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_link/1 :: (non_neg_integer()) -> rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+%% Default pool size: one worker per scheduler thread.
+start_link() ->
+ start_link(erlang:system_info(schedulers)).
+
+start_link(WCount) ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, [WCount]).
+
+%%----------------------------------------------------------------------------
+
+%% one_for_one: the manager and each worker restart independently.
+%% Children are transient with an effectively unbounded shutdown
+%% timeout (16#ffffffff ms); workers are named by index 1..WCount.
+init([WCount]) ->
+ {ok, {{one_for_one, 10, 10},
+ [{worker_pool, {worker_pool, start_link, []}, transient,
+ 16#ffffffff, worker, [worker_pool]} |
+ [{N, {worker_pool_worker, start_link, []}, transient, 16#ffffffff,
+ worker, [worker_pool_worker]} || N <- lists:seq(1, WCount)]]}}.
--- /dev/null
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(worker_pool_worker).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, next_job_from/2, submit/2, submit_async/2, run/1]).
+
+-export([set_maximum_since_use/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, prioritise_cast/3]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(next_job_from/2 :: (pid(), pid()) -> 'ok').
+-spec(submit/2 :: (pid(), fun (() -> A) | mfargs()) -> A).
+-spec(submit_async/2 :: (pid(), fun (() -> any()) | mfargs()) -> 'ok').
+-spec(run/1 :: (fun (() -> A)) -> A; (mfargs()) -> any()).
+-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+%% Workers are anonymous (not registered); the pool manager tracks
+%% them by pid.
+start_link() ->
+ gen_server2:start_link(?MODULE, [], [{timeout, infinity}]).
+
+%% Tell worker Pid which client process its next job will come from
+%% (one half of the submit handshake — see handle_call/handle_cast).
+next_job_from(Pid, CPid) ->
+ gen_server2:cast(Pid, {next_job_from, CPid}).
+
+%% Synchronously run Fun on worker Pid and return its result.
+submit(Pid, Fun) ->
+ gen_server2:call(Pid, {submit, Fun, self()}, infinity).
+
+%% Fire-and-forget job submission to a specific worker.
+submit_async(Pid, Fun) ->
+ gen_server2:cast(Pid, {submit_async, Fun}).
+
+%% file_handle_cache callback (registered in init/1).
+set_maximum_since_use(Pid, Age) ->
+ gen_server2:cast(Pid, {set_maximum_since_use, Age}).
+
+%% Execute a job expressed either as an {Module, Function, Args}
+%% triple or as a nullary fun, returning its result.
+run({Mod, Fun, Args}) -> apply(Mod, Fun, Args);
+run(Thunk) -> Thunk().
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+ %% Announce availability to the pool manager.
+ ok = worker_pool:ready(self()),
+ %% Flag read by worker_pool:submit/1 so that jobs submitted from
+ %% within a worker run inline (nested submission).
+ put(worker_pool_worker, true),
+ %% State is 'undefined' when idle; see handle_call for the other
+ %% state shapes.
+ {ok, undefined, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% gen_server2 message priorities (presumably larger = handled sooner —
+%% confirm against gen_server2): file-handle housekeeping and the
+%% next_job_from handshake jump ahead of queued async jobs.
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
+prioritise_cast({next_job_from, _CPid}, _Len, _State) -> 7;
+prioritise_cast(_Msg, _Len, _State) -> 0.
+
+%% Worker state is one of:
+%%   undefined              - idle, no handshake in progress
+%%   {from, CPid, MRef}     - next_job_from arrived first; awaiting the
+%%                            matching submit from client CPid
+%%   {job, CPid, From, Fun} - submit arrived first; awaiting the
+%%                            matching next_job_from
+
+%% submit before next_job_from: park the job, reply later.
+handle_call({submit, Fun, CPid}, From, undefined) ->
+ {noreply, {job, CPid, From, Fun}, hibernate};
+
+%% submit after next_job_from (head matches the same CPid): stop
+%% monitoring the client, run the job, reply with its result, and
+%% report back to the pool as idle.
+handle_call({submit, Fun, CPid}, From, {from, CPid, MRef}) ->
+ erlang:demonitor(MRef),
+ gen_server2:reply(From, run(Fun)),
+ ok = worker_pool:idle(self()),
+ {noreply, undefined, hibernate};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+%% next_job_from before the client's submit: monitor the client so a
+%% crash before it submits doesn't leave this worker reserved forever
+%% (see the 'DOWN' clause of handle_info).
+handle_cast({next_job_from, CPid}, undefined) ->
+ MRef = erlang:monitor(process, CPid),
+ {noreply, {from, CPid, MRef}, hibernate};
+
+%% next_job_from after the submit (head matches the same CPid): run the
+%% parked job, reply to the waiting caller, and go idle.
+handle_cast({next_job_from, CPid}, {job, CPid, From, Fun}) ->
+ gen_server2:reply(From, run(Fun)),
+ ok = worker_pool:idle(self()),
+ {noreply, undefined, hibernate};
+
+%% Async jobs need no handshake: run, then go idle.
+handle_cast({submit_async, Fun}, undefined) ->
+ run(Fun),
+ ok = worker_pool:idle(self()),
+ {noreply, undefined, hibernate};
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ {noreply, State, hibernate};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+%% The client we were reserved for died before submitting its job:
+%% release the reservation and return to the pool.
+handle_info({'DOWN', MRef, process, CPid, _Reason}, {from, CPid, MRef}) ->
+ ok = worker_pool:idle(self()),
+ {noreply, undefined, hibernate};
+
+%% Stale 'DOWN' (e.g. the monitor fired after the handshake completed):
+%% ignore.
+handle_info({'DOWN', _MRef, process, _Pid, _Reason}, State) ->
+ {noreply, State, hibernate};
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Return value is ignored by the gen_server machinery; returning State
+%% (rather than the conventional 'ok') is harmless.
+terminate(_Reason, State) ->
+ State.
--- /dev/null
+VERSION?=3.3.5