Build python-cm-api 7.0 for Ubuntu 14.04
author    iberezovskiy <iberezovskiy@mirantis.com>
Fri, 20 Feb 2015 13:57:33 +0000 (16:57 +0300)
committer    iberezovskiy <iberezovskiy@mirantis.com>
Fri, 20 Feb 2015 15:27:19 +0000 (18:27 +0300)
  We need python-cm-api >= 7.0 because it is a Sahara dependency:
  https://bugs.launchpad.net/mos/+bug/1371611

  Sources have been copied from the packages/precise/python-cm-api 6.1 branch
  commit 0269e07e1a56f2d6b981b6c9a5294fb5a6922846

Change-Id: Ic8f513f6b4cfce88910f01a46159bedc702fd0c1

56 files changed:
cm-api/MANIFEST.in [new file with mode: 0644]
cm-api/Makefile [new file with mode: 0644]
cm-api/README.md [new file with mode: 0644]
cm-api/SHELL_README.md [new file with mode: 0644]
cm-api/examples/bulk_config_update.py [new file with mode: 0644]
cm-api/examples/schema.py [new file with mode: 0644]
cm-api/examples/timeseries.py [new file with mode: 0644]
cm-api/setup.py [new file with mode: 0644]
cm-api/src/cm_api/__init__.py [new file with mode: 0644]
cm-api/src/cm_api/api_client.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/__init__.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/batch.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/clusters.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/cms.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/dashboards.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/events.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/host_templates.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/hosts.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/parcels.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/role_config_groups.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/roles.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/services.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/timeseries.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/tools.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/types.py [new file with mode: 0644]
cm-api/src/cm_api/endpoints/users.py [new file with mode: 0644]
cm-api/src/cm_api/http_client.py [new file with mode: 0644]
cm-api/src/cm_api/resource.py [new file with mode: 0644]
cm-api/src/cm_api_tests/__init__.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_baseapiobject.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_baseapiresource.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_batch.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_clusters.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_cms.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_dashboards.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_events.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_host_template.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_impala.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_replication.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_services.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_snapshot.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_timeseries.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_users.py [new file with mode: 0644]
cm-api/src/cm_api_tests/test_yarn.py [new file with mode: 0644]
cm-api/src/cm_api_tests/utils.py [new file with mode: 0644]
cm-api/src/cm_shell/__init__.py [new file with mode: 0644]
cm-api/src/cm_shell/cmps.py [new file with mode: 0755]
cm-api/src/cm_shell/prettytable.py [new file with mode: 0644]
debian/changelog [new file with mode: 0644]
debian/compat [new file with mode: 0644]
debian/control [new file with mode: 0644]
debian/copyright [new file with mode: 0644]
debian/docs [new file with mode: 0644]
debian/files [new file with mode: 0644]
debian/rules [new file with mode: 0755]
tests/runtests.sh [new file with mode: 0644]

diff --git a/cm-api/MANIFEST.in b/cm-api/MANIFEST.in
new file mode 100644 (file)
index 0000000..598f02c
--- /dev/null
@@ -0,0 +1 @@
+include .git-hash
diff --git a/cm-api/Makefile b/cm-api/Makefile
new file mode 100644 (file)
index 0000000..dfe2a55
--- /dev/null
@@ -0,0 +1,37 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: help
+help:
+       @echo 'The build targets are:'
+       @echo '  dist           : Create a source distribution tarball'
+
+.PHONY: test
+test:
+       @echo 'Running tests...'
+       PYTHONPATH=src python -m unittest discover src/cm_api_tests
+
+.PHONY: dist
+dist: test
+       git rev-parse HEAD > .git-hash
+       python setup.py sdist
+
+.PHONY: clean
+clean:
+       rm -rf *.egg-info
+       rm -rf dist
+       rm -rf build
+       find . -name *.py[co] -exec rm -f {} \;
diff --git a/cm-api/README.md b/cm-api/README.md
new file mode 100644 (file)
index 0000000..e465d15
--- /dev/null
@@ -0,0 +1,70 @@
+Welcome to Cloudera Manager API Client!
+
+Python Client
+=============
+The python source is in the `python` directory. The Python client comes with a
+`cm_api` Python client module, and examples of performing certain Hadoop cluster
+administrative tasks using the Python client.
+
+Getting Started
+---------------
+Here is a short snippet on using the `cm_api` Python client:
+
+    Python 2.7.2+ (default, Oct  4 2011, 20:06:09) 
+    [GCC 4.6.1] on linux2
+    Type "help", "copyright", "credits" or "license" for more information.
+    >>> from cm_api.api_client import ApiResource
+    >>> api = ApiResource('rhel62-1.ent.cloudera.com', 7180, 'admin', 'admin')
+    >>> for h in api.get_all_hosts():
+    ...   print h.hostname
+    ... 
+    rhel62-2.ent.cloudera.com
+    rhel62-4.ent.cloudera.com
+    rhel62-3.ent.cloudera.com
+    rhel62-1.ent.cloudera.com
+    >>> 
+
+Another example: getting all the services in a cluster:
+
+    >>> for c in api.get_all_clusters():
+    ...   print c.name
+    ... 
+    Cluster 1 - CDH4
+    >>> for s in api.get_cluster('Cluster 1 - CDH4').get_all_services():
+    ...  print s.name
+    ... 
+    hdfs1
+    mapreduce1
+    zookeeper1
+    hbase1
+    oozie1
+    yarn1
+    hue1
+    >>> 
+
+Shell
+-----
+After installing the `cm_api` Python package, you can use the API shell `cmps`
+(CM Python Shell):
+
+    $ cmps -H <host> --user admin --password admin
+    Welcome to the Cloudera Manager Console
+    Select a cluster using 'show clusters' and 'use'
+    cloudera> show clusters
+    +------------------+
+    |   CLUSTER NAME   |
+    +------------------+
+    | Cluster 1 - CDH4 |
+    | Cluster 2 - CDH3 |
+    +------------------+
+    cloudera> 
+
+Please see the `SHELL_README.md` file for more.
+
+Example Scripts
+---------------
+You can find example scripts in the `python/examples` directory.
+
+* `bulk_config_update.py` ---
+  Useful for heterogeneous hardware environments. It sets the configuration on
+  the roles running on a given set of hosts (see the sketch below).
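
A minimal sketch of the pattern `bulk_config_update.py` uses, assuming a Cloudera
Manager reachable as `cm-host` with the default admin/admin credentials; the
hostname and config value below are placeholders (see the full script later in
this change for error checking and the complete config maps):

    from cm_api.api_client import ApiResource

    api = ApiResource('cm-host', username='admin', password='admin')

    for host in api.get_all_hosts(view='full'):
      if host.hostname != 'foo1.cloudera.com':   # placeholder hostname
        continue
      for ref in host.roleRefs:
        if ref.get('clusterName') is None:       # skip Cloudera Management Service roles
          continue
        cluster = api.get_cluster(ref['clusterName'])
        service = cluster.get_service(ref['serviceName'])
        role = service.get_role(ref['roleName'])
        if role.type == 'DATANODE':
          role.update_config({'dfs_datanode_handler_count': 10})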
diff --git a/cm-api/SHELL_README.md b/cm-api/SHELL_README.md
new file mode 100644 (file)
index 0000000..c50482c
--- /dev/null
@@ -0,0 +1,143 @@
+Cloudera Manager Python Shell
+============================
+
+
+Getting Started
+---------------
+
+### Installation ###
+
+> Run as a privileged user, or in a virtualenv
+
+    $ python setup.py install
+
+### Usage ###
+
+    $ cmps
+    usage: cmps [-h] -H HOSTNAME [-p PORT] [-u USERNAME] [-c CLUSTER]
+           [--password PASSWORD] [-e EXECUTE] [-s SEPERATOR]
+    cmps: error: argument -H/--host/--hostname is required
+
+### Login ###
+
+    $ cmps -H <host>
+    Enter Username: admin
+    Enter Password: 
+    Welcome to the Cloudera Manager Console
+    Select a cluster using 'show clusters' and 'use'
+    cloudera> 
+
+### Using Help ###
+
+    cloudera> help
+    Cloudera Manager Commands
+    =========================
+    log              show           status        stop_role   
+    restart_role     start_cluster  stderr        stop_service
+    restart_service  start_role     stdout        use         
+    roles            start_service  stop_cluster  version     
+
+    Other Commands
+    ==============
+    help
+
+    cloudera> help stop_cluster
+        Completely stop the cluster
+        Usage:
+            > stop_cluster <cluster>
+
+### Connecting to a Cluster ###
+
+> Autocomplete works
+
+    cloudera> use cdh4
+    Connected to cdh4
+    cdh4> status
+    +------------+-----------+---------+--------+------------+
+    | NAME       | SERVICE   |  STATUS | HEALTH |   CONFIG   |
+    +------------+-----------+---------+--------+------------+
+    | hbase1     | HBASE     | STARTED |  GOOD  | UP TO DATE |
+    | hdfs1      | HDFS      | STARTED |  GOOD  | UP TO DATE |
+    | mapreduce1 | MAPREDUCE | STARTED |  GOOD  | UP TO DATE |
+    | zookeeper1 | ZOOKEEPER | STARTED |  GOOD  | UP TO DATE |
+    +------------+-----------+---------+--------+------------+
+
+### View Roles ###
+
+    cdh4> roles hbase1
+    +--------------+---------------------+-----------------------+---------+--------+------------+
+    | ROLE TYPE    | HOST                | ROLE NAME             |  STATE  | HEALTH |   CONFIG   |
+    +--------------+---------------------+-----------------------+---------+--------+------------+
+    | MASTER       | hbase.localdomain   | hbase1-MASTER-1       | STARTED |  GOOD  | UP TO DATE |
+    | REGIONSERVER | hbase-2.localdomain | hbase1-REGIONSERVER-2 | STARTED |  GOOD  | UP TO DATE |
+    | REGIONSERVER | hbase.localdomain   | hbase1-REGIONSERVER-1 | STARTED |  GOOD  | UP TO DATE |
+    +--------------+---------------------+-----------------------+---------+--------+------------+
+
+### Stopping / Starting Services and Roles ###
+    
+    cdh4> restart_service hbase1
+    hbase1 is being restarted
+    cdh4> status hbase1
+    status hbase1
+    +--------+---------+----------+--------+------------+
+    | NAME   | SERVICE |  STATUS  | HEALTH |   CONFIG   |
+    +--------+---------+----------+--------+------------+
+    | hbase1 | HBASE   | STARTING |  GOOD  | UP TO DATE |
+    +--------+---------+----------+--------+------------+
+
+    cdh4> stop_role hbase1-REGIONSERVER-2
+    Stopping Role
+    cdh4> roles hbase1
+    roles hbase1
+    +--------------+---------------------+-----------------------+---------+--------+------------+
+    | ROLE TYPE    | HOST                | ROLE NAME             |  STATE  | HEALTH |   CONFIG   |
+    +--------------+---------------------+-----------------------+---------+--------+------------+
+    | MASTER       | hbase.localdomain   | hbase1-MASTER-1       | STARTED |  GOOD  | UP TO DATE |
+    | REGIONSERVER | hbase-2.localdomain | hbase1-REGIONSERVER-2 | STOPPED |  GOOD  | UP TO DATE |
+    | REGIONSERVER | hbase.localdomain   | hbase1-REGIONSERVER-1 | STARTED |  GOOD  | UP TO DATE |
+    +--------------+---------------------+-----------------------+---------+--------+------------+
+
+### Viewing Logs ###
+
+> Interactive shells will use less
+
+> Non-interactive shells will send to stdout
+    
+    cdh4> log hbase1-REGIONSERVER-2
+    cdh4> stdout hbase1-REGIONSERVER-2
+    cdh4> stderr hbase1-REGIONSERVER-2
+
+### Non-Interactive ###
+
+    $ cmps -H 192.168.2.105 -u admin --password admin -e "show hosts; show clusters"
+    +---------------------+---------------+----------+
+    | HOSTNAME            | IP ADDRESS    | RACK     |
+    +---------------------+---------------+----------+
+    | hbase.localdomain   | 192.168.2.105 | /default |
+    | hbase-2.localdomain | 192.168.2.110 | /default |
+    +---------------------+---------------+----------+
+    +--------------+
+    | CLUSTER NAME |
+    +--------------+
+    |     cdh4     |
+    +--------------+ 
+
+### Custom Output Delimiter ###
+
+    $ cmps -H 192.168.2.105 -u admin --password admin -e "roles hbase1" -c cdh4 -s ,
+    ROLE TYPE,HOST,ROLE NAME,STATE,HEALTH,CONFIG
+    MASTER,hbase.localdomain,hbase1-MASTER-1,STARTED,GOOD,UP TO DATE
+    REGIONSERVER,hbase-2.localdomain,hbase1-REGIONSERVER-2,STARTED,GOOD,UP TO DATE
+    REGIONSERVER,hbase.localdomain,hbase1-REGIONSERVER-1,STARTED,GOOD,UP TO DATE
+
+### Scripting Example ###
+
+> Obtain log files for all the region servers
+
+    $ for i in $(cmps -H 192.168.2.105 -u admin --password admin -e "roles hbase1" -c cdh4 -s , | grep REGIONSERVER | awk -F, '{print $3}');  
+    do 
+        cmps -H 192.168.2.105 -u admin --password admin -c cdh4 -e "log $i" > $i.out;
+    done
+    $ du -h *.out
+    2.4M    hbase1-REGIONSERVER-1.out
+    1.9M    hbase1-REGIONSERVER-2.out
diff --git a/cm-api/examples/bulk_config_update.py b/cm-api/examples/bulk_config_update.py
new file mode 100644 (file)
index 0000000..338c0dd
--- /dev/null
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Given a list of hostnames, update the config of all the DataNodes,
+TaskTrackers and RegionServers on those hosts. The configuration for
+the various role types is hardcoded in the code.
+
+Usage: %s [options]
+
+Options:
+  -f <hostname_file>        Path to a file containing the set of hostnames
+                            to update configuration for. The hostnames
+                            should be listed in the file one per line.
+                            If not specified, the program will use the
+                            constant defined in the code.
+"""
+
+import getopt
+import inspect
+import logging
+import sys
+import textwrap
+
+from cm_api.api_client import ApiResource
+
+#
+# Customize these constants for your Cloudera Manager.
+#
+CM_HOST = 'localhost'
+CM_USER = 'admin'
+CM_PASSWD = 'admin'
+
+#
+# Hostnames for the set of hosts to change configs for.
+#
+# The program will throw an exception if any of these hosts are not
+# found in Cloudera Manager.
+#
+HOSTS = [
+  'foo1.cloudera.com',
+  'foo2.cloudera.com',
+  'foo3.cloudera.com',
+]
+
+#
+# Samples: These are the config settings for the given group of hosts
+#
+# Change the values to the optimal setting for your hardware profile.
+# You do not need all these configs. And the config that you want to
+# change may not be here. Remove (or comment out) lines, and add new
+# config settings as necessary.
+#
+# To see what other config parameters are available (for a DN for
+# example), access:
+#   http://cm:7180/api/v1/clusters/myCDH/services/hdfs1/roles/dn1/config?view=full
+#
+# Replace "cm", "myCDH", "hdfs1", "dn1" with the actual values from your
+# environment.
+#
+DATANODE_CONF = {
+  'dfs_data_dir_list': '/data/1/dfs/dn,/data/2/dfs/dn',
+  'dfs_datanode_handler_count': 10,
+  'dfs_datanode_du_reserved': 10737418240,
+  'dfs_datanode_max_xcievers': 4096,
+  'datanode_java_heapsize': 2048000000,
+  'datanode_java_opts': ''
+}
+
+TASKTRACKER_CONF = {
+  'tasktracker_mapred_local_dir_list': '/data/1/mapred/local,/data/2/mapred/local',
+  'mapred_tasktracker_map_tasks_maximum': 8,
+  'mapred_tasktracker_reduce_tasks_maximum': 8,
+  'tasktracker_java_opts': '',
+  'task_tracker_java_heapsize': 2048000000,
+  # Override client values
+  'override_mapred_child_java_opts_max_heap': 2048000000,
+  'override_mapred_child_ulimit': 2048000,      # In KiB
+  'override_io_sort_mb': 100,
+  'override_io_sort_factor': 50,
+}
+
+REGIONSERVER_CONF = {
+  'hbase_hregion_memstore_flush_size': 1024000000,
+  'hbase_regionserver_handler_count': 10,
+  'hbase_regionserver_java_heapsize': 2048000000,
+  'hbase_regionserver_java_opts': '',
+}
+
+
+LOG = logging.getLogger(__name__)
+
+def do_bulk_config_update(hostnames):
+  """
+  Given a list of hostnames, update the configs of all the
+  datanodes, tasktrackers and regionservers on those hosts.
+  """
+  api = ApiResource(CM_HOST, username=CM_USER, password=CM_PASSWD)
+  hosts = collect_hosts(api, hostnames)
+
+  # Set config
+  for h in hosts:
+    configure_roles_on_host(api, h)
+
+
+def collect_hosts(api, wanted_hostnames):
+  """
+  Return a list of ApiHost objects for the set of hosts that
+  we want to change config for.
+  """
+  all_hosts = api.get_all_hosts(view='full')
+  all_hostnames = set([ h.hostname for h in all_hosts])
+  wanted_hostnames = set(wanted_hostnames)
+
+  unknown_hosts = wanted_hostnames.difference(all_hostnames)
+  if len(unknown_hosts) != 0:
+    msg = "The following hosts are not found in Cloudera Manager. "\
+          "Please check for typos:\n%s" % ('\n'.join(unknown_hosts))
+    LOG.error(msg)
+    raise RuntimeError(msg)
+
+  return [ h for h in all_hosts if h.hostname in wanted_hostnames ]
+
+
+def configure_roles_on_host(api, host):
+  """
+  Go through all the roles on this host, and configure them if they
+  match the role types that we care about.
+  """
+  for role_ref in host.roleRefs:
+    # Mgmt service/role has no cluster name. Skip over those.
+    if role_ref.get('clusterName') is None:
+      continue
+
+    # Get the role and inspect the role type
+    role = api.get_cluster(role_ref['clusterName'])\
+              .get_service(role_ref['serviceName'])\
+              .get_role(role_ref['roleName'])
+    LOG.debug("Evaluating %s (%s)" % (role.name, host.hostname))
+
+    config = None
+    if role.type == 'DATANODE':
+      config = DATANODE_CONF
+    elif role.type == 'TASKTRACKER':
+      config = TASKTRACKER_CONF
+    elif role.type == 'REGIONSERVER':
+      config = REGIONSERVER_CONF
+    else:
+      continue
+
+    # Set the config
+    LOG.info("Configuring %s (%s)" % (role.name, host.hostname))
+    role.update_config(config)
+
+
+def read_host_file(path):
+  """
+  Read the host file. Return a list of hostnames.
+  """
+  res = []
+  for l in file(path).xreadlines():
+    hostname = l.strip()
+    if hostname:
+      res.append(hostname)
+  return res
+
+
+def setup_logging(level):
+  logging.basicConfig()
+  logging.getLogger().setLevel(level)
+
+
+def usage():
+  doc = inspect.getmodule(usage).__doc__
+  print >>sys.stderr, textwrap.dedent(doc % (sys.argv[0],))
+
+
+def main(argv):
+  setup_logging(logging.INFO)
+
+  # Argument parsing
+  try:
+    opts, args = getopt.getopt(argv[1:], "hf:")
+  except getopt.GetoptError, err:
+    print >>sys.stderr, err
+    usage()
+    return -1
+
+  host_file = None
+  for option, val in opts:
+    if option == '-h':
+      usage()
+      return -1
+    elif option == '-f':
+      host_file = val
+    else:
+      print >>sys.stderr, "Unknown flag:", option
+      usage()
+      return -1
+
+  if args:
+    print >>sys.stderr, "Unknown trailing argument:", args
+    usage()
+    return -1
+
+  # Decide which host list to use
+  if host_file is not None:
+    hostnames = read_host_file(host_file)
+    LOG.info("Using host list from file '%s'. Found %d hosts." %
+             (host_file, len(hostnames)))
+  else:
+    hostnames = HOSTS
+    LOG.info("Using built-in host list. Found %d hosts." % (len(hostnames),))
+
+  # Do work
+  do_bulk_config_update(hostnames)
+  return 0
+
+
+#
+# The "main" entry
+#
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
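
The script's comments point at the raw `/config?view=full` REST endpoint for
discovering available configuration keys. A minimal sketch of fetching the same
payload through the client's generic `get` helper (the same call that
`ApiResource.get_metrics` makes in api_client.py below); the `myCDH`/`hdfs1`/`dn1`
names come from those comments and are placeholders:

    from cm_api.api_client import ApiResource

    api = ApiResource('cm-host', username='admin', password='admin')

    # The path is relative to the /api/v<N> base URL the client was built with.
    full_config = api.get('/clusters/myCDH/services/hdfs1/roles/dn1/config',
                          params={'view': 'full'})
    print full_config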
diff --git a/cm-api/examples/schema.py b/cm-api/examples/schema.py
new file mode 100644 (file)
index 0000000..8a6b55a
--- /dev/null
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Given an alias_name, show all metric schemas that have the given
+alias_name as an alias. If an alias_name is not provided, then
+show all metric schemas.
+
+Usage: %s [options]
+
+Options:
+-a <name>    alias name. Show only metric schema
+whose aliases include the specified alias name.
+Aliases usually occur when a metric is renamed or
+reorganized. Multiple metrics can have the same alias
+because the metrics come from different sources.
+If not specified, the program will show all
+metrics schema.
+
+Example: 
+The alias "-a drop_receive_network_interface_sum" 
+returns
+<ApiMetricSchema>
+  name: drop_receive_network_interface_sum
+  isCounter: True
+  unitNumerator: packets
+  aliases: [network_interface_drop_receive]
+  sources:
+    CLUSTER: [enterprise]
+    HOST: [enterprise]
+<ApiMetricSchema>
+  name: drop_receive
+  isCounter: True
+  unitNumerator: packets
+  aliases: [network_interface_drop_receive]
+  sources:
+    NETWORK_INTERFACE: [enterprise]
+
+"""
+
+import getopt
+import inspect
+import logging
+import sys
+import textwrap
+
+from cm_api.api_client import ApiResource
+
+#
+# Customize these constants for your Cloudera Manager.
+#
+CM_HOST   = 'localhost'
+CM_USER   = 'admin'
+CM_PASSWD = 'admin'
+
+LOG = logging.getLogger(__name__)
+
+class MetricSchemas(object):
+  """
+  """
+  def __init__(self):
+    api = ApiResource(CM_HOST, username=CM_USER, password=CM_PASSWD)
+    self._schemas = api.get_metric_schema()
+
+  def get_schemas(self):
+    return self._schemas
+
+  def get_aliases(self, name):
+    results = []
+    for metric in self._schemas:
+      if metric.aliases is not None:
+        for alias in metric.aliases:
+          if alias == name:
+            results.append(metric)    
+    return results
+
+def do_get_metrics():
+  """
+  Get schema for all metrics
+  """
+  metric_schemas = MetricSchemas()
+  for metric in metric_schemas.get_schemas():
+    do_print(metric)
+
+def do_get_aliases(name):
+  """
+  Get aliases for given metric name
+  """
+  metric_schemas = MetricSchemas()
+  aliases = metric_schemas.get_aliases(name)
+  for alias in aliases:
+    do_print(alias)
+
+def do_print(metric):
+  print "<ApiMetricSchema>"
+  print "  name: %s" % metric.name
+  print "  isCounter: %s" % metric.isCounter
+  if metric.unitNumerator:
+    print "  unitNumerator: %s" % metric.unitNumerator
+  if metric.unitDenominator:
+    print "  unitDenominator: %s" % metric.unitDenominator
+  if metric.aliases:
+    print "  aliases: %s" % map(str, metric.aliases)
+  if metric.sources:
+    print "  sources:"
+    for (k,v) in metric.sources.items():
+      print "    %s: %s" % (k, map(str, v))
+
+def usage():
+  doc = inspect.getmodule(usage).__doc__
+  print >>sys.stderr, textwrap.dedent(doc % (sys.argv[0],))
+
+def setup_logging(level):
+  logging.basicConfig()
+  logging.getLogger().setLevel(level)
+
+
+def main(argv):
+  setup_logging(logging.INFO)
+
+  # Argument parsing
+  try:
+    opts, args = getopt.getopt(argv[1:], "ha:")
+  except getopt.GetoptError, err:
+    print >>sys.stderr, err
+    usage()
+    return -1
+
+  for option, val in opts:
+    if option == '-h':
+      usage()
+      return -1
+    elif option == '-a':
+      do_get_aliases(val)
+      return 0
+    else:
+      print >>sys.stderr, "Unknown flag:", option
+      usage()
+      return -1
+
+  if args:
+    print >>sys.stderr, "Unknown trailing argument:", args
+    usage()
+    return -1
+
+  # Do work
+  do_get_metrics()
+  return 0
+
+
+#
+# The "main" entry
+#
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/cm-api/examples/timeseries.py b/cm-api/examples/timeseries.py
new file mode 100644 (file)
index 0000000..4482329
--- /dev/null
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Given a query and an optional time range, show all timeseries
+data that matches the given query in the given time range.
+
+Usage: %s [options] query
+
+Options:
+-f <from_time>        From time for the query
+Should be in the format "YYYY-mm-ddTHH:MM".
+Defaults to 5 minutes before to_time if not specified.
+-t <to_time>           To time for the query.
+Should be in the format "YYYY-mm-ddTHH:MM".
+Defaults to now if not specified.
+
+Example: 
+The query "select cpu_percent" 
+returns
+<ApiTimeSeriesResponse>
+  query: select cpu_percent
+  timeSeries:
+    metadata:
+      metricName: cpu_percent
+      entityName: localhost
+      startTime: 2013-05-09 18:49:52.488000
+      endTime: 2013-05-09 18:54:52.488000
+      unitNumerators: [percent]
+      attributes: {category: HOST, entityName: localhost,
+                   hostId: localhost, rackId: default,
+                   hostname: localhost}
+    data:
+      timestamp: 2013-05-09 18:50:38 value: 3.0 type: SAMPLE
+      timestamp: 2013-05-09 18:51:38 value: 3.4 type: SAMPLE
+      timestamp: 2013-05-09 18:52:38 value: 4.4 type: SAMPLE
+      timestamp: 2013-05-09 18:53:38 value: 4.5 type: SAMPLE
+      timestamp: 2013-05-09 18:54:38 value: 6.0 type: SAMPLE
+"""
+
+import getopt
+import inspect
+import logging
+import sys
+import textwrap
+from datetime import datetime
+from datetime import timedelta 
+
+from cm_api.api_client import ApiResource
+
+#
+# Customize these constants for your Cloudera Manager.
+#
+CM_HOST = 'localhost'
+CM_USER = 'admin'
+CM_PASSWD = 'admin'
+
+LOG = logging.getLogger(__name__)
+
+class TimeSeriesQuery(object):
+  """
+  """
+  def __init__(self):
+    self._api = ApiResource(CM_HOST, username=CM_USER, password=CM_PASSWD)
+
+  def query(self, query, from_time, to_time):
+    return self._api.query_timeseries(query, from_time, to_time)
+
+def do_print(response):
+  print "<ApiTimeSeriesResponse>"
+  print "  query: %s" % (response.timeSeriesQuery)
+  if response.warnings:
+    print "  warnings: %s" % (response.warnings)
+  if response.errors:
+    print "  errors: %s" % (response.errors)
+  if response.timeSeries:
+    print "  timeSeries:"
+    for ts in response.timeSeries:
+      metadata = ts.metadata
+      print "    metadata:"
+      print "      metricName: %s" % (metadata.metricName)
+      print "      entityName: %s" % (metadata.entityName)
+      print "      startTime: %s" % (metadata.startTime)
+      print "      endTime: %s" % (metadata.endTime)
+      if metadata.unitNumerators:
+        print "      unitNumerators: %s" % (metadata.unitNumerators)
+      if metadata.unitDenominators: 
+        print "      unitDenominators: %s" % (metadata.unitDenominators)
+      if metadata.attributes:
+        print "      attributes: %s" % (metadata.attributes)
+      print "    data:"
+      for data in ts.data:
+        print "      timestamp: %s value: %s type: %s" % \
+               (data.timestamp, data.value, data.type)
+
+def do_query(query, from_time, to_time):
+  tsquery = TimeSeriesQuery()
+  for response in tsquery.query(query, from_time, to_time):
+    do_print(response)
+
+def usage():
+  doc = inspect.getmodule(usage).__doc__
+  print >>sys.stderr, textwrap.dedent(doc % (sys.argv[0],))
+
+def setup_logging(level):
+  logging.basicConfig()
+  logging.getLogger().setLevel(level)
+
+def main(argv):
+  setup_logging(logging.INFO)
+
+  from_time = None
+  to_time = None
+
+  # Argument parsing
+  try:
+    opts, args = getopt.getopt(argv[1:], "hf:t:")
+  except getopt.GetoptError, err:
+    print >>sys.stderr, err
+    usage()
+    return -1
+
+  for option, val in opts:
+    if option == '-h':
+      usage()
+      return -1
+    elif option == '-f':
+      try:
+        print val
+        from_time = datetime.strptime(val, "%Y-%m-%dT%H:%M")
+        from_time = from_time.isoformat()
+      except:
+        print >>sys.stderr, "Unable to parse the from time:"
+        usage()
+        return -1
+    elif option == '-t':
+      try:
+        to_time = datetime.strptime(val, "%Y-%m-%dT%H:%M")
+        to_time = to_time.isoformat()
+      except:
+        print >>sys.stderr, "Unable to parse the to time:"
+        usage()
+        return -1
+    else:
+      print >>sys.stderr, "Unknown flag:", option
+      usage()
+      return -1
+
+  if args:
+    # Do work
+    do_query(args[0], from_time, to_time)
+    return 0
+  else:
+    print >>sys.stderr, "No query:"
+    usage()
+    return -1
+
+#
+# The "main" entry
+#
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/cm-api/setup.py b/cm-api/setup.py
new file mode 100644 (file)
index 0000000..886eb10
--- /dev/null
@@ -0,0 +1,61 @@
+#! /usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup, find_packages
+
+from sys import version_info, platform
+
+if version_info[:2] > (2, 5):
+    install_requires = []
+else:
+    install_requires = ['simplejson >= 2.0.0']
+
+# Python 2.6 and below requires argparse
+if version_info[:2] < (2, 7):
+    install_requires += ['argparse']
+
+# Mac does not come default with readline, this is needed for autocomplete
+# in the cmps shell
+if platform == 'darwin':
+    install_requires += ['readline']
+
+setup(
+  name = 'cm_api',
+  version = '7.0',    # Compatible with API v7 (CM 5.1)
+  packages = find_packages('src', exclude=['cm_api_tests']),
+  package_dir = {'cm_api': 'src/cm_api',
+                 'cm_shell': 'src/cm_shell'},
+
+  # Project uses simplejson, so ensure that it gets installed or upgraded
+  # on the target machine
+  install_requires = install_requires,
+
+  author = 'Cloudera, Inc.',
+  author_email = 'scm-users@cloudera.org',
+  description = 'Cloudera Manager API client',
+  long_description = 'cm_api is a Python client to the Cloudera Manager REST API',
+  license = 'Apache License 2.0',
+  url = 'http://cloudera.github.com/cm_api/',
+  classifiers = [
+        "Development Status :: 5 - Production/Stable",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 2.6",
+        "Programming Language :: Python :: 2.7",
+  ],
+  entry_points = { 'console_scripts': [ 'cmps = cm_shell.cmps:main', ]}
+)
diff --git a/cm-api/src/cm_api/__init__.py b/cm-api/src/cm_api/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cm-api/src/cm_api/api_client.py b/cm-api/src/cm_api/api_client.py
new file mode 100644 (file)
index 0000000..4e353e6
--- /dev/null
@@ -0,0 +1,307 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+from cm_api.http_client import HttpClient, RestException
+from cm_api.endpoints import batch, cms, clusters, events, hosts, tools
+from cm_api.endpoints import types, users, timeseries
+from cm_api.resource import Resource
+
+__docformat__ = "epytext"
+
+LOG = logging.getLogger(__name__)
+
+API_AUTH_REALM = "Cloudera Manager"
+API_CURRENT_VERSION = 6
+
+class ApiException(RestException):
+  """
+  Any error result from the API is converted into this exception type.
+  This handles errors from the HTTP level as well as the API level.
+  """
+  def __init__(self, error):
+    # The parent class will set up _code and _message
+    RestException.__init__(self, error)
+    try:
+      # See if the body is json
+      json_body = json.loads(self._message)
+      self._message = json_body['message']
+    except (ValueError, KeyError):
+      pass    # Ignore json parsing error
+
+
+class ApiResource(Resource):
+  """
+  Resource object that provides methods for managing the top-level API resources.
+  """
+
+  def __init__(self, server_host, server_port=None,
+               username="admin", password="admin",
+               use_tls=False, version=API_CURRENT_VERSION):
+    """
+    Creates a Resource object that provides API endpoints.
+
+    @param server_host: The hostname of the Cloudera Manager server.
+    @param server_port: The port of the server. Defaults to 7180 (http) or
+      7183 (https).
+    @param username: Login name.
+    @param password: Login password.
+    @param use_tls: Whether to use tls (https).
+    @param version: API version.
+    @return: Resource object referring to the root.
+    """
+    self._version = version
+    protocol = use_tls and "https" or "http"
+    if server_port is None:
+      server_port = use_tls and 7183 or 7180
+    base_url = "%s://%s:%s/api/v%s" % \
+        (protocol, server_host, server_port, version)
+
+    client = HttpClient(base_url, exc_class=ApiException)
+    client.set_basic_auth(username, password, API_AUTH_REALM)
+    client.set_headers( { "Content-Type" : "application/json" } )
+    Resource.__init__(self, client)
+
+  @property
+  def version(self):
+    """
+    Returns the API version (integer) being used.
+    """
+    return self._version
+
+  # CMS ops.
+
+  def get_cloudera_manager(self):
+    """
+    Returns a Cloudera Manager object.
+    """
+    return cms.ClouderaManager(self)
+
+  # Cluster ops.
+
+  def create_cluster(self, name, version):
+    """
+    Create a new cluster.
+
+    @param name: Cluster name.
+    @param version: Cluster CDH version.
+    @return: The created cluster.
+    """
+    return clusters.create_cluster(self, name, version)
+
+  def delete_cluster(self, name):
+    """
+    Delete a cluster by name.
+
+    @param name: Cluster name
+    @return: The deleted ApiCluster object
+    """
+    return clusters.delete_cluster(self, name)
+
+  def get_all_clusters(self, view = None):
+    """
+    Retrieve a list of all clusters.
+    @param view: View to materialize ('full' or 'summary').
+    @return: A list of ApiCluster objects.
+    """
+    return clusters.get_all_clusters(self, view)
+
+  def get_cluster(self, name):
+    """
+    Look up a cluster by name.
+
+    @param name: Cluster name.
+    @return: An ApiCluster object.
+    """
+    return clusters.get_cluster(self, name)
+
+  # Host ops.
+
+  def create_host(self, host_id, name, ipaddr, rack_id = None):
+    """
+    Create a host.
+
+    @param host_id:  The host id.
+    @param name:     Host name
+    @param ipaddr:   IP address
+    @param rack_id:  Rack id. Default None.
+    @return: An ApiHost object
+    """
+    return hosts.create_host(self, host_id, name, ipaddr, rack_id)
+
+  def delete_host(self, host_id):
+    """
+    Delete a host by id.
+
+    @param host_id: Host id
+    @return: The deleted ApiHost object
+    """
+    return hosts.delete_host(self, host_id)
+
+  def get_all_hosts(self, view = None):
+    """
+    Get all hosts
+
+    @param view: View to materialize ('full' or 'summary').
+    @return: A list of ApiHost objects.
+    """
+    return hosts.get_all_hosts(self, view)
+
+  def get_host(self, host_id):
+    """
+    Look up a host by id.
+
+    @param host_id: Host id
+    @return: An ApiHost object
+    """
+    return hosts.get_host(self, host_id)
+
+  # Users
+
+  def get_all_users(self, view = None):
+    """
+    Get all users.
+
+    @param view: View to materialize ('full' or 'summary').
+    @return: A list of ApiUser objects.
+    """
+    return users.get_all_users(self, view)
+
+  def get_user(self, username):
+    """
+    Look up a user by username.
+
+    @param username: Username to look up
+    @return: An ApiUser object
+    """
+    return users.get_user(self, username)
+
+  def create_user(self, username, password, roles):
+    """
+    Create a user.
+
+    @param username: Username
+    @param password: Password
+    @param roles: List of roles for the user. This should be [] for a
+                  regular user, or ['ROLE_ADMIN'] for an admin.
+    @return: An ApiUser object
+    """
+    return users.create_user(self, username, password, roles)
+
+  def delete_user(self, username):
+    """
+    Delete user by username.
+
+    @param username: Username
+    @return: An ApiUser object
+    """
+    return users.delete_user(self, username)
+
+  # Events
+
+  def query_events(self, query_str = None):
+    """
+    Query events.
+    @param query_str: Query string.
+    @return: A list of ApiEvent.
+    """
+    return events.query_events(self, query_str)
+
+  def get_event(self, event_id):
+    """
+    Retrieve a particular event by ID.
+    @param event_id: The event ID.
+    @return: An ApiEvent.
+    """
+    return events.get_event(self, event_id)
+
+  # Tools
+
+  def echo(self, message):
+    """Have the server echo a message back."""
+    return tools.echo(self, message)
+
+  def echo_error(self, message):
+    """Generate an error, but we get to set the error message."""
+    return tools.echo_error(self, message)
+
+  # Metrics
+
+  def get_metrics(self, path, from_time, to_time, metrics, view, params=None):
+    """
+    Generic function for querying metrics.
+
+    @param from_time: A datetime; start of the period to query (optional).
+    @param to_time: A datetime; end of the period to query (default = now).
+    @param metrics: List of metrics to query (default = all).
+    @param view: View to materialize ('full' or 'summary')
+    @param params: Other query parameters.
+    @return: List of metrics and their readings.
+    """
+    if not params:
+      params = { }
+    if from_time:
+      params['from'] = from_time.isoformat()
+    if to_time:
+      params['to'] = to_time.isoformat()
+    if metrics:
+      params['metrics'] = metrics
+    if view:
+      params['view'] = view
+    resp = self.get(path, params=params)
+    return types.ApiList.from_json_dict(resp, self, types.ApiMetric)
+
+  def query_timeseries(self, query, from_time=None, to_time=None):
+    """
+    Query time series.
+    @param query: Query string.
+    @param from_time: Start of the period to query (optional).
+    @param to_time: End of the period to query (default = now).
+    @return: A list of ApiTimeSeriesResponse.
+    """
+    return timeseries.query_timeseries(self, query, from_time, to_time)
+
+  def get_metric_schema(self):
+    """
+    Get the schema for all of the metrics.
+    @return: A list of ApiMetricSchema.
+    """
+    return timeseries.get_metric_schema(self)
+
+  # Batch
+
+  def do_batch(self, elements):
+    """
+    Execute a batch request with one or more elements. If any element fails,
+    the entire request is rolled back and subsequent elements are ignored.
+    @param elements: A list of ApiBatchRequestElements
+    @return: 2-tuple (overall success, list of ApiBatchResponseElements).
+    """
+    return batch.do_batch(self, elements)
+
+def get_root_resource(server_host, server_port=None,
+                      username="admin", password="admin",
+                      use_tls=False, version=API_CURRENT_VERSION):
+  """
+  See ApiResource.
+  """
+  return ApiResource(server_host, server_port, username, password, use_tls,
+      version)
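
A minimal usage sketch for the client above, assuming a Cloudera Manager
reachable as `cm-host` with the default admin/admin credentials; the query
string mirrors examples/timeseries.py, and the time range is left unset just
as that script does when -f/-t are not given:

    from cm_api.api_client import get_root_resource

    api = get_root_resource('cm-host', username='admin', password='admin')

    for response in api.query_timeseries('select cpu_percent'):
      if response.timeSeries:
        for ts in response.timeSeries:
          print ts.metadata.metricName, ts.metadata.entityName, len(ts.data)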
diff --git a/cm-api/src/cm_api/endpoints/__init__.py b/cm-api/src/cm_api/endpoints/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cm-api/src/cm_api/endpoints/batch.py b/cm-api/src/cm_api/endpoints/batch.py
new file mode 100644 (file)
index 0000000..524463b
--- /dev/null
@@ -0,0 +1,33 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+BATCH_PATH = "/batch"
+
+def do_batch(resource_root, elements):
+  """
+  Execute a batch request with one or more elements. If any element fails,
+  the entire request is rolled back and subsequent elements are ignored.
+
+  @param elements: A list of ApiBatchRequestElements
+  @return: an ApiBatchResponseList
+  @since: API v6
+  """
+  return call(resource_root.post, BATCH_PATH, ApiBatchResponseList,
+      data=elements, api_version=6)
diff --git a/cm-api/src/cm_api/endpoints/clusters.py b/cm-api/src/cm_api/endpoints/clusters.py
new file mode 100644 (file)
index 0000000..cd6d1a2
--- /dev/null
@@ -0,0 +1,451 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+from cm_api.endpoints import services, parcels, host_templates
+
+__docformat__ = "epytext"
+
+CLUSTERS_PATH = "/clusters"
+
+def create_cluster(resource_root, name, version, fullVersion=None):
+  """
+  Create a cluster
+  @param resource_root: The root Resource object.
+  @param name: Cluster name
+  @param version: Cluster CDH major version (eg: "4")
+                  - The CDH minor version will be assumed to be the
+                    latest released version, if 'fullVersion' is not
+                    specified.
+  @param fullVersion: Cluster's full CDH version. (eg: "4.6.0")
+                        - If specified, 'version' will be ignored.
+                        - Since: v6
+  @return: An ApiCluster object
+  """
+  if fullVersion is not None:
+    api_version = 6
+  else:
+    api_version = 1
+
+  apicluster = ApiCluster(resource_root, name, version, fullVersion)
+  return call(resource_root.post, CLUSTERS_PATH, ApiCluster, True,
+              data=[apicluster], api_version=api_version)[0]
+
+def get_cluster(resource_root, name):
+  """
+  Lookup a cluster by name
+  @param resource_root: The root Resource object.
+  @param name: Cluster name
+  @return: An ApiCluster object
+  """
+  return call(resource_root.get, "%s/%s" % (CLUSTERS_PATH, name), ApiCluster)
+
+def get_all_clusters(resource_root, view=None):
+  """
+  Get all clusters
+  @param resource_root: The root Resource object.
+  @return: A list of ApiCluster objects.
+  """
+  return call(resource_root.get, CLUSTERS_PATH, ApiCluster, True,
+      params=view and dict(view=view) or None)
+
+def delete_cluster(resource_root, name):
+  """
+  Delete a cluster by name
+  @param resource_root: The root Resource object.
+  @param name: Cluster name
+  @return: The deleted ApiCluster object
+  """
+  return call(resource_root.delete, "%s/%s" % (CLUSTERS_PATH, name), ApiCluster)
+
+class ApiCluster(BaseApiResource):
+  _ATTRIBUTES = {
+    'name'              : None,
+    'displayName'       : None,
+    'version'           : None,
+    'fullVersion'       : None,
+    'maintenanceMode'   : ROAttr(),
+    'maintenanceOwners' : ROAttr(),
+  }
+
+  def __init__(self, resource_root, name=None, version=None, fullVersion=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiCluster>: %s; version: %s" % (self.name, self.version)
+
+  def _path(self):
+    return "%s/%s" % (CLUSTERS_PATH, self.name)
+
+  def _put_cluster(self, dic, params=None):
+    """Change cluster attributes"""
+    cluster = self._put('', ApiCluster, data=dic, params=params)
+    self._update(cluster)
+    return self
+
+  def get_service_types(self):
+    """
+    Get all service types supported by this cluster.
+
+    @return: A list of service types (strings)
+    """
+    resp = self._get_resource_root().get(self._path() + '/serviceTypes')
+    return resp[ApiList.LIST_KEY]
+
+  def get_commands(self, view=None):
+    """
+    Retrieve a list of running commands for this cluster.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: A list of running commands.
+    """
+    return self._get("commands", ApiCommand, True,
+        params = view and dict(view=view) or None)
+
+  def rename(self, newname):
+    """
+    Rename a cluster.
+
+    @param newname: New cluster name
+    @return: An ApiCluster object
+    @since: API v2
+    """
+    dic = self.to_json_dict()
+    if self._get_resource_root().version < 6:
+      dic['name'] = newname
+    else:
+      dic['displayName'] = newname
+    return self._put_cluster(dic)
+
+  def update_cdh_version(self, new_cdh_version):
+    """
+    Manually set the CDH version.
+
+    @param new_cdh_version: New CDH version, e.g. 4.5.1
+    @return: An ApiCluster object
+    @since: API v6
+    """
+    dic = self.to_json_dict()
+    dic['fullVersion'] = new_cdh_version
+    return self._put_cluster(dic)
+
+  def create_service(self, name, service_type):
+    """
+    Create a service.
+
+    @param name: Service name
+    @param service_type: Service type
+    @return: An ApiService object
+    """
+    return services.create_service(self._get_resource_root(), name,
+        service_type, self.name)
+
+  def delete_service(self, name):
+    """
+    Delete a service by name.
+
+    @param name: Service name
+    @return: The deleted ApiService object
+    """
+    return services.delete_service(self._get_resource_root(), name, self.name)
+
+  def get_service(self, name):
+    """
+    Lookup a service by name.
+
+    @param name: Service name
+    @return: An ApiService object
+    """
+    return services.get_service(self._get_resource_root(), name, self.name)
+
+  def get_all_services(self, view = None):
+    """
+    Get all services in this cluster.
+
+    @return: A list of ApiService objects.
+    """
+    return services.get_all_services(self._get_resource_root(), self.name, view)
+
+  def get_parcel(self, product, version):
+    """
+    Lookup a parcel by product and version.
+
+    @param product: the product name
+    @param version: the product version
+    @return: An ApiParcel object
+    """
+    return parcels.get_parcel(self._get_resource_root(), product, version, self.name)
+
+  def get_all_parcels(self, view = None):
+    """
+    Get all parcels in this cluster.
+
+    @return: A list of ApiParcel objects.
+    """
+    return parcels.get_all_parcels(self._get_resource_root(), self.name, view)
+
+  def list_hosts(self):
+    """
+    Lists all the hosts that are associated with this cluster.
+
+    @return: A list of ApiHostRef objects of the hosts in the cluster.
+    @since: API v3
+    """
+    return self._get("hosts", ApiHostRef, True, api_version=3)
+
+  def remove_host(self, hostId):
+    """
+    Removes the association of the host with the cluster.
+
+    @return: A ApiHostRef of the host that was removed.
+    @since: API v3
+    """
+    return self._delete("hosts/" + hostId, ApiHostRef, api_version=3)
+
+  def remove_all_hosts(self):
+    """
+    Removes the association of all the hosts with the cluster.
+
+    @return: A list of ApiHostRef objects of the hosts that were removed.
+    @since: API v3
+    """
+    return self._delete("hosts", ApiHostRef, True, api_version=3)
+
+  def add_hosts(self, hostIds):
+    """
+    Adds one or more hosts to the cluster.
+
+    @param hostIds: List of IDs of hosts to add to cluster.
+    @return: A list of ApiHostRef objects of the new
+             hosts that were added to the cluster
+    @since: API v3
+    """
+    hostRefList = [ApiHostRef(self._get_resource_root(), x) for x in hostIds]
+    return self._post("hosts", ApiHostRef, True, data=hostRefList,
+        api_version=3)
+
+  def start(self):
+    """
+    Start all services in a cluster, respecting dependencies.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('start')
+
+  def stop(self):
+    """
+    Stop all services in a cluster, respecting dependencies.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('stop')
+
+  def restart(self):
+    """
+    Restart all services in the cluster.
+    Services are restarted in the appropriate order given their dependencies.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('restart')
+
+  def deploy_client_config(self):
+    """
+    Deploys client configuration to the hosts on the cluster.
+
+    @return: Reference to the submitted command.
+    @since: API v2
+    """
+    return self._cmd('deployClientConfig')
+
+  def upgrade_services(self):
+    """
+    This command is no longer recommended with API v6 onwards. It simply does
+    not work when parcels are used, and even with packages it may fail due to
+    a race. Use upgrade_cdh instead.
+
+    Upgrades the services in the cluster to CDH5 version.
+    This command requires that the CDH packages in the hosts used by the
+    cluster be upgraded to CDH5 before this command is issued. Once issued,
+    this command will stop all running services before proceeding.
+
+    If parcels are used instead of CDH system packages then the following
+    steps need to happen in order:
+      1. Stop all services manually
+      2. Activate parcel
+      3. Run this upgrade command
+
+    The command will upgrade the services and their configuration to the
+    version available in the CDH5 distribution.
+
+    @return: Reference to the submitted command.
+    @deprecated: since API v6
+    """
+    return self._cmd('upgradeServices')
+
+  def enter_maintenance_mode(self):
+    """
+    Put the cluster in maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('enterMaintenanceMode')
+    if cmd.success:
+      self._update(get_cluster(self._get_resource_root(), self.name))
+    return cmd
+
+  def exit_maintenance_mode(self):
+    """
+    Take the cluster out of maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('exitMaintenanceMode')
+    if cmd.success:
+      self._update(get_cluster(self._get_resource_root(), self.name))
+    return cmd
+
+  def get_all_host_templates(self):
+    """
+    Retrieves all host templates in the cluster.
+    @return: ApiList of ApiHostTemplate objects.
+    """
+    return host_templates.get_all_host_templates(self._get_resource_root(), self.name)
+
+  def get_host_template(self, name):
+    """
+    Retrieves a host templates by name.
+    @param name: Host template name.
+    @return: An ApiHostTemplate object.
+    """
+    return host_templates.get_host_template(self._get_resource_root(), name, self.name)
+
+  def create_host_template(self, name):
+    """
+    Creates a host template.
+    @param name: Name of the host template to create.
+    @return: An ApiHostTemplate object.
+    """
+    return host_templates.create_host_template(self._get_resource_root(), name, self.name)
+
+  def delete_host_template(self, name):
+    """
+    Deletes a host template.
+    @param name: Name of the host template to delete.
+    @return: An ApiHostTemplate object.
+    """
+    return host_templates.delete_host_template(self._get_resource_root(), name, self.name)
+
+  def rolling_restart(self, slave_batch_size=None,
+                      slave_fail_count_threshold=None,
+                      sleep_seconds=None,
+                      stale_configs_only=None,
+                      unupgraded_only=None,
+                      roles_to_include=None,
+                      restart_service_names=None):
+    """
+    Command to do a "best-effort" rolling restart of the given cluster,
+    i.e. it does plain restart of services that cannot be rolling restarted,
+    followed by first rolling restarting non-slaves and then rolling restarting
+    the slave roles of services that can be rolling restarted. The slave restarts
+    are done host-by-host.
+    @param slave_batch_size: Number of hosts with slave roles to restart at a time
+           Must be greater than 0. Default is 1.
+    @param slave_fail_count_threshold: The threshold for number of slave host batches that
+           are allowed to fail to restart before the entire command is considered failed.
+           Must be >= 0. Default is 0.
+    @param sleep_seconds: Number of seconds to sleep between restarts of slave host batches.
+           Must be >=0. Default is 0.
+    @param stale_configs_only: Restart roles with stale configs only. Default is false.
+    @param unupgraded_only: Restart roles that haven't been upgraded yet. Default is false.
+    @param roles_to_include: Role types to restart. Default is slave roles only.
+    @param restart_service_names: List of specific services to restart.
+    @return: Reference to the submitted command.
+    @since: API v4
+    """
+    args = dict()
+    if slave_batch_size:
+      args['slaveBatchSize'] = slave_batch_size
+    if slave_fail_count_threshold:
+      args['slaveFailCountThreshold'] = slave_fail_count_threshold
+    if sleep_seconds:
+      args['sleepSeconds'] = sleep_seconds
+    if stale_configs_only:
+      args['staleConfigsOnly'] = stale_configs_only
+    if unupgraded_only:
+      args['unUpgradedOnly'] = unupgraded_only
+    if roles_to_include:
+      args['rolesToInclude'] = roles_to_include
+    if restart_service_names:
+      args['restartServiceNames'] = restart_service_names
+
+    return self._cmd('rollingRestart', data=args, api_version=4)
+
+  def auto_assign_roles(self):
+    """
+    Automatically assign roles to hosts and create the roles for all the services in a cluster.
+
+    Assignments are done based on services in the cluster and hardware specifications.
+    Existing roles will be taken into account and their assignments will not be modified.
+    @since: API v6
+    """
+    self._put("autoAssignRoles", None, api_version=6)
+
+  def auto_configure(self):
+    """
+    Automatically configures roles and services in a cluster.
+
+    Overwrites some existing configurations. Might create new role config
+    groups. Only default role config groups must exist before calling this
+    endpoint. Other role config groups must not exist. If they do, an exception
+    will be thrown preventing any configuration. Ignores the Cloudera
+    Management Service even if colocated with roles of this cluster. To avoid
+    over-committing the heap on hosts, assign hosts to this cluster that are
+    not being used by the Cloudera Management Service.
+    @since: API v6
+    """
+    self._put("autoConfigure", None, api_version=6)
+
+  def upgrade_cdh(self, deploy_client_config=True, start_all_services=True, cdh_parcel_version=None):
+    """
+    Perform CDH upgrade to the next major version.
+
+    If using packages, CDH packages on all hosts of the cluster must be
+    manually upgraded before this command is issued.
+    The command will upgrade the services and their configuration to the
+    version available in the CDH5 distribution. All running services will
+    be stopped before proceeding.
+
+    @param deploy_client_config: Whether to deploy client configurations
+           after the upgrade. Default is True.
+    @param start_all_services: Whether to start all services after the upgrade.
+           Default is True.
+    @param cdh_parcel_version: If using parcels, the full version of an
+           already distributed parcel for the next major CDH version. Default
+           is None. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or
+           '5.0.2-1.cdh5.0.2.p0.32'
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict()
+    args['deployClientConfig'] = deploy_client_config
+    args['startAllServices'] = start_all_services
+    if cdh_parcel_version:
+      args['cdhParcelVersion'] = cdh_parcel_version
+    return self._cmd('upgradeCdh', data=args, api_version=6)
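The cluster-level commands above are normally driven from an ApiResource handle. A rough sketch, assuming the usual cm_api.api_client.ApiResource entry point (defined elsewhere in this package) and treating the host, credentials and cluster name as placeholders:

    from cm_api.api_client import ApiResource

    # Placeholder CM host and credentials.
    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)
    cluster = api.get_cluster('cluster1')

    # Rolling-restart only roles with stale configs, two slave hosts per batch.
    cmd = cluster.rolling_restart(slave_batch_size=2, stale_configs_only=True)
    cmd = cmd.wait()  # block until the submitted command finishes

    # Upgrade to the next major CDH version from an already distributed parcel.
    if cmd.success:
        cmd = cluster.upgrade_cdh(cdh_parcel_version='5.0.2-1.cdh5.0.2.p0.32')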
diff --git a/cm-api/src/cm_api/endpoints/cms.py b/cm-api/src/cm_api/endpoints/cms.py
new file mode 100644 (file)
index 0000000..1327008
--- /dev/null
@@ -0,0 +1,391 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+from cm_api.endpoints.services import ApiService
+
+class ApiLicense(BaseApiObject):
+  """Model for a CM license."""
+  _ATTRIBUTES = {
+    'owner'       : ROAttr(),
+    'uuid'        : ROAttr(),
+    'expiration'  : ROAttr(),
+  }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+class ClouderaManager(BaseApiResource):
+  """
+  The Cloudera Manager instance.
+
+  Provides access to CM configuration and services.
+  """
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+  def _path(self):
+    return '/cm'
+
+  def get_commands(self, view=None):
+    """
+    Retrieve a list of running global commands.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: A list of running commands.
+    """
+    return self._get("commands", ApiCommand, True,
+        params = view and dict(view=view) or None)
+
+  def create_mgmt_service(self, service_setup_info):
+    """
+    Setup the Cloudera Management Service.
+
+    @param service_setup_info: ApiServiceSetupInfo object.
+    @return: The management service instance.
+    """
+    return self._put("service", ApiService, data=service_setup_info)
+
+  def delete_mgmt_service(self):
+    """
+    Delete the Cloudera Management Service.
+
+    @return: The deleted management service instance.
+    """
+    return self._delete("service", ApiService, api_version=6)
+
+  def get_service(self):
+    """
+    Return the Cloudera Management Services instance.
+
+    @return: An ApiService instance.
+    """
+    return self._get("service", ApiService)
+
+  def get_license(self):
+    """
+    Return information about the currently installed license.
+
+    @return: License information.
+    """
+    return self._get("license", ApiLicense)
+
+  def update_license(self, license_text):
+    """
+    Install or update the Cloudera Manager license.
+
+    @param license_text: the license in text form
+    """
+    content = (
+        '--MULTI_BOUNDARY',
+        'Content-Disposition: form-data; name="license"',
+        '',
+        license_text,
+        '--MULTI_BOUNDARY--',
+        '')
+    resp = self._get_resource_root().post('cm/license',
+        data="\r\n".join(content),
+        contenttype='multipart/form-data; boundary=MULTI_BOUNDARY')
+    return ApiLicense.from_json_dict(resp, self._get_resource_root())
+
+  def get_config(self, view = None):
+    """
+    Retrieve the Cloudera Manager configuration.
+
+    The 'summary' view contains strings as the dictionary values. The full
+    view contains ApiConfig instances as the values.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: Dictionary with configuration data.
+    """
+    return self._get_config("config", view)
+
+  def update_config(self, config):
+    """
+    Update the CM configuration.
+
+    @param config: Dictionary with configuration to update.
+    @return: Dictionary with updated configuration.
+    """
+    return self._update_config("config", config)
+
+  def generate_credentials(self):
+    """
+    Generate credentials for services configured with Kerberos.
+
+    @return: Information about the submitted command.
+    """
+    return self._cmd('generateCredentials')
+
+  def inspect_hosts(self):
+    """
+    Runs the host inspector on the configured hosts.
+
+    @return: Information about the submitted command.
+    """
+    return self._cmd('inspectHosts')
+
+  def collect_diagnostic_data(self, start_datetime, end_datetime, includeInfoLog=False):
+    """
+    Issue the command to collect diagnostic data.
+
+    This method is deprecated as of CM 4.5; use collect_diagnostic_data_45 instead.
+
+    @param start_datetime: The start of the collection period. Type datetime.
+    @param end_datetime: The end of the collection period. Type datetime.
+    @param includeInfoLog: Whether to include INFO level log messages.
+    """
+    args = {
+        'startTime': start_datetime.isoformat(),
+        'endTime': end_datetime.isoformat(),
+        'includeInfoLog': includeInfoLog,
+    }
+    return self._cmd('collectDiagnosticData', data=args)
+
+  def collect_diagnostic_data_45(self, end_datetime, bundle_size_bytes, cluster_name=None):
+    """
+    Issue the command to collect diagnostic data.
+
+    @param end_datetime: The end of the collection period. Type datetime.
+    @param bundle_size_bytes: The target size for the support bundle in bytes
+    @param cluster_name: The cluster to collect or None for all clusters
+    """
+    args = {
+        'endTime': end_datetime.isoformat(),
+        'bundleSizeBytes': bundle_size_bytes,
+        'clusterName': cluster_name
+    }
+    return self._cmd('collectDiagnosticData', data=args)
+
+  def hosts_decommission(self, host_names):
+    """
+    Decommission the specified hosts by decommissioning the slave roles
+    and stopping the remaining ones.
+
+    @param host_names: List of names of hosts to be decommissioned.
+    @return: Information about the submitted command.
+    @since: API v2
+    """
+    return self._cmd('hostsDecommission', data=host_names)
+
+  def hosts_recommission(self, host_names):
+    """
+    Recommission the specified hosts by recommissioning the slave roles.
+    This command doesn't start the roles. Use hosts_start_roles for that.
+
+    @param host_names: List of names of hosts to be recommissioned.
+    @return: Information about the submitted command.
+    @since: API v2
+    """
+    return self._cmd('hostsRecommission', data=host_names)
+
+  def hosts_start_roles(self, host_names):
+    """
+    Start all the roles on the specified hosts.
+
+    @param host_names: List of names of hosts on which to start all roles.
+    @return: Information about the submitted command.
+    @since: API v2
+    """
+    return self._cmd('hostsStartRoles', data=host_names)
+
+  def create_peer(self, name, url, username, password):
+    """
+    Create a new peer for replication.
+
+    @param name: The name of the peer.
+    @param url: The url of the peer.
+    @param username: The admin username to use to set up the remote side of the peer connection.
+    @param password: The password of the admin user.
+    @return: The newly created peer.
+    @since: API v3
+    """
+    peer = ApiCmPeer(self._get_resource_root(),
+        name=name,
+        url=url,
+        username=username,
+        password=password)
+    return self._post("peers", ApiCmPeer, data=peer, api_version=3)
+
+  def delete_peer(self, name):
+    """
+    Delete a replication peer.
+
+    @param name: The name of the peer.
+    @return: The deleted peer.
+    @since: API v3
+    """
+    return self._delete("peers/" + name, ApiCmPeer, api_version=3)
+
+  def update_peer(self,
+      current_name,
+      new_name, new_url, username, password):
+    """
+    Update a replication peer.
+
+    @param current_name: The name of the peer to update.
+    @param new_name: The new name for the peer.
+    @param new_url: The new url for the peer.
+    @param username: The admin username to use to set up the remote side of the peer connection.
+    @param password: The password of the admin user.
+    @return: The updated peer.
+    @since: API v3
+    """
+    peer = ApiCmPeer(self._get_resource_root(),
+        name=new_name,
+        url=new_url,
+        username=username,
+        password=password)
+    return self._put("peers/" + current_name, data=peer, api_version=3)
+
+  def get_peers(self):
+    """
+    Retrieve a list of replication peers.
+
+    @return: A list of replication peers.
+    @since: API v3
+    """
+    return self._get("peers", ApiCmPeer, True, api_version=3)
+
+  def get_peer(self, name):
+    """
+    Retrieve a replication peer by name.
+
+    @param name: The name of the peer.
+    @return: The peer.
+    @since: API v3
+    """
+    return self._get("peers/" + name, ApiCmPeer, api_version=3)
+
+  def test_peer_connectivity(self, name):
+    """
+    Test connectivity for a replication peer.
+
+    @param name: The name of the peer to test.
+    @return: The command representing the test.
+    @since: API v3
+    """
+    return self._post("peers/%s/commands/test" % (name, ), ApiCommand,
+        api_version=3)
+
+  def get_all_hosts_config(self, view=None):
+    """
+    Retrieve the default configuration for all hosts.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: Dictionary with configuration data.
+    """
+    return self._get_config("allHosts/config", view)
+
+  def update_all_hosts_config(self, config):
+    """
+    Update the default configuration for all hosts.
+
+    @param config: Dictionary with configuration to update.
+    @return: Dictionary with updated configuration.
+    """
+    return self._update_config("allHosts/config", config)
+
+  def auto_assign_roles(self):
+    """
+    Automatically assign roles to hosts and create the roles for the Cloudera
+    Management Service.
+
+    Assignments are done based on number of hosts in the deployment and hardware
+    specifications. Existing roles will be taken into account and their
+    assignments will not be modified. The deployment should not have any
+    clusters when calling this endpoint. If it does, an exception will be thrown
+    preventing any role assignments.
+    @since: API v6
+    """
+    self._put("service/autoAssignRoles", None, api_version=6)
+
+  def auto_configure(self):
+    """
+    Automatically configures roles of the Cloudera Management Service.
+
+    Overwrites some existing configurations. Only default role config groups
+    must exist before calling this endpoint. Other role config groups must not
+    exist. If they do, an exception will be thrown preventing any
+    configuration. Ignores any clusters (and their services and roles)
+    colocated with the Cloudera Management Service. To avoid over-committing
+    the heap on hosts, place the Cloudera Management Service roles on machines
+    not used by any of the clusters.
+    @since: API v6
+    """
+    self._put("service/autoConfigure", None, api_version=6)
+
+  def host_install(self, user_name, host_names, ssh_port=None, password=None,
+          private_key=None, passphrase=None, parallel_install_count=None,
+          cm_repo_url=None, gpg_key_custom_url=None):
+    """
+    Install Cloudera Manager Agent on a set of hosts.
+
+    @param user_name: The username used to authenticate with the hosts. Root access
+                      to your hosts is required to install Cloudera packages. The
+                      installer will connect to your hosts via SSH and log in either
+                      directly as root or as another user with password-less sudo
+                      privileges to become root.
+    @param host_names: List of names of hosts to configure for use with
+                       Cloudera Manager. A host may be specified by a
+                       hostname (FQDN) or an IP address.
+    @param ssh_port: SSH port. If unset, defaults to 22.
+    @param password: The password used to authenticate with the hosts. Specify
+                     either this or a private key. For password-less login, use
+                     an empty string as password.
+    @param private_key: The private key to authenticate with the hosts. Specify
+                        either this or a password.
+    @param passphrase: The passphrase associated with the private key used to
+                       authenticate with the hosts (optional).
+    @param parallel_install_count: Number of simultaneous installations.
+                                   Defaults to 10. Running a large number of
+                                   installations at once can consume large amounts
+                                   of network bandwidth and other system resources.
+    @param cm_repo_url: The Cloudera Manager repository URL to use (optional).
+                        Example for SLES, Redhat or other RPM based distributions:
+                        http://archive-primary.cloudera.com/cm5/redhat/6/x86_64/cm/5/
+                        Example for Ubuntu or other Debian based distributions:
+                        "deb http://archive.cloudera.com/cm5/ubuntu/lucid/amd64/cm/ lucid-cm5 contrib"
+    @param gpg_key_custom_url: The Cloudera Manager public GPG key (optional).
+                               Example for SLES, Redhat or other RPM based distributions:
+                               http://archive-primary.cloudera.com/cm5/redhat/6/x86_64/cm/RPM-GPG-KEY-cloudera
+                               Example for Ubuntu or other Debian based distributions:
+                               http://archive.cloudera.com/debian/archive.key
+    @return: Information about the submitted command.
+    @since: API v6
+    """
+    host_install_args = {}
+    if user_name:
+      host_install_args['userName'] = user_name
+    if host_names:
+      host_install_args['hostNames'] = host_names
+    if ssh_port:
+      host_install_args['sshPort'] = ssh_port
+    if password:
+      host_install_args['password'] = password
+    if private_key:
+      host_install_args['privateKey'] = private_key
+    if passphrase:
+      host_install_args['passphrase'] = passphrase
+    if parallel_install_count:
+      host_install_args['parallelInstallCount'] = parallel_install_count
+    if cm_repo_url:
+      host_install_args['cmRepoUrl'] = cm_repo_url
+    if gpg_key_custom_url:
+      host_install_args['gpgKeyCustomUrl'] = gpg_key_custom_url
+    return self._cmd('hostInstall', data=host_install_args)
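For orientation, a similar sketch against the ClouderaManager endpoint defined above: installing agents on new hosts and registering a replication peer. Hostnames, credentials and the peer URL are placeholders, and the ApiResource handle is assumed as before:

    from cm_api.api_client import ApiResource

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)
    cm = api.get_cloudera_manager()

    # Push the CM agent onto two new hosts over SSH (password authentication shown).
    cmd = cm.host_install('root', ['node1.example.com', 'node2.example.com'],
                          password='secret', parallel_install_count=2)
    cmd.wait()

    # Register a peer CM instance for replication (API v3+).
    peer = cm.create_peer('backup-cm', 'https://backup-cm.example.com:7183',
                          'admin', 'admin')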
diff --git a/cm-api/src/cm_api/endpoints/dashboards.py b/cm-api/src/cm_api/endpoints/dashboards.py
new file mode 100644 (file)
index 0000000..c2f2d52
--- /dev/null
@@ -0,0 +1,77 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+DASHBOARDS_PATH = "/timeseries/dashboards"
+
+def _get_dashboard_path(dashboard_name):
+  return DASHBOARDS_PATH + "/%s" % (dashboard_name)
+
+def create_dashboards(resource_root, dashboard_list):
+  """
+  Creates the list of dashboards. If any of the dashboards already exist
+  this whole command will fail and no dashboards will be created.
+  @since: API v6
+  @return: The list of dashboards created.
+  """
+  return call(resource_root.post, DASHBOARDS_PATH, ApiDashboard, \
+      ret_is_list=True, data=dashboard_list)
+
+def get_dashboards(resource_root):
+  """
+  Returns the list of all dashboards.
+  @since: API v6
+  @return: A list of API dashboard objects.
+  """
+  return call(resource_root.get, DASHBOARDS_PATH, ApiDashboard, \
+     ret_is_list=True)
+
+def get_dashboard(resource_root, dashboard_name):
+  """
+  Returns a dashboard definition for the specified name. This dashboard
+  can be imported with the createDashboards API.
+  @since: API v6
+  @return: An API dashboard object.
+  """
+  return call(resource_root.get, _get_dashboard_path(dashboard_name), \
+      ApiDashboard)
+
+def delete_dashboard(resource_root, dashboard_name):
+  """
+  Deletes a dashboard.
+  @since: API v6
+  @return: The deleted dashboard.
+  """
+  return call(resource_root.delete, _get_dashboard_path(dashboard_name), \
+      ApiDashboard)
+
+class ApiDashboard(BaseApiResource):
+  _ATTRIBUTES = {
+    'name'  : None,
+    'json'  : None
+  }
+
+  def __init__(self, resource_root, name=None, json=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiDashboard>: %s" % (self.name)
+
+  def _path(self):
+    return _get_dashboard_path(self.name)
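A short usage sketch for the dashboard helpers; the dashboard names are placeholders and the resource root is an ApiResource as in the earlier examples:

    from cm_api.api_client import ApiResource
    from cm_api.endpoints.dashboards import ApiDashboard, create_dashboards, get_dashboard

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)

    # Clone an existing dashboard definition under a new name.
    original = get_dashboard(api, 'Default Dashboard')
    copy = ApiDashboard(api, name='My Copy', json=original.json)
    created = create_dashboards(api, [copy])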
diff --git a/cm-api/src/cm_api/endpoints/events.py b/cm-api/src/cm_api/endpoints/events.py
new file mode 100644 (file)
index 0000000..595e3b2
--- /dev/null
@@ -0,0 +1,60 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+EVENTS_PATH = "/events"
+
+def query_events(resource_root, query_str=None):
+  """
+  Search for events.
+  @param query_str: Query string.
+  @return: A list of ApiEvent.
+  """
+  params = None
+  if query_str:
+    params = dict(query=query_str)
+  return call(resource_root.get, EVENTS_PATH, ApiEventQueryResult,
+      params=params)
+
+def get_event(resource_root, event_id):
+  """
+  Retrieve a particular event by ID.
+  @param event_id: The event ID.
+  @return: An ApiEvent.
+  """
+  return call(resource_root.get, "%s/%s" % (EVENTS_PATH, event_id), ApiEvent)
+
+
+class ApiEvent(BaseApiObject):
+  _ATTRIBUTES = {
+    'id'            : ROAttr(),
+    'content'       : ROAttr(),
+    'timeOccurred'  : ROAttr(datetime.datetime),
+    'timeReceived'  : ROAttr(datetime.datetime),
+    'category'      : ROAttr(),
+    'severity'      : ROAttr(),
+    'alert'         : ROAttr(),
+    'attributes'    : ROAttr(),
+  }
+
+class ApiEventQueryResult(ApiList):
+  _ATTRIBUTES = {
+    'totalResults' : ROAttr(),
+  }
+  _MEMBER_CLASS = ApiEvent
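The event helpers can be exercised roughly as follows; the query string is only illustrative of CM's event search syntax:

    from cm_api.api_client import ApiResource
    from cm_api.endpoints.events import query_events

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)

    # Fetch matching events; the result is an ApiEventQueryResult (an ApiList).
    result = query_events(api, 'severity==CRITICAL')
    for event in result:
        # Each item is an ApiEvent with read-only attributes such as content.
        summary = (event.timeOccurred, event.severity, event.content)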
diff --git a/cm-api/src/cm_api/endpoints/host_templates.py b/cm-api/src/cm_api/endpoints/host_templates.py
new file mode 100644 (file)
index 0000000..790e3e6
--- /dev/null
@@ -0,0 +1,164 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+HOST_TEMPLATES_PATH = "/clusters/%s/hostTemplates"
+HOST_TEMPLATE_PATH = "/clusters/%s/hostTemplates/%s"
+APPLY_HOST_TEMPLATE_PATH = HOST_TEMPLATE_PATH + "/commands/applyHostTemplate"
+
+def create_host_template(resource_root, name, cluster_name):
+  """
+  Create a host template.
+  @param resource_root: The root Resource object.
+  @param name: Host template name
+  @param cluster_name: Cluster name
+  @return: An ApiHostTemplate object for the created host template.
+  @since: API v3
+  """
+  apitemplate = ApiHostTemplate(resource_root, name, [])
+  return call(resource_root.post,
+      HOST_TEMPLATES_PATH % (cluster_name,),
+      ApiHostTemplate, True, data=[apitemplate], api_version=3)[0]
+
+def get_host_template(resource_root, name, cluster_name):
+  """
+  Lookup a host template by name in the specified cluster.
+  @param resource_root: The root Resource object.
+  @param name: Host template name.
+  @param cluster_name: Cluster name.
+  @return: An ApiHostTemplate object.
+  @since: API v3
+  """
+  return call(resource_root.get,
+      HOST_TEMPLATE_PATH % (cluster_name, name),
+      ApiHostTemplate, api_version=3)
+
+def get_all_host_templates(resource_root, cluster_name="default"):
+  """
+  Get all host templates in a cluster.
+  @param cluster_name: Cluster name.
+  @return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
+  @since: API v3
+  """
+  return call(resource_root.get,
+      HOST_TEMPLATES_PATH % (cluster_name,),
+      ApiHostTemplate, True, api_version=3)
+
+def delete_host_template(resource_root, name, cluster_name):
+  """
+  Delete a host template identified by name in the specified cluster.
+  @param resource_root: The root Resource object.
+  @param name: Host template name.
+  @param cluster_name: Cluster name.
+  @return: The deleted ApiHostTemplate object.
+  @since: API v3
+  """
+  return call(resource_root.delete,
+      HOST_TEMPLATE_PATH % (cluster_name, name),
+      ApiHostTemplate, api_version=3)
+
+def update_host_template(resource_root, name, cluster_name, api_host_template):
+  """
+  Update a host template identified by name in the specified cluster.
+  @param resource_root: The root Resource object.
+  @param name: Host template name.
+  @param cluster_name: Cluster name.
+  @param api_host_template: The updated host template.
+  @return: The updated ApiHostTemplate.
+  @since: API v3
+  """
+  return call(resource_root.put,
+      HOST_TEMPLATE_PATH % (cluster_name, name),
+      ApiHostTemplate, data=api_host_template, api_version=3)
+
+def apply_host_template(resource_root, name, cluster_name, host_ids, start_roles):
+  """
+  Apply a host template identified by name on the specified hosts and
+  optionally start them.
+  @param resource_root: The root Resource object.
+  @param name: Host template name.
+  @param cluster_name: Cluster name.
+  @param host_ids: List of host ids.
+  @param start_roles: Whether to start the created roles or not.
+  @return: An ApiCommand object.
+  @since: API v3
+  """
+  host_refs = []
+  for host_id in host_ids:
+    host_refs.append(ApiHostRef(resource_root, host_id))
+
+  params = {"startRoles" : start_roles}
+  return call(resource_root.post,
+      APPLY_HOST_TEMPLATE_PATH % (cluster_name, name),
+      ApiCommand, data=host_refs, params=params, api_version=3)
+
+
+class ApiHostTemplate(BaseApiResource):
+  _ATTRIBUTES = {
+    'name'                : None,
+    'roleConfigGroupRefs' : Attr(ApiRoleConfigGroupRef),
+    'clusterRef'          : ROAttr(ApiClusterRef),
+  }
+
+  def __init__(self, resource_root, name=None, roleConfigGroupRefs=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiHostTemplate>: %s (cluster %s)" % (self.name, self.clusterRef.clusterName)
+
+  def _api_version(self):
+    return 3
+
+  def _path(self):
+    return HOST_TEMPLATE_PATH % (self.clusterRef.clusterName, self.name)
+
+  def _do_update(self, update):
+    self._update(self._put('', ApiHostTemplate, data=update))
+    return self
+
+  def rename(self, new_name):
+    """
+    Rename a host template.
+    @param new_name: New host template name.
+    @return: An ApiHostTemplate object.
+    """
+    update = copy.copy(self)
+    update.name = new_name
+    return self._do_update(update)
+
+  def set_role_config_groups(self, role_config_group_refs):
+    """
+    Updates the role config groups in a host template.
+    @param role_config_group_refs: List of role config group refs.
+    @return: An ApiHostTemplate object.
+    """
+    update = copy.copy(self)
+    update.roleConfigGroupRefs = role_config_group_refs
+    return self._do_update(update)
+
+  def apply_host_template(self, host_ids, start_roles):
+    """
+    Apply a host template identified by name on the specified hosts and
+    optionally start them.
+    @param host_ids: List of host ids.
+    @param start_roles: Whether to start the created roles or not.
+    @return: An ApiCommand object.
+    """
+    return apply_host_template(self._get_resource_root(), self.name, self.clusterRef.clusterName, host_ids, start_roles)
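A sketch of the typical host-template flow: create a template, point it at role config groups, then apply it to newly added hosts. Service, group and host names are placeholders, and ApiRoleConfigGroupRef is assumed to come from cm_api.endpoints.types with a roleConfigGroupName argument:

    from cm_api.api_client import ApiResource
    from cm_api.endpoints.types import ApiRoleConfigGroupRef

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)
    cluster = api.get_cluster('cluster1')

    # Create an empty template, then attach the role config groups to stamp out.
    template = cluster.create_host_template('worker-template')
    ref = ApiRoleConfigGroupRef(api, roleConfigGroupName='hdfs1-DATANODE-BASE')
    template.set_role_config_groups([ref])

    # Apply it to a new host and start the created roles.
    cmd = template.apply_host_template(['host-id-1234'], start_roles=True)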
diff --git a/cm-api/src/cm_api/endpoints/hosts.py b/cm-api/src/cm_api/endpoints/hosts.py
new file mode 100644 (file)
index 0000000..5d00f31
--- /dev/null
@@ -0,0 +1,187 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+HOSTS_PATH = "/hosts"
+
+def create_host(resource_root, host_id, name, ipaddr, rack_id=None):
+  """
+  Create a host
+  @param resource_root: The root Resource object.
+  @param host_id: Host id
+  @param name: Host name
+  @param ipaddr: IP address
+  @param rack_id: Rack id. Default None
+  @return: An ApiHost object
+  """
+  apihost = ApiHost(resource_root, host_id, name, ipaddr, rack_id)
+  return call(resource_root.post, HOSTS_PATH, ApiHost, True, data=[apihost])[0]
+
+def get_host(resource_root, host_id):
+  """
+  Lookup a host by id
+  @param resource_root: The root Resource object.
+  @param host_id: Host id
+  @return: An ApiHost object
+  """
+  return call(resource_root.get, "%s/%s" % (HOSTS_PATH, host_id), ApiHost)
+
+def get_all_hosts(resource_root, view=None):
+  """
+  Get all hosts
+  @param resource_root: The root Resource object.
+  @return: A list of ApiHost objects.
+  """
+  return call(resource_root.get, HOSTS_PATH, ApiHost, True,
+          params=view and dict(view=view) or None)
+
+def delete_host(resource_root, host_id):
+  """
+  Delete a host by id
+  @param resource_root: The root Resource object.
+  @param host_id: Host id
+  @return: The deleted ApiHost object
+  """
+  return call(resource_root.delete, "%s/%s" % (HOSTS_PATH, host_id), ApiHost)
+
+
+class ApiHost(BaseApiResource):
+  _ATTRIBUTES = {
+    'hostId'            : None,
+    'hostname'          : None,
+    'ipAddress'         : None,
+    'rackId'            : None,
+    'status'            : ROAttr(),
+    'lastHeartbeat'     : ROAttr(datetime.datetime),
+    'roleRefs'          : ROAttr(ApiRoleRef),
+    'healthSummary'     : ROAttr(),
+    'healthChecks'      : ROAttr(),
+    'hostUrl'           : ROAttr(),
+    'commissionState'   : ROAttr(),
+    'maintenanceMode'   : ROAttr(),
+    'maintenanceOwners' : ROAttr(),
+    'numCores'          : ROAttr(),
+    'totalPhysMemBytes' : ROAttr(),
+  }
+
+  def __init__(self, resource_root, hostId=None, hostname=None,
+      ipAddress=None, rackId=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiHost>: %s (%s)" % (self.hostId, self.ipAddress)
+
+  def _path(self):
+    return HOSTS_PATH + '/' + self.hostId
+
+  def _put_host(self):
+    """
+    Update this resource.
+    @return: The updated object.
+    """
+    return self._put('', ApiHost, data=self)
+
+  def get_config(self, view=None):
+    """
+    Retrieve the host's configuration.
+
+    The 'summary' view contains strings as the dictionary values. The full
+    view contains ApiConfig instances as the values.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: Dictionary with configuration data.
+    """
+    return self._get_config("config", view)
+
+  def update_config(self, config):
+    """
+    Update the host's configuration.
+
+    @param config: Dictionary with configuration to update.
+    @return: Dictionary with updated configuration.
+    """
+    return self._update_config("config", config)
+
+  def get_metrics(self, from_time=None, to_time=None, metrics=None,
+      ifs=[], storageIds=[], view=None):
+    """
+    This endpoint is not supported as of v6. Use the timeseries API
+    instead. To get all metrics for a host with the timeseries API use
+    the query:
+
+    'select * where hostId = $HOST_ID'.
+
+    To get specific metrics for a host use a comma-separated list of
+    the metric names as follows:
+
+    'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
+
+    For more information see http://tiny.cloudera.com/tsquery_doc
+    @param from_time: A datetime; start of the period to query (optional).
+    @param to_time: A datetime; end of the period to query (default = now).
+    @param metrics: List of metrics to query (default = all).
+    @param ifs: network interfaces to query. Default all, use None to disable.
+    @param storageIds: storage IDs to query. Default all, use None to disable.
+    @param view: View to materialize ('full' or 'summary')
+    @return: List of metrics and their readings.
+    """
+    params = { }
+    if ifs:
+      params['ifs'] = ifs
+    elif ifs is None:
+      params['queryNw'] = 'false'
+    if storageIds:
+      params['storageIds'] = storageIds
+    elif storageIds is None:
+      params['queryStorage'] = 'false'
+    return self._get_resource_root().get_metrics(self._path() + '/metrics',
+        from_time, to_time, metrics, view, params)
+
+  def enter_maintenance_mode(self):
+    """
+    Put the host in maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('enterMaintenanceMode')
+    if cmd.success:
+      self._update(get_host(self._get_resource_root(), self.hostId))
+    return cmd
+
+  def exit_maintenance_mode(self):
+    """
+    Take the host out of maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('exitMaintenanceMode')
+    if cmd.success:
+      self._update(get_host(self._get_resource_root(), self.hostId))
+    return cmd
+
+  def set_rack_id(self, rackId):
+    """
+    Update the rack ID of this host.
+    """
+    self.rackId = rackId
+    self._put_host()
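A minimal sketch for the host helpers; the host ID, hostname, IP address and rack are placeholders:

    from cm_api.api_client import ApiResource
    from cm_api.endpoints.hosts import create_host, get_all_hosts

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)

    # Register a host with CM, then place it in a rack and maintenance mode.
    host = create_host(api, 'host-id-1234', 'node1.example.com', '10.0.0.21')
    host.set_rack_id('/rack1')
    cmd = host.enter_maintenance_mode()

    # List every known host with full details.
    all_hosts = get_all_hosts(api, view='full')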
diff --git a/cm-api/src/cm_api/endpoints/parcels.py b/cm-api/src/cm_api/endpoints/parcels.py
new file mode 100644 (file)
index 0000000..c6473ce
--- /dev/null
@@ -0,0 +1,171 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+PARCELS_PATH = "/clusters/%s/parcels"
+PARCEL_PATH = "/clusters/%s/parcels/products/%s/versions/%s"
+
+def get_parcel(resource_root, product, version, cluster_name="default"):
+  """
+  Lookup a parcel by name
+  @param resource_root: The root Resource object.
+  @param product: Parcel product name
+  @param version: Parcel version
+  @param cluster_name: Cluster name
+  @return: An ApiParcel object
+  """
+  return _get_parcel(resource_root, PARCEL_PATH % (cluster_name, product, version))
+
+def _get_parcel(resource_root, path):
+  return call(resource_root.get, path, ApiParcel, api_version=3)
+
+def get_all_parcels(resource_root, cluster_name="default", view=None):
+  """
+  Get all parcels
+  @param resource_root: The root Resource object.
+  @param cluster_name: Cluster name
+  @return: A list of ApiParcel objects.
+  @since: API v3
+  """
+  return call(resource_root.get, PARCELS_PATH % (cluster_name,),
+      ApiParcel, True, params=view and dict(view=view) or None, api_version=3)
+
+class ApiParcelState(BaseApiObject):
+  """
+  An object that represents the state of a parcel.
+  """
+  _ATTRIBUTES = {
+      'progress'      : ROAttr(),
+      'totalProgress' : ROAttr(),
+      'count'         : ROAttr(),
+      'totalCount'    : ROAttr(),
+      'warnings'      : ROAttr(),
+      'errors'        : ROAttr(),
+    }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+  def __str__(self):
+    return "<ApiParcelState>: (progress: %s) (totalProgress: %s) (count: %s) (totalCount: %s)" % (
+        self.progress, self.totalProgress, self.count, self.totalCount)
+
+class ApiParcel(BaseApiResource):
+  """
+  An object that represents a parcel and allows administrative operations.
+
+  @since: API v3
+  """
+  _ATTRIBUTES = {
+    'product'     : ROAttr(),
+    'version'     : ROAttr(),
+    'stage'       : ROAttr(),
+    'state'       : ROAttr(ApiParcelState),
+    'clusterRef'  : ROAttr(ApiClusterRef),
+  }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+  def __str__(self):
+    return "<ApiParcel>: %s-%s (stage: %s) (state: %s) (cluster: %s)" % (
+        self.product, self.version, self.stage, self.state, self._get_cluster_name())
+
+  def _api_version(self):
+    return 3
+
+  def _path(self):
+    """
+    Return the API path for this service.
+    """
+    return PARCEL_PATH % (self._get_cluster_name(), self.product, self.version)
+
+  def _get_cluster_name(self):
+    if self.clusterRef:
+      return self.clusterRef.clusterName
+    return None
+
+  def start_download(self):
+    """
+    Start the download of the parcel
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('startDownload')
+
+  def cancel_download(self):
+    """
+    Cancels the parcel download. If the parcel is not
+    currently downloading, an exception is raised.
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('cancelDownload')
+
+  def remove_download(self):
+    """
+    Removes the downloaded parcel
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('removeDownload')
+
+  def start_distribution(self):
+    """
+    Start the distribution of the parcel to all hosts
+    in the cluster.
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('startDistribution')
+
+  def cancel_distribution(self):
+    """
+    Cancels the parcel distribution. If the parcel is not
+    currently distributing, an exception is raised.
+
+    @return: Reference to the completed command
+    """
+    return self._cmd('cancelDistribution')
+
+  def start_removal_of_distribution(self):
+    """
+    Start the removal of the distribution of the parcel
+    from all the hosts in the cluster.
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('startRemovalOfDistribution')
+
+  def activate(self):
+    """
+    Activate the parcel on all the hosts in the cluster.
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('activate')
+
+  def deactivate(self):
+    """
+    Deactivates the parcel on all the hosts in the cluster.
+
+    @return: Reference to the completed command.
+    """
+    return self._cmd('deactivate')
+
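Parcel commands return as soon as they are submitted, and progress is tracked through the parcel's stage, so a typical flow re-fetches the parcel between steps. A rough sketch; the product, version, cluster name and stage strings follow Cloudera Manager's parcel lifecycle but are placeholders here:

    import time

    from cm_api.api_client import ApiResource
    from cm_api.endpoints.parcels import get_parcel

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)

    def wait_for_stage(target_stage):
        # Poll the parcel until it reaches the requested stage (no timeout handling).
        while True:
            p = get_parcel(api, 'CDH', '5.0.2-1.cdh5.0.2.p0.32', 'cluster1')
            if p.stage == target_stage:
                return p
            time.sleep(10)

    parcel = get_parcel(api, 'CDH', '5.0.2-1.cdh5.0.2.p0.32', 'cluster1')
    parcel.start_download()
    parcel = wait_for_stage('DOWNLOADED')
    parcel.start_distribution()
    parcel = wait_for_stage('DISTRIBUTED')
    parcel.activate()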
diff --git a/cm-api/src/cm_api/endpoints/role_config_groups.py b/cm-api/src/cm_api/endpoints/role_config_groups.py
new file mode 100644 (file)
index 0000000..a5d5efd
--- /dev/null
@@ -0,0 +1,241 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+from cm_api.endpoints.roles import ApiRole
+
+__docformat__ = "epytext"
+
+ROLE_CONFIG_GROUPS_PATH = "/clusters/%s/services/%s/roleConfigGroups"
+CM_ROLE_CONFIG_GROUPS_PATH = "/cm/service/roleConfigGroups"
+
+def _get_role_config_groups_path(cluster_name, service_name):
+  if cluster_name:
+    return ROLE_CONFIG_GROUPS_PATH % (cluster_name, service_name)
+  else:
+    return CM_ROLE_CONFIG_GROUPS_PATH
+
+def _get_role_config_group_path(cluster_name, service_name, name):
+  path = _get_role_config_groups_path(cluster_name, service_name)
+  return "%s/%s" % (path, name)
+
+def create_role_config_groups(resource_root, service_name, apigroup_list,
+    cluster_name="default"):
+  """
+  Create role config groups.
+  @param resource_root: The root Resource object.
+  @param service_name: Service name.
+  @param apigroup_list: List of role config groups to create.
+  @param cluster_name: Cluster name.
+  @return: List of created ApiRoleConfigGroup objects.
+  @since: API v3
+  """
+  return call(resource_root.post,
+      _get_role_config_groups_path(cluster_name, service_name),
+      ApiRoleConfigGroup, True, data=apigroup_list, api_version=3)
+
+def create_role_config_group(resource_root, service_name, name, display_name,
+    role_type, cluster_name="default"):
+  """
+  Create a role config group.
+  @param resource_root: The root Resource object.
+  @param service_name: Service name.
+  @param name: The name of the new group.
+  @param display_name: The display name of the new group.
+  @param role_type: The role type of the new group.
+  @param cluster_name: Cluster name.
+  @return: The created ApiRoleConfigGroup object.
+  """
+  apigroup = ApiRoleConfigGroup(resource_root, name, display_name, role_type)
+  return create_role_config_groups(resource_root, service_name, [apigroup],
+      cluster_name)[0]
+
+def get_role_config_group(resource_root, service_name, name,
+    cluster_name="default"):
+  """
+  Find a role config group by name.
+  @param resource_root: The root Resource object.
+  @param service_name: Service name.
+  @param name: Role config group name.
+  @param cluster_name: Cluster name.
+  @return: An ApiRoleConfigGroup object.
+  """
+  return _get_role_config_group(resource_root, _get_role_config_group_path(
+      cluster_name, service_name, name))
+
+def _get_role_config_group(resource_root, path):
+  return call(resource_root.get, path, ApiRoleConfigGroup, api_version=3)
+
+def get_all_role_config_groups(resource_root, service_name,
+    cluster_name="default"):
+  """
+  Get all role config groups in the specified service.
+  @param resource_root: The root Resource object.
+  @param service_name: Service name.
+  @param cluster_name: Cluster name.
+  @return: A list of ApiRoleConfigGroup objects.
+  @since: API v3
+  """
+  return call(resource_root.get,
+      _get_role_config_groups_path(cluster_name, service_name),
+      ApiRoleConfigGroup, True, api_version=3)
+
+def update_role_config_group(resource_root, service_name, name, apigroup,
+    cluster_name="default"):
+  """
+  Update a role config group by name.
+  @param resource_root: The root Resource object.
+  @param service_name: Service name.
+  @param name: Role config group name.
+  @param apigroup: The updated role config group.
+  @param cluster_name: Cluster name.
+  @return: The updated ApiRoleConfigGroup object.
+  @since: API v3
+  """
+  return call(resource_root.put,
+      _get_role_config_group_path(cluster_name, service_name, name),
+      ApiRoleConfigGroup, data=apigroup, api_version=3)
+
+def delete_role_config_group(resource_root, service_name, name,
+    cluster_name="default"):
+  """
+  Delete a role config group by name.
+  @param resource_root: The root Resource object.
+  @param service_name: Service name.
+  @param name: Role config group name.
+  @param cluster_name: Cluster name.
+  @return: The deleted ApiRoleConfigGroup object.
+  @since: API v3
+  """
+  return call(resource_root.delete,
+      _get_role_config_group_path(cluster_name, service_name, name),
+      ApiRoleConfigGroup, api_version=3)
+
+def move_roles(resource_root, service_name, name, role_names,
+    cluster_name="default"):
+  """
+  Moves roles to the specified role config group.
+
+  The roles can be moved from any role config group belonging
+  to the same service. The role type of the destination group
+  must match the role type of the roles.
+
+  @param name: The name of the group the roles will be moved to.
+  @param role_names: The names of the roles to move.
+  @return: List of roles which have been moved successfully.
+  @since: API v3
+  """
+  return call(resource_root.put,
+      _get_role_config_group_path(cluster_name, service_name, name) + '/roles',
+      ApiRole, True, data=role_names, api_version=3)
+
+def move_roles_to_base_role_config_group(resource_root, service_name,
+     role_names, cluster_name="default"):
+  """
+  Moves roles to the base role config group.
+
+  The roles can be moved from any role config group belonging to the same
+  service. The role type of the roles may vary. Each role will be moved to
+  its corresponding base group depending on its role type.
+
+  @param role_names: The names of the roles to move.
+  @return: List of roles which have been moved successfully.
+  @since: API v3
+  """
+  return call(resource_root.put,
+      _get_role_config_groups_path(cluster_name, service_name) + '/roles',
+      ApiRole, True, data=role_names, api_version=3)
+
+
+class ApiRoleConfigGroup(BaseApiResource):
+  """
+  name is RW only temporarily; once all RCG names are unique,
+  this property will be auto-generated and Read-only
+
+  @since: API v3
+  """
+  _ATTRIBUTES = {
+    'name'        : None,
+    'displayName' : None,
+    'roleType'    : None,
+    'config'      : Attr(ApiConfig),
+    'base'        : ROAttr(),
+    'serviceRef'  : ROAttr(ApiServiceRef),
+  }
+
+  def __init__(self, resource_root, name=None, displayName=None, roleType=None,
+      config=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiRoleConfigGroup>: %s (cluster: %s; service: %s)" % (
+        self.name, self.serviceRef.clusterName, self.serviceRef.serviceName)
+
+  def _api_version(self):
+    return 3
+
+  def _path(self):
+    return _get_role_config_group_path(self.serviceRef.clusterName,
+                          self.serviceRef.serviceName,
+                          self.name)
+
+  def get_config(self, view = None):
+    """
+    Retrieve the group's configuration.
+
+    The 'summary' view contains strings as the dictionary values. The full
+    view contains ApiConfig instances as the values.
+
+    @param view: View to materialize ('full' or 'summary').
+    @return: Dictionary with configuration data.
+    """
+    path = self._path() + '/config'
+    resp = self._get_resource_root().get(path,
+        params = view and dict(view=view) or None)
+    return json_to_config(resp, view == 'full')
+
+  def update_config(self, config):
+    """
+    Update the group's configuration.
+
+    @param config: Dictionary with configuration to update.
+    @return: Dictionary with updated configuration.
+    """
+    path = self._path() + '/config'
+    resp = self._get_resource_root().put(path, data = config_to_json(config))
+    return json_to_config(resp)
+
+  def get_all_roles(self):
+    """
+    Retrieve the roles in this role config group.
+
+    @return: List of roles in this role config group.
+    """
+    return self._get("roles", ApiRole, True)
+
+  def move_roles(self, roles):
+    """
+    Moves roles to this role config group.
+
+    The roles can be moved from any role config group belonging
+    to the same service. The role type of the destination group
+    must match the role type of the roles.
+
+    @param roles: The names of the roles to move.
+    @return: List of roles which have been moved successfully.
+    """
+    return move_roles(self._get_resource_root(), self.serviceRef.serviceName,
+        self.name, roles, self.serviceRef.clusterName)
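Role config groups are usually adjusted through the module helpers or the ApiRoleConfigGroup methods above; a brief sketch with placeholder service, group, role and property names:

    from cm_api.api_client import ApiResource
    from cm_api.endpoints.role_config_groups import get_role_config_group

    api = ApiResource('cm-host.example.com', username='admin', password='admin', version=6)

    # Fetch the DataNode base group of an HDFS service and tune one property.
    group = get_role_config_group(api, 'hdfs1', 'hdfs1-DATANODE-BASE', 'cluster1')
    group.update_config({'datanode_java_heapsize': '1073741824'})

    # Move two roles out of a custom group back into this one.
    moved = group.move_roles(['hdfs1-DATANODE-1', 'hdfs1-DATANODE-2'])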
diff --git a/cm-api/src/cm_api/endpoints/roles.py b/cm-api/src/cm_api/endpoints/roles.py
new file mode 100644 (file)
index 0000000..026c206
--- /dev/null
@@ -0,0 +1,255 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+ROLES_PATH = "/clusters/%s/services/%s/roles"
+CM_ROLES_PATH = "/cm/service/roles"
+
+def _get_roles_path(cluster_name, service_name):
+  if cluster_name:
+    return ROLES_PATH % (cluster_name, service_name)
+  else:
+    return CM_ROLES_PATH
+
+def _get_role_path(cluster_name, service_name, role_name):
+  path = _get_roles_path(cluster_name, service_name)
+  return "%s/%s" % (path, role_name)
+
+def create_role(resource_root,
+                service_name,
+                role_type,
+                role_name,
+                host_id,
+                cluster_name="default"):
+  """
+  Create a role
+  @param resource_root: The root Resource object.
+  @param service_name: Service name
+  @param role_type: Role type
+  @param role_name: Role name
+  @param host_id: Host id
+  @param cluster_name: Cluster name
+  @return: An ApiRole object
+  """
+  apirole = ApiRole(resource_root, role_name, role_type,
+                    ApiHostRef(resource_root, host_id))
+  return call(resource_root.post,
+      _get_roles_path(cluster_name, service_name),
+      ApiRole, True, data=[apirole])[0]
+
+def get_role(resource_root, service_name, name, cluster_name="default"):
+  """
+  Lookup a role by name
+  @param resource_root: The root Resource object.
+  @param service_name: Service name
+  @param name: Role name
+  @param cluster_name: Cluster name
+  @return: An ApiRole object
+  """
+  return _get_role(resource_root, _get_role_path(cluster_name, service_name, name))
+
+def _get_role(resource_root, path):
+  return call(resource_root.get, path, ApiRole)
+
+def get_all_roles(resource_root, service_name, cluster_name="default", view=None):
+  """
+  Get all roles
+  @param resource_root: The root Resource object.
+  @param service_name: Service name
+  @param cluster_name: Cluster name
+  @return: A list of ApiRole objects.
+  """
+  return call(resource_root.get,
+      _get_roles_path(cluster_name, service_name),
+      ApiRole, True, params=view and dict(view=view) or None)
+
+def get_roles_by_type(resource_root, service_name, role_type,
+                      cluster_name="default", view=None):
+  """
+  Get all roles of a certain type in a service
+  @param resource_root: The root Resource object.
+  @param service_name: Service name
+  @param role_type: Role type
+  @param cluster_name: Cluster name
+  @return: A list of ApiRole objects.
+  """
+  roles = get_all_roles(resource_root, service_name, cluster_name, view)
+  return [ r for r in roles if r.type == role_type ]
+
+def delete_role(resource_root, service_name, name, cluster_name="default"):
+  """
+  Delete a role by name
+  @param resource_root: The root Resource object.
+  @param service_name: Service name
+  @param name: Role name
+  @param cluster_name: Cluster name
+  @return: The deleted ApiRole object
+  """
+  return call(resource_root.delete,
+      _get_role_path(cluster_name, service_name, name), ApiRole)
+
+
+class ApiRole(BaseApiResource):
+  _ATTRIBUTES = {
+    'name'                  : None,
+    'type'                  : None,
+    'hostRef'               : Attr(ApiHostRef),
+    'roleState'             : ROAttr(),
+    'healthSummary'         : ROAttr(),
+    'healthChecks'          : ROAttr(),
+    'serviceRef'            : ROAttr(ApiServiceRef),
+    'configStale'           : ROAttr(),
+    'configStalenessStatus' : ROAttr(),
+    'haStatus'              : ROAttr(),
+    'roleUrl'               : ROAttr(),
+    'commissionState'       : ROAttr(),
+    'maintenanceMode'       : ROAttr(),
+    'maintenanceOwners'     : ROAttr(),
+    'roleConfigGroupRef'    : ROAttr(ApiRoleConfigGroupRef),
+    'zooKeeperServerMode'   : ROAttr(),
+  }
+
+  def __init__(self, resource_root, name=None, type=None, hostRef=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiRole>: %s (cluster: %s; service: %s)" % (
+        self.name, self.serviceRef.clusterName, self.serviceRef.serviceName)
+
+  def _path(self):
+    return _get_role_path(self.serviceRef.clusterName,
+                          self.serviceRef.serviceName,
+                          self.name)
+
+  def _get_log(self, log):
+    path = "%s/logs/%s" % (self._path(), log)
+    return self._get_resource_root().get(path)
+
+  def get_commands(self, view=None):
+    """
+    Retrieve a list of running commands for this role.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: A list of running commands.
+    """
+    return self._get("commands", ApiCommand, True,
+        params = view and dict(view=view) or None)
+
+  def get_config(self, view = None):
+    """
+    Retrieve the role's configuration.
+
+    The 'summary' view contains strings as the dictionary values. The full
+    view contains ApiConfig instances as the values.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: Dictionary with configuration data.
+    """
+    return self._get_config("config", view)
+
+  def update_config(self, config):
+    """
+    Update the role's configuration.
+
+    @param config: Dictionary with configuration to update.
+    @return: Dictionary with updated configuration.
+    """
+    return self._update_config("config", config)
+
+  def get_full_log(self):
+    """
+    Retrieve the contents of the role's log file.
+
+    @return: Contents of log file.
+    """
+    return self._get_log('full')
+
+  def get_stdout(self):
+    """
+    Retrieve the contents of the role's standard output.
+
+    @return: Contents of stdout.
+    """
+    return self._get_log('stdout')
+
+  def get_stderr(self):
+    """
+    Retrieve the contents of the role's standard error.
+
+    @return: Contents of stderr.
+    """
+    return self._get_log('stderr')
+
+  def get_metrics(self, from_time=None, to_time=None, metrics=None, view=None):
+    """
+    This endpoint is not supported as of v6. Use the timeseries API
+    instead. To get all metrics for a role with the timeseries API use
+    the query:
+
+    'select * where roleName = $ROLE_NAME'.
+
+    To get specific metrics for a role use a comma-separated list of
+    the metric names as follows:
+
+    'select $METRIC_NAME1, $METRIC_NAME2 where roleName = $ROLE_NAME'.
+
+    For more information see http://tiny.cloudera.com/tsquery_doc
+    @param from_time: A datetime; start of the period to query (optional).
+    @param to_time: A datetime; end of the period to query (default = now).
+    @param metrics: List of metrics to query (default = all).
+    @param view: View to materialize ('full' or 'summary')
+    @return: List of metrics and their readings.
+    """
+    return self._get_resource_root().get_metrics(self._path() + '/metrics',
+        from_time, to_time, metrics, view)
+
+  def enter_maintenance_mode(self):
+    """
+    Put the role in maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('enterMaintenanceMode')
+    if cmd.success:
+      self._update(_get_role(self._get_resource_root(), self._path()))
+    return cmd
+
+  def exit_maintenance_mode(self):
+    """
+    Take the role out of maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('exitMaintenanceMode')
+    if cmd.success:
+      self._update(_get_role(self._get_resource_root(), self._path()))
+    return cmd
+
+  def list_commands_by_name(self):
+    """
+    Lists all the commands that can be executed by name
+    on the provided role.
+
+    @return: A list of command metadata objects
+    @since: API v6
+    """
+    return self._get("commandsByName", ApiCommandMetadata, True, api_version=6)
+
+
diff --git a/cm-api/src/cm_api/endpoints/services.py b/cm-api/src/cm_api/endpoints/services.py
new file mode 100644 (file)
index 0000000..200ed9d
--- /dev/null
@@ -0,0 +1,1601 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+from cm_api.endpoints.types import *
+from cm_api.endpoints import roles, role_config_groups
+
+__docformat__ = "epytext"
+
+SERVICES_PATH = "/clusters/%s/services"
+SERVICE_PATH = "/clusters/%s/services/%s"
+ROLETYPES_CFG_KEY = 'roleTypeConfigs'
+
+def create_service(resource_root, name, service_type,
+                   cluster_name="default"):
+  """
+  Create a service
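+
+  A minimal usage sketch (C{api} is assumed to be an existing ApiResource
+  handle; the service and cluster names are placeholders)::
+
+    hdfs = create_service(api, "hdfs1", "HDFS", cluster_name="cluster1")
+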
+  @param resource_root: The root Resource object.
+  @param name: Service name
+  @param service_type: Service type
+  @param cluster_name: Cluster name
+  @return: An ApiService object
+  """
+  apiservice = ApiService(resource_root, name, service_type)
+  return call(resource_root.post,
+      SERVICES_PATH % (cluster_name,),
+      ApiService, True, data=[apiservice])[0]
+
+def get_service(resource_root, name, cluster_name="default"):
+  """
+  Lookup a service by name
+  @param resource_root: The root Resource object.
+  @param name: Service name
+  @param cluster_name: Cluster name
+  @return: An ApiService object
+  """
+  return _get_service(resource_root, "%s/%s" % (SERVICES_PATH % (cluster_name,), name))
+
+def _get_service(resource_root, path):
+  return call(resource_root.get, path, ApiService)
+
+def get_all_services(resource_root, cluster_name="default", view=None):
+  """
+  Get all services
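+
+  A minimal usage sketch (C{api} is assumed to be an existing ApiResource
+  handle and "cluster1" a placeholder cluster name)::
+
+    for svc in get_all_services(api, cluster_name="cluster1", view="summary"):
+      print svc.name, svc.serviceState
+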
+  @param resource_root: The root Resource object.
+  @param cluster_name: Cluster name
+  @param view: View to materialize ('full' or 'summary')
+  @return: A list of ApiService objects.
+  """
+  return call(resource_root.get,
+      SERVICES_PATH % (cluster_name,),
+      ApiService, True, params=view and dict(view=view) or None)
+
+def delete_service(resource_root, name, cluster_name="default"):
+  """
+  Delete a service by name
+  @param resource_root: The root Resource object.
+  @param name: Service name
+  @param cluster_name: Cluster name
+  @return: The deleted ApiService object
+  """
+  return call(resource_root.delete,
+      "%s/%s" % (SERVICES_PATH % (cluster_name,), name),
+      ApiService)
+
+
+class ApiService(BaseApiResource):
+  _ATTRIBUTES = {
+    'name'                        : None,
+    'type'                        : None,
+    'displayName'                 : None,
+    'serviceState'                : ROAttr(),
+    'healthSummary'               : ROAttr(),
+    'healthChecks'                : ROAttr(),
+    'clusterRef'                  : ROAttr(ApiClusterRef),
+    'configStale'                 : ROAttr(),
+    'configStalenessStatus'       : ROAttr(),
+    'clientConfigStalenessStatus' : ROAttr(),
+    'serviceUrl'                  : ROAttr(),
+    'maintenanceMode'             : ROAttr(),
+    'maintenanceOwners'           : ROAttr(),
+  }
+
+  def __init__(self, resource_root, name=None, type=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiService>: %s (cluster: %s)" % (
+        self.name, self._get_cluster_name())
+
+  def _get_cluster_name(self):
+    if hasattr(self, 'clusterRef') and self.clusterRef:
+      return self.clusterRef.clusterName
+    return None
+
+  def _path(self):
+    """
+    Return the API path for this service.
+
+    This method assumes that lack of a cluster reference means that the
+    object refers to the Cloudera Management Services instance.
+    """
+    if self._get_cluster_name():
+      return SERVICE_PATH % (self._get_cluster_name(), self.name)
+    else:
+      return '/cm/service'
+
+  def _role_cmd(self, cmd, roles, api_version=1):
+    return self._post("roleCommands/" + cmd, ApiBulkCommandList,
+        data=roles, api_version=api_version)
+
+  def _parse_svc_config(self, json_dic, view = None):
+    """
+    Parse a json-decoded ApiServiceConfig dictionary into a 2-tuple.
+
+    @param json_dic: The json dictionary with the config data.
+    @param view: View to materialize.
+    @return: 2-tuple (service config dictionary, role type configurations)
+    """
+    svc_config = json_to_config(json_dic, view == 'full')
+    rt_configs = { }
+    if json_dic.has_key(ROLETYPES_CFG_KEY):
+      for rt_config in json_dic[ROLETYPES_CFG_KEY]:
+        rt_configs[rt_config['roleType']] = \
+            json_to_config(rt_config, view == 'full')
+
+    return (svc_config, rt_configs)
+
+  def get_commands(self, view=None):
+    """
+    Retrieve a list of running commands for this service.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: A list of running commands.
+    """
+    return self._get("commands", ApiCommand, True,
+        params = view and dict(view=view) or None)
+
+  def get_running_activities(self):
+    return self.query_activities()
+
+  def query_activities(self, query_str=None):
+    return self._get("activities", ApiActivity, True,
+        params=query_str and dict(query=query_str) or dict())
+
+  def get_activity(self, job_id):
+    return self._get("activities/" + job_id, ApiActivity)
+
+  def get_impala_queries(self, start_time, end_time, filter_str="", limit=100,
+     offset=0):
+    """
+    Returns a list of queries that satisfy the filter
+
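+    A minimal usage sketch (C{impala} is assumed to be an Impala
+    ApiService object; the filter string is only an example)::
+
+      import datetime
+      end = datetime.datetime.now()   # naive datetimes use the server time zone
+      start = end - datetime.timedelta(hours=1)
+      resp = impala.get_impala_queries(start, end, filter_str="user = root")
+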
+    @type start_time: datetime.datetime. Note that the datetime must either be
+                      time zone aware or specified in the server time zone. See
+                      the python datetime documentation for more details about
+                      python's time zone handling.
+    @param start_time: Queries must have ended after this time
+    @type end_time: datetime.datetime. Note that the datetime must either be
+                    time zone aware or specified in the server time zone. See
+                    the python datetime documentation for more details about
+                    python's time zone handling.
+    @param end_time: Queries must have started before this time
+    @param filter_str: A filter to apply to the queries. For example:
+                       'user = root and queryDuration > 5s'
+    @param limit: The maximum number of results to return
+    @param offset: The offset into the return list
+    @since: API v4
+    """
+    params = {
+      'from':   start_time.isoformat(),
+      'to':     end_time.isoformat(),
+      'filter': filter_str,
+      'limit':  limit,
+      'offset': offset,
+    }
+    return self._get("impalaQueries", ApiImpalaQueryResponse,
+        params=params, api_version=4)
+
+  def cancel_impala_query(self, query_id):
+    """
+    Cancel the query.
+
+    @param query_id: The query ID
+    @return: The warning message, if any.
+    @since: API v4
+    """
+    return self._post("impalaQueries/%s/cancel" % query_id,
+        ApiImpalaCancelResponse, api_version=4)
+
+  def get_query_details(self, query_id, format='text'):
+    """
+    Get the query details
+
+    @param query_id: The query ID
+    @param format: The format of the response ('text' or 'thrift_encoded')
+    @return: The details text
+    @since: API v4
+    """
+    return self._get("impalaQueries/" + query_id, ApiImpalaQueryDetailsResponse,
+        params=dict(format=format), api_version=4)
+
+  def get_impala_query_attributes(self):
+    """
+    Returns the list of all attributes that the Service Monitor can associate
+    with Impala queries.
+
+    Examples of attributes include the user who issued the query and the
+    number of HDFS bytes read by the query.
+
+    These attributes can be used to search for specific Impala queries through
+    the get_impala_queries API. For example the 'user' attribute could be used
+    in the search 'user = root'. If the attribute is numeric it can also be used
+    as a metric in a tsquery (ie, 'select hdfs_bytes_read from IMPALA_QUERIES').
+
+    Note that this response is identical for all Impala services.
+
+    @return: A list of the Impala query attributes
+    @since: API v6
+    """
+    return self._get("impalaQueries/attributes", ApiImpalaQueryAttribute,
+        api_version=6)
+
+  def create_impala_catalog_database(self):
+    """
+    Create the Impala Catalog Database. Only works with embedded postgresql
+    database. This command should usually be followed by a call to
+    create_impala_catalog_database_tables.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('impalaCreateCatalogDatabase', api_version=6)
+
+  def create_impala_catalog_database_tables(self):
+    """
+    Creates the Impala Catalog Database tables in the configured database.
+    Will do nothing if tables already exist. Will not perform an upgrade.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('impalaCreateCatalogDatabaseTables', api_version=6)
+
+  def create_impala_user_dir(self):
+    """
+    Create the Impala user directory
+
+    @return: Reference to submitted command.
+    @since: API v6
+    """
+    return self._cmd('impalaCreateUserDir', api_version=6)
+
+  def get_yarn_applications(self, start_time, end_time, filter_str="", limit=100,
+      offset=0):
+    """
+    Returns a list of YARN applications that satisfy the filter
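+
+    A minimal usage sketch (C{yarn} is assumed to be a YARN ApiService
+    object; the filter string is only an example)::
+
+      import datetime
+      end = datetime.datetime.now()   # naive datetimes use the server time zone
+      start = end - datetime.timedelta(days=1)
+      apps = yarn.get_yarn_applications(start, end, filter_str="user = root")
+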
+    @type start_time: datetime.datetime. Note that the datetime must either be
+                      time zone aware or specified in the server time zone. See
+                      the python datetime documentation for more details about
+                      python's time zone handling.
+    @param start_time: Applications must have ended after this time
+    @type end_time: datetime.datetime. Note that the datetime must either be
+                    time zone aware or specified in the server time zone. See
+                    the python datetime documentation for more details about
+                    python's time zone handling.
+    @param end_time: Applications must have started before this time
+    @param filter_str: A filter to apply to the applications. For example:
+                       'user = root and applicationDuration > 5s'
+    @param limit: The maximum number of results to return
+    @param offset: The offset into the return list
+    @since: API v6
+    """
+    params = {
+      'from':   start_time.isoformat(),
+      'to':     end_time.isoformat(),
+      'filter': filter_str,
+      'limit':  limit,
+      'offset': offset
+    }
+    return self._get("yarnApplications", ApiYarnApplicationResponse,
+        params=params, api_version=6)
+
+  def kill_yarn_application(self, application_id):
+    """
+    Kills the application.
+
+    @return: The warning message, if any.
+    @since: API v6
+    """
+    return self._post("yarnApplications/%s/kill" % (application_id, ),
+        ApiYarnKillResponse, api_version=6)
+
+  def get_yarn_application_attributes(self):
+    """
+    Returns the list of all attributes that the Service Monitor can associate
+    with YARN applications.
+
+    Examples of attributes include the user who ran the application and the
+    number of maps completed by the application.
+
+    These attributes can be used to search for specific YARN applications through
+    the get_yarn_applications API. For example the 'user' attribute could be used
+    in the search 'user = root'. If the attribute is numeric it can also be used
+    as a metric in a tsquery (ie, 'select maps_completed from YARN_APPLICATIONS').
+
+    Note that this response is identical for all YARN services.
+
+    @return: A list of the YARN application attributes
+    @since: API v6
+    """
+    return self._get("yarnApplications/attributes", ApiYarnApplicationAttribute,
+        api_version=6)
+
+  def create_yarn_job_history_dir(self):
+    """
+    Create the Yarn job history directory.
+
+    @return: Reference to submitted command.
+    @since: API v6
+    """
+    return self._cmd('yarnCreateJobHistoryDirCommand', api_version=6)
+
+  def create_yarn_node_manager_remote_app_log_dir(self):
+    """
+    Create the Yarn NodeManager remote application log directory.
+
+    @return: Reference to submitted command.
+    @since: API v6
+    """
+    return self._cmd('yarnNodeManagerRemoteAppLogDirCommand', api_version=6)
+
+  def get_config(self, view = None):
+    """
+    Retrieve the service's configuration.
+
+    Retrieves both the service configuration and role type configuration
+    for each of the service's supported role types. The role type
+    configurations are returned as a dictionary, whose keys are the
+    role type name, and values are the respective configuration dictionaries.
+
+    The 'summary' view contains strings as the dictionary values. The full
+    view contains ApiConfig instances as the values.
+
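+    A minimal usage sketch (C{service} is assumed to be an existing
+    ApiService object)::
+
+      svc_config, rt_configs = service.get_config(view='full')
+      for role_type, cfg in rt_configs.iteritems():
+        print role_type, len(cfg)
+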
+    @param view: View to materialize ('full' or 'summary')
+    @return: 2-tuple (service config dictionary, role type configurations)
+    """
+    path = self._path() + '/config'
+    resp = self._get_resource_root().get(path,
+        params = view and dict(view=view) or None)
+    return self._parse_svc_config(resp, view)
+
+  def update_config(self, svc_config, **rt_configs):
+    """
+    Update the service's configuration.
+
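+    A minimal usage sketch; the configuration keys are placeholders, and
+    each keyword argument names a role type (e.g. DATANODE)::
+
+      service.update_config({'some_service_config_key': 'value'},
+                            DATANODE={'some_datanode_config_key': 'value'})
+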
+    @param svc_config: Dictionary with service configuration to update.
+    @param rt_configs: Dict of role type configurations to update.
+    @return: 2-tuple (service config dictionary, role type configurations)
+    """
+    path = self._path() + '/config'
+
+    if svc_config:
+      data = config_to_api_list(svc_config)
+    else:
+      data = { }
+    if rt_configs:
+      rt_list = [ ]
+      for rt, cfg in rt_configs.iteritems():
+        rt_data = config_to_api_list(cfg)
+        rt_data['roleType'] = rt
+        rt_list.append(rt_data)
+      data[ROLETYPES_CFG_KEY] = rt_list
+
+    resp = self._get_resource_root().put(path, data = json.dumps(data))
+    return self._parse_svc_config(resp)
+
+  def create_role(self, role_name, role_type, host_id):
+    """
+    Create a role.
+
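+    A minimal usage sketch (the role name, role type and host ID shown
+    are placeholders)::
+
+      role = service.create_role("hdfs1-DATANODE-1", "DATANODE", "host-id-1")
+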
+    @param role_name: Role name
+    @param role_type: Role type
+    @param host_id: ID of the host to assign the role to
+    @return: An ApiRole object
+    """
+    return roles.create_role(self._get_resource_root(), self.name, role_type,
+        role_name, host_id, self._get_cluster_name())
+
+  def delete_role(self, name):
+    """
+    Delete a role by name.
+
+    @param name: Role name
+    @return: The deleted ApiRole object
+    """
+    return roles.delete_role(self._get_resource_root(), self.name, name,
+        self._get_cluster_name())
+
+  def get_role(self, name):
+    """
+    Lookup a role by name.
+
+    @param name: Role name
+    @return: An ApiRole object
+    """
+    return roles.get_role(self._get_resource_root(), self.name, name,
+        self._get_cluster_name())
+
+  def get_all_roles(self, view = None):
+    """
+    Get all roles in the service.
+
+    @param view: View to materialize ('full' or 'summary')
+    @return: A list of ApiRole objects.
+    """
+    return roles.get_all_roles(self._get_resource_root(), self.name,
+        self._get_cluster_name(), view)
+
+  def get_roles_by_type(self, role_type, view = None):
+    """
+    Get all roles of a certain type in a service.
+
+    @param role_type: Role type
+    @param view: View to materialize ('full' or 'summary')
+    @return: A list of ApiRole objects.
+    """
+    return roles.get_roles_by_type(self._get_resource_root(), self.name,
+        role_type, self._get_cluster_name(), view)
+
+  def get_role_types(self):
+    """
+    Get a list of role types in a service.
+
+    @return: A list of role types (strings)
+    """
+    resp = self._get_resource_root().get(self._path() + '/roleTypes')
+    return resp[ApiList.LIST_KEY]
+
+  def get_all_role_config_groups(self):
+    """
+    Get a list of role configuration groups in the service.
+
+    @return: A list of ApiRoleConfigGroup objects.
+    @since: API v3
+    """
+    return role_config_groups.get_all_role_config_groups(
+        self._get_resource_root(), self.name, self._get_cluster_name())
+
+  def get_role_config_group(self, name):
+    """
+    Get a role configuration group in the service by name.
+
+    @param name: The name of the role config group.
+    @return: An ApiRoleConfigGroup object.
+    @since: API v3
+    """
+    return role_config_groups.get_role_config_group(
+        self._get_resource_root(), self.name, name, self._get_cluster_name())
+
+  def create_role_config_group(self, name, display_name, role_type):
+    """
+    Create a role config group.
+
+    @param name: The name of the new group.
+    @param display_name: The display name of the new group.
+    @param role_type: The role type of the new group.
+    @return: New ApiRoleConfigGroup object.
+    @since: API v3
+    """
+    return role_config_groups.create_role_config_group(
+        self._get_resource_root(), self.name, name, display_name, role_type,
+        self._get_cluster_name())
+
+  def update_role_config_group(self, name, apigroup):
+    """
+    Update a role config group.
+
+    @param name: Role config group name.
+    @param apigroup: The updated role config group.
+    @return: The updated ApiRoleConfigGroup object.
+    @since: API v3
+    """
+    return role_config_groups.update_role_config_group(
+        self._get_resource_root(), self.name, name, apigroup,
+        self._get_cluster_name())
+
+  def delete_role_config_group(self, name):
+    """
+    Delete a role config group by name.
+
+    @param name: Role config group name.
+    @return: The deleted ApiRoleConfigGroup object.
+    @since: API v3
+    """
+    return role_config_groups.delete_role_config_group(
+        self._get_resource_root(), self.name, name, self._get_cluster_name())
+
+  def get_metrics(self, from_time=None, to_time=None, metrics=None, view=None):
+    """
+    This endpoint is not supported as of v6. Use the timeseries API
+    instead. To get all metrics for a service with the timeseries API use
+    the query:
+
+    'select * where serviceName = $SERVICE_NAME'.
+
+    To get specific metrics for a service use a comma-separated list of
+    the metric names as follows:
+
+    'select $METRIC_NAME1, $METRIC_NAME2 where serviceName = $SERVICE_NAME'.
+
+    For more information see http://tiny.cloudera.com/tsquery_doc
+
+    Retrieve metric readings for the service.
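+
+    For example (a sketch only; it assumes the query_timeseries helper in
+    cm_api.endpoints.timeseries, whose exact signature may differ, and
+    C{api} as the root ApiResource)::
+
+      from cm_api.endpoints import timeseries
+      result = timeseries.query_timeseries(
+          api, "select * where serviceName = hdfs1")
+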
+    @param from_time: A datetime; start of the period to query (optional).
+    @param to_time: A datetime; end of the period to query (default = now).
+    @param metrics: List of metrics to query (default = all).
+    @param view: View to materialize ('full' or 'summary')
+    @return: List of metrics and their readings.
+    """
+    return self._get_resource_root().get_metrics(self._path() + '/metrics',
+        from_time, to_time, metrics, view)
+
+  def start(self):
+    """
+    Start a service.
+
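+    A minimal usage sketch; C{wait()} on the returned ApiCommand is
+    assumed to block until the command finishes::
+
+      cmd = service.start()
+      cmd = cmd.wait()
+      print cmd.success
+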
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('start')
+
+  def stop(self):
+    """
+    Stop a service.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('stop')
+
+  def restart(self):
+    """
+    Restart a service.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('restart')
+
+  def start_roles(self, *role_names):
+    """
+    Start a list of roles.
+
+    @param role_names: names of the roles to start.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('start', role_names)
+
+  def stop_roles(self, *role_names):
+    """
+    Stop a list of roles.
+
+    @param role_names: names of the roles to stop.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('stop', role_names)
+
+  def restart_roles(self, *role_names):
+    """
+    Restart a list of roles.
+
+    @param role_names: names of the roles to restart.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('restart', role_names)
+
+  def bootstrap_hdfs_stand_by(self, *role_names):
+    """
+    Bootstrap HDFS stand-by NameNodes.
+
+    Initialize their state by syncing it with the respective HA partner.
+
+    @param role_names: NameNodes to bootstrap.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('hdfsBootstrapStandBy', role_names)
+
+  def create_beeswax_warehouse(self):
+    """
+    DEPRECATED: use create_hive_warehouse on the Hive service. Deprecated since v3.
+
+    Create the Beeswax role's warehouse for a Hue service.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('hueCreateHiveWarehouse')
+
+  def create_hbase_root(self):
+    """
+    Create the root directory of an HBase service.
+
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('hbaseCreateRoot')
+
+  def create_hdfs_tmp(self):
+    """
+    Create the /tmp directory in HDFS with appropriate ownership and permissions.
+
+    @return: Reference to the submitted command
+    @since: API v2
+    """
+    return self._cmd('hdfsCreateTmpDir')
+
+  def refresh(self, *role_names):
+    """
+    Execute the "refresh" command on a set of roles.
+
+    @param role_names: Names of the roles to refresh.
+    @return: Reference to the submitted command.
+    """
+    return self._role_cmd('refresh', role_names)
+
+  def decommission(self, *role_names):
+    """
+    Decommission roles in a service.
+
+    @param role_names: Names of the roles to decommission.
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('decommission', data=role_names)
+
+  def recommission(self, *role_names):
+    """
+    Recommission roles in a service.
+
+    @param role_names: Names of the roles to recommission.
+    @return: Reference to the submitted command.
+    @since: API v2
+    """
+    return self._cmd('recommission', data=role_names)
+
+  def deploy_client_config(self, *role_names):
+    """
+    Deploys client configuration to the hosts where roles are running.
+
+    @param role_names: Names of the roles to deploy client configurations to.
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('deployClientConfig', data=role_names)
+
+  def disable_hdfs_auto_failover(self, nameservice):
+    """
+    Disable auto-failover for a highly available HDFS nameservice.
+    This command is no longer supported with API v6 onwards. Use disable_nn_ha instead.
+
+    @param nameservice: Affected nameservice.
+    @return: Reference to the submitted command.
+    """
+    return self._cmd('hdfsDisableAutoFailover', data=nameservice)
+
+  def disable_hdfs_ha(self, active_name, secondary_name,
+      start_dependent_services=True, deploy_client_configs=True,
+      disable_quorum_storage=False):
+    """
+    Disable high availability for an HDFS NameNode.
+    This command is no longer supported with API v6 onwards. Use disable_nn_ha instead.
+
+    @param active_name: Name of the NameNode to keep.
+    @param secondary_name: Name of (existing) SecondaryNameNode to link to
+                           remaining NameNode.
+    @param start_dependent_services: whether to re-start dependent services.
+    @param deploy_client_configs: whether to re-deploy client configurations.
+    @param disable_quorum_storage: whether to disable Quorum-based Storage. Available since API v2.
+                                   Quorum-based Storage will be disabled for all
+                                   nameservices that have Quorum-based Storage
+                                   enabled.
+    @return: Reference to the submitted command.
+    """
+    args = dict(
+      activeName = active_name,
+      secondaryName = secondary_name,
+      startDependentServices = start_dependent_services,
+      deployClientConfigs = deploy_client_configs,
+    )
+
+    version = self._get_resource_root().version
+    if version < 2:
+      if disable_quorum_storage:
+        raise AttributeError("Quorum-based Storage requires at least API version 2 available in Cloudera Manager 4.1.")
+    else:
+      args['disableQuorumStorage'] = disable_quorum_storage
+
+    return self._cmd('hdfsDisableHa', data=args)
+
+  def enable_hdfs_auto_failover(self, nameservice, active_fc_name,
+      standby_fc_name, zk_service):
+    """
+    Enable auto-failover for an HDFS nameservice.
+    This command is no longer supported with API v6 onwards. Use enable_nn_ha instead.
+
+    @param nameservice: Nameservice for which to enable auto-failover.
+    @param active_fc_name: Name of failover controller to create for active node.
+    @param standby_fc_name: Name of failover controller to create for stand-by node.
+    @param zk_service: ZooKeeper service to use.
+    @return: Reference to the submitted command.
+    """
+    args = dict(
+      nameservice = nameservice,
+      activeFCName = active_fc_name,
+      standByFCName = standby_fc_name,
+      zooKeeperService = dict(
+        clusterName = zk_service.clusterRef.clusterName,
+        serviceName = zk_service.name,
+        ),
+      )
+    return self._cmd('hdfsEnableAutoFailover', data=args)
+
+  def enable_hdfs_ha(self, active_name, active_shared_path, standby_name,
+      standby_shared_path, nameservice, start_dependent_services=True,
+      deploy_client_configs=True, enable_quorum_storage=False):
+    """
+    Enable high availability for an HDFS NameNode.
+    This command is no longer supported with API v6 onwards. Use enable_nn_ha instead.
+
+    @param active_name: name of active NameNode.
+    @param active_shared_path: shared edits path for active NameNode.
+                               Ignored if Quorum-based Storage is being enabled.
+    @param standby_name: name of stand-by NameNode.
+    @param standby_shared_path: shared edits path for stand-by NameNode.
+                                Ignored if Quorum-based Storage is being enabled.
+    @param nameservice: nameservice for the HA pair.
+    @param start_dependent_services: whether to re-start dependent services.
+    @param deploy_client_configs: whether to re-deploy client configurations.
+    @param enable_quorum_storage: whether to enable Quorum-based Storage. Available since API v2.
+                                  Quorum-based Storage will be enabled for all
+                                  nameservices except those configured with NFS High
+                                  Availability.
+    @return: Reference to the submitted command.
+    """
+    version = self._get_resource_root().version
+
+    args = dict(
+      activeName = active_name,
+      standByName = standby_name,
+      nameservice = nameservice,
+      startDependentServices = start_dependent_services,
+      deployClientConfigs = deploy_client_configs,
+    )
+
+    if enable_quorum_storage:
+      if version < 2:
+        raise AttributeError("Quorum-based Storage is not supported prior to Cloudera Manager 4.1.")
+      else:
+        args['enableQuorumStorage'] = enable_quorum_storage
+    else:
+      if active_shared_path is None or standby_shared_path is None:
+        raise AttributeError("Active and standby shared paths must be specified if not enabling Quorum-based Storage")
+      args['activeSharedEditsPath'] = active_shared_path
+      args['standBySharedEditsPath'] = standby_shared_path
+
+    return self._cmd('hdfsEnableHa', data=args)
+
+  def enable_nn_ha(self, active_name, standby_host_id, nameservice, jns,
+      standby_name_dir_list=None, qj_name=None, standby_name=None,
+      active_fc_name=None, standby_fc_name=None, zk_service_name=None,
+      force_init_znode=True, clear_existing_standby_name_dirs=True, clear_existing_jn_edits_dir=True):
+    """
+    Enable High Availability (HA) with Auto-Failover for an HDFS NameNode.
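+
+    A minimal invocation sketch; every host ID and role name below is a
+    placeholder, and each JournalNode dict may also need B{jnEditsDir}
+    (see the parameter description below)::
+
+      cmd = hdfs.enable_nn_ha(
+          active_name="hdfs1-NAMENODE-1",
+          standby_host_id="host-id-2",
+          nameservice="ns1",
+          jns=[{'jnHostId': 'host-id-3'},
+               {'jnHostId': 'host-id-4'},
+               {'jnHostId': 'host-id-5'}])
+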
+    @param active_name: Name of Active NameNode.
+    @param standby_host_id: ID of host where Standby NameNode will be created.
+    @param nameservice: Nameservice to be used while enabling HA.
+                        Optional if Active NameNode already has this config set.
+    @param jns: List of Journal Nodes to be created during the command.
+                Each element of the list must be a dict containing the following keys:
+                  - B{jnHostId}: ID of the host where the new JournalNode will be created.
+                  - B{jnName}: Name of the JournalNode role (optional)
+                  - B{jnEditsDir}: Edits dir of the JournalNode. Can be omitted if the config
+                    is already set at RCG level.
+    @param standby_name_dir_list: List of directories for the new Standby NameNode.
+                                  If not provided then it will use same dirs as Active NameNode.
+    @param qj_name: Name of the journal located on each JournalNodes' filesystem.
+                    This can be optionally provided if the config hasn't been already set for the Active NameNode.
+                    If this isn't provided and Active NameNode doesn't also have the config,
+                    then nameservice is used by default.
+    @param standby_name: Name of the Standby NameNode role to be created (Optional).
+    @param active_fc_name: Name of the Active Failover Controller role to be created (Optional).
+    @param standby_fc_name: Name of the Standby Failover Controller role to be created (Optional).
+    @param zk_service_name: Name of the ZooKeeper service to use for auto-failover.
+                            If HDFS service already depends on a ZooKeeper service then that ZooKeeper
+                            service will be used for auto-failover and in that case this parameter
+                            can either be omitted or should be the same ZooKeeper service.
+    @param force_init_znode: Indicates if the ZNode should be force initialized if it is
+                             already present. Useful while re-enabling High Availability. (Default: TRUE)
+    @param clear_existing_standby_name_dirs: Indicates if the existing name directories for Standby NameNode
+                                             should be cleared during the workflow.
+                                             Useful while re-enabling High Availability. (Default: TRUE)
+    @param clear_existing_jn_edits_dir: Indicates if the existing edits directories for the JournalNodes
+                                        for the specified nameservice should be cleared during the workflow.
+                                        Useful while re-enabling High Availability. (Default: TRUE)
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict(
+      activeNnName = active_name,
+      standbyNnName = standby_name,
+      standbyNnHostId = standby_host_id,
+      standbyNameDirList = standby_name_dir_list,
+      nameservice = nameservice,
+      qjName = qj_name,
+      activeFcName = active_fc_name,
+      standbyFcName = standby_fc_name,
+      zkServiceName = zk_service_name,
+      forceInitZNode = force_init_znode,
+      clearExistingStandbyNameDirs = clear_existing_standby_name_dirs,
+      clearExistingJnEditsDir = clear_existing_jn_edits_dir,
+      jns = jns
+    )
+    return self._cmd('hdfsEnableNnHa', data=args, api_version=6)
+
+  def disable_nn_ha(self, active_name, snn_host_id, snn_check_point_dir_list,
+      snn_name=None):
+    """
+    Disable high availability with automatic failover for an HDFS NameNode.
+
+    @param active_name: Name of the NameNode role that is going to be active after
+                        High Availability is disabled.
+    @param snn_host_id: Id of the host where the new SecondaryNameNode will be created.
+    @param snn_check_point_dir_list: List of directories used for checkpointing
+                                      by the new SecondaryNameNode.
+    @param snn_name: Name of the new SecondaryNameNode role (Optional).
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict(
+      activeNnName = active_name,
+      snnHostId = snn_host_id,
+      snnCheckpointDirList = snn_check_point_dir_list,
+      snnName = snn_name
+    )
+    return self._cmd('hdfsDisableNnHa', data=args, api_version=6)
+
+  def enable_jt_ha(self, new_jt_host_id, force_init_znode=True, zk_service_name=None,
+      new_jt_name=None, fc1_name=None, fc2_name=None):
+    """
+    Enable high availability for a MR JobTracker.
+
+    @param zk_service_name: Name of the ZooKeeper service to use for auto-failover.
+           If MapReduce service depends on a ZooKeeper service then that ZooKeeper
+           service will be used for auto-failover and in that case this parameter
+           can be omitted.
+    @param new_jt_host_id: id of the host where the second JobTracker
+                        will be added.
+    @param force_init_znode: Initialize the ZNode used for auto-failover even if
+                             it already exists. This can happen if JobTracker HA
+                             was enabled before and then disabled. Disable operation
+                             doesn't delete this ZNode. Defaults to true.
+    @param new_jt_name: Name of the second JobTracker role to be created.
+    @param fc1_name: Name of the Failover Controller role that is co-located with
+                     the existing JobTracker.
+    @param fc2_name: Name of the Failover Controller role that is co-located with
+                     the new JobTracker.
+    @return: Reference to the submitted command.
+    @since: API v5
+    """
+    args = dict(
+      newJtHostId = new_jt_host_id,
+      forceInitZNode = force_init_znode,
+      zkServiceName = zk_service_name,
+      newJtRoleName = new_jt_name,
+      fc1RoleName = fc1_name,
+      fc2RoleName = fc2_name
+    )
+    return self._cmd('enableJtHa', data=args)
+
+  def disable_jt_ha(self, active_name):
+    """
+    Disable high availability for a MR JobTracker active-standby pair.
+
+    @param active_name: name of the JobTracker that will be active after
+                        the disable operation. The other JobTracker and
+                        Failover Controllers will be removed.
+    @return: Reference to the submitted command.
+    """
+    args = dict(
+      activeName = active_name,
+    )
+    return self._cmd('disableJtHa', data=args)
+
+  def enable_rm_ha(self, new_rm_host_id, zk_service_name=None):
+    """
+    Enable high availability for a YARN ResourceManager.
+
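+    A minimal usage sketch (the host ID and ZooKeeper service name are
+    placeholders)::
+
+      cmd = yarn.enable_rm_ha("host-id-2", zk_service_name="zookeeper1")
+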
+    @param new_rm_host_id: id of the host where the second ResourceManager
+                           will be added.
+    @param zk_service_name: Name of the ZooKeeper service to use for auto-failover.
+           If YARN service depends on a ZooKeeper service then that ZooKeeper
+           service will be used for auto-failover and in that case this parameter
+           can be omitted.
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict(
+      newRmHostId = new_rm_host_id,
+      zkServiceName = zk_service_name
+    )
+    return self._cmd('enableRmHa', data=args)
+
+  def disable_rm_ha(self, active_name):
+    """
+    Disable high availability for a YARN ResourceManager active-standby pair.
+
+    @param active_name: name of the ResourceManager that will be active after
+                        the disable operation. The other ResourceManager
+                        will be removed.
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict(
+      activeName = active_name
+    )
+    return self._cmd('disableRmHa', data=args)
+
+  def enable_oozie_ha(self, new_oozie_server_host_ids, new_oozie_server_role_names=None,
+    zk_service_name=None, load_balancer_host_port=None):
+    """
+    Enable high availability for Oozie.
+
+    @param new_oozie_server_host_ids: List of IDs of the hosts on which new Oozie Servers
+                                      will be added.
+    @param new_oozie_server_role_names: List of names of the new Oozie Servers. This is an
+                                        optional argument, but if provided, it should
+                                        match the length of host IDs provided.
+    @param zk_service_name: Name of the ZooKeeper service that will be used for Oozie HA.
+                            This is an optional parameter if the Oozie to ZooKeeper
+                            dependency is already set.
+    @param load_balancer_host_port: Address and port of the load balancer used for Oozie HA.
+                                    This is an optional parameter if this config is already set.
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict(
+      newOozieServerHostIds = new_oozie_server_host_ids,
+      newOozieServerRoleNames = new_oozie_server_role_names,
+      zkServiceName = zk_service_name,
+      loadBalancerHostPort = load_balancer_host_port
+    )
+    return self._cmd('oozieEnableHa', data=args, api_version=6)
+
+  def disable_oozie_ha(self, active_name):
+    """
+    Disable high availability for Oozie
+
+    @param active_name: Name of the Oozie Server that will be active after
+                        High Availability is disabled.
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    args = dict(
+      activeName = active_name
+    )
+    return self._cmd('oozieDisableHa', data=args, api_version=6)
+
+  def failover_hdfs(self, active_name, standby_name, force=False):
+    """
+    Initiate a failover of an HDFS NameNode HA pair.
+
+    This will make the given stand-by NameNode active, and vice-versa.
+
+    @param active_name: name of currently active NameNode.
+    @param standby_name: name of NameNode currently in stand-by.
+    @param force: whether to force failover.
+    @return: Reference to the submitted command.
+    """
+    params = { "force" : "true" and force or "false" }
+    args = { ApiList.LIST_KEY : [ active_name, standby_name ] }
+    return self._cmd('hdfsFailover', data=[ active_name, standby_name ],
+        params = { "force" : "true" and force or "false" })
+
+  def format_hdfs(self, *namenodes):
+    """
+    Format NameNode instances of an HDFS service.
+
+    @param namenodes: Name of NameNode instances to format.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('hdfsFormat', namenodes)
+
+  def init_hdfs_auto_failover(self, *controllers):
+    """
+    Initialize HDFS failover controller metadata.
+
+    Only one controller per nameservice needs to be initialized.
+
+    @param controllers: Name of failover controller instances to initialize.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('hdfsInitializeAutoFailover', controllers)
+
+  def init_hdfs_shared_dir(self, *namenodes):
+    """
+    Initialize a NameNode's shared edits directory.
+
+    @param namenodes: Name of NameNode instances.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('hdfsInitializeSharedDir', namenodes)
+
+  def roll_edits_hdfs(self, nameservice=None):
+    """
+    Roll the edits of an HDFS NameNode or Nameservice.
+
+    @param nameservice: Nameservice whose edits should be rolled.
+                        Required only with a federated HDFS.
+    @return: Reference to the submitted command.
+    @since: API v3
+    """
+    args = dict()
+    if nameservice:
+      args['nameservice'] = nameservice
+
+    return self._cmd('hdfsRollEdits', data=args)
+
+  def upgrade_hdfs_metadata(self):
+    """
+    Upgrade HDFS Metadata as part of a major version upgrade.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('hdfsUpgradeMetadata', api_version=6)
+
+  def upgrade_hbase(self):
+    """
+    Upgrade HBase data in HDFS and ZooKeeper as part of upgrade from CDH4 to CDH5.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('hbaseUpgrade', api_version=6)
+
+  def upgrade_sqoop_db(self):
+    """
+    Upgrade Sqoop Database schema as part of a major version upgrade.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('sqoopUpgradeDb', api_version=6)
+
+  def upgrade_hive_metastore(self):
+    """
+    Upgrade Hive Metastore as part of a major version upgrade.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('hiveUpgradeMetastore', api_version=6)
+
+  def cleanup_zookeeper(self, *servers):
+    """
+    Cleanup a ZooKeeper service or roles.
+
+    If no server role names are provided, the command applies to the whole
+    service, and cleans up all the server roles that are currently running.
+
+    @param servers: ZK server role names (optional).
+    @return: Command reference (for service command) or list of command
+             references (for role commands).
+    """
+    if servers:
+      return self._role_cmd('zooKeeperCleanup', servers)
+    else:
+      return self._cmd('zooKeeperCleanup')
+
+  def init_zookeeper(self, *servers):
+    """
+    Initialize a ZooKeeper service or roles.
+
+    If no server role names are provided, the command applies to the whole
+    service, and initializes all the configured server roles.
+
+    @param servers: ZK server role names (optional).
+    @return: Command reference (for service command) or list of command
+             references (for role commands).
+    """
+    if servers:
+      return self._role_cmd('zooKeeperInit', servers)
+    else:
+      return self._cmd('zooKeeperInit')
+
+  def sync_hue_db(self, *servers):
+    """
+    Synchronize the Hue server's database.
+
+    @param servers: Name of Hue Server roles to synchronize.
+    @return: List of submitted commands.
+    """
+    return self._role_cmd('hueSyncDb', servers)
+
+  def enter_maintenance_mode(self):
+    """
+    Put the service in maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('enterMaintenanceMode')
+    if cmd.success:
+      self._update(_get_service(self._get_resource_root(), self._path()))
+    return cmd
+
+  def exit_maintenance_mode(self):
+    """
+    Take the service out of maintenance mode.
+
+    @return: Reference to the completed command.
+    @since: API v2
+    """
+    cmd = self._cmd('exitMaintenanceMode')
+    if cmd.success:
+      self._update(_get_service(self._get_resource_root(), self._path()))
+    return cmd
+
+  def rolling_restart(self, slave_batch_size=None,
+                      slave_fail_count_threshold=None,
+                      sleep_seconds=None,
+                      stale_configs_only=None,
+                      unupgraded_only=None,
+                      restart_role_types=None,
+                      restart_role_names=None):
+    """
+    Rolling restart the roles of a service. The sequence is:
+      1. Restart all the non-slave roles
+      2. If slaves are present, restart them in batches of the specified size
+      3. Perform any post-command needed after rolling restart
+
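+    A minimal usage sketch (the role type and batch size are only
+    illustrative)::
+
+      cmd = service.rolling_restart(slave_batch_size=1,
+                                    restart_role_types=['DATANODE'])
+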
+    @param slave_batch_size: Number of slave roles to restart at a time
+           Must be greater than 0. Default is 1.
+           For HDFS, this number should be less than the replication factor (default 3)
+           to ensure data availability during rolling restart.
+    @param slave_fail_count_threshold: The threshold for number of slave batches that
+           are allowed to fail to restart before the entire command is considered failed.
+           Must be >= 0. Default is 0.
+    @param sleep_seconds: Number of seconds to sleep between restarts of slave role batches.
+            Must be >=0. Default is 0.
+    @param stale_configs_only: Restart roles with stale configs only. Default is false.
+    @param unupgraded_only: Restart roles that haven't been upgraded yet. Default is false.
+    @param restart_role_types: Role types to restart. If not specified, all startable roles are restarted.
+    @param restart_role_names: List of specific roles to restart.
+            If none are specified, then all roles of specified role types are restarted.
+    @return: Reference to the submitted command.
+    @since: API v3
+    """
+    args = dict()
+    if slave_batch_size:
+      args['slaveBatchSize'] = slave_batch_size
+    if slave_fail_count_threshold:
+      args['slaveFailCountThreshold'] = slave_fail_count_threshold
+    if sleep_seconds:
+      args['sleepSeconds'] = sleep_seconds
+    if stale_configs_only:
+      args['staleConfigsOnly'] = stale_configs_only
+    if unupgraded_only:
+      args['unUpgradedOnly'] = unupgraded_only
+    if restart_role_types:
+      args['restartRoleTypes'] = restart_role_types
+    if restart_role_names:
+      args['restartRoleNames'] = restart_role_names
+
+    return self._cmd('rollingRestart', data=args)
+
+  def create_replication_schedule(self,
+      start_time, end_time, interval_unit, interval, paused, arguments,
+      alert_on_start=False, alert_on_success=False, alert_on_fail=False,
+      alert_on_abort=False):
+    """
+    Create a new replication schedule for this service.
+
+    The replication argument type varies per service type. The following types
+    are recognized:
+      - HDFS: ApiHdfsReplicationArguments
+      - Hive: ApiHiveReplicationArguments
+
+    @type  start_time: datetime.datetime
+    @param start_time: The time at which the schedule becomes active and first executes.
+    @type  end_time: datetime.datetime
+    @param end_time: The time at which the schedule will expire.
+    @type  interval_unit: str
+    @param interval_unit: The unit of time the `interval` represents. Ex. MINUTE, HOUR,
+                          DAY. See the server documentation for a full list of values.
+    @type  interval: int
+    @param interval: The number of time units to wait until triggering the next replication.
+    @type  paused: bool
+    @param paused: Should the schedule be paused? Useful for on-demand replication.
+    @param arguments: service type-specific arguments for the replication job.
+    @param alert_on_start: whether to generate alerts when the job is started.
+    @param alert_on_success: whether to generate alerts when the job succeeds.
+    @param alert_on_fail: whether to generate alerts when the job fails.
+    @param alert_on_abort: whether to generate alerts when the job is aborted.
+    @return: The newly created schedule.
+    @since: API v3
+    """
+    schedule = ApiReplicationSchedule(self._get_resource_root(),
+      startTime=start_time, endTime=end_time, intervalUnit=interval_unit, interval=interval,
+      paused=paused, alertOnStart=alert_on_start, alertOnSuccess=alert_on_success,
+      alertOnFail=alert_on_fail, alertOnAbort=alert_on_abort)
+
+    if self.type == 'HDFS':
+      if not isinstance(arguments, ApiHdfsReplicationArguments):
+        raise TypeError, 'Unexpected type for HDFS replication argument.'
+      schedule.hdfsArguments = arguments
+    elif self.type == 'HIVE':
+      if not isinstance(arguments, ApiHiveReplicationArguments):
+        raise TypeError, 'Unexpected type for Hive replication argument.'
+      schedule.hiveArguments = arguments
+    else:
+      raise TypeError, 'Replication is not supported for service type ' + self.type
+
+    return self._post("replications", ApiReplicationSchedule, True, [schedule],
+        api_version=3)[0]
+
+  def get_replication_schedules(self):
+    """
+    Retrieve a list of replication schedules.
+
+    @return: A list of replication schedules.
+    @since: API v3
+    """
+    return self._get("replications", ApiReplicationSchedule, True,
+        api_version=3)
+
+  def get_replication_schedule(self, schedule_id):
+    """
+    Retrieve a single replication schedule.
+
+    @param schedule_id: The id of the schedule to retrieve.
+    @return: The requested schedule.
+    @since: API v3
+    """
+    return self._get("replications/%d" % schedule_id, ApiReplicationSchedule,
+        api_version=3)
+
+  def delete_replication_schedule(self, schedule_id):
+    """
+    Delete a replication schedule.
+
+    @param schedule_id: The id of the schedule to delete.
+    @return: The deleted replication schedule.
+    @since: API v3
+    """
+    return self._delete("replications/%s" % schedule_id, ApiReplicationSchedule,
+        api_version=3)
+
+  def update_replication_schedule(self, schedule_id, schedule):
+    """
+    Update a replication schedule.
+
+    @param schedule_id: The id of the schedule to update.
+    @param schedule: The modified schedule.
+    @return: The updated replication schedule.
+    @since: API v3
+    """
+    return self._put("replications/%s" % schedule_id, ApiReplicationSchedule,
+        data=schedule, api_version=3)
+
+  def get_replication_command_history(self, schedule_id, limit=20, offset=0,
+                                      view=None):
+    """
+    Retrieve a list of commands for a replication schedule.
+
+    @param schedule_id: The id of the replication schedule.
+    @param limit: Maximum number of commands to retrieve.
+    @param offset: Index of first command to retrieve.
+    @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
+    @return: List of commands executed for a replication schedule.
+    @since: API v4
+    """
+    params = {
+      'limit':  limit,
+      'offset': offset,
+    }
+    if view:
+      params['view'] = view
+
+    return self._get("replications/%s/history" % schedule_id,
+                     ApiReplicationCommand, True, params=params, api_version=4)
+
+  def trigger_replication_schedule(self, schedule_id, dry_run=False):
+    """
+    Trigger replication immediately. Start and end dates on the schedule will be
+    ignored.
+
+    @param schedule_id: The id of the schedule to trigger.
+    @param dry_run: Whether to execute a dry run.
+    @return: The command corresponding to the replication job.
+    @since: API v3
+    """
+    return self._post("replications/%s/run" % schedule_id, ApiCommand,
+        params=dict(dryRun=dry_run),
+        api_version=3)
+
+  def create_snapshot_policy(self, policy):
+    """
+    Create a new snapshot policy for this service.
+    @param policy: The snapshot policy to create
+    @return: The newly created policy.
+    @since: API v6
+    """
+    return self._post("snapshots/policies", ApiSnapshotPolicy, True, [policy],
+        api_version=6)[0]
+
+  def get_snapshot_policies(self, view=None):
+    """
+    Retrieve a list of snapshot policies.
+
+    @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
+    @return: A list of snapshot policies.
+    @since: API v6
+    """
+    return self._get("snapshots/policies", ApiSnapshotPolicy, True,
+        params=view and dict(view=view) or None, api_version=6)
+
+  def get_snapshot_policy(self, name, view=None):
+    """
+    Retrieve a single snapshot policy.
+
+    @param name: The name of the snapshot policy to retrieve.
+    @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
+    @return: The requested snapshot policy.
+    @since: API v6
+    """
+    return self._get("snapshots/policies/%s" % name, ApiSnapshotPolicy,
+        params=view and dict(view=view) or None, api_version=6)
+
+  def delete_snapshot_policy(self, name):
+    """
+    Delete a snapshot policy.
+
+    @param name: The name of the snapshot policy to delete.
+    @return: The deleted snapshot policy.
+    @since: API v6
+    """
+    return self._delete("snapshots/policies/%s" % name, ApiSnapshotPolicy, api_version=6)
+
+  def update_snapshot_policy(self, name, policy):
+    """
+    Update a snapshot policy.
+
+    @param name: The name of the snapshot policy to update.
+    @param policy: The modified snapshot policy.
+    @return: The updated snapshot policy.
+    @since: API v6
+    """
+    return self._put("snapshots/policies/%s" % name, ApiSnapshotPolicy, data=policy,
+        api_version=6)
+
+  def get_snapshot_command_history(self, name, limit=20, offset=0, view=None):
+    """
+    Retrieve a list of commands triggered by a snapshot policy.
+
+    @param name: The name of the snapshot policy.
+    @param limit: Maximum number of commands to retrieve.
+    @param offset: Index of first command to retrieve.
+    @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
+    @return: List of commands triggered by a snapshot policy.
+    @since: API v6
+    """
+    params = {
+      'limit':  limit,
+      'offset': offset,
+    }
+    if view:
+      params['view'] = view
+
+    return self._get("snapshots/policies/%s/history" % name, ApiSnapshotCommand, True,
+        params=params, api_version=6)
+
+
+  def install_oozie_sharelib(self):
+    """
+    Installs the Oozie ShareLib. Oozie must be stopped before running this
+    command.
+
+    @return: Reference to the submitted command.
+    @since: API v3
+    """
+    return self._cmd('installOozieShareLib', api_version=3)
+
+  def create_oozie_db(self):
+    """
+    Creates the Oozie Database Schema in the configured database.
+
+    @return: Reference to the submitted command.
+    @since: API v2
+    """
+    return self._cmd('createOozieDb', api_version=2)
+
+  def upgrade_oozie_db(self):
+    """
+    Upgrade Oozie Database schema as part of a major version upgrade.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('oozieUpgradeDb', api_version=6)
+
+  def create_hive_metastore_tables(self):
+    """
+    Creates the Hive metastore tables in the configured database.
+    Will do nothing if tables already exist. Will not perform an upgrade.
+
+    @return: Reference to the submitted command.
+    @since: API v3
+    """
+    return self._cmd('hiveCreateMetastoreDatabaseTables', api_version=3)
+
+  def create_hive_warehouse(self):
+    """
+    Creates the Hive warehouse directory in HDFS.
+
+    @return: Reference to the submitted command.
+    @since: API v3
+    """
+    return self._cmd('hiveCreateHiveWarehouse')
+
+  def create_hive_userdir(self):
+    """
+    Creates the Hive user directory in HDFS.
+
+    @return: Reference to the submitted command.
+    @since: API v4
+    """
+    return self._cmd('hiveCreateHiveUserDir')
+
+  def create_hive_metastore_database(self):
+    """
+    Create the Hive Metastore Database. Only works with embedded postgresql
+    database. This command should usually be followed by a call to
+    create_hive_metastore_tables.
+
+    @return: Reference to the submitted command.
+    @since: API v4
+    """
+    return self._cmd('hiveCreateMetastoreDatabase', api_version=4)
+
+  def update_hive_metastore_namenodes(self):
+    """
+    Update Hive Metastore to point to a NameNode's Nameservice name instead of
+    hostname. Only available when all Hive Metastore Servers are stopped and
+    HDFS has High Availability.
+
+    Back up the Hive Metastore Database before running this command.
+
+    @return: Reference to the submitted command.
+    @since: API v4
+    """
+    return self._cmd('hiveUpdateMetastoreNamenodes', api_version=4)
+
+  def import_mr_configs_into_yarn(self):
+    """
+    Import MapReduce configuration into Yarn, overwriting Yarn configuration.
+
+    You will lose the existing Yarn configuration. This command reads all
+    MapReduce configuration, role assignments, and role configuration groups
+    and updates Yarn with the corresponding values. MR1 configuration is
+    converted into the equivalent MR2 configuration.
+
+    Before running this command, Yarn must be stopped and MapReduce must exist
+    with valid configuration.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('importMrConfigsIntoYarn', api_version=6)
+
+  def switch_to_mr2(self):
+    """
+    Change the cluster to use MR2 instead of MR1. Services will be restarted.
+
+    Will perform the following steps:
+    * Update all services that depend on MapReduce to instead depend on Yarn.
+    * Stop MapReduce
+    * Start Yarn (includes MR2)
+    * Deploy Yarn (MR2) Client Configuration
+
+    Available since API v6.
+
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd('switchToMr2', api_version=6)
+
+  def role_command_by_name(self, command_name, *role_names):
+    """
+    Executes a role command by name on the specified roles.
+
+    @param command_name: The name of the command.
+    @param role_names: The role names to execute this command on.
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._role_cmd(command_name, role_names, api_version=6)
+
+  def service_command_by_name(self, command_name):
+    """
+    Executes a command, specified by name, on this service.
+
+    @param command_name: The name of the command.
+    @return: Reference to the submitted command.
+    @since: API v6
+    """
+    return self._cmd(command_name, api_version=6)
+
+  def list_commands_by_name(self):
+    """
+    Lists all the commands that can be executed by name
+    on the provided service.
+
+    @return: A list of command metadata objects
+    @since: API v6
+    """
+    return self._get("commandsByName", ApiCommandMetadata, True,
+        api_version=6)
+
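+  # Illustrative sketch (not part of the upstream module): the *_by_name
+  # helpers above can be used to discover and run service commands, assuming
+  # `service` is an ApiService handle on an API v6+ deployment. The command
+  # and role names below are hypothetical:
+  #
+  #   for meta in service.list_commands_by_name():
+  #     print meta.name, meta.argSchema
+  #   cmd = service.service_command_by_name('SomeServiceCommand')
+  #   bulk = service.role_command_by_name('SomeRoleCommand', 'role-1', 'role-2')
+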
+class ApiServiceSetupInfo(ApiService):
+  _ATTRIBUTES = {
+    'name'    : None,
+    'type'    : None,
+    'config'  : Attr(ApiConfig),
+    'roles'   : Attr(roles.ApiRole),
+  }
+
+  def __init__(self, name=None, type=None,
+               config=None, roles=None):
+    # The BaseApiObject expects a resource_root, which we don't care about
+    resource_root = None
+    # Unfortunately, the json key is called "type". So our input arg
+    # needs to be called "type" as well, despite it being a python keyword.
+    BaseApiObject.init(self, None, locals())
+
+  def set_config(self, config):
+    """
+    Set the service configuration.
+
+    @param config: A dictionary of config key/value
+    """
+    if self.config is None:
+      self.config = { }
+    self.config.update(config_to_api_list(config))
+
+  def add_role_type_info(self, role_type, config):
+    """
+    Add a role type setup info.
+
+    @param role_type: Role type
+    @param config: A dictionary of role type configuration
+    """
+    rt_config = config_to_api_list(config)
+    rt_config['roleType'] = role_type
+
+    if self.config is None:
+      self.config = { }
+    if not self.config.has_key(ROLETYPES_CFG_KEY):
+      self.config[ROLETYPES_CFG_KEY] = [ ]
+    self.config[ROLETYPES_CFG_KEY].append(rt_config)
+
+  def add_role_info(self, role_name, role_type, host_id, config=None):
+    """
+    Add a role info. The role will be created along with the service setup.
+
+    @param role_name: Role name
+    @param role_type: Role type
+    @param host_id: The host where the role should run
+    @param config: (Optional) A dictionary of role config values
+    """
+    if self.roles is None:
+      self.roles = [ ]
+    api_config_list = config is not None and config_to_api_list(config) or None
+    self.roles.append({
+        'name' : role_name,
+        'type' : role_type,
+        'hostRef' : { 'hostId' : host_id },
+        'config' : api_config_list })
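+
+# Illustrative sketch (not part of the upstream module): building an
+# ApiServiceSetupInfo before service creation. The service name, config keys
+# and host id below are hypothetical placeholders:
+#
+#   setup = ApiServiceSetupInfo(name='hdfs01', type='HDFS')
+#   setup.set_config({'dfs_replication': '3'})
+#   setup.add_role_type_info('DATANODE', {'dfs_data_dir_list': '/data'})
+#   setup.add_role_info('hdfs01-nn', 'NAMENODE', 'host-id-1')
+#   # The resulting object is then passed to the cluster's service-creation
+#   # call (defined elsewhere in this package).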
diff --git a/cm-api/src/cm_api/endpoints/timeseries.py b/cm-api/src/cm_api/endpoints/timeseries.py
new file mode 100644 (file)
index 0000000..e558459
--- /dev/null
@@ -0,0 +1,139 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+from cm_api.endpoints.types import *
+
+__docformat__ = "epytext"
+
+TIME_SERIES_PATH   = "/timeseries"
+METRIC_SCHEMA_PATH = "/timeseries/schema"
+
+def query_timeseries(resource_root, query, from_time=None, to_time=None,
+    desired_rollup=None, must_use_desired_rollup=None):
+  """
+  Query for time series data from the CM time series data store.
+  @param resource_root: The root Resource object
+  @param query: Query string.
+  @param from_time: Start of the period to query (optional).
+  @type from_time: datetime.datetime. Note that the datetime must either be
+                   time zone aware or specified in the server time zone. See
+                   the python datetime documentation for more details about
+                   python's time zone handling.
+  @param to_time: End of the period to query (default = now).
+                  This may be an ISO format string, or a datetime object.
+  @type to_time: datetime.datetime. Note that the datetime must either be
+                 time zone aware or specified in the server time zone. See
+                 the python datetime documentation for more details about
+                 python's time zone handling.
+  @param desired_rollup: The aggregate rollup to get data for. This can be
+                         RAW, TEN_MINUTELY, HOURLY, SIX_HOURLY, DAILY, or
+                         WEEKLY. Note that rollup desired is only a hint unless
+                         must_use_desired_rollup is set to true.
+  @param must_use_desired_rollup: Indicates that the monitoring server should
+                                  return the data at the rollup desired.
+  @return: List of ApiTimeSeriesResponse
+  """
+  params = {}
+  if query:
+    params['query'] = query
+  if from_time:
+    params['from'] = from_time.isoformat()
+  if to_time:
+    params['to'] = to_time.isoformat()
+  if desired_rollup:
+    params['desiredRollup'] = desired_rollup
+  if must_use_desired_rollup:
+    params['mustUseDesiredRollup'] = must_use_desired_rollup
+  return call(resource_root.get, TIME_SERIES_PATH,
+      ApiTimeSeriesResponse, True, params=params)
+
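+# Illustrative sketch (not part of the upstream module): querying the last day
+# of data, assuming `api` is the root ApiResource object and the tsquery
+# string is a hypothetical example:
+#
+#   import datetime
+#   to_time = datetime.datetime.now()
+#   from_time = to_time - datetime.timedelta(days=1)
+#   for response in query_timeseries(api, 'SELECT cpu_percent', from_time, to_time):
+#     for ts in response.timeSeries:
+#       print ts.metadata.metricName
+#       for point in ts.data:
+#         print point.timestamp, point.value
+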
+def get_metric_schema(resource_root):
+  """
+  Get the schema for all of the metrics.
+  @return: List of metric schema.
+  """
+  return call(resource_root.get, METRIC_SCHEMA_PATH,
+      ApiMetricSchema, True)
+
+class ApiTimeSeriesCrossEntityMetadata(BaseApiObject):
+  _ATTRIBUTES = {
+    'maxEntityDisplayName' : ROAttr(),
+    'minEntityDisplayName' : ROAttr(),
+    'numEntities'          : ROAttr()
+    }
+
+class ApiTimeSeriesAggregateStatistics(BaseApiObject):
+  _ATTRIBUTES = {
+    'sampleTime'  : ROAttr(datetime.datetime),
+    'sampleValue' : ROAttr(),
+    'count'       : ROAttr(),
+    'min'         : ROAttr(),
+    'minTime'     : ROAttr(datetime.datetime),
+    'max'         : ROAttr(),
+    'maxTime'     : ROAttr(datetime.datetime),
+    'mean'        : ROAttr(),
+    'stdDev'      : ROAttr(),
+    'crossEntityMetadata' : ROAttr(ApiTimeSeriesCrossEntityMetadata)
+    }
+
+class ApiTimeSeriesData(BaseApiObject):
+  _ATTRIBUTES = {
+    'timestamp' : ROAttr(datetime.datetime),
+    'value'     : ROAttr(),
+    'type'      : ROAttr(),
+    'aggregateStatistics' : ROAttr(ApiTimeSeriesAggregateStatistics)
+    }
+
+class ApiTimeSeriesMetadata(BaseApiObject):
+  _ATTRIBUTES = {
+    'metricName'        : ROAttr(),
+    'entityName'        : ROAttr(),
+    'startTime'         : ROAttr(datetime.datetime),
+    'endTime'           : ROAttr(datetime.datetime),
+    'attributes'        : ROAttr(),
+    'unitNumerators'    : ROAttr(),
+    'unitDenominators'  : ROAttr(),
+    'expression'        : ROAttr(),
+    'alias'             : ROAttr(),
+    'metricCollectionFrequencyMs': ROAttr(),
+    'rollupUsed'        : ROAttr()
+    }
+
+class ApiTimeSeries(BaseApiObject):
+  _ATTRIBUTES = {
+    'metadata'          : ROAttr(ApiTimeSeriesMetadata),
+    'data'              : ROAttr(ApiTimeSeriesData),
+    }
+
+class ApiTimeSeriesResponse(BaseApiObject):
+  _ATTRIBUTES = {
+    'timeSeries'      : ROAttr(ApiTimeSeries),
+    'warnings'        : ROAttr(),
+    'errors'          : ROAttr(),
+    'timeSeriesQuery' : ROAttr(),
+    }
+
+class ApiMetricSchema(BaseApiObject):
+  _ATTRIBUTES = {
+    'name'            : ROAttr(),
+    'isCounter'       : ROAttr(),
+    'unitNumerator'   : ROAttr(),
+    'unitDenominator' : ROAttr(),
+    'aliases'         : ROAttr(),
+    'sources'         : ROAttr(),
+    }
+
diff --git a/cm-api/src/cm_api/endpoints/tools.py b/cm-api/src/cm_api/endpoints/tools.py
new file mode 100644 (file)
index 0000000..e3a493d
--- /dev/null
@@ -0,0 +1,31 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__docformat__ = "epytext"
+
+
+ECHO_PATH = "/tools/echo"
+ECHO_ERROR_PATH = "/tools/echoError"
+
+def echo(root_resource, message):
+  """Have the server echo our message back."""
+  params = dict(message=message)
+  return root_resource.get(ECHO_PATH, params)
+
+def echo_error(root_resource, message):
+  """Generate an error, but we get to set the error message."""
+  params = dict(message=message)
+  return root_resource.get(ECHO_ERROR_PATH, params)
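+
+# Illustrative sketch (not part of the upstream module): assuming `api` is the
+# root resource object exposing get(), the echo endpoints can be exercised
+# like this:
+#
+#   print echo(api, 'hello')
+#   try:
+#     echo_error(api, 'something went wrong')
+#   except Exception, ex:
+#     print ex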
diff --git a/cm-api/src/cm_api/endpoints/types.py b/cm-api/src/cm_api/endpoints/types.py
new file mode 100644 (file)
index 0000000..234ed7c
--- /dev/null
@@ -0,0 +1,1136 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+import copy
+import datetime
+import time
+
+__docformat__ = "epytext"
+
+class Attr(object):
+  """
+  Encapsulates information about an attribute in the JSON encoding of the
+  object. It identifies properties of the attribute such as whether it's
+  read-only, its type, etc.
+  """
+  DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
+
+  def __init__(self, atype=None, rw=True, is_api_list=False):
+    self._atype = atype
+    self._is_api_list = is_api_list
+    self.rw = rw
+
+  def to_json(self, value, preserve_ro):
+    """
+    Returns the JSON encoding of the given attribute value.
+
+    If the value has a 'to_json_dict' method, that method is called. Otherwise,
+    the following values are returned for each input type:
+     - datetime.datetime: string with the API representation of a date.
+     - dictionary: if 'atype' is ApiConfig, a list of ApiConfig objects.
+     - python list: python list (or ApiList) with JSON encoding of items
+     - the raw value otherwise
+    """
+    if hasattr(value, 'to_json_dict'):
+      return value.to_json_dict(preserve_ro)
+    elif isinstance(value, dict) and self._atype == ApiConfig:
+      return config_to_api_list(value)
+    elif isinstance(value, datetime.datetime):
+      return value.strftime(self.DATE_FMT)
+    elif isinstance(value, list) or isinstance(value, tuple):
+      if self._is_api_list:
+        return ApiList(value).to_json_dict()
+      else:
+        return [ self.to_json(x, preserve_ro) for x in value ]
+    else:
+      return value
+
+  def from_json(self, resource_root, data):
+    """
+    Parses the given JSON value into an appropriate python object.
+
+    This means:
+     - a datetime.datetime if 'atype' is datetime.datetime
+     - a converted config dictionary or config list if 'atype' is ApiConfig
+     - if the attr is an API list, an ApiList with instances of 'atype'
+     - an instance of 'atype' if it has a 'from_json_dict' method
+     - a python list with decoded versions of the member objects if the input
+       is a python list.
+     - the raw value otherwise
+    """
+    if data is None:
+      return None
+
+    if self._atype == datetime.datetime:
+      return datetime.datetime.strptime(data, self.DATE_FMT)
+    elif self._atype == ApiConfig:
+      # ApiConfig is special. We want a python dictionary for summary views,
+      # but an ApiList for full views. Try to detect each case from the JSON
+      # data.
+      if not data['items']:
+        return { }
+      first = data['items'][0]
+      return json_to_config(data, len(first) == 2)
+    elif self._is_api_list:
+      return ApiList.from_json_dict(data, resource_root, self._atype)
+    elif isinstance(data, list):
+      return [ self.from_json(resource_root, x) for x in data ]
+    elif hasattr(self._atype, 'from_json_dict'):
+      return self._atype.from_json_dict(data, resource_root)
+    else:
+      return data
+
+class ROAttr(Attr):
+  """
+  Subclass that just defines the attribute as read-only.
+  """
+  def __init__(self, atype=None, is_api_list=False):
+    Attr.__init__(self, atype=atype, rw=False, is_api_list=is_api_list)
+
+
+def check_api_version(resource_root, min_version):
+  """
+  Checks if the resource_root's API version is at least the given minimum
+  version.
+  """
+  if resource_root.version < min_version:
+    raise Exception("API version %s is required but %s is in use."
+        % (min_version, resource_root.version))
+
+
+def call(method, path, ret_type,
+    ret_is_list=False, data=None, params=None, api_version=1):
+  """
+  Generic function for calling a resource method and automatically dealing with
+  serialization of parameters and deserialization of return values.
+
+  @param method: method to call (must be bound to a resource;
+                 e.g., "resource_root.get").
+  @param path: the full path of the API method to call.
+  @param ret_type: return type of the call.
+  @param ret_is_list: whether the return type is an ApiList.
+  @param data: Optional data to send as payload to the call.
+  @param params: Optional query parameters for the call.
+  @param api_version: minimum API version for the call.
+  """
+  check_api_version(method.im_self, api_version)
+  if data:
+    data = json.dumps(Attr(is_api_list=True).to_json(data, False))
+    ret = method(path, data=data, params=params)
+  else:
+    ret = method(path, params=params)
+  if ret_type is None:
+    return
+  elif ret_is_list:
+    return ApiList.from_json_dict(ret, method.im_self, ret_type)
+  elif isinstance(ret, list):
+    return [ ret_type.from_json_dict(x, method.im_self) for x in ret ]
+  else:
+    return ret_type.from_json_dict(ret, method.im_self)
+
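+# Illustrative sketch (not part of the upstream module): using call() directly,
+# assuming `api` is the root resource (so `api.get` is a bound method) and the
+# paths below are hypothetical:
+#
+#   cmd = call(api.get, '/commands/42', ApiCommand)
+#   cmds = call(api.get, '/clusters/c1/commands', ApiCommand, ret_is_list=True)
+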
+class BaseApiObject(object):
+  """
+  The BaseApiObject helps with (de)serialization from/to JSON.
+
+  The derived class has two ways of defining custom attributes:
+   - Overwriting the '_ATTRIBUTES' field with the attribute dictionary
+   - Override the _get_attributes() method, in case static initialization of
+     the above field is not possible.
+
+  It's recommended that the _get_attributes() implementation do caching to
+  avoid computing the dictionary on every invocation.
+
+  The derived class's constructor must call the base class's init() static
+  method. All constructor arguments (aside from self and resource_root) must
+  be keyword arguments with default values (typically None), or
+  from_json_dict() will not work.
+  """
+
+  _ATTRIBUTES = { }
+  _WHITELIST = ( '_resource_root', '_attributes' )
+
+  @classmethod
+  def _get_attributes(cls):
+    """
+    Returns a map of property names to attr instances (or None for default
+    attribute behavior) describing the properties of the object.
+
+    By default, this method will return the class's _ATTRIBUTES field.
+    Classes can override this method to do custom initialization of the
+    attributes when needed.
+    """
+    return cls._ATTRIBUTES
+
+  @staticmethod
+  def init(obj, resource_root, attrs=None):
+    """
+    Wrapper around the real constructor to avoid issues with the 'self'
+    argument. Call like this, from a subclass's constructor:
+
+     - BaseApiObject.init(self, resource_root, locals())
+    """
+    # This works around http://bugs.python.org/issue2646
+    # We use unicode strings as keys in kwargs.
+    str_attrs = { }
+    if attrs:
+      for k, v in attrs.iteritems():
+        if k not in ('self', 'resource_root'):
+          str_attrs[k] = v
+    BaseApiObject.__init__(obj, resource_root, **str_attrs)
+
+  def __init__(self, resource_root, **attrs):
+    """
+    Initializes internal state and sets all known writable properties of the
+    object to None. Then initializes the properties given in the provided
+    attributes dictionary.
+
+    @param resource_root: API resource object.
+    @param attrs: optional dictionary of attributes to set. This should only
+                  contain r/w attributes.
+    """
+    self._resource_root = resource_root
+
+    for name, attr in self._get_attributes().iteritems():
+      object.__setattr__(self, name, None)
+    if attrs:
+      self._set_attrs(attrs, from_json=False)
+
+  def _set_attrs(self, attrs, allow_ro=False, from_json=True):
+    """
+    Sets all the attributes in the dictionary. Optionally, allows setting
+    read-only attributes (e.g. when deserializing from JSON) and skipping
+    JSON deserialization of values.
+    """
+    for k, v in attrs.iteritems():
+      attr = self._check_attr(k, allow_ro)
+      if attr and from_json:
+        v = attr.from_json(self._get_resource_root(), v)
+      object.__setattr__(self, k, v)
+
+  def __setattr__(self, name, val):
+    if name not in BaseApiObject._WHITELIST:
+      self._check_attr(name, False)
+    object.__setattr__(self, name, val)
+
+  def _check_attr(self, name, allow_ro):
+    if name not in self._get_attributes():
+      raise AttributeError('Invalid property %s for class %s.' %
+          (name, self.__class__.__name__))
+    attr = self._get_attributes()[name]
+    if not allow_ro and attr and not attr.rw:
+      raise AttributeError('Attribute %s of class %s is read only.' %
+          (name, self.__class__.__name__))
+    return attr
+
+  def _get_resource_root(self):
+    return self._resource_root
+
+  def _update(self, api_obj):
+    """Copy state from api_obj to this object."""
+    if not isinstance(self, api_obj.__class__):
+      raise ValueError(
+          "Class %s does not derive from %s; cannot update attributes." %
+          (self.__class__, api_obj.__class__))
+
+    for name in self._get_attributes().keys():
+      try:
+        val = getattr(api_obj, name)
+        setattr(self, name, val)
+      except AttributeError, ignored:
+        pass
+
+  def to_json_dict(self, preserve_ro=False):
+    dic = { }
+    for name, attr in self._get_attributes().iteritems():
+      if not preserve_ro and attr and not attr.rw:
+        continue
+      try:
+        value = getattr(self, name)
+        if value is not None:
+          if attr:
+            dic[name] = attr.to_json(value, preserve_ro)
+          else:
+            dic[name] = value
+      except AttributeError:
+        pass
+    return dic
+
+  def __str__(self):
+    """
+    Default implementation of __str__. Uses the type name and the first
+    attribute retrieved from the attribute map to create the string.
+    """
+    name = self._get_attributes().keys()[0]
+    value = getattr(self, name, None)
+    return "<%s>: %s = %s" % (self.__class__.__name__, name, value)
+
+  @classmethod
+  def from_json_dict(cls, dic, resource_root):
+    obj = cls(resource_root)
+    obj._set_attrs(dic, allow_ro=True)
+    return obj
+
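+# Illustrative sketch (not part of the upstream module): a minimal hypothetical
+# subclass following the pattern described above.
+#
+#   class ApiExample(BaseApiObject):
+#     _ATTRIBUTES = {
+#       'name'      : None,                      # read/write attribute
+#       'createdAt' : ROAttr(datetime.datetime), # read-only, typed attribute
+#     }
+#
+#     def __init__(self, resource_root, name=None):
+#       BaseApiObject.init(self, resource_root, locals())
+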
+class BaseApiResource(BaseApiObject):
+  """
+  A specialization of BaseApiObject that provides some utility methods for
+  resources. This class allows easier serialization / deserialization of
+  parameters and return values.
+  """
+
+  def _api_version(self):
+    """
+    Returns the minimum API version for this resource. Defaults to 1.
+    """
+    return 1
+
+  def _path(self):
+    """
+    Returns the path to the resource.
+
+    e.g., for a service 'foo' in cluster 'bar', this should return
+    '/clusters/bar/services/foo'.
+    """
+    raise NotImplementedError
+
+  def _require_min_api_version(self, version):
+    """
+    Raise an exception if the version of the api is less than the given version.
+
+    @param version: The minimum required version.
+    """
+    actual_version = self._get_resource_root().version
+    version = max(version, self._api_version())
+    if actual_version < version:
+      raise Exception("API version %s is required but %s is in use."
+          % (version, actual_version))
+
+  def _cmd(self, command, data=None, params=None, api_version=1):
+    """
+    Invokes a command on the resource. Commands are expected to be under the
+    "commands/" sub-resource.
+    """
+    return self._post("commands/" + command, ApiCommand,
+        data=data, params=params, api_version=api_version)
+
+  def _get_config(self, rel_path, view, api_version=1):
+    """
+    Retrieves an ApiConfig list from the given relative path.
+    """
+    self._require_min_api_version(api_version)
+    params = view and dict(view=view) or None
+    resp = self._get_resource_root().get(self._path() + '/' + rel_path,
+        params=params)
+    return json_to_config(resp, view == 'full')
+
+  def _update_config(self, rel_path, config, api_version=1):
+    self._require_min_api_version(api_version)
+    resp = self._get_resource_root().put(self._path() + '/' + rel_path,
+        data=config_to_json(config))
+    return json_to_config(resp, False)
+
+  def _delete(self, rel_path, ret_type, ret_is_list=False, params=None,
+      api_version=1):
+    return self._call('delete', rel_path, ret_type, ret_is_list, None, params,
+      api_version)
+
+  def _get(self, rel_path, ret_type, ret_is_list=False, params=None,
+      api_version=1):
+    return self._call('get', rel_path, ret_type, ret_is_list, None, params,
+      api_version)
+
+  def _post(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
+      api_version=1):
+    return self._call('post', rel_path, ret_type, ret_is_list, data, params,
+      api_version)
+
+  def _put(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
+      api_version=1):
+    return self._call('put', rel_path, ret_type, ret_is_list, data, params,
+      api_version)
+
+  def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None,
+      params=None, api_version=1):
+    path = self._path()
+    if rel_path:
+      path += '/' + rel_path
+    return call(getattr(self._get_resource_root(), method),
+        path,
+        ret_type,
+        ret_is_list,
+        data,
+        params,
+        api_version)
+
+class ApiList(BaseApiObject):
+  """A list of some api object"""
+  LIST_KEY = "items"
+
+  def __init__(self, objects, resource_root=None, **attrs):
+    BaseApiObject.__init__(self, resource_root, **attrs)
+    # Bypass checks in BaseApiObject.__setattr__
+    object.__setattr__(self, 'objects', objects)
+
+  def __str__(self):
+    return "<ApiList>(%d): [%s]" % (
+        len(self.objects),
+        ", ".join([str(item) for item in self.objects]))
+
+  def to_json_dict(self, preserve_ro=False):
+    ret = BaseApiObject.to_json_dict(self, preserve_ro)
+    attr = Attr()
+    ret[ApiList.LIST_KEY] = [ attr.to_json(x, preserve_ro) for x in self.objects ]
+    return ret
+
+  def __len__(self):
+    return self.objects.__len__()
+
+  def __iter__(self):
+    return self.objects.__iter__()
+
+  def __getitem__(self, i):
+    return self.objects.__getitem__(i)
+
+  def __getslice__(self, i, j):
+    return self.objects.__getslice__(i, j)
+
+  @classmethod
+  def from_json_dict(cls, dic, resource_root, member_cls=None):
+    if not member_cls:
+      member_cls = cls._MEMBER_CLASS
+    attr = Attr(atype=member_cls)
+    items = []
+    if ApiList.LIST_KEY in dic:
+      items = [ attr.from_json(resource_root, x) for x in dic[ApiList.LIST_KEY] ]
+    ret = cls(items)
+    # If the class declares custom attributes, populate them based on the input
+    # dict. The check avoids extra overhead for the common case, where we just
+    # have a plain list. _set_attrs() also does not understand the "items"
+    # attribute, so it can't be in the input data.
+    if cls._ATTRIBUTES:
+      if ApiList.LIST_KEY in dic:
+        dic = copy.copy(dic)
+        del dic[ApiList.LIST_KEY]
+      ret._set_attrs(dic, allow_ro=True)
+    return ret
+
+class ApiHostRef(BaseApiObject):
+  _ATTRIBUTES = {
+    'hostId'  : None,
+  }
+
+  def __init__(self, resource_root, hostId=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiHostRef>: %s" % (self.hostId)
+
+class ApiServiceRef(BaseApiObject):
+  _ATTRIBUTES = {
+    'clusterName' : None,
+    'serviceName' : None,
+    'peerName'    : None,
+  }
+
+  def __init__(self, resource_root, serviceName=None, clusterName=None,
+      peerName=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+class ApiClusterRef(BaseApiObject):
+  _ATTRIBUTES = {
+    'clusterName' : None,
+  }
+
+  def __init__(self, resource_root, clusterName = None):
+    BaseApiObject.init(self, resource_root, locals())
+
+class ApiRoleRef(BaseApiObject):
+  _ATTRIBUTES = {
+    'clusterName' : None,
+    'serviceName' : None,
+    'roleName'    : None,
+  }
+
+  def __init__(self, resource_root, serviceName=None, roleName=None,
+      clusterName=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+class ApiRoleConfigGroupRef(BaseApiObject):
+  _ATTRIBUTES = {
+    'roleConfigGroupName' : None,
+  }
+
+  def __init__(self, resource_root, roleConfigGroupName=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+class ApiCommand(BaseApiObject):
+  SYNCHRONOUS_COMMAND_ID = -1
+
+  @classmethod
+  def _get_attributes(cls):
+    if not cls.__dict__.has_key('_ATTRIBUTES'):
+      cls._ATTRIBUTES = {
+        'id'            : ROAttr(),
+        'name'          : ROAttr(),
+        'startTime'     : ROAttr(datetime.datetime),
+        'endTime'       : ROAttr(datetime.datetime),
+        'active'        : ROAttr(),
+        'success'       : ROAttr(),
+        'resultMessage' : ROAttr(),
+        'clusterRef'    : ROAttr(ApiClusterRef),
+        'serviceRef'    : ROAttr(ApiServiceRef),
+        'roleRef'       : ROAttr(ApiRoleRef),
+        'hostRef'       : ROAttr(ApiHostRef),
+        'children'      : ROAttr(ApiCommand, is_api_list=True),
+        'parent'        : ROAttr(ApiCommand),
+        'resultDataUrl' : ROAttr(),
+      }
+    return cls._ATTRIBUTES
+
+  def __str__(self):
+    return "<ApiCommand>: '%s' (id: %s; active: %s; success: %s)" % (
+        self.name, self.id, self.active, self.success)
+
+  def _path(self):
+    return '/commands/%d' % self.id
+
+  def fetch(self):
+    """
+    Retrieve updated data about the command from the server.
+
+    @return: A new ApiCommand object.
+    """
+    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
+      return self
+
+    resp = self._get_resource_root().get(self._path())
+    return ApiCommand.from_json_dict(resp, self._get_resource_root())
+
+  def wait(self, timeout=None):
+    """
+    Wait for command to finish.
+
+    @param timeout: (Optional) Max amount of time (in seconds) to wait. Wait
+                    forever by default.
+    @return: The final ApiCommand object, containing the last known state.
+             The command may still be running in case of timeout.
+    """
+    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
+      return self
+
+    SLEEP_SEC = 5
+
+    if timeout is None:
+      deadline = None
+    else:
+      deadline = time.time() + timeout
+
+    while True:
+      cmd = self.fetch()
+      if not cmd.active:
+        return cmd
+
+      if deadline is not None:
+        now = time.time()
+        if deadline < now:
+          return cmd
+        else:
+          time.sleep(min(SLEEP_SEC, deadline - now))
+      else:
+        time.sleep(SLEEP_SEC)
+
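+  # Illustrative sketch (not part of the upstream module): polling a submitted
+  # command, assuming `cmd` is an ApiCommand returned by some command method:
+  #
+  #   cmd = cmd.wait(timeout=300)
+  #   if cmd.active:
+  #     cmd = cmd.abort()
+  #   print cmd.success, cmd.resultMessage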
+
+  def abort(self):
+    """
+    Abort a running command.
+
+    @return: A new ApiCommand object with the updated information.
+    """
+    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
+      return self
+
+    path = self._path() + '/abort'
+    resp = self._get_resource_root().post(path)
+    return ApiCommand.from_json_dict(resp, self._get_resource_root())
+
+class ApiBulkCommandList(ApiList):
+  _ATTRIBUTES = {
+    'errors' : ROAttr(),
+  }
+  _MEMBER_CLASS = ApiCommand
+
+class ApiCommandMetadata(BaseApiObject):
+  _ATTRIBUTES = {
+    'name'      : ROAttr(),
+    'argSchema' : ROAttr(),
+  }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+  def __str__(self):
+    return "<ApiCommandMetadata>: %s (%s)" % (self.name, self.argSchema)
+
+#
+# Metrics.
+#
+
+class ApiMetricData(BaseApiObject):
+  """Metric reading data."""
+
+  _ATTRIBUTES = {
+    'timestamp' : ROAttr(datetime.datetime),
+    'value'     : ROAttr(),
+  }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+
+class ApiMetric(BaseApiObject):
+  """Metric information."""
+
+  _ATTRIBUTES = {
+    'name'        : ROAttr(),
+    'context'     : ROAttr(),
+    'unit'        : ROAttr(),
+    'data'        : ROAttr(ApiMetricData),
+    'displayName' : ROAttr(),
+    'description' : ROAttr(),
+  }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+#
+# Activities.
+#
+
+class ApiActivity(BaseApiObject):
+  _ATTRIBUTES = {
+    'name'              : ROAttr(),
+    'type'              : ROAttr(),
+    'parent'            : ROAttr(),
+    'startTime'         : ROAttr(),
+    'finishTime'        : ROAttr(),
+    'id'                : ROAttr(),
+    'status'            : ROAttr(),
+    'user'              : ROAttr(),
+    'group'             : ROAttr(),
+    'inputDir'          : ROAttr(),
+    'outputDir'         : ROAttr(),
+    'mapper'            : ROAttr(),
+    'combiner'          : ROAttr(),
+    'reducer'           : ROAttr(),
+    'queueName'         : ROAttr(),
+    'schedulerPriority' : ROAttr(),
+  }
+
+  def __init__(self, resource_root):
+    BaseApiObject.init(self, resource_root)
+
+  def __str__(self):
+    return "<ApiActivity>: %s (%s)" % (self.name, self.status)
+
+#
+# Replication
+#
+
+class ApiCmPeer(BaseApiObject):
+  _ATTRIBUTES = {
+      'name'      : None,
+      'url'       : None,
+      'username'  : None,
+      'password'  : None,
+    }
+
+  def __str__(self):
+    return "<ApiPeer>: %s (%s)" % (self.name, self.url)
+
+class ApiHdfsReplicationArguments(BaseApiObject):
+  _ATTRIBUTES = {
+    'sourceService'             : Attr(ApiServiceRef),
+    'sourcePath'                : None,
+    'destinationPath'           : None,
+    'mapreduceServiceName'      : None,
+    'userName'                  : None,
+    'numMaps'                   : None,
+    'dryRun'                    : None,
+    'schedulerPoolName'         : None,
+    'abortOnError'              : None,
+    'preservePermissions'       : None,
+    'preserveBlockSize'         : None,
+    'preserveReplicationCount'  : None,
+    'removeMissingFiles'        : None,
+    'skipChecksumChecks'        : None,
+  }
+
+class ApiHdfsReplicationResult(BaseApiObject):
+  _ATTRIBUTES = {
+    'progress'            : ROAttr(),
+    'counters'            : ROAttr(),
+    'numBytesDryRun'      : ROAttr(),
+    'numFilesDryRun'      : ROAttr(),
+    'numFilesExpected'    : ROAttr(),
+    'numBytesExpected'    : ROAttr(),
+    'numFilesCopied'      : ROAttr(),
+    'numBytesCopied'      : ROAttr(),
+    'numFilesSkipped'     : ROAttr(),
+    'numBytesSkipped'     : ROAttr(),
+    'numFilesDeleted'     : ROAttr(),
+    'numFilesCopyFailed'  : ROAttr(),
+    'numBytesCopyFailed'  : ROAttr(),
+    'setupError'          : ROAttr(),
+    'jobId'               : ROAttr(),
+    'jobDetailsUri'       : ROAttr(),
+    'dryRun'              : ROAttr(),
+    'snapshottedDirs'     : ROAttr(),
+  }
+
+class ApiHiveTable(BaseApiObject):
+  _ATTRIBUTES = {
+    'database'  : None,
+    'tableName' : None,
+  }
+
+  def __str__(self):
+    return "<ApiHiveTable>: %s, %s" % (self.database, self.tableName)
+
+class ApiImpalaUDF(BaseApiObject):
+  _ATTRIBUTES = {
+    'database'  : ROAttr(),
+    'signature' : ROAttr(),
+  }
+
+  def __str__(self):
+    return "<ApiImpalaUDF>: %s, %s" % (self.database, self.signature)
+
+class ApiHiveReplicationArguments(BaseApiObject):
+  _ATTRIBUTES = {
+    'sourceService' : Attr(ApiServiceRef),
+    'tableFilters'  : Attr(ApiHiveTable),
+    'exportDir'     : None,
+    'force'         : None,
+    'replicateData' : None,
+    'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
+    'dryRun'        : None,
+  }
+
+class ApiHiveReplicationResult(BaseApiObject):
+  _ATTRIBUTES = {
+    'tableCount'            : ROAttr(),
+    'tables'                : ROAttr(ApiHiveTable),
+    'impalaUDFCount'        : ROAttr(),
+    'impalaUDFs'            : ROAttr(ApiImpalaUDF),
+    'errorCount'            : ROAttr(),
+    'errors'                : ROAttr(),
+    'dataReplicationResult' : ROAttr(ApiHdfsReplicationResult),
+    'dryRun'                : ROAttr(),
+    'phase'                 : ROAttr(),
+  }
+
+class ApiReplicationCommand(ApiCommand):
+  @classmethod
+  def _get_attributes(cls):
+    if not cls.__dict__.has_key('_ATTRIBUTES'):
+      attrs = {
+        'hdfsResult'  : ROAttr(ApiHdfsReplicationResult),
+        'hiveResult'  : ROAttr(ApiHiveReplicationResult),
+      }
+      attrs.update(ApiCommand._get_attributes())
+      cls._ATTRIBUTES = attrs
+    return cls._ATTRIBUTES
+
+class ApiReplicationSchedule(BaseApiObject):
+  _ATTRIBUTES = {
+    'startTime'       : Attr(datetime.datetime),
+    'endTime'         : Attr(datetime.datetime),
+    'interval'        : None,
+    'intervalUnit'    : None,
+    'paused'          : None,
+    'hdfsArguments'   : Attr(ApiHdfsReplicationArguments),
+    'hiveArguments'   : Attr(ApiHiveReplicationArguments),
+    'alertOnStart'    : None,
+    'alertOnSuccess'  : None,
+    'alertOnFail'     : None,
+    'alertOnAbort'    : None,
+    'id'              : ROAttr(),
+    'nextRun'         : ROAttr(datetime.datetime),
+    'history'         : ROAttr(ApiReplicationCommand),
+  }
+
+class ApiHBaseSnapshotPolicyArguments(BaseApiObject):
+  _ATTRIBUTES = {
+    'tableRegExps' : None,
+  }
+
+class ApiHdfsSnapshotPolicyArguments(BaseApiObject):
+  _ATTRIBUTES = {
+    'pathPatterns' : None,
+  }
+
+class ApiHBaseSnapshot(BaseApiObject):
+  _ATTRIBUTES = {
+    'snapshotName'  : None,
+    'tableName'     : None,
+    'creationTime'  : ROAttr(datetime.datetime),
+  }
+
+class ApiHBaseSnapshotError(BaseApiObject):
+  _ATTRIBUTES = {
+    'tableName'     : ROAttr(),
+    'snapshotName'  : ROAttr(),
+    'error'         : ROAttr(),
+  }
+
+class ApiHdfsSnapshot(BaseApiObject):
+  _ATTRIBUTES = {
+    'path'          : None,
+    'snapshotName'  : None,
+    'snapshotPath'  : None,
+    'creationTime'  : ROAttr(datetime.datetime),
+  }
+
+class ApiHdfsSnapshotError(BaseApiObject):
+  _ATTRIBUTES = {
+    'path'          : ROAttr(),
+    'snapshotName'  : ROAttr(),
+    'snapshotPath'  : ROAttr(),
+    'error'         : ROAttr(),
+  }
+
+class ApiHBaseSnapshotResult(BaseApiObject):
+  _ATTRIBUTES = {
+    'processedTableCount'       : ROAttr(),
+    'processedTables'           : ROAttr(),
+    'unprocessedTableCount'     : ROAttr(),
+    'unprocessedTables'         : ROAttr(),
+    'createdSnapshotCount'      : ROAttr(),
+    'createdSnapshots'          : ROAttr(ApiHBaseSnapshot),
+    'deletedSnapshotCount'      : ROAttr(),
+    'deletedSnapshots'          : ROAttr(ApiHBaseSnapshot),
+    'creationErrorCount'        : ROAttr(),
+    'creationErrors'            : ROAttr(ApiHBaseSnapshotError),
+    'deletionErrorCount'        : ROAttr(),
+    'deletionErrors'            : ROAttr(ApiHBaseSnapshotError),
+  }
+
+class ApiHdfsSnapshotResult(BaseApiObject):
+  _ATTRIBUTES = {
+    'processedPathCount'       : ROAttr(),
+    'processedPaths'           : ROAttr(),
+    'unprocessedPathCount'     : ROAttr(),
+    'unprocessedPaths'         : ROAttr(),
+    'createdSnapshotCount'     : ROAttr(),
+    'createdSnapshots'         : ROAttr(ApiHdfsSnapshot),
+    'deletedSnapshotCount'     : ROAttr(),
+    'deletedSnapshots'         : ROAttr(ApiHdfsSnapshot),
+    'creationErrorCount'       : ROAttr(),
+    'creationErrors'           : ROAttr(ApiHdfsSnapshotError),
+    'deletionErrorCount'       : ROAttr(),
+    'deletionErrors'           : ROAttr(ApiHdfsSnapshotError),
+  }
+
+class ApiSnapshotCommand(BaseApiObject):
+  @classmethod
+  def _get_attributes(cls):
+    if not cls.__dict__.has_key('_ATTRIBUTES'):
+      attrs = {
+        'hdfsResult'   : ROAttr(ApiHdfsSnapshotResult),
+        'hbaseResult'  : ROAttr(ApiHBaseSnapshotResult),
+      }
+      attrs.update(ApiCommand._get_attributes())
+      cls._ATTRIBUTES = attrs
+    return cls._ATTRIBUTES
+
+class ApiSnapshotPolicy(BaseApiObject):
+  """
+  @type name: str
+  @ivar name: Name of the snapshot policy.
+  @type description: str
+  @ivar description: Description of the snapshot policy.
+  @type hourly_snapshots: int
+  @ivar hourly_snapshots: Number of hourly snapshots to be retained (default: 0).
+  @type daily_snapshots: int
+  @ivar daily_snapshots: Number of daily snapshots to be retained (default: 0).
+  @type weekly_snapshots: int
+  @ivar weekly_snapshots: Number of weekly snapshots to be retained (default: 0).
+  @type monthly_snapshots: int
+  @ivar monthly_snapshots: Number of monthly snapshots to be retained (default: 0).
+  @type yearly_snapshots: int
+  @ivar yearly_snapshots: Number of yearly snapshots to be retained (default: 0).
+  @type hours_for_hourly_snapshots: list of int
+  @ivar hours_for_hourly_snapshots: Hours of the day that hourly snapshots should be created.
+         Valid values are 0 to 23. If this list is empty, then hourly snapshots are
+         created for every hour.
+  @type minute_of_hour: int
+  @ivar minute_of_hour: Minute in the hour that hourly, daily, weekly, monthly and yearly
+         snapshots should be created. Valid values are 0 to 59 (default: 0).
+  @type hour_of_day: int
+  @ivar hour_of_day: Hour in the day that daily, weekly, monthly and yearly snapshots should be created.
+        Valid values are 0 to 23 (default: 0).
+  @type day_of_week: int
+  @ivar day_of_week: Day of the week that weekly snapshots should be created.
+         Valid values are 1 to 7, 1 representing Sunday (default: 1).
+  @type day_of_month: int
+  @ivar day_of_month: Day of the month that monthly and yearly snapshots should be created.
+         Values from 1 to 31 are allowed. Additionally 0 to -30 can be used to
+         specify offsets from the last day of the month (default: 1).
+  @type month_of_year: int
+  @ivar month_of_year: Month of the year that yearly snapshots should be created.
+         Valid values are 1 to 12, 1 representing January (default: 1).
+  @ivar alert_on_start: whether to generate alerts on start of snapshot creation/deletion activity.
+  @ivar alert_on_success: whether to generate alerts on successful completion of snapshot creation/deletion activity.
+  @ivar alert_on_fail: whether to generate alerts on failure of snapshot creation/deletion activity.
+  @ivar alert_on_abort: whether to generate alerts on abort of snapshot creation/deletion activity.
+  @type hbaseArguments: ApiHBaseSnapshotPolicyArguments
+  @ivar hbaseArguments: HBase specific arguments for the replication job.
+  @type hdfsArguments: ApiHdfsSnapshotPolicyArguments
+  @ivar hdfsArguments: HDFS specific arguments for the replication job.
+  """
+  _ATTRIBUTES = {
+    'name'                    : None,
+    'description'             : None,
+    'hourlySnapshots'         : None,
+    'dailySnapshots'          : None,
+    'weeklySnapshots'         : None,
+    'monthlySnapshots'        : None,
+    'yearlySnapshots'         : None,
+    'minuteOfHour'            : None,
+    'hourOfDay'               : None,
+    'dayOfWeek'               : None,
+    'dayOfMonth'              : None,
+    'monthOfYear'             : None,
+    'hoursForHourlySnapshots' : None,
+    'alertOnStart'            : None,
+    'alertOnSuccess'          : None,
+    'alertOnFail'             : None,
+    'alertOnAbort'            : None,
+    'hbaseArguments'          : Attr(ApiHBaseSnapshotPolicyArguments),
+    'hdfsArguments'           : Attr(ApiHdfsSnapshotPolicyArguments),
+    'lastCommand'             : ROAttr(ApiSnapshotCommand),
+    'lastSuccessfulCommand'   : ROAttr(ApiSnapshotCommand),
+  }
+
+#
+# Batch.
+#
+
+class ApiBatchRequestElement(BaseApiObject):
+  """One element in a batch request."""
+  _ATTRIBUTES = {
+    'method'          : None,
+    'url'             : None,
+    'body'            : None,
+    'contentType'     : None,
+    'acceptType'      : None,
+  }
+
+class ApiBatchResponseElement(BaseApiObject):
+  """One element in a batch response."""
+  _ATTRIBUTES = {
+    'statusCode'      : ROAttr(),
+    'response'        : ROAttr(),
+  }
+
+class ApiBatchResponseList(ApiList):
+  """A list of batch response objects."""
+  _ATTRIBUTES = {
+    'success' : ROAttr(),
+  }
+  _MEMBER_CLASS = ApiBatchResponseElement
+
+#
+# Configuration helpers.
+#
+
+class ApiConfig(BaseApiObject):
+  _ATTRIBUTES = {
+    'name'              : None,
+    'value'             : None,
+    'required'          : ROAttr(),
+    'default'           : ROAttr(),
+    'displayName'       : ROAttr(),
+    'description'       : ROAttr(),
+    'relatedName'       : ROAttr(),
+    'validationState'   : ROAttr(),
+    'validationMessage' : ROAttr(),
+  }
+
+  def __init__(self, resource_root, name=None, value=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def __str__(self):
+    return "<ApiConfig>: %s = %s" % (self.name, self.value)
+
+class ApiImpalaQuery(BaseApiObject):
+  _ATTRIBUTES = {
+    'queryId'          : ROAttr(),
+    'queryState'       : ROAttr(),
+    'queryType'        : ROAttr(),
+    'statement'        : ROAttr(),
+    'database'         : ROAttr(),
+    'rowsProduced'     : ROAttr(),
+    'coordinator'      : ROAttr(ApiHostRef),
+    'user'             : ROAttr(),
+    'startTime'        : ROAttr(datetime.datetime),
+    'endTime'          : ROAttr(datetime.datetime),
+    'detailsAvailable' : ROAttr(),
+    'attributes'       : ROAttr(),
+    'durationMillis'   : ROAttr()
+  }
+
+  def __str__(self):
+    return "<ApiImpalaQuery>: %s" % (self.queryId)
+
+
+class ApiImpalaQueryResponse(BaseApiObject):
+
+  _ATTRIBUTES = {
+    'queries'   : ROAttr(ApiImpalaQuery),
+    'warnings'  : ROAttr()
+  }
+
+class ApiImpalaQueryDetailsResponse(BaseApiObject):
+  _ATTRIBUTES = {
+    'details' : ROAttr()
+  }
+
+  def __str__(self):
+    return "<AipImpalaQueryDetailsResponse> %s" % self.details
+
+class ApiImpalaCancelResponse(BaseApiObject):
+  _ATTRIBUTES = {
+    'warning' : ROAttr()
+  }
+
+  def __str__(self):
+    return "<ApiImpalaCancelResponse> %s" % self.warning
+
+class ApiImpalaQueryAttribute(BaseApiObject):
+
+  _ATTRIBUTES = {
+    'name'               : ROAttr(),
+    'type'               : ROAttr(),
+    'displayName'        : ROAttr(),
+    'supportsHistograms' : ROAttr(),
+    'description'        : ROAttr()
+  }
+
+  def __str__(self):
+    return "<ApiImpalaQueryAttribute> %s" % name
+
+class ApiMr2AppInformation(BaseApiObject):
+  _ATTRIBUTES = {
+    'jobState'               : ROAttr()
+  }
+
+  def __str__(self):
+    return "<ApiMr2AppInformation>: %s" % (self.jobState)
+
+class ApiYarnApplication(BaseApiObject):
+  _ATTRIBUTES = {
+    'applicationId'          : ROAttr(),
+    'name'                   : ROAttr(),
+    'user'                   : ROAttr(),
+    'startTime'              : ROAttr(datetime.datetime),
+    'endTime'                : ROAttr(datetime.datetime),
+    'pool'                   : ROAttr(),
+    'state'                  : ROAttr(),
+    'progress'               : ROAttr(),
+    'mr2AppInformation'      : ROAttr(ApiMr2AppInformation),
+    'attributes'             : ROAttr(),
+  }
+
+  def __str__(self):
+    return "<ApiYarnApplication>: %s" % (self.applicationId)
+
+class ApiYarnApplicationResponse(BaseApiObject):
+
+  _ATTRIBUTES = {
+    'applications'   : ROAttr(ApiYarnApplication),
+    'warnings'       : ROAttr()
+  }
+
+class ApiYarnKillResponse(BaseApiObject):
+  _ATTRIBUTES = {
+    'warning' : ROAttr()
+  }
+
+  def __str__(self):
+    return "<ApiYarnKillResponse> %s" % self.warning
+
+class ApiYarnApplicationAttribute(BaseApiObject):
+
+  _ATTRIBUTES = {
+    'name'               : ROAttr(),
+    'type'               : ROAttr(),
+    'displayName'        : ROAttr(),
+    'supportsHistograms' : ROAttr(),
+    'description'        : ROAttr()
+  }
+
+  def __str__(self):
+    return "<ApiYarnApplicationAttribute> %s" % name
+
+def config_to_api_list(dic):
+  """
+  Converts a python dictionary into a list containing the proper
+  ApiConfig encoding for configuration data.
+
+  @param dic: Key-value pairs to convert.
+  @return: JSON dictionary of an ApiConfig list (*not* an ApiList).
+  """
+  config = [ ]
+  for k, v in dic.iteritems():
+    config.append({ 'name' : k, 'value': v })
+  return { ApiList.LIST_KEY : config }
+
+def config_to_json(dic):
+  """
+  Converts a python dictionary into a JSON payload.
+
+  The payload matches the expected "apiConfig list" type used to update
+  configuration parameters using the API.
+
+  @param dic: Key-value pairs to convert.
+  @return: String with the JSON-encoded data.
+  """
+  return json.dumps(config_to_api_list(dic))
+
+def json_to_config(dic, full = False):
+  """
+  Converts a JSON-decoded config dictionary to a python dictionary.
+
+  When materializing the full view, the values in the dictionary will be
+  instances of ApiConfig, instead of strings.
+
+  @param dic: JSON-decoded config dictionary.
+  @param full: Whether to materialize the full view of the config data.
+  @return: Python dictionary with config data.
+  """
+  config = { }
+  for entry in dic['items']:
+    k = entry['name']
+    if full:
+      config[k] = ApiConfig.from_json_dict(entry, None)
+    else:
+      config[k] = entry.get('value')
+  return config
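+
+# Illustrative sketch (not part of the upstream module): round-tripping config
+# data with the helpers above. The config key is a hypothetical example:
+#
+#   payload = config_to_json({'dfs_replication': '3'})
+#   summary = json_to_config(
+#       {'items': [{'name': 'dfs_replication', 'value': '3'}]})
+#   full = json_to_config(
+#       {'items': [{'name': 'dfs_replication', 'value': '3', 'default': '3'}]},
+#       full=True)
+#   # summary['dfs_replication'] is the string '3'; full['dfs_replication'] is
+#   # an ApiConfig instance.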
diff --git a/cm-api/src/cm_api/endpoints/users.py b/cm-api/src/cm_api/endpoints/users.py
new file mode 100644 (file)
index 0000000..69c4395
--- /dev/null
@@ -0,0 +1,116 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api.endpoints.types import *
+
+USERS_PATH = "/users"
+
+def get_all_users(resource_root, view=None):
+  """
+  Get all users.
+
+  @param resource_root: The root Resource object
+  @param view: View to materialize ('full' or 'summary').
+  @return: A list of ApiUser objects.
+  """
+  return call(resource_root.get, USERS_PATH, ApiUser, True,
+      params=view and dict(view=view) or None)
+
+def get_user(resource_root, username):
+  """
+  Look up a user by username.
+
+  @param resource_root: The root Resource object
+  @param username: Username to look up
+  @return: An ApiUser object
+  """
+  return call(resource_root.get,
+      '%s/%s' % (USERS_PATH, username), ApiUser)
+
+def create_user(resource_root, username, password, roles):
+  """
+  Create a user.
+
+  @param resource_root: The root Resource object
+  @param username: Username
+  @param password: Password
+  @param roles: List of roles for the user. This should be [] or ['ROLE_USER']
+                for a regular user, ['ROLE_ADMIN'] for an admin, or
+                ['ROLE_LIMITED'] for a limited admin.
+  @return: An ApiUser object
+  """
+  apiuser = ApiUser(resource_root, username, password=password, roles=roles)
+  return call(resource_root.post, USERS_PATH, ApiUser, True,
+      data=[apiuser])[0]
+
+def delete_user(resource_root, username):
+  """
+  Delete user by username.
+
+  @param resource_root: The root Resource object
+  @param username: Username
+  @return: An ApiUser object
+  """
+  return call(resource_root.delete,
+      '%s/%s' % (USERS_PATH, username), ApiUser)
+
+def update_user(resource_root, user):
+  """
+  Update a user.
+
+  Replaces the user's details with those provided.
+
+  @param resource_root: The root Resource object
+  @param user: An ApiUser object
+  @return: An ApiUser object
+  """
+  return call(resource_root.put,
+      '%s/%s' % (USERS_PATH, user.name), ApiUser, data=user)
+
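+# Illustrative sketch (not part of the upstream module): typical user
+# management calls, assuming `api` is the root Resource object and the
+# credentials below are placeholders:
+#
+#   user = create_user(api, 'jdoe', 'changeme', ['ROLE_USER'])
+#   user = user.grant_admin_role()
+#   for u in get_all_users(api, view='full'):
+#     print u.name, u.roles
+#   delete_user(api, 'jdoe')
+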
+class ApiUser(BaseApiResource):
+  _ATTRIBUTES = {
+    'name'      : None,
+    'password'  : None,
+    'roles'     : None,
+  }
+
+  def __init__(self, resource_root, name=None, password=None, roles=None):
+    BaseApiObject.init(self, resource_root, locals())
+
+  def _path(self):
+    return '%s/%s' % (USERS_PATH, self.name)
+
+  def grant_admin_role(self):
+    """
+    Grant admin access to a user. If the user already has admin access, this
+    does nothing. If the user currently has a non-admin role, it will be replaced
+    with the admin role.
+
+    @return: An ApiUser object
+    """
+    apiuser = ApiUser(self._get_resource_root(), self.name, roles=['ROLE_ADMIN'])
+    return self._put('', ApiUser, data=apiuser)
+
+  def revoke_admin_role(self):
+    """
+    Revoke admin access from a user. If the user does not have admin access,
+    this does nothing. After revocation, the user will have the un-privileged
+    regular user role.
+
+    @return: An ApiUser object
+    """
+    apiuser = ApiUser(self._get_resource_root(), self.name, roles=[])
+    return self._put('', ApiUser, data=apiuser)
diff --git a/cm-api/src/cm_api/http_client.py b/cm-api/src/cm_api/http_client.py
new file mode 100644 (file)
index 0000000..8172240
--- /dev/null
@@ -0,0 +1,243 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cookielib
+import logging
+import posixpath
+import types
+import urllib
+import urllib2
+
+__docformat__ = "epytext"
+
+LOG = logging.getLogger(__name__)
+
+class RestException(Exception):
+  """
+  Any error result from the Rest API is converted into this exception type.
+  """
+  def __init__(self, error):
+    Exception.__init__(self, error)
+    self._error = error
+    self._code = None
+    self._message = str(error)
+    # See if there is a code or a message. (For urllib2.HTTPError.)
+    try:
+      self._code = error.code
+      self._message = error.read()
+    except AttributeError:
+      pass
+
+  def __str__(self):
+    res = self._message or ""
+    if self._code is not None:
+      res += " (error %s)" % (self._code,)
+    return res
+
+  def get_parent_ex(self):
+    if isinstance(self._error, Exception):
+      return self._error
+    return None
+
+  @property
+  def code(self):
+    return self._code
+
+  @property
+  def message(self):
+    return self._message
+
+
+class HttpClient(object):
+  """
+  Basic HTTP client tailored for REST APIs.
+  """
+  def __init__(self, base_url, exc_class=None, logger=None):
+    """
+    @param base_url: The base url to the API.
+    @param exc_class: An exception class to handle non-200 results.
+
+    Creates an HTTP(S) client to connect to the Cloudera Manager API.
+    """
+    self._base_url = base_url.rstrip('/')
+    self._exc_class = exc_class or RestException
+    self._logger = logger or LOG
+    self._headers = { }
+
+    # Make a basic auth handler that does nothing. Set credentials later.
+    self._passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+    authhandler = urllib2.HTTPBasicAuthHandler(self._passmgr)
+
+    # Make a cookie processor
+    cookiejar = cookielib.CookieJar()
+
+    self._opener = urllib2.build_opener(
+        HTTPErrorProcessor(),
+        urllib2.HTTPCookieProcessor(cookiejar),
+        authhandler)
+
+
+  def set_basic_auth(self, username, password, realm):
+    """
+    Set up basic auth for the client
+    @param username: Login name.
+    @param password: Login password.
+    @param realm: The authentication realm.
+    @return: The current object
+    """
+    self._passmgr.add_password(realm, self._base_url, username, password)
+    return self
+
+  def set_headers(self, headers):
+    """
+    Set the headers to attach to each request, replacing any headers set
+    previously.
+    @param headers: A dictionary with the key-value pairs for the headers.
+    @return: The current object
+    """
+    self._headers = headers
+    return self
+
+
+  @property
+  def base_url(self):
+    return self._base_url
+
+  @property
+  def logger(self):
+    return self._logger
+
+  def _get_headers(self, headers):
+    res = self._headers.copy()
+    if headers:
+      res.update(headers)
+    return res
+
+  def execute(self, http_method, path, params=None, data=None, headers=None):
+    """
+    Submit an HTTP request.
+    @param http_method: GET, POST, PUT, DELETE
+    @param path: The path of the resource.
+    @param params: Key-value parameter data.
+    @param data: The data to attach to the body of the request.
+    @param headers: The headers to set for this request.
+
+    @return: The result of urllib2.urlopen()
+    """
+    # Prepare URL and params
+    url = self._make_url(path, params)
+    if http_method in ("GET", "DELETE"):
+      if data is not None:
+        self.logger.warn(
+            "%s method does not pass any data. Path '%s'" % (http_method, path))
+        data = None
+
+    # Setup the request
+    request = urllib2.Request(url, data)
+    # Hack/workaround because urllib2 only does GET and POST
+    request.get_method = lambda: http_method
+
+    headers = self._get_headers(headers)
+    for k, v in headers.items():
+      request.add_header(k, v)
+
+    # Call it
+    self.logger.debug("%s %s" % (http_method, url))
+    try:
+      return self._opener.open(request)
+    except urllib2.HTTPError, ex:
+      raise self._exc_class(ex)
+
+  def _make_url(self, path, params):
+    res = self._base_url
+    if path:
+      res += posixpath.normpath('/' + path.lstrip('/'))
+    if params:
+      param_str = urllib.urlencode(params, True)
+      res += '?' + param_str
+    return iri_to_uri(res)
+
+
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+  """
+  Python 2.4 only recognizes 200 and 206 as success. It's broken, so we
+  install this processor to treat any 2xx response as a success.
+  """
+  def http_response(self, request, response):
+    if 200 <= response.code < 300:
+      return response
+    return urllib2.HTTPErrorProcessor.http_response(self, request, response)
+
+  https_response = http_response
+
+#
+# Method copied from Django
+#
+def iri_to_uri(iri):
+    """
+    Convert an Internationalized Resource Identifier (IRI) portion to a URI
+    portion that is suitable for inclusion in a URL.
+
+    This is the algorithm from section 3.1 of RFC 3987.  However, since we are
+    assuming input is either UTF-8 or unicode already, we can simplify things a
+    little from the full method.
+
+    Returns an ASCII string containing the encoded result.
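+
+    Example (sketch): iri_to_uri(u'/caf\xe9?q=a b') returns
+    '/caf%C3%A9?q=a%20b'.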
+    """
+    # The list of safe characters here is constructed from the "reserved" and
+    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
+    #     reserved    = gen-delims / sub-delims
+    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
+    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
+    #                   / "*" / "+" / "," / ";" / "="
+    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
+    # Of the unreserved characters, urllib.quote already considers all but
+    # the ~ safe.
+    # The % character is also added to the list of safe characters here, as the
+    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
+    # converted.
+    if iri is None:
+        return iri
+    return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
+
+#
+# Method copied from Django
+#
+def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
+    """
+    Returns a bytestring version of 's', encoded as specified in 'encoding'.
+
+    If strings_only is True, don't convert (some) non-string-like objects.
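+
+    Example (sketch): smart_str(u'caf\xe9') returns the UTF-8 bytestring
+    'caf\xc3\xa9'.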
+    """
+    if strings_only and isinstance(s, (types.NoneType, int)):
+        return s
+    elif not isinstance(s, basestring):
+        try:
+            return str(s)
+        except UnicodeEncodeError:
+            if isinstance(s, Exception):
+                # An Exception subclass containing non-ASCII data that doesn't
+                # know how to print itself properly. We shouldn't raise a
+                # further exception.
+                return ' '.join([smart_str(arg, encoding, strings_only,
+                        errors) for arg in s])
+            return unicode(s).encode(encoding, errors)
+    elif isinstance(s, unicode):
+        return s.encode(encoding, errors)
+    elif s and encoding != 'utf-8':
+        return s.decode('utf-8', errors).encode(encoding, errors)
+    else:
+        return s
+
diff --git a/cm-api/src/cm_api/resource.py b/cm-api/src/cm_api/resource.py
new file mode 100644 (file)
index 0000000..fda0e53
--- /dev/null
@@ -0,0 +1,158 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+  import json
+except ImportError:
+  import simplejson as json
+import logging
+import posixpath
+import time
+import socket
+import urllib2
+
+LOG = logging.getLogger(__name__)
+
+
+class Resource(object):
+  """
+  Encapsulates a resource, and provides actions to invoke on it.
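+
+  Example (a minimal sketch; 'client' is a cm_api.http_client.HttpClient and
+  the API path is a placeholder):
+
+    root = Resource(client, relpath="api/v7")
+    clusters = root.get("clusters")   # a parsed JSON value for JSON responses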
+  """
+  def __init__(self, client, relpath=""):
+    """
+    @param client: A Client object.
+    @param relpath: The relative path of the resource.
+    """
+    self._client = client
+    self._path = relpath.strip('/')
+    self.retries = 3
+    self.retry_sleep = 3
+
+  @property
+  def base_url(self):
+    return self._client.base_url
+
+  def _join_uri(self, relpath):
+    if relpath is None:
+      return self._path
+    return self._path + posixpath.normpath('/' + relpath)
+
+  def invoke(self, method, relpath=None, params=None, data=None, headers=None):
+    """
+    Invoke an API method.
+    @return: Raw body or JSON dictionary (if response content type is JSON).
+    """
+    path = self._join_uri(relpath)
+    resp = self._client.execute(method,
+                                path,
+                                params=params,
+                                data=data,
+                                headers=headers)
+    try:
+      body = resp.read()
+    except Exception, ex:
+      raise Exception("Command '%s %s' failed: %s" %
+                      (method, path, ex))
+
+    self._client.logger.debug(
+        "%s Got response: %s%s" %
+        (method, body[:32], len(body) > 32 and "..." or ""))
+
+    # Is the response application/json?
+    if len(body) != 0 and \
+          resp.info().getmaintype() == "application" and \
+          resp.info().getsubtype() == "json":
+      try:
+        json_dict = json.loads(body)
+        return json_dict
+      except Exception, ex:
+        self._client.logger.exception('JSON decode error: %s' % (body,))
+        raise ex
+    else:
+      return body
+
+
+  def get(self, relpath=None, params=None):
+    """
+    Invoke the GET method on a resource.
+    @param relpath: Optional. A path relative to this resource's path.
+    @param params: Key-value data.
+
+    @return: A dictionary of the JSON result.
+    """
+    for retry in xrange(self.retries + 1):
+      if retry:
+        time.sleep(self.retry_sleep)
+      try:
+        return self.invoke("GET", relpath, params)
+      except (socket.error, urllib2.URLError) as e:
+        if "timed out" in str(e).lower():
+          log_message = "Timeout issuing GET request for %s." \
+              % (self._join_uri(relpath), )
+          if retry < self.retries:
+            log_message += " Will retry."
+          else:
+            log_message += " No retries left."
+          LOG.warn(log_message, exc_info=True)
+        else:
+          raise
+    else:
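+      # Only reached when every attempt timed out without returning;
+      # re-raise the last timeout error caught above.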
+      raise e
+
+
+  def delete(self, relpath=None, params=None):
+    """
+    Invoke the DELETE method on a resource.
+    @param relpath: Optional. A path relative to this resource's path.
+    @param params: Key-value data.
+
+    @return: A dictionary of the JSON result.
+    """
+    return self.invoke("DELETE", relpath, params)
+
+
+  def post(self, relpath=None, params=None, data=None, contenttype=None):
+    """
+    Invoke the POST method on a resource.
+    @param relpath: Optional. A path relative to this resource's path.
+    @param params: Key-value data.
+    @param data: Optional. Body of the request.
+    @param contenttype: Optional. Content-Type header value for the request body.
+
+    @return: A dictionary of the JSON result.
+    """
+    return self.invoke("POST", relpath, params, data,
+                       self._make_headers(contenttype))
+
+
+  def put(self, relpath=None, params=None, data=None, contenttype=None):
+    """
+    Invoke the PUT method on a resource.
+    @param relpath: Optional. A path relative to this resource's path.
+    @param params: Key-value data.
+    @param data: Optional. Body of the request.
+    @param contenttype: Optional. Content-Type header value for the request body.
+
+    @return: A dictionary of the JSON result.
+    """
+    return self.invoke("PUT", relpath, params, data,
+                       self._make_headers(contenttype))
+
+
+  def _make_headers(self, contenttype=None):
+    if contenttype:
+      return { 'Content-Type': contenttype }
+    return None
diff --git a/cm-api/src/cm_api_tests/__init__.py b/cm-api/src/cm_api_tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cm-api/src/cm_api_tests/test_baseapiobject.py b/cm-api/src/cm_api_tests/test_baseapiobject.py
new file mode 100644 (file)
index 0000000..a068873
--- /dev/null
@@ -0,0 +1,109 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import unittest
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class Child(BaseApiObject):
+  def _get_attributes(self):
+    return { 'value' : None }
+
+class Parent(BaseApiObject):
+  def _get_attributes(self):
+    return {
+      'child'     : Attr(Child),
+      'children'  : Attr(Child),
+      'date'      : Attr(datetime.datetime),
+      'readOnly'  : ROAttr(),
+    }
+
+class Dummy(BaseApiObject):
+  _ATTRIBUTES = {
+    'foo' : None,
+    'bar' : None,
+  }
+
+class TestBaseApiObject(unittest.TestCase):
+
+  def test_props(self):
+    obj = Parent(None)
+    obj.child = Child(None)
+    obj.children = [ ]
+    obj.date = datetime.datetime.now()
+
+    # Setting read-only attribute.
+    with self.assertRaises(AttributeError):
+      obj.readOnly = False
+
+    # Setting unknown attribute.
+    with self.assertRaises(AttributeError):
+      obj.unknown = 'foo'
+
+  def test_serde(self):
+    JSON = '''
+      {
+        "child" : { "value" : "string1" },
+        "children" : [
+          { "value" : 1 },
+          { "value" : "2" }
+        ],
+        "date" : "2013-02-12T12:17:15.831765Z",
+        "readOnly" : true
+      }
+    '''
+    obj = utils.deserialize(JSON, Parent)
+    self.assertIsInstance(obj.child, Child)
+    self.assertEqual('string1', obj.child.value)
+    self.assertIsInstance(obj.children, list)
+    self.assertEqual(2, len(obj.children))
+    self.assertEqual(1, obj.children[0].value)
+    self.assertEqual('2', obj.children[1].value)
+    self.assertIsInstance(obj.date, datetime.datetime)
+    self.assertEqual(2013, obj.date.year)
+    self.assertEqual(2, obj.date.month)
+    self.assertEqual(12, obj.date.day)
+    self.assertEqual(12, obj.date.hour)
+    self.assertEqual(17, obj.date.minute)
+    self.assertEqual(15, obj.date.second)
+    self.assertEqual(831765, obj.date.microsecond)
+    self.assertTrue(obj.readOnly)
+
+    JSON = '''
+      {
+        "children" : [ ]
+      }
+    '''
+    obj = utils.deserialize(JSON, Parent)
+    self.assertEquals([], obj.children)
+
+  def test_init(self):
+    obj = Parent(None)
+    self.assertTrue(hasattr(obj, 'child'))
+    self.assertTrue(hasattr(obj, 'readOnly'))
+
+    obj = Parent(None, date=datetime.datetime.now())
+    self.assertIsInstance(obj.date, datetime.datetime)
+
+    self.assertRaises(AttributeError, Parent, None, readOnly=True)
+
+  def test_empty_property(self):
+    dummy = Dummy(None)
+    dummy.foo = 'foo'
+    json = dummy.to_json_dict()
+    self.assertEqual('foo', json['foo'])
+    self.assertFalse(json.has_key('bar'))
diff --git a/cm-api/src/cm_api_tests/test_baseapiresource.py b/cm-api/src/cm_api_tests/test_baseapiresource.py
new file mode 100644 (file)
index 0000000..47bf3a9
--- /dev/null
@@ -0,0 +1,40 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestApiResource(BaseApiResource):
+
+  def _path(self):
+    return ''
+
+  def return_list(self):
+    return self._get('return_list', ApiHostRef)
+
+class TestBaseApiResource(unittest.TestCase):
+
+  def test_return_list(self):
+    '''
+    Test that APIs that return raw lists work as expected.
+    '''
+    resource = utils.MockResource(self)
+    expected = [ ApiHostRef(resource, 'foo').to_json_dict() ]
+    resource.expect("GET", "/return_list", retdata=expected)
+    ret = TestApiResource(resource).return_list()
+    self.assertEqual(len(expected), len(ret))
+    self.assertIsInstance(ret[0], ApiHostRef)
diff --git a/cm-api/src/cm_api_tests/test_batch.py b/cm-api/src/cm_api_tests/test_batch.py
new file mode 100644 (file)
index 0000000..30795f8
--- /dev/null
@@ -0,0 +1,41 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints import batch
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestBatch(unittest.TestCase):
+
+  def test_execute_batch(self):
+    resource = utils.MockResource(self)
+    elems = []
+    elems.append(ApiBatchRequestElement(resource,
+                                        method='GET',
+                                        url='/1/2/3'))
+    elems.append(ApiBatchRequestElement(resource,
+                                        method='POST',
+                                        url='/4/5/6/7',
+                                        body='asdf'))
+    resource.expect("POST", "/batch",
+                    data=elems,
+                    retdata={ 'success' : False, 'items' : [] })
+    ret = batch.do_batch(resource, elems)
+    self.assertIsInstance(ret, ApiBatchResponseList)
+    self.assertIsInstance(ret.success, bool)
+    self.assertFalse(ret.success)
+    self.assertEquals(0, len(ret))
diff --git a/cm-api/src/cm_api_tests/test_clusters.py b/cm-api/src/cm_api_tests/test_clusters.py
new file mode 100644 (file)
index 0000000..9e66f81
--- /dev/null
@@ -0,0 +1,58 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.clusters import *
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestCluster(unittest.TestCase):
+
+  def test_add_hosts(self):
+    resource = utils.MockResource(self)
+    cluster = ApiCluster(resource, name="foo")
+
+    data = ApiList([ ApiHostRef(resource, hostId='foo') ])
+
+    resource.expect("POST", "/clusters/foo/hosts",
+        data=data,
+        retdata={ 'items' : [ { 'hostId' : 'foo' } ] })
+    cluster.add_hosts(['foo'])
+
+  def test_update_cdh_version(self):
+    resource = utils.MockResource(self)
+    cluster = ApiCluster(resource, name="foo")
+
+    data = ApiCluster(resource, name='foo', fullVersion='4.2.1')
+
+    resource.expect("PUT", "/clusters/foo",
+        data=data,
+        retdata={ 'name' : 'foo'})
+    cluster.update_cdh_version('4.2.1')
+
+  def test_upgrade_cdh(self):
+    resource = utils.MockResource(self)
+    cluster = ApiCluster(resource, name="foo")
+
+    data = dict()
+    data['deployClientConfig'] = False
+    data['startAllServices'] = True
+    data['cdhParcelVersion'] = '5.0.0.1-cdh5-1.2.3'
+
+    resource.expect("POST", "/clusters/foo/commands/upgradeCdh",
+        data=data,
+        retdata={ 'name' : 'foo'})
+    cluster.upgrade_cdh(False, True, data['cdhParcelVersion'])
diff --git a/cm-api/src/cm_api_tests/test_cms.py b/cm-api/src/cm_api_tests/test_cms.py
new file mode 100644 (file)
index 0000000..8b45aff
--- /dev/null
@@ -0,0 +1,94 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.cms import ClouderaManager
+from cm_api.endpoints.types import config_to_json, ApiConfig
+from cm_api_tests import utils
+
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+class TestCMS(unittest.TestCase):
+
+  def test_all_hosts_config(self):
+    SUMMARY = """
+      {
+        "items" : [ {
+          "name" : "blacklisted_parcel_products",
+          "value" : "foo,bar"
+        } ]
+      }
+      """
+    FULL = """
+      {
+        "items" : [ {
+          "name" : "blacklisted_parcel_products",
+          "value" : "foo,bar",
+          "required" : false,
+          "default" : "",
+          "displayName" : "Blacklisted Products",
+          "description" : "Parcels for blacklisted products will not be distributed to the host, nor activated for process execution. Already distributed parcels will be undistributed. Already running process will not be affected until the next restart.",
+          "validationState" : "OK"
+        }, {
+          "name" : "rm_enabled",
+          "required" : false,
+          "default" : "false",
+          "displayName" : "Enable Resource Management",
+          "description" : "Enables resource management for all roles on this host.",
+          "validationState" : "OK"
+        } ]
+      }
+      """
+
+    resource = utils.MockResource(self)
+    cms = ClouderaManager(resource)
+
+    resource.expect("GET", "/cm/allHosts/config", retdata=json.loads(SUMMARY))
+    cfg = cms.get_all_hosts_config()
+    self.assertIsInstance(cfg, dict)
+    self.assertEqual(1, len(cfg))
+    self.assertEqual('foo,bar', cfg.get('blacklisted_parcel_products'))
+
+    resource.expect("GET", "/cm/allHosts/config", params={ 'view' : 'full' },
+        retdata=json.loads(FULL))
+    cfg = cms.get_all_hosts_config(view='full')
+    self.assertIsInstance(cfg, dict)
+    self.assertEqual(2, len(cfg))
+    self.assertIsInstance(cfg['blacklisted_parcel_products'], ApiConfig)
+    self.assertFalse(cfg['blacklisted_parcel_products'].required)
+    self.assertEqual('OK', cfg['rm_enabled'].validationState)
+
+    cfg = { 'blacklisted_parcel_products' : 'bar' }
+    resource.expect("PUT", "/cm/allHosts/config", data=config_to_json(cfg),
+        retdata=json.loads(SUMMARY))
+    cms.update_all_hosts_config(cfg)
+
+  def test_host_commission(self):
+    resource = utils.MockResource(self)
+    cms = ClouderaManager(resource)
+
+    resource.expect("POST", "/cm/commands/hostsDecommission",
+        data=[ "host1", "host2" ],
+        retdata={})
+    cms.hosts_decommission([ "host1", "host2" ])
+
+    resource.expect("POST", "/cm/commands/hostsRecommission",
+        data=[ "host1", "host2" ],
+        retdata={})
+    cms.hosts_recommission([ "host1", "host2" ])
diff --git a/cm-api/src/cm_api_tests/test_dashboards.py b/cm-api/src/cm_api_tests/test_dashboards.py
new file mode 100644 (file)
index 0000000..b614e10
--- /dev/null
@@ -0,0 +1,69 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.dashboards import *
+from cm_api_tests import utils
+
+class TestDashboards(unittest.TestCase):
+
+  @staticmethod
+  def get_test_dashboard():
+    return  "{\"name\": \"MyDash\",\"json\":\"jsonBlob\"}"
+
+  @staticmethod
+  def get_test_dashboard_list():
+    return "{\"items\": [" + TestDashboards.get_test_dashboard() + "]}"
+
+  def test_get_dashboards(self):
+    resource = utils.MockResource(self)
+    resource.expect("GET", "/timeseries/dashboards",
+      retdata=json.loads(TestDashboards.get_test_dashboard_list()),
+      params=None)
+
+    resp = get_dashboards(resource)
+    self.assertEqual(1, len(resp))
+
+  def test_get_dashboard(self):
+    resource = utils.MockResource(self)
+    resource.expect("GET", "/timeseries/dashboards/myDash",
+      retdata=json.loads(TestDashboards.get_test_dashboard()),
+      params=None)
+
+    resp = get_dashboard(resource, "myDash")
+    self.assertIsInstance(resp, ApiDashboard)
+    self.assertEqual("MyDash", resp.name)
+    self.assertEqual("jsonBlob", resp.json)
+
+  def test_create_dashboards(self):
+    resource = utils.MockResource(self)
+    dashboard = ApiDashboard("newDash", "newJsonBlob")
+    resource.expect("POST", "/timeseries/dashboards",
+      retdata=json.loads(TestDashboards.get_test_dashboard_list()),
+      params=None,
+      data=[dashboard])
+
+    resp = create_dashboards(resource, [dashboard])
+    self.assertEquals(1, len(resp))
+
+  def test_delete_dashboard(self):
+    resource = utils.MockResource(self)
+    resource.expect("DELETE", "/timeseries/dashboards/oldDash",
+      retdata=json.loads(TestDashboards.get_test_dashboard()),
+      params=None)
+
+    resp = delete_dashboard(resource, "oldDash")
+
diff --git a/cm-api/src/cm_api_tests/test_events.py b/cm-api/src/cm_api_tests/test_events.py
new file mode 100644 (file)
index 0000000..29f352a
--- /dev/null
@@ -0,0 +1,37 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.events import *
+from cm_api_tests import utils
+
+class TestEvents(unittest.TestCase):
+
+  def test_event_query(self):
+    res = utils.MockResource(self)
+
+    query = 'foo'
+    event = ApiEvent(res)
+    event.__dict__['content'] = 'bar'
+    expected = ApiEventQueryResult([ event ])
+    expected.__dict__['totalResults'] = 42
+
+    res.expect('GET', '/events', params=dict(query=query),
+        retdata=expected.to_json_dict(True))
+    ret = query_events(res, query)
+    self.assertEqual(1, len(ret))
+    self.assertEqual(expected[0].content, ret[0].content)
+    self.assertEqual(42, expected.totalResults)
diff --git a/cm-api/src/cm_api_tests/test_host_template.py b/cm-api/src/cm_api_tests/test_host_template.py
new file mode 100644 (file)
index 0000000..63ab697
--- /dev/null
@@ -0,0 +1,41 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.host_templates import *
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestHostTemplates(unittest.TestCase):
+
+  def test_update(self):
+    res = utils.MockResource(self)
+
+    cluster = ApiClusterRef(res, clusterName='c1')
+    tmpl = ApiHostTemplate(res, name='foo')
+    tmpl.__dict__['clusterRef'] = cluster
+
+    rcgs = [
+      ApiRoleConfigGroupRef(res, roleConfigGroupName='rcg1'),
+      ApiRoleConfigGroupRef(res, roleConfigGroupName='rcg2'),
+    ]
+    expected = ApiHostTemplate(res, name='foo', roleConfigGroupRefs=rcgs)
+
+    res.expect('PUT', '/clusters/c1/hostTemplates/foo',
+        data=expected,
+        retdata=expected.to_json_dict())
+    ret = tmpl.set_role_config_groups(rcgs)
+    self.assertEqual(len(rcgs), len(ret.roleConfigGroupRefs))
diff --git a/cm-api/src/cm_api_tests/test_impala.py b/cm-api/src/cm_api_tests/test_impala.py
new file mode 100644 (file)
index 0000000..45f7b05
--- /dev/null
@@ -0,0 +1,72 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import unittest
+from cm_api.endpoints.clusters import *
+from cm_api.endpoints.services import *
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestImpala(unittest.TestCase):
+
+  def test_get_queries(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    time = datetime.datetime.now()
+
+    resource.expect("GET", "/cm/service/impalaQueries",
+        retdata={ 'queries': [], 'warnings' : [] },
+        params={ 'from':time.isoformat(), 'to':time.isoformat(), \
+            'filter':'', 'limit':100, 'offset':0 })
+    resp = service.get_impala_queries(time, time)
+    self.assertEquals(0, len(resp.queries))
+
+  def test_get_details(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    resource.expect("GET", "/cm/service/impalaQueries/randomId",
+        retdata={ 'details': '' },
+        params={ 'format':'text' } )
+    resp = service.get_query_details('randomId')
+    self.assertEquals('', resp.details)
+
+  def test_cancel_query(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    resource.expect("POST", "/cm/service/impalaQueries/randomId/cancel",
+        retdata={ 'warning' : 'test' })
+    resp = service.cancel_impala_query('randomId')
+    self.assertEquals('test', resp.warning)
+
+  def test_attributes(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    resource.expect("GET", "/cm/service/impalaQueries/attributes",
+        retdata=[{ 'name' : 'test',
+                  'type' : 'STRING',
+                  'displayName' : 'testDisplayName',
+                  'supportsHistograms' : True,
+                  'description' : 'testDescription' }])
+    resp = service.get_impala_query_attributes()
+    self.assertEquals(1, len(resp))
+    attr = resp[0]
+    self.assertIsInstance(attr, ApiImpalaQueryAttribute)
+    self.assertEquals('test', attr.name)
diff --git a/cm-api/src/cm_api_tests/test_replication.py b/cm-api/src/cm_api_tests/test_replication.py
new file mode 100644 (file)
index 0000000..e452185
--- /dev/null
@@ -0,0 +1,301 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import random
+import unittest
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+from cm_api.endpoints.types import *
+from cm_api.endpoints.services import ApiService
+from cm_api_tests import utils
+
+class TestReplicationTypes(unittest.TestCase):
+
+  def test_hdfs_arguments(self):
+    RAW = '''{
+      "sourceService" : {
+        "peerName" : "vst2",
+        "clusterName" : "Cluster 1 - CDH4",
+        "serviceName" : "HDFS-1"
+      },
+      "sourcePath" : "/data",
+      "destinationPath" : "/copy/data2",
+      "mapreduceServiceName" : "MAPREDUCE-1",
+      "schedulerPoolName" : "medium",
+      "userName" : "systest",
+      "dryRun" : false,
+      "abortOnError" : true,
+      "removeMissingFiles" : false,
+      "preserveReplicationCount" : true,
+      "preserveBlockSize" : true,
+      "preservePermissions" : false
+    }'''
+    args = utils.deserialize(RAW, ApiHdfsReplicationArguments)
+    self.assertEquals('vst2', args.sourceService.peerName)
+    self.assertEquals('Cluster 1 - CDH4', args.sourceService.clusterName)
+    self.assertEquals('HDFS-1', args.sourceService.serviceName)
+    self.assertEquals('/data', args.sourcePath)
+    self.assertEquals('/copy/data2', args.destinationPath)
+    self.assertEquals('MAPREDUCE-1', args.mapreduceServiceName)
+    self.assertEquals('medium', args.schedulerPoolName)
+    self.assertEquals('systest', args.userName)
+    self.assertFalse(args.dryRun)
+    self.assertTrue(args.abortOnError)
+    self.assertFalse(args.removeMissingFiles)
+    self.assertTrue(args.preserveBlockSize)
+    self.assertFalse(args.preservePermissions)
+    self.assertTrue(args.preserveReplicationCount)
+
+  def test_hive_arguments(self):
+    RAW = '''{
+      "sourceService" : {
+        "peerName" : "vst2",
+        "clusterName" : "Cluster 1 - CDH4",
+        "serviceName" : "HIVE-1"
+      },
+      "force" : true,
+      "replicateData" : true,
+      "hdfsArguments" : {
+        "mapreduceServiceName" : "MAPREDUCE-1",
+        "dryRun" : false,
+        "abortOnError" : false,
+        "removeMissingFiles" : false,
+        "preserveReplicationCount" : false,
+        "preserveBlockSize" : false,
+        "preservePermissions" : false
+      },
+      "tableFilters" : [
+        { "database" : "db1", "tableName" : "table1" }
+      ],
+      "dryRun" : false
+    }'''
+    args = utils.deserialize(RAW, ApiHiveReplicationArguments)
+    self.assertEquals('vst2', args.sourceService.peerName)
+    self.assertEquals('Cluster 1 - CDH4', args.sourceService.clusterName)
+    self.assertEquals('HIVE-1', args.sourceService.serviceName)
+    self.assertTrue(args.force)
+    self.assertTrue(args.replicateData)
+    self.assertIsInstance(args.hdfsArguments, ApiHdfsReplicationArguments)
+    self.assertIsInstance(args.tableFilters, list)
+    self.assertEquals(1, len(args.tableFilters))
+    self.assertIsInstance(args.tableFilters[0], ApiHiveTable)
+    self.assertEquals("db1", args.tableFilters[0].database)
+    self.assertEquals("table1", args.tableFilters[0].tableName)
+
+  def test_hive_results(self):
+    RAW = '''{
+      "phase" : "EXPORT",
+      "tableCount" : 1,
+      "tables" : [
+        { "database" : "db1", "tableName" : "table1" }
+      ],
+      "impalaUDFCount" : 1,
+      "impalaUDFs" : [
+        { "database" : "db1", "signature" : "func1(STRING)" }
+      ],
+      "errorCount" : 1,
+      "errors" : [
+        { "database" : "db1", "tableName" : "table2",
+          "impalaUDF" : "func2(INT)", "error" : "message" }
+      ],
+      "dataReplicationResult" : {
+        "progress" : 50
+      },
+      "dryRun" : false
+    }'''
+    res = utils.deserialize(RAW, ApiHiveReplicationResult)
+    self.assertEquals('EXPORT', res.phase)
+    self.assertEquals(1, res.tableCount)
+    self.assertEquals(1, len(res.tables))
+    self.assertEquals('db1', res.tables[0].database)
+    self.assertEquals('table1', res.tables[0].tableName)
+    self.assertEquals(1, res.impalaUDFCount)
+    self.assertEquals(1, len(res.impalaUDFs))
+    self.assertEquals('db1', res.impalaUDFs[0].database)
+    self.assertEquals('func1(STRING)', res.impalaUDFs[0].signature)
+    self.assertEquals(1, res.errorCount)
+    self.assertEquals('db1', res.errors[0]['database'])
+    self.assertEquals('table2', res.errors[0]['tableName'])
+    self.assertEquals('func2(INT)', res.errors[0]['impalaUDF'])
+    self.assertEquals('message', res.errors[0]['error'])
+    self.assertEquals(50, res.dataReplicationResult.progress)
+    self.assertFalse(res.dryRun)
+
+  def test_schedule(self):
+    RAW = '''{
+      "id" : 39,
+      "startTime" : "2012-12-10T23:11:31.041Z",
+      "interval" : 1,
+      "intervalUnit" : "DAY",
+      "paused" : false,
+      "nextRun" : "2013-01-15T23:11:31.041Z",
+      "history" : [ {
+        "id" : 738,
+        "name" : "HiveReplicationCommand",
+        "startTime" : "2013-01-15T18:28:24.895Z",
+        "endTime" : "2013-01-15T18:30:49.446Z",
+        "active" : false,
+        "success" : true,
+        "resultMessage" : "Hive Replication Finished Successfully.",
+        "resultDataUrl" : "/cmf/command/738/download",
+        "serviceRef" : {
+          "clusterName" : "Cluster 1 - CDH4",
+          "serviceName" : "HIVE-1"
+        },
+        "hiveResult" : {
+          "tableCount" : 3,
+          "tables" : [ {
+            "database" : "default",
+            "tableName" : "repl_test_1"
+          }, {
+            "database" : "default",
+            "tableName" : "sample_07"
+          }, {
+            "database" : "default",
+            "tableName" : "sample_08"
+          } ],
+          "errorCount" : 0,
+          "errors" : [ ],
+          "dataReplicationResult" : {
+            "progress" : 100,
+            "numFilesCopied" : 0,
+            "numBytesCopied" : 0,
+            "numFilesSkipped" : 3,
+            "numBytesSkipped" : 92158,
+            "numFilesDeleted" : 0,
+            "numFilesCopyFailed" : 0,
+            "numBytesCopyFailed" : 0,
+            "dryRun" : false
+          },
+          "dryRun" : false
+        }
+      } ],
+      "alertOnStart" : false,
+      "alertOnSuccess" : false,
+      "alertOnFail" : false,
+      "alertOnAbort" : false,
+      "hiveArguments" : {
+        "sourceService" : {
+          "peerName" : "vst2",
+          "clusterName" : "Cluster 1 - CDH4",
+          "serviceName" : "HIVE-1"
+        },
+        "force" : true,
+        "replicateData" : true,
+        "hdfsArguments" : {
+          "mapreduceServiceName" : "MAPREDUCE-1",
+          "dryRun" : false,
+          "abortOnError" : false,
+          "removeMissingFiles" : false,
+          "preserveReplicationCount" : false,
+          "preserveBlockSize" : false,
+          "preservePermissions" : false
+        },
+        "dryRun" : false
+      }
+    }'''
+    sched = utils.deserialize(RAW, ApiReplicationSchedule)
+    self.assertEqual(39, sched.id)
+    self.assertEqual(self._parse_time("2012-12-10T23:11:31.041Z"), sched.startTime)
+    self.assertEqual('DAY', sched.intervalUnit)
+    self.assertEqual(1, sched.interval)
+    self.assertFalse(sched.paused)
+    self.assertEqual(self._parse_time("2013-01-15T23:11:31.041Z"), sched.nextRun)
+    self.assertFalse(sched.alertOnStart)
+    self.assertIsNotNone(sched.hiveArguments)
+
+    self.assertEqual(1, len(sched.history))
+    self.assertIsInstance(sched.history[0], ApiReplicationCommand)
+    self.assertEqual('default', sched.history[0].hiveResult.tables[0].database)
+    self.assertEqual(92158, sched.history[0].hiveResult.dataReplicationResult.numBytesSkipped)
+    self.assertEqual(3, sched.history[0].hiveResult.tableCount)
+    self.assertEqual(0, sched.history[0].hiveResult.errorCount)
+
+  def test_peers(self):
+    RAW = '''{
+      "name" : "peer1",
+      "url" : "http://peer1",
+      "username" : "user1",
+      "password" : "pwd"
+    }'''
+    peer = ApiCmPeer.from_json_dict(json.loads(RAW), None)
+    self.assertEquals("peer1", peer.name)
+    self.assertEquals("http://peer1", peer.url)
+    self.assertEquals("user1", peer.username)
+    self.assertEquals("pwd", peer.password)
+
+  def _parse_time(self, tstr):
+    return datetime.datetime.strptime(tstr, Attr.DATE_FMT)
+
+
+class TestReplicationRequests(unittest.TestCase):
+
+  def __init__(self, methodName):
+    super(TestReplicationRequests, self).__init__(methodName)
+    self.resource = utils.MockResource(self)
+
+  def test_replication_crud(self):
+    service = ApiService(self.resource, 'hdfs1', 'HDFS')
+    service.__dict__['clusterRef'] = ApiClusterRef(self.resource, clusterName='cluster1')
+
+    hdfs_args = ApiHdfsReplicationArguments(self.resource)
+    hdfs_args.sourceService = ApiServiceRef('cluster2', 'hdfs2')
+    hdfs_args.sourcePath = '/src'
+    hdfs_args.destinationPath = '/dst'
+
+    return_sched = ApiReplicationSchedule(self.resource,
+        interval=2, intervalUnit='DAY')
+    return_sched.hdfsArguments = hdfs_args
+    return_sched.__dict__['id'] = 1
+    return_list = ApiList([ return_sched ]).to_json_dict()
+
+    self.resource.expect("POST",
+        "/clusters/cluster1/services/hdfs1/replications",
+        retdata=return_list)
+
+    sched = service.create_replication_schedule(
+        None, None, 'DAY', 2, True, hdfs_args, alert_on_fail=True)
+    self.assertEqual(return_sched.intervalUnit, sched.intervalUnit)
+    self.assertEqual(return_sched.interval, sched.interval)
+    self.assertIsInstance(sched.hdfsArguments, ApiHdfsReplicationArguments)
+
+    self.resource.expect("GET",
+        "/clusters/cluster1/services/hdfs1/replications",
+        retdata=return_list)
+    service.get_replication_schedules()
+
+    self.resource.expect("GET",
+        "/clusters/cluster1/services/hdfs1/replications/1",
+        retdata=return_sched.to_json_dict())
+    service.get_replication_schedule(1)
+
+    self.resource.expect("PUT",
+        "/clusters/cluster1/services/hdfs1/replications/1",
+        data=return_sched,
+        retdata=return_sched.to_json_dict())
+    service.update_replication_schedule(1, return_sched)
+
+    self.resource.expect("DELETE",
+        "/clusters/cluster1/services/hdfs1/replications/1",
+        retdata=return_sched.to_json_dict())
+    service.delete_replication_schedule(1)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/cm-api/src/cm_api_tests/test_services.py b/cm-api/src/cm_api_tests/test_services.py
new file mode 100644 (file)
index 0000000..79b1ee9
--- /dev/null
@@ -0,0 +1,47 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.clusters import ApiCluster
+from cm_api.endpoints.services import *
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestService(unittest.TestCase):
+
+  def __init__(self, methodName):
+    unittest.TestCase.__init__(self, methodName)
+    self.resource = utils.MockResource(self)
+    self.service = ApiService(self.resource, 'hdfs1', 'HDFS')
+    self.service.__dict__['clusterRef'] = \
+        ApiClusterRef(self.resource, clusterName='cluster1')
+
+  def test_create_hdfs_tmp(self):
+    self.resource.expect("POST", "/clusters/cluster1/services/hdfs1/commands/hdfsCreateTmpDir",
+        retdata=ApiCommand(self.resource).to_json_dict())
+    self.service.create_hdfs_tmp()
+
+  def test_role_cmd(self):
+    args = ['role1', 'role2']
+    expected = ApiBulkCommandList([ApiCommand(self.resource)])
+    expected.__dict__['errors'] = [ 'err1', 'err2' ]
+
+    self.resource.expect("POST", "/clusters/cluster1/services/hdfs1/roleCommands/start",
+        data=ApiList(args),
+        retdata=expected.to_json_dict(True))
+    ret = self.service.start_roles(*args)
+    self.assertEqual(1, len(ret))
+    self.assertEqual(expected.errors, ret.errors)
diff --git a/cm-api/src/cm_api_tests/test_snapshot.py b/cm-api/src/cm_api_tests/test_snapshot.py
new file mode 100644 (file)
index 0000000..77d5fdc
--- /dev/null
@@ -0,0 +1,260 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import random
+import unittest
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+from cm_api.endpoints.types import *
+from cm_api.endpoints.services import ApiService
+from cm_api_tests import utils
+
+class TestSnapshotTypes(unittest.TestCase):
+  def __init__(self, methodName):
+    super(TestSnapshotTypes, self).__init__(methodName)
+    self.resource = utils.MockResource(self)
+
+  def test_hdfs_arguments(self):
+    RAW = '''{"pathPatterns" : "/user/oozie"}'''
+    args = utils.deserialize(RAW, ApiHdfsSnapshotPolicyArguments)
+    self.assertEquals('/user/oozie', args.pathPatterns)
+
+  def test_hbase_arguments(self):
+    RAW = '''{"tableRegExps" : "table1"}'''
+    args = utils.deserialize(RAW, ApiHBaseSnapshotPolicyArguments)
+    self.assertEquals('table1', args.tableRegExps)
+
+  def test_hbase_snapshot(self):
+    RAW = '''{
+      "snapshotName" : "sn1",
+      "tableName" : "table1",
+      "creationTime" : "2012-12-10T23:11:31.041Z" }'''
+    args = utils.deserialize(RAW, ApiHBaseSnapshot)
+    self.assertEquals('sn1', args.snapshotName)
+    self.assertEquals('table1', args.tableName)
+    self.assertEquals(self._parse_time("2012-12-10T23:11:31.041Z"), args.creationTime)
+
+  def test_hdfs_snapshot(self):
+    RAW = '''{
+      "path" : "/abc",
+      "snapshotName" : "sn1",
+      "snapshotPath" : "/abc/.snapshot/sn1",
+      "creationTime" : "2012-12-10T23:11:31.041Z" }'''
+    args = utils.deserialize(RAW, ApiHdfsSnapshot)
+    self.assertEquals('/abc', args.path)
+    self.assertEquals('sn1', args.snapshotName)
+    self.assertEquals('/abc/.snapshot/sn1', args.snapshotPath)
+    self.assertEquals(self._parse_time("2012-12-10T23:11:31.041Z"), args.creationTime)
+
+  def test_hbase_snapshot_error(self):
+    RAW = '''{
+      "snapshotName" : "sn1",
+      "tableName" : "table1",
+      "error" : "bad snapshot" }'''
+    args = utils.deserialize(RAW, ApiHBaseSnapshotError)
+    self.assertEquals('sn1', args.snapshotName)
+    self.assertEquals('table1', args.tableName)
+    self.assertEquals('bad snapshot', args.error)
+
+  def test_hdfs_snapshot_error(self):
+    RAW = '''{
+      "snapshotPath" : "/abc/.snapshot/sn1",
+      "snapshotName" : "sn1",
+      "path" : "/abc",
+      "error" : "bad snapshot" }'''
+    args = utils.deserialize(RAW, ApiHdfsSnapshotError)
+    self.assertEquals('/abc/.snapshot/sn1', args.snapshotPath)
+    self.assertEquals('/abc', args.path)
+    self.assertEquals('sn1', args.snapshotName)
+    self.assertEquals('bad snapshot', args.error)
+
+  def test_hbase_snapshot_result(self):
+    RAW = '''{
+      "processedTableCount" : 5,
+      "processedTables"     : ["t1", "t2", "t3", "t4", "t5"],
+      "unprocessedTableCount" : "2",
+      "unprocessedTables" : ["nt1", "nt2"],
+      "createdSnapshotCount" : 5,
+      "createdSnapshots" : [
+          {"snapshotName" : "sn1",
+          "tableName" : "t1",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn2",
+          "tableName" : "t2",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn3",
+          "tableName" : "t3",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn4",
+          "tableName" : "t4",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn5",
+          "tableName" : "t5",
+          "creationTime" : "2012-12-10T23:11:31.041Z"}],
+      "deletedSnapshotCount" : 1,
+      "deletedSnapshots" : [
+          {"snapshotName" : "dn1",
+          "tableName" : "t1",
+          "creationTime" : "2012-12-10T23:11:31.041Z"}],
+      "creationErrorCount" : 1,
+      "creationErrors" : [{
+          "snapshotName" : "sn1",
+          "tableName" : "table1",
+          "error" : "bad snapshot"}],
+      "deletionErrorCount" : 0,
+      "deletionErrors" : []
+       }'''
+
+    args = utils.deserialize(RAW, ApiHBaseSnapshotResult)
+    self.assertEquals(5, args.processedTableCount)
+    self.assertEquals(["t1", "t2", "t3", "t4", "t5"], args.processedTables)
+    self.assertEquals('2', args.unprocessedTableCount)
+    self.assertEquals(['nt1', 'nt2'], args.unprocessedTables)
+    self.assertEquals(5, args.createdSnapshotCount)
+    self.assertEquals('t3', args.createdSnapshots[2].tableName)
+    self.assertEquals(1, args.deletedSnapshotCount)
+    self.assertEquals('dn1', args.deletedSnapshots[0].snapshotName)
+    self.assertEquals(1, args.creationErrorCount)
+    self.assertEquals("bad snapshot", args.creationErrors[0].error)
+    self.assertEquals(0, args.deletionErrorCount)
+    self.assertEquals([], args.deletionErrors)
+
+  def test_hdfs_snapshot_result(self):
+    RAW = '''{
+      "processedPathCount" : 5,
+      "processedPaths"     : ["/t1", "/t2", "/t3", "/t4", "/t5"],
+      "unprocessedPathCount" : "2",
+      "unprocessedPaths" : ["nt1", "nt2"],
+      "createdSnapshotCount" : 5,
+      "createdSnapshots" : [
+          {"snapshotName" : "sn1",
+          "snapshotPath" : "/t1/.snapshot/sn1",
+          "path" : "/t1",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn2",
+          "snapshotPath" : "/t1/.snapshot/sn1",
+          "path" : "/t2",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn3",
+          "snapshotPath" : "/t1/.snapshot/sn1",
+          "path" : "/t3",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn4",
+          "snapshotPath" : "/t1/.snapshot/sn1",
+          "path" : "/t4",
+          "creationTime" : "2012-12-10T23:11:31.041Z"},
+          {"snapshotName" : "sn5",
+          "snapshotPath" : "/t1/.snapshot/sn1",
+          "path" : "/t5",
+          "creationTime" : "2012-12-10T23:11:31.041Z"}],
+      "deletedSnapshotCount" : 1,
+      "deletedSnapshots" : [
+          {"snapshotName" : "dn1",
+          "path" : "/t1",
+          "snapshotPath" : "/t1/.snapshot/dn1",
+          "creationTime" : "2012-12-10T23:11:31.041Z"}],
+      "creationErrorCount" : 1,
+      "creationErrors" : [{
+          "snapshotName" : "sn1",
+          "path" : "/t1",
+          "snapshotPath" : "/t1/.snapshot/sn1",
+          "error" : "bad snapshot"}],
+      "deletionErrorCount" : 0,
+      "deletionErrors" : []
+       }'''
+
+    args = utils.deserialize(RAW, ApiHdfsSnapshotResult)
+    self.assertEquals(5, args.processedPathCount)
+    self.assertEquals(["/t1", "/t2", "/t3", "/t4", "/t5"], args.processedPaths)
+    self.assertEquals('2', args.unprocessedPathCount)
+    self.assertEquals(['nt1', 'nt2'], args.unprocessedPaths)
+    self.assertEquals(5, args.createdSnapshotCount)
+    self.assertEquals('/t3', args.createdSnapshots[2].path)
+    self.assertEquals(1, args.deletedSnapshotCount)
+    self.assertEquals('dn1', args.deletedSnapshots[0].snapshotName)
+    self.assertEquals(1, args.creationErrorCount)
+    self.assertEquals("bad snapshot", args.creationErrors[0].error)
+    self.assertEquals(0, args.deletionErrorCount)
+    self.assertEquals([], args.deletionErrors)
+
+
+  def _parse_time(self, tstr):
+    return datetime.datetime.strptime(tstr, Attr.DATE_FMT)
+
+
+class TestSnapshotRequests(unittest.TestCase):
+
+  def __init__(self, methodName):
+    super(TestSnapshotRequests, self).__init__(methodName)
+    self.resource = utils.MockResource(self)
+
+  def test_snapshot(self):
+    service = ApiService(self.resource, 'hdfs1', 'HDFS')
+    service.__dict__['clusterRef'] = ApiClusterRef(self.resource, clusterName='cluster1')
+
+    hdfs_args = ApiHdfsSnapshotPolicyArguments(self.resource, pathPatterns='/user/oozie')
+
+    return_policy = ApiSnapshotPolicy(self.resource, name='sn1',
+                                   weeklySnapshots=2,
+                                   hdfsArguments=hdfs_args)
+    return_policy.__dict__['id'] = 1
+    return_list = ApiList([ return_policy ]).to_json_dict()
+    self.resource.expect("POST",
+        "/clusters/cluster1/services/hdfs1/snapshots/policies",
+        retdata=return_list)
+
+    policy = service.create_snapshot_policy(return_policy)
+    self._test_policy(return_policy, policy)
+
+    self.resource.expect("GET",
+        "/clusters/cluster1/services/hdfs1/snapshots/policies",
+        retdata=return_list)
+    policies = service.get_snapshot_policies()
+    self.assertEqual(1, len(policies))
+    self._test_policy(return_policy, policies[0])
+
+    self.resource.expect("GET",
+        "/clusters/cluster1/services/hdfs1/snapshots/policies/sn1",
+        retdata=return_policy.to_json_dict())
+    self._test_policy(return_policy, service.get_snapshot_policy("sn1"))
+
+    return_policy.dayOfWeek = 5
+    self.resource.expect("PUT",
+        "/clusters/cluster1/services/hdfs1/snapshots/policies/sn1",
+        data=return_policy,
+        retdata=return_policy.to_json_dict())
+    policy = service.update_snapshot_policy('sn1', return_policy)
+    self._test_policy(return_policy, policy)
+
+    self.resource.expect("DELETE",
+        "/clusters/cluster1/services/hdfs1/snapshots/policies/sn1",
+        retdata=return_policy.to_json_dict())
+    policy = service.delete_snapshot_policy('sn1')
+    self._test_policy(return_policy, policy)
+
+  def _test_policy(self, exp_policy, policy):
+    self.assertIsInstance(policy, ApiSnapshotPolicy)
+    self.assertEqual(exp_policy.name, policy.name)
+    self.assertIsInstance(policy.hdfsArguments, ApiHdfsSnapshotPolicyArguments)
+    self.assertEqual(exp_policy.hdfsArguments.pathPatterns, policy.hdfsArguments.pathPatterns)
+    self.assertEqual(exp_policy.weeklySnapshots, policy.weeklySnapshots)
+    self.assertEqual(exp_policy.dayOfWeek, policy.dayOfWeek)
+
diff --git a/cm-api/src/cm_api_tests/test_timeseries.py b/cm-api/src/cm_api_tests/test_timeseries.py
new file mode 100644 (file)
index 0000000..27e0368
--- /dev/null
@@ -0,0 +1,215 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import unittest
+from cm_api.endpoints.timeseries import *
+from cm_api.endpoints.types import ApiList
+from cm_api_tests import utils
+
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+class TestTimeSeries(unittest.TestCase):
+
+  def test_query_timeseries(self):
+    TIME_SERIES = '''{
+      "items" : [ {
+        "timeSeries" : [ {
+          "metadata" : {
+            "metricName" : "cpu_percent",
+            "entityName" : "localhost",
+            "startTime" : "2013-05-08T22:58:28.868Z",
+            "endTime" : "2013-05-08T23:03:28.868Z",
+            "attributes" : {
+              "hostname" : "localhost",
+              "hostId" : "localhost",
+              "rackId" : "/default",
+              "category" : "HOST",
+              "entityName" : "localhost"
+            },
+            "unitNumerators" : [ "percent" ],
+            "unitDenominators" : [ ]
+          },
+          "data" : [ {
+            "timestamp" : "2013-05-08T22:59:06.000Z",
+            "value" : 1.7,
+            "type" : "SAMPLE",
+            "aggregateStatistics" : {
+              "sampleTime": "2013-05-08T22:58:06.000Z",
+              "sampleValue": 317,
+              "count": 3,
+              "min": 305,
+              "minTime": "2013-05-08T22:58:06.000Z",
+              "max": 317,
+              "maxTime": "2013-05-08T22:58:06.000Z",
+              "mean": 311.6666666666667,
+              "stdDev": 6.110100926606199,
+              "crossEntityMetadata": {
+                "maxEntityDisplayName": "DATANODE (host1.com)",
+                "minEntityDisplayName": "DATANODE (host2.com)",
+                "numEntities": 3
+              }
+            }
+          }, {
+            "timestamp" : "2013-05-08T23:00:06.000Z",
+            "value" : 3.5,
+            "type" : "SAMPLE",
+            "aggregateStatistics" : {
+              "sampleTime": "2013-05-08T22:58:07.000Z",
+              "sampleValue": 319,
+              "count": 3,
+              "min": 304,
+              "minTime": "2013-05-08T22:58:07.000Z",
+              "max": 319,
+              "maxTime": "2013-05-08T22:58:07.000Z",
+              "mean": 311.6666666666667,
+              "stdDev": 6.110100926606199
+            }
+          }, {
+            "timestamp" : "2013-05-08T23:01:06.000Z",
+            "value" : 2.1,
+            "type" : "SAMPLE"
+          }, {
+            "timestamp" : "2013-05-08T23:02:06.000Z",
+            "value" : 1.5,
+            "type" : "SAMPLE"
+          }, {
+            "timestamp" : "2013-05-08T23:03:06.000Z",
+            "value" : 1.7,
+            "type" : "SAMPLE"
+          } ]
+        } ],
+        "warnings" : [ ],
+        "timeSeriesQuery" : "select cpu_percent"
+      } ]
+    }'''
+
+    api_resource = utils.MockResource(self)
+    time = datetime.datetime.now()
+    api_resource.expect("GET", "/timeseries",
+                        retdata=json.loads(TIME_SERIES),
+                        params={ 'from':time.isoformat(), 'to':time.isoformat(),
+                                 'query':'select cpu_percent'})
+    responses = query_timeseries(api_resource,
+                                 "select cpu_percent",
+                                 time,
+                                 time)
+
+    self.assertIsInstance(responses, ApiList)
+    self.assertEqual(1, len(responses))
+    response = responses[0]
+    self.assertIsInstance(response, ApiTimeSeriesResponse)
+    self.assertEqual("select cpu_percent", response.timeSeriesQuery)
+    self.assertFalse(response.warnings)
+    self.assertFalse(response.errors)
+    self.assertEqual(1, len(response.timeSeries))
+    timeseries = response.timeSeries[0]
+    self.assertIsInstance(timeseries, ApiTimeSeries)
+    metadata = timeseries.metadata
+    self.assertIsInstance(metadata, ApiTimeSeriesMetadata)
+    self.assertEqual("cpu_percent", metadata.metricName)
+    self.assertEqual("localhost", metadata.entityName)
+    self.assertIsInstance(metadata.attributes, dict)
+    self.assertEqual("cpu_percent", metadata.metricName)
+    self.assertEqual(5, len(timeseries.data))
+    for data in timeseries.data:
+      self.assertIsInstance(data, ApiTimeSeriesData)
+      self.assertEqual("SAMPLE", data.type)
+      self.assertIsInstance(data.timestamp, datetime.datetime)
+      self.assertTrue(data.value)
+    # The first and second points carry aggregate statistics.
+    data = timeseries.data[0]
+    self.assertIsNotNone(data.aggregateStatistics)
+    self.assertIsInstance(data.aggregateStatistics, ApiTimeSeriesAggregateStatistics)
+    self.assertEqual(317, data.aggregateStatistics.sampleValue)
+    xEntityMetadata = data.aggregateStatistics.crossEntityMetadata
+    self.assertIsNotNone(xEntityMetadata)
+    self.assertIsInstance(xEntityMetadata, ApiTimeSeriesCrossEntityMetadata)
+    self.assertEqual("DATANODE (host1.com)", xEntityMetadata.maxEntityDisplayName)
+    data = timeseries.data[1]
+    self.assertIsNotNone(data.aggregateStatistics)
+    self.assertIsInstance(data.aggregateStatistics, ApiTimeSeriesAggregateStatistics)
+    self.assertEqual(319, data.aggregateStatistics.sampleValue)
+    xEntityMetadata = data.aggregateStatistics.crossEntityMetadata
+    self.assertIsNone(xEntityMetadata)
+
+    # Test the with-rollups call
+    api_resource.expect("GET", "/timeseries",
+                        retdata=json.loads(TIME_SERIES),
+                        params={ 'from':time.isoformat(), 'to':time.isoformat(),
+                                 'query':'select cpu_percent',
+                                 'desiredRollup':'RAW',
+                                 'mustUseDesiredRollup': True})
+    responses = query_timeseries(api_resource,
+                                 "select cpu_percent",
+                                 time,
+                                 time,
+                                 "RAW",
+                                 True)
+
+
+  def test_get_metric_schema(self):
+    METRICS = '''{
+      "items" : [ {
+        "name" : "event_drain_success_count_flume_sink_min_rate",
+        "isCounter" : false,
+        "unitNumerator" : "events",
+        "unitDenominator" : "seconds",
+        "aliases" : [ ],
+        "sources" : {
+          "CLUSTER" : [ "cdh3", "cdh4" ],
+          "FLUME" : [ "cdh3", "cdh4" ]
+        }
+      }, {
+        "name" : "drop_receive",
+        "isCounter" : true,
+        "unitNumerator" : "packets",
+        "aliases" : [ "network_interface_drop_receive" ],
+        "sources" : {
+          "NETWORK_INTERFACE" : [ "enterprise" ]
+        }
+      } ]
+    }'''
+
+    api_resource = utils.MockResource(self)
+    api_resource.expect("GET", "/timeseries/schema",
+                        retdata=json.loads(METRICS))
+    metrics = get_metric_schema(api_resource)
+
+    self.assertIsInstance(metrics, ApiList)
+    self.assertEqual(2, len(metrics))
+    metric = metrics[0]
+    self.assertIsInstance(metric, ApiMetricSchema)
+    self.assertEqual("event_drain_success_count_flume_sink_min_rate",
+                     metric.name)
+    self.assertFalse(metric.isCounter)
+    self.assertEqual("events", metric.unitNumerator)
+    self.assertEqual("seconds", metric.unitDenominator)
+    self.assertFalse(metric.aliases)
+    self.assertIsInstance(metric.sources, dict)
+    metric = metrics[1]
+    self.assertIsInstance(metric, ApiMetricSchema)
+    self.assertEqual("drop_receive", metric.name)
+    self.assertTrue(metric.isCounter)
+    self.assertEqual("packets", metric.unitNumerator)
+    self.assertFalse( metric.unitDenominator)
+    self.assertIsInstance(metric.aliases, list)
+    self.assertEquals("network_interface_drop_receive", metric.aliases[0])
+    self.assertIsInstance(metric.sources, dict)
+    self.assertEquals("enterprise", metric.sources["NETWORK_INTERFACE"][0])
diff --git a/cm-api/src/cm_api_tests/test_users.py b/cm-api/src/cm_api_tests/test_users.py
new file mode 100644 (file)
index 0000000..368ca87
--- /dev/null
@@ -0,0 +1,49 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from cm_api.endpoints.users import *
+from cm_api_tests import utils
+
+class TestUsers(unittest.TestCase):
+
+  def test_grant_admin(self):
+    res = utils.MockResource(self)
+
+    user = ApiUser(res, "alice")
+    expected = { 'name' : 'alice', 'roles' : [ 'ROLE_ADMIN' ] }
+    res.expect("PUT", "/users/alice", data=expected, retdata=expected)
+    updated = user.grant_admin_role()
+    self.assertTrue('ROLE_ADMIN' in updated.roles)
+    self.assertEqual(1, len(updated.roles))
+
+  def test_revoke_admin(self):
+    res = utils.MockResource(self)
+
+    user = ApiUser(res, "alice")
+    expected = { 'name' : 'alice', 'roles' : [ ] }
+    res.expect("PUT", "/users/alice", data=expected, retdata=expected)
+    updated = user.revoke_admin_role()
+    self.assertEqual(0, len(updated.roles))
+
+  def test_update_user(self):
+    res = utils.MockResource(self)
+    user = ApiUser(res, "alice", roles=[ 'ROLE_LIMITED' ])
+    expected = { 'name' : 'alice', 'roles' : [ 'ROLE_LIMITED'] }
+    res.expect("PUT", "/users/alice", data=expected, retdata=expected)
+    updated = update_user(res, user)
+    self.assertTrue('ROLE_LIMITED' in updated.roles)
+    self.assertEqual(1, len(updated.roles))
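
As the tests above show, grant_admin_role() and revoke_admin_role() each issue a PUT to /users/alice with the updated role list. A minimal sketch of the same calls against a live deployment, with placeholder host, credentials and user name:

    # Sketch only: toggle the admin role for a (placeholder) user.
    from cm_api.api_client import ApiResource
    from cm_api.endpoints.users import ApiUser

    api = ApiResource("cm-host.example.com", 7180, "admin", "admin")
    user = ApiUser(api, "alice")
    updated = user.grant_admin_role()
    print(updated.roles)      # expected to contain 'ROLE_ADMIN'
    updated = updated.revoke_admin_role()
    print(updated.roles)      # expected to be empty
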
diff --git a/cm-api/src/cm_api_tests/test_yarn.py b/cm-api/src/cm_api_tests/test_yarn.py
new file mode 100644 (file)
index 0000000..5e97d46
--- /dev/null
@@ -0,0 +1,63 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import unittest
+from cm_api.endpoints.clusters import *
+from cm_api.endpoints.services import *
+from cm_api.endpoints.types import *
+from cm_api_tests import utils
+
+class TestYarn(unittest.TestCase):
+
+  def test_get_yarn_applications(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    time = datetime.datetime.now()
+
+    resource.expect("GET", "/cm/service/yarnApplications",
+    retdata={ 'applications': [], 'warnings' : [] },
+    params={ 'from':time.isoformat(), 'to':time.isoformat(), \
+        'filter':'', 'limit':100, 'offset':0 })
+    resp = service.get_yarn_applications(time, time)
+    self.assertEquals(0, len(resp.applications))
+
+  def test_kill_application(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    resource.expect("POST", "/cm/service/yarnApplications/randomId/kill",
+        retdata={ 'warning' : 'test' })
+    resp = service.kill_yarn_application('randomId')
+    self.assertEquals('test', resp.warning)
+
+  def test_attributes(self):
+    resource = utils.MockResource(self)
+    service = ApiService(resource, name="bar")
+
+    resource.expect("GET", "/cm/service/yarnApplications/attributes",
+        retdata=[{ 'name' : 'test',
+                  'type' : 'STRING',
+                  'displayName' : 'testDisplayName',
+                  'supportsHistograms' : True,
+                  'description' : 'testDescription' }])
+    resp = service.get_yarn_application_attributes()
+    self.assertEquals(1, len(resp))
+    attr = resp[0]
+    self.assertIsInstance(attr, ApiYarnApplicationAttribute)
+    self.assertEquals('test', attr.name)
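
The YARN tests above cover application listing, killing an application, and the attribute metadata endpoint. A minimal sketch of listing recent applications on a live cluster; the host, credentials, cluster and service names are placeholders, and the applicationId field is an assumption not shown in this test.

    # Sketch only: list YARN applications seen in the last hour.
    import datetime
    from cm_api.api_client import ApiResource

    api = ApiResource("cm-host.example.com", 7180, "admin", "admin")
    yarn = api.get_cluster("cluster1").get_service("yarn")

    to_time = datetime.datetime.now()
    from_time = to_time - datetime.timedelta(hours=1)
    resp = yarn.get_yarn_applications(from_time, to_time)
    for app in resp.applications:
      print(app.applicationId)   # field name assumed for illustration
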
diff --git a/cm-api/src/cm_api_tests/utils.py b/cm-api/src/cm_api_tests/utils.py
new file mode 100644 (file)
index 0000000..ac43419
--- /dev/null
@@ -0,0 +1,86 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cm_api import api_client
+from cm_api.endpoints.types import Attr
+from cm_api.resource import Resource
+
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+class MockResource(Resource):
+  """
+  Allows code to control the behavior of a resource's "invoke" method for
+  unit testing.
+  """
+
+  def __init__(self, test, version=api_client.API_CURRENT_VERSION):
+    Resource.__init__(self, None)
+    self._next_expect = None
+    self.test = test
+    self.version = version
+
+  @property
+  def base_url(self):
+    return ""
+
+  def invoke(self, method, relpath=None, params=None, data=None, headers=None):
+    """
+    Checks the expected input data and returns the appropriate data to the caller.
+    """
+    exp_method, exp_path, exp_params, exp_data, exp_headers, retdata = self._next_expect
+    self._next_expect = None
+
+    if exp_method is not None:
+      self.test.assertEquals(exp_method, method)
+    if exp_path is not None:
+      self.test.assertEquals(exp_path, relpath)
+    if exp_params is not None:
+      self.test.assertEquals(exp_params, params)
+    if exp_data is not None:
+      if not isinstance(exp_data, str):
+        exp_data = json.dumps(Attr(is_api_list=True).to_json(exp_data, False))
+      self.test.assertEquals(exp_data, data)
+    if exp_headers is not None:
+      self.test.assertEquals(exp_headers, headers)
+    return retdata
+
+  def expect(self, method, reqpath, params=None, data=None, headers=None,
+      retdata=None):
+    """
+    Sets the data to expect in the next call to invoke().
+
+    @param method: method to expect, or None for any.
+    @param reqpath: request path, or None for any.
+    @param params: query parameters, or None for any.
+    @param data: request body, or None for any.
+    @param headers: request headers, or None for any.
+    @param retdata: data to return from the invoke call.
+    """
+    self._next_expect = (method, reqpath, params, data, headers, retdata)
+
+def deserialize(raw_data, cls):
+  """
+  Deserializes raw JSON data into an instance of cls.
+
+  The data is deserialized, serialized again using the class's to_json_dict()
+  implementation, and deserialized again, to make sure both from_json_dict()
+  and to_json_dict() are working.
+  """
+  instance = cls.from_json_dict(json.loads(raw_data), None)
+  return cls.from_json_dict(instance.to_json_dict(preserve_ro=True), None)
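
MockResource is the backbone of these unit tests: expect() primes exactly one call, and the next invoke() made by any endpoint wrapper is checked against it. A minimal sketch of a test written against it, mirroring test_users.py above:

    # Sketch only: expect() primes the next invoke(); the endpoint call must match it.
    import unittest
    from cm_api.endpoints.users import ApiUser
    from cm_api_tests import utils

    class ExampleTest(unittest.TestCase):
      def test_revoke_admin(self):
        res = utils.MockResource(self)
        expected = { 'name' : 'alice', 'roles' : [ ] }
        res.expect("PUT", "/users/alice", data=expected, retdata=expected)
        user = ApiUser(res, "alice")
        self.assertEqual(0, len(user.revoke_admin_role().roles))
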
diff --git a/cm-api/src/cm_shell/__init__.py b/cm-api/src/cm_shell/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cm-api/src/cm_shell/cmps.py b/cm-api/src/cm_shell/cmps.py
new file mode 100755 (executable)
index 0000000..f728c12
--- /dev/null
@@ -0,0 +1,644 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import getpass
+import argparse
+import readline
+import os
+import cmd
+from prettytable import PrettyTable
+from cm_api.api_client import ApiResource, ApiException
+from urllib2 import URLError
+
+# Config
+CONFIG = {'cluster': None, 'output_type': 'table', 'seperator': None}
+
+# Initial Prompt
+INIT_PROMPT = "cloudera> "
+
+# Banner shown at interactive shell login
+BANNER = "Welcome to the Cloudera Manager Console\nSelect a cluster using 'show clusters' and 'use'"
+
+# If True, then the user is running a non-interactive shell (i.e. scripting)
+EXECUTE = False
+
+# Readline fix for hyphens
+readline.set_completer_delims(readline.get_completer_delims().replace('-', ''))
+
+# Global API object
+api = None
+
+
+class ClouderaShell(cmd.Cmd):
+    """
+    Interactive shell for communicating with your
+    Cloudera cluster, using cm_api.
+    """
+
+    # Set initial cloudera prompt
+    prompt = INIT_PROMPT
+
+    # Set login banner
+    intro = BANNER
+
+    # Help headers
+    doc_header = "Cloudera Manager Commands"
+    undoc_header = "Other Commands"
+
+    # Initial cache is blank
+    # when autocomplete for one of these components
+    # is triggered, it will automatically cache them
+    CACHED_ROLES = {}
+    CACHED_SERVICES = None
+    CACHED_CLUSTERS = None
+
+    def preloop(self):
+        "Checks if the cluster was pre-defined"
+        if CONFIG['cluster']:
+            self.set_cluster(CONFIG['cluster'])
+        else:
+            self.cluster_object = None
+
+    def generate_output(self, headers, rows, align=None):
+        if CONFIG['output_type'] == "table":
+            table = PrettyTable(headers)
+            if align:
+                for h in align:
+                    table.align[h] = 'l'
+
+            for r in rows:
+                table.add_row(r)
+            print(table)
+
+        if CONFIG['output_type'] == "csv":
+            print(','.join(headers))
+            for r in rows:
+                print(','.join(r))
+
+        if CONFIG['output_type'] == "custom":
+            SEP = CONFIG['seperator']
+            print(SEP.join(headers))
+            for r in rows:
+                print(SEP.join(r))
+
+    def emptyline(self):
+        """Called each time a user hits enter, by
+        default it will redo the last command, this
+        is an extension so it does nothing."""
+        pass
+
+    def set_cluster(self, cluster):
+        try:
+            cluster = api.get_cluster(cluster)
+        except ApiException:
+            print("Cluster Not Found!")
+            return None
+
+        self.cluster_object = cluster
+        if not EXECUTE:
+            print("Connected to %s" % (cluster.name))
+        self.prompt = cluster.name + "> "
+        return True
+
+    @property
+    def cluster(self):
+        if EXECUTE:
+            if not self.set_cluster(CONFIG['cluster']):
+                sys.exit(1)
+            return self.cluster_object.name
+
+        if self.cluster_object:
+            return self.cluster_object.name
+        else:
+            return None
+
+    def has_cluster(self):
+        if not self.cluster:
+            print("Error: No cluster currently selected")
+            return None
+        else:
+            return True
+
+    def get_log(self, role, log_type=None):
+        if not role:
+            return None
+
+        if not self.has_cluster():
+            return None
+
+        if '-' not in role:
+            print("Please enter a valid role name")
+            return None
+
+        try:
+            service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
+            role = service.get_role(role)
+            try:
+                if EXECUTE:
+                    output = sys.stdout
+                else:
+                    output = os.popen("less", "w")
+                if log_type == "full":
+                    output.write(role.get_full_log())
+                if log_type == "stdout":
+                    output.write(role.get_stdout())
+                if log_type == "stderr":
+                    output.write(role.get_stderr())
+
+                if not EXECUTE:
+                    output.close()
+            except IOError:
+                pass
+        except ApiException:
+            print("Error: Role or Service Not Found")
+
+    def do_status(self, service):
+        """
+        List all services on the cluster
+        Usage:
+            > status
+        """
+        if service:
+            self.do_show("services", single=service)
+        else:
+            self.do_show("services")
+
+    def do_log(self, role):
+        """
+        Download log file for role
+        Usage:
+            > log <role>    Download log
+        """
+        self.get_log(role, log_type="full")
+
+    def do_stdout(self, role):
+        """
+        Download stdout file for role
+        Usage:
+            > stdout <role>     Download stdout
+        """
+        self.get_log(role, log_type="stdout")
+
+    def do_stderr(self, role):
+        """
+        Download stderr file for role
+        Usage:
+            > stderr <role>     Download stderr
+        """
+        self.get_log(role, log_type="stderr")
+
+    def do_show(self, option, single=None):
+        """
+        General System Information
+        Usage:
+            > show clusters     list of clusters this CM manages
+            > show hosts        list of all hosts CM manages
+            > show services     list of all services on this cluster
+                                including their health.
+        """
+        headers = []
+        rows = []
+        align = None
+        # show clusters
+        if option == "clusters":
+            "Display list of clusters on system"
+            headers = ["CLUSTER NAME"]
+            clusters = api.get_all_clusters()
+            for cluster in clusters:
+                rows.append([cluster.name])
+
+        # show hosts
+        if option == "hosts":
+            "Display a list of hosts avaiable on the system"
+            headers = ["HOSTNAME", "IP ADDRESS", "RACK"]
+            align = ["HOSTNAME", "IP ADDRESS", "RACK"]
+            for host in api.get_all_hosts():
+                rows.append([host.hostname, host.ipAddress, host.rackId])
+
+        # show services
+        if option == "services":
+            "Show list of services on the cluster"
+            headers = ["NAME", "SERVICE", "STATUS", "HEALTH", "CONFIG"]
+            align = ["NAME", "SERVICE"]
+
+            # Check if the user has selected a cluster
+            if not self.has_cluster():
+                print("Error: Please select a cluster first")
+                return None
+
+            if not single:
+                for s in api.get_cluster(self.cluster).get_all_services():
+                    if s.configStale:
+                        config = "STALE"
+                    else:
+                        config = "UP TO DATE"
+                    rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
+            else:
+                s = api.get_cluster(self.cluster).get_service(single)
+                if s.configStale:
+                    config = "STALE"
+                else:
+                    config = "UP TO DATE"
+                rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
+
+        self.generate_output(headers, rows, align=align)
+
+    def complete_log(self, text, line, start_index, end_index):
+        return self.roles_autocomplete(text, line, start_index, end_index)
+
+    def complete_stdout(self, text, line, start_index, end_index):
+        return self.roles_autocomplete(text, line, start_index, end_index)
+
+    def complete_stderr(self, text, line, start_index, end_index):
+        return self.roles_autocomplete(text, line, start_index, end_index)
+
+    def complete_show(self, text, line, start_index, end_index):
+        show_commands = ["clusters", "hosts", "services"]
+        if text:
+            return [c for c in show_commands if c.startswith(text)]
+        else:
+            return show_commands
+
+    def service_action(self, service, action):
+        "Perform given action on service for the selected cluster"
+        try:
+            service = api.get_cluster(self.cluster).get_service(service)
+        except ApiException:
+            print("Service not found")
+            return None
+
+        if action == "start":
+            service.start()
+        if action == "restart":
+            service.restart()
+        if action == "stop":
+            service.stop()
+
+        return True
+
+    def services_autocomplete(self, text, line, start_index, end_index, append=[]):
+        if not self.cluster:
+            return None
+        else:
+            if not self.CACHED_SERVICES:
+                services = [s.name for s in api.get_cluster(self.cluster).get_all_services()]
+                self.CACHED_SERVICES = services
+
+            if text:
+                return [s for s in self.CACHED_SERVICES + append if s.startswith(text)]
+            else:
+                return self.CACHED_SERVICES + append
+
+    def do_start_service(self, service):
+        """
+        Start a service
+        Usage:
+            > start_service <service>
+        """
+        if not self.has_cluster():
+            return None
+
+        if self.service_action(service=service, action="start"):
+            print("%s is being started" % (service))
+        else:
+            print("Error starting service")
+            return None
+
+    def complete_start_service(self, text, line, start_index, end_index):
+        return self.services_autocomplete(text, line, start_index, end_index)
+
+    def do_restart_service(self, service):
+        """
+        Restart a service
+        Usage:
+            > restart_service <service>
+        """
+        if not self.has_cluster():
+            return None
+
+        if self.service_action(service=service, action="restart"):
+            print("%s is being restarted" % (service))
+        else:
+            print("Error restarting service")
+            return None
+
+    def complete_restart_service(self, text, line, start_index, end_index):
+        return self.services_autocomplete(text, line, start_index, end_index)
+
+    def do_stop_service(self, service):
+        """
+        Stop a service
+        Usage:
+            > stop_service <service>
+        """
+        if not self.has_cluster():
+            return None
+
+        if self.service_action(service=service, action="stop"):
+            print("%s is being stopped" % (service))
+        else:
+            print("Error stopping service")
+            return None
+
+    def complete_stop_service(self, text, line, start_index, end_index):
+        return self.services_autocomplete(text, line, start_index, end_index)
+
+    def do_use(self, cluster):
+        """
+        Connect to Cluster
+        Usage:
+            > use <cluster>
+        """
+        if not self.set_cluster(cluster):
+            print("Error setting cluster")
+
+    def cluster_autocomplete(self, text, line, start_index, end_index):
+        "autocomplete for the use command, obtain list of clusters first"
+        if not self.CACHED_CLUSTERS:
+            clusters = [cluster.name for cluster in api.get_all_clusters()]
+            self.CACHED_CLUSTERS = clusters
+
+        if text:
+            return [cluster for cluster in self.CACHED_CLUSTERS if cluster.startswith(text)]
+        else:
+            return self.CACHED_CLUSTERS
+
+    def complete_use(self, text, line, start_index, end_index):
+        return self.cluster_autocomplete(text, line, start_index, end_index)
+
+    def do_roles(self, service):
+        """
+        Role information
+        Usage:
+            > roles <servicename>   Display role information for service
+            > roles all             Display all role information for cluster
+        """
+        if not self.has_cluster():
+            return None
+
+        if not service:
+            return None
+
+        if service == "all":
+            if not self.CACHED_SERVICES:
+                self.services_autocomplete('', service, 0, 0)
+
+            for s in self.CACHED_SERVICES:
+                print("= " + s.upper() + " =")
+                self.do_roles(s)
+            return None
+        try:
+            service = api.get_cluster(self.cluster).get_service(service)
+            headers = ["ROLE TYPE", "HOST", "ROLE NAME", "STATE", "HEALTH", "CONFIG"]
+            align = ["ROLE TYPE", "ROLE NAME", "HOST"]
+            rows = []
+            for roletype in service.get_role_types():
+                for role in service.get_roles_by_type(roletype):
+                    if role.configStale:
+                        config = "STALE"
+                    else:
+                        config = "UP TO DATE"
+                    rows.append([role.type, role.hostRef.hostId, role.name, role.roleState, role.healthSummary, config])
+            self.generate_output(headers, rows, align=align)
+        except ApiException:
+            print("Service not found")
+
+    def complete_roles(self, text, line, start_index, end_index):
+        return self.services_autocomplete(text, line, start_index, end_index, append=["all"])
+
+    def roles_autocomplete(self, text, line, start_index, end_index):
+        "Return full list of roles"
+        if '-' not in line:
+            # Append a dash to each service, makes for faster autocompletion of
+            # roles
+            return [s + '-' for s in self.services_autocomplete(text, line, start_index, end_index)]
+        else:
+            key, role = line.split()[1].split('-', 1)
+            if key not in self.CACHED_ROLES:
+                service = api.get_cluster(self.cluster).get_service(key)
+                roles = []
+                for t in service.get_role_types():
+                    for r in service.get_roles_by_type(t):
+                        roles.append(r.name)
+
+                self.CACHED_ROLES[key] = roles
+
+            if not role:
+                return self.CACHED_ROLES[key]
+            else:
+                return [r for r in self.CACHED_ROLES[key] if r.startswith(line.split()[1])]
+
+    def do_start_role(self, role):
+        """
+        Start a role
+        Usage:
+            > start_role <role>     Restarts this role
+        """
+        if not role:
+            return None
+
+        if not self.has_cluster():
+            return None
+
+        if '-' not in role:
+            print("Please enter a valid role name")
+            return None
+
+        try:
+            service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
+            service.start_roles(role)
+            print("Starting Role")
+        except ApiException:
+            print("Error: Role or Service Not Found")
+
+    def complete_start_role(self, text, line, start_index, end_index):
+        return self.roles_autocomplete(text, line, start_index, end_index)
+
+    def do_restart_role(self, role):
+        """
+        Restart a role
+        Usage:
+            > restart_role <role>   Restarts this role
+        """
+        if not role:
+            return None
+
+        if not self.has_cluster():
+            return None
+
+        if '-' not in role:
+            print("Please enter a valid role name")
+            return None
+
+        try:
+            service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
+            service.restart_roles(role)
+            print("Restarting Role")
+        except ApiException:
+            print("Error: Role or Service Not Found")
+
+    def complete_restart_role(self, text, line, start_index, end_index):
+        return self.roles_autocomplete(text, line, start_index, end_index)
+
+    def do_stop_role(self, role):
+        """
+        Stop a role
+        Usage:
+            > stop_role <role>  Stops this role
+        """
+        if not role:
+            return None
+
+        if not self.has_cluster():
+            return None
+
+        if '-' not in role:
+            print("Please enter a valid role name")
+            return None
+
+        try:
+            service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
+            service.stop_roles(role)
+            print("Stopping Role")
+        except ApiException:
+            print("Error: Role or Service Not Found")
+
+    def complete_stop_role(self, text, line, start_index, end_index):
+        return self.roles_autocomplete(text, line, start_index, end_index)
+
+    def do_stop_cluster(self, cluster):
+        """
+        Completely stop the cluster
+        Usage:
+            > stop_cluster <cluster>
+        """
+        try:
+            cluster = api.get_cluster(cluster)
+            cluster.stop()
+            print("Stopping Cluster")
+        except ApiException:
+            print("Cluster not found")
+            return None
+
+    def complete_stop_cluster(self, text, line, start_index, end_index):
+        return self.cluster_autocomplete(text, line, start_index, end_index)
+
+    def do_start_cluster(self, cluster):
+        """
+        Start the cluster
+        Usage:
+            > start_cluster <cluster>
+        """
+        try:
+            cluster = api.get_cluster(cluster)
+            cluster.start()
+            print("Starting Cluster")
+        except ApiException:
+            print("Cluster not found")
+            return None
+
+    def complete_start_cluster(self, text, line, start_index, end_index):
+        return self.cluster_autocomplete(text, line, start_index, end_index)
+
+    def do_version(self, cluster=None):
+        """
+        Obtain cluster CDH version
+        Usage:
+            > version
+            or
+            > version <cluster>
+        """
+        if not cluster:
+            if not self.has_cluster():
+                return None
+            else:
+                cluster = api.get_cluster(self.cluster)
+        else:
+            try:
+                cluster = api.get_cluster(cluster)
+            except ApiException:
+                print("Error: Cluster not found")
+                return None
+
+        print("Version: %s" % (cluster.version))
+
+    def complete_version(self, text, line, start_index, end_index):
+        return self.cluster_autocomplete(text, line, start_index, end_index)
+
+    def complete_status(self, text, line, start_index, end_index):
+        return self.services_autocomplete(text, line, start_index, end_index)
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Cloudera Manager Shell')
+    parser.add_argument('-H', '--host', '--hostname', action='store', dest='hostname', required=True)
+    parser.add_argument('-p', '--port', action='store', dest='port', type=int, default=7180)
+    parser.add_argument('-u', '--user', '--username',  action='store', dest='username')
+    parser.add_argument('-c', '--cluster', action='store', dest='cluster')
+    parser.add_argument('--password', action='store', dest='password')
+    parser.add_argument('-e', '--execute', action='store', dest='execute')
+    parser.add_argument('-s', '--seperator', action='store', dest='seperator')
+    args = parser.parse_args()
+
+    # Check if a username was supplied; if not, prompt the user
+    if not args.username:
+        args.username = raw_input("Enter Username: ")
+
+    # Check if the password was supplied; if not, prompt the user
+    if not args.password:
+        args.password = getpass.getpass("Enter Password: ")
+
+    # Attempt to authenticate using the API
+    global api, EXECUTE
+    api = ApiResource(args.hostname, args.port, args.username, args.password)
+    try:
+        api.echo("ping")
+    except ApiException:
+        try:
+            api = ApiResource(args.hostname, args.port, args.username, args.password, version=1)
+            api.echo("ping")
+        except ApiException:
+            print("Unable to Authenticate")
+            sys.exit(1)
+    except URLError:
+        print("Error: Could not connect to %s" % (args.hostname))
+        sys.exit(1)
+
+    CONFIG['cluster'] = args.cluster
+
+    # Check if a custom separator was supplied for the output
+    if args.seperator:
+        CONFIG['output_type'] = 'custom'
+        CONFIG['seperator'] = args.seperator
+
+    # Check if user is attempting non-interactive shell
+    if args.execute:
+        EXECUTE = True
+        shell = ClouderaShell()
+        for command in args.execute.split(';'):
+            shell.onecmd(command)
+        sys.exit(0)
+
+    try:
+        ClouderaShell().cmdloop()
+    except KeyboardInterrupt:
+        sys.stdout.write("\n")
+        sys.exit(0)
+
+if __name__ == "__main__":
+    main()
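
For reference, a minimal sketch of driving the shell without the interactive prompt, roughly what the -e path in main() does; the host and credentials are placeholders, and it assumes cm_api and cm_shell are importable from the install location.

    # Sketch only: run shell commands programmatically (placeholder host/credentials).
    # Roughly equivalent to:
    #   cmps.py -H cm-host.example.com -u admin --password admin -e "show clusters; show hosts"
    from cm_api.api_client import ApiResource
    from cm_shell import cmps

    cmps.api = ApiResource("cm-host.example.com", 7180, "admin", "admin")
    shell = cmps.ClouderaShell()
    for command in "show clusters; show hosts".split(';'):
        shell.onecmd(command)
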
diff --git a/cm-api/src/cm_shell/prettytable.py b/cm-api/src/cm_shell/prettytable.py
new file mode 100644 (file)
index 0000000..30d72b5
--- /dev/null
@@ -0,0 +1,1068 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2009, Luke Maurits <luke@maurits.id.au>
+# All rights reserved.
+# With contributions from:
+#  * Chris Clark
+#  * Klein Stephane
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+#   derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__version__ = "0.6"
+
+import sys
+import copy
+import random
+import textwrap
+
+py3k = sys.version_info[0] >= 3
+if py3k:
+    unicode = str
+    basestring = str
+    from html import escape
+else:
+    from cgi import escape
+
+# hrule styles
+FRAME = 0
+ALL   = 1
+NONE  = 2
+
+# Table styles
+DEFAULT = 10
+MSWORD_FRIENDLY = 11
+PLAIN_COLUMNS = 12
+RANDOM = 20
+
+def _get_size(text):
+    max_width = 0
+    max_height = 0
+    text = _unicode(text)
+    for line in text.split("\n"):
+        max_height += 1
+        if len(line) > max_width:
+            max_width = len(line)
+
+    return (max_width, max_height)
+        
+def _unicode(value, encoding="UTF-8"):
+    if not isinstance(value, basestring):
+        value = str(value)
+    if not isinstance(value, unicode):
+        value = unicode(value, encoding, "replace")
+    return value
+
+class PrettyTable(object):
+
+    def __init__(self, field_names=None, **kwargs):
+
+        """Return a new PrettyTable instance
+
+        Arguments:
+
+        field_names - list or tuple of field names
+        fields - list or tuple of field names to include in displays
+        start - index of first data row to include in output
+        end - index of last data row to include in output PLUS ONE (list slice style)
+        fields - names of fields (columns) to include
+        header - print a header showing field names (True or False)
+        border - print a border around the table (True or False)
+        hrules - controls printing of horizontal rules after rows.  Allowed values: FRAME, ALL, NONE
+        int_format - controls formatting of integer data
+        float_format - controls formatting of floating point data
+        padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
+        left_padding_width - number of spaces on left hand side of column data
+        right_padding_width - number of spaces on right hand side of column data
+        vertical_char - single character string used to draw vertical lines
+        horizontal_char - single character string used to draw horizontal lines
+        junction_char - single character string used to draw line junctions
+        sortby - name of field to sort rows by
+        sort_key - sorting key function, applied to data points before sorting
+        reversesort - True or False to sort in descending or ascending order"""
+
+        # Data
+        self._field_names = []
+        self._align = {}
+        self._max_width = {}
+        self._rows = []
+        if field_names:
+            self.field_names = field_names
+        else:
+            self._widths = []
+        self._rows = []
+
+        # Options
+        self._options = "start end fields header border sortby reversesort sort_key attributes format hrules".split()
+        self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split())
+        self._options.extend("vertical_char horizontal_char junction_char".split())
+        for option in self._options:
+            if option in kwargs:
+                self._validate_option(option, kwargs[option])
+            else:
+                kwargs[option] = None
+
+
+        self._start = kwargs["start"] or 0
+        self._end = kwargs["end"] or None
+        self._fields = kwargs["fields"] or None
+
+        self._header = kwargs["header"] if kwargs["header"] is not None else True
+        self._border = kwargs["border"] if kwargs["border"] is not None else True
+        self._hrules = kwargs["hrules"] or FRAME
+
+        self._sortby = kwargs["sortby"] or None
+        self._reversesort = kwargs["reversesort"] or False
+        self._sort_key = kwargs["sort_key"] or (lambda x: x)
+
+        self._int_format = kwargs["int_format"] or {}
+        self._float_format = kwargs["float_format"] or {}
+        self._padding_width = kwargs["padding_width"] or 1
+        self._left_padding_width = kwargs["left_padding_width"] or None
+        self._right_padding_width = kwargs["right_padding_width"] or None
+
+        self._vertical_char = kwargs["vertical_char"] or "|"
+        self._horizontal_char = kwargs["horizontal_char"] or "-"
+        self._junction_char = kwargs["junction_char"] or "+"
+        
+        self._format = kwargs["format"] or False
+        self._attributes = kwargs["attributes"] or {}
+   
+    def __getattr__(self, name):
+
+        if name == "rowcount":
+            return len(self._rows)
+        elif name == "colcount":
+            if self._field_names:
+                return len(self._field_names)
+            elif self._rows:
+                return len(self._rows[0])
+            else:
+                return 0
+        else:
+            raise AttributeError(name)
+    def __getitem__(self, index):
+
+        newtable = copy.deepcopy(self)
+        if isinstance(index, slice):
+            newtable._rows = self._rows[index]
+        elif isinstance(index, int):
+            newtable._rows = [self._rows[index],]
+        else:
+            raise Exception("Index %s is invalid, must be an integer or slice" % str(index))
+        return newtable
+
+    def __str__(self):
+        if py3k:
+            return self.get_string()
+        else:
+            return self.get_string().encode("ascii","replace")
+
+    def __unicode__(self):
+        return self.get_string()
+
+    ##############################
+    # ATTRIBUTE VALIDATORS       #
+    ##############################
+
+    # The method _validate_option is all that should be used elsewhere in the code base to validate options.
+    # It will call the appropriate validation method for that option.  The individual validation methods should
+    # never need to be called directly (although nothing bad will happen if they *are*).
+    # Validation happens in TWO places.
+    # Firstly, in the property setters defined in the ATTRIBUTE MANAGMENT section.
+    # Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
+
+    def _validate_option(self, option, val):
+        if option in ("start", "end", "padding_width", "left_padding_width", "right_padding_width", "format"):
+            self._validate_nonnegative_int(option, val)
+        elif option in ("sortby",):
+            self._validate_field_name(option, val)
+        elif option in ("sort_key",):
+            self._validate_function(option, val)
+        elif option in ("hrules",):
+            self._validate_hrules(option, val)
+        elif option in ("fields",):
+            self._validate_all_field_names(option, val)
+        elif option in ("header", "border", "reversesort"):
+            self._validate_true_or_false(option, val)
+        elif option in ("int_format",):
+            self._validate_int_format(option, val)
+        elif option in ("float_format",):
+            self._validate_float_format(option, val)
+        elif option in ("vertical_char", "horizontal_char", "junction_char"):
+            self._validate_single_char(option, val)
+        elif option in ("attributes",):
+            self._validate_attributes(option, val)
+        else:
+            raise Exception("Unrecognised option: %s!" % option)
+
+    def _validate_align(self, val):
+        try:
+            assert val in ["l","c","r"]
+        except AssertionError:
+            raise Exception("Alignment %s is invalid, use l, c or r!" % val)
+
+    def _validate_nonnegative_int(self, name, val):
+        try:
+            assert int(val) >= 0
+        except AssertionError:
+            raise Exception("Invalid value for %s: %s!" % (name, _unicode(val)))
+
+    def _validate_true_or_false(self, name, val):
+        try:
+            assert val in (True, False)
+        except AssertionError:
+            raise Exception("Invalid value for %s!  Must be True or False." % name)
+
+    def _validate_int_format(self, name, val):
+        if val == "":
+            return
+        try:
+            assert type(val) in (str, unicode)
+            assert val.isdigit()
+        except AssertionError:
+            raise Exception("Invalid value for %s!  Must be an integer format string." % name)
+
+    def _validate_float_format(self, name, val):
+        if val == "":
+            return
+        try:
+            assert type(val) in (str, unicode)
+            assert "." in val
+            bits = val.split(".")
+            assert len(bits) <= 2
+            assert bits[0] == "" or bits[0].isdigit()
+            assert bits[1] == "" or bits[1].isdigit()
+        except AssertionError:
+            raise Exception("Invalid value for %s!  Must be a float format string." % name)
+
+    def _validate_function(self, name, val):
+        try:
+            assert hasattr(val, "__call__")
+        except AssertionError:
+            raise Exception("Invalid value for %s!  Must be a function." % name)
+
+    def _validate_hrules(self, name, val):
+        try:
+            assert val in (ALL, FRAME, NONE)
+        except AssertionError:
+            raise Exception("Invalid value for %s!  Must be ALL, FRAME or NONE." % name)
+
+    def _validate_field_name(self, name, val):
+        try:
+            assert val in self._field_names
+        except AssertionError:
+            raise Exception("Invalid field name: %s!" % val)
+
+    def _validate_all_field_names(self, name, val):
+        try:
+            for x in val:
+                self._validate_field_name(name, x)
+        except AssertionError:
+            raise Exception("fields must be a sequence of field names!")
+
+    def _validate_single_char(self, name, val):
+        try:
+            assert len(_unicode(val)) == 1
+        except AssertionError:
+            raise Exception("Invalid value for %s!  Must be a string of length 1." % name)
+
+    def _validate_attributes(self, name, val):
+        try:
+            assert isinstance(val, dict)
+        except AssertionError:
+            raise Exception("attributes must be a dictionary of name/value pairs!")
+
+    ##############################
+    # ATTRIBUTE MANAGEMENT       #
+    ##############################
+
+    def _get_field_names(self):
+        """The names of the fields
+
+        Arguments:
+
+        fields - list or tuple of field names"""
+        return self._field_names
+    def _set_field_names(self, val):
+        if self._field_names:
+            old_names = self._field_names[:]
+        self._field_names = val
+        if self._align and old_names:
+            for old_name, new_name in zip(old_names, val):
+                self._align[new_name] = self._align[old_name]
+            for old_name in old_names:
+                self._align.pop(old_name)
+        else:
+            for field in self._field_names:
+                self._align[field] = "c"
+    field_names = property(_get_field_names, _set_field_names)
+
+    def _get_align(self):
+        return self._align
+    def _set_align(self, val):
+        self._validate_align(val)
+        for field in self._field_names:
+            self._align[field] = val
+    align = property(_get_align, _set_align)
+
+    def _get_max_width(self):
+        return self._max_width
+    def _set_max_width(self, val):
+        self._validate_nonnegative_int("max_width", val)
+        for field in self._field_names:
+            self._max_width[field] = val
+    max_width = property(_get_max_width, _set_max_width)
+    
+    def _get_start(self):
+        """Start index of the range of rows to print
+
+        Arguments:
+
+        start - index of first data row to include in output"""
+        return self._start
+
+    def _set_start(self, val):
+        self._validate_option("start", val)
+        self._start = val
+    start = property(_get_start, _set_start)
+
+    def _get_end(self):
+        """End index of the range of rows to print
+
+        Arguments:
+
+        end - index of last data row to include in output PLUS ONE (list slice style)"""
+        return self._end
+    def _set_end(self, val):
+        self._validate_option("end", val)
+        self._end = val
+    end = property(_get_end, _set_end)
+
+    def _get_sortby(self):
+        """Name of field by which to sort rows
+
+        Arguments:
+
+        sortby - field name to sort by"""
+        return self._sortby
+    def _set_sortby(self, val):
+        self._validate_option("sortby", val)
+        self._sortby = val
+    sortby = property(_get_sortby, _set_sortby)
+
+    def _get_reversesort(self):
+        """Controls direction of sorting (ascending vs descending)
+
+        Arguments:
+
+        reversesort - set to True to sort by descending order, or False to sort by ascending order"""
+        return self._reversesort
+    def _set_reversesort(self, val):
+        self._validate_option("reversesort", val)
+        self._reversesort = val
+    reversesort = property(_get_reversesort, _set_reversesort)
+
+    def _get_sort_key(self):
+        """Sorting key function, applied to data points before sorting
+
+        Arguments:
+
+        sort_key - a function which takes one argument and returns something to be sorted"""
+        return self._sort_key
+    def _set_sort_key(self, val):
+        self._validate_option("sort_key", val)
+        self._sort_key = val
+    sort_key = property(_get_sort_key, _set_sort_key)
+    def _get_header(self):
+        """Controls printing of table header with field names
+
+        Arguments:
+
+        header - print a header showing field names (True or False)"""
+        return self._header
+    def _set_header(self, val):
+        self._validate_option("header", val)
+        self._header = val
+    header = property(_get_header, _set_header)
+
+    def _get_border(self):
+        """Controls printing of border around table
+
+        Arguments:
+
+        border - print a border around the table (True or False)"""
+        return self._border
+    def _set_border(self, val):
+        self._validate_option("border", val)
+        self._border = val
+    border = property(_get_border, _set_border)
+
+    def _get_hrules(self):
+        """Controls printing of horizontal rules after rows
+
+        Arguments:
+
+        hrules - horizontal rules style.  Allowed values: FRAME, ALL, NONE"""
+        return self._hrules
+    def _set_hrules(self, val):
+        self._validate_option("hrules", val)
+        self._hrules = val
+    hrules = property(_get_hrules, _set_hrules)
+
+    def _get_int_format(self):
+        """Controls formatting of integer data
+        Arguments:
+
+        int_format - integer format string"""
+        return self._int_format
+    def _set_int_format(self, val):
+        self._validate_option("int_format", val)
+        for field in self._field_names:
+            self._int_format[field] = val
+    int_format = property(_get_int_format, _set_int_format)
+
+    def _get_float_format(self):
+        """Controls formatting of floating point data
+        Arguments:
+
+        float_format - floating point format string"""
+        return self._float_format
+    def _set_float_format(self, val):
+        self._validate_option("float_format", val)
+        for field in self._field_names:
+            self._float_format[field] = val
+    float_format = property(_get_float_format, _set_float_format)
+
+    def _get_padding_width(self):
+        """The number of empty spaces between a column's edge and its content
+
+        Arguments:
+
+        padding_width - number of spaces, must be a positive integer"""
+        return self._padding_width
+    def _set_padding_width(self, val):
+        self._validate_option("padding_width", val)
+        self._padding_width = val
+    padding_width = property(_get_padding_width, _set_padding_width)
+
+    def _get_left_padding_width(self):
+        """The number of empty spaces between a column's left edge and its content
+
+        Arguments:
+
+        left_padding - number of spaces, must be a positive integer"""
+        return self._left_padding_width
+    def _set_left_padding_width(self, val):
+        self._validate_option("left_padding_width", val)
+        self._left_padding_width = val
+    left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
+
+    def _get_right_padding_width(self):
+        """The number of empty spaces between a column's right edge and its content
+
+        Arguments:
+
+        right_padding - number of spaces, must be a positive integer"""
+        return self._right_padding_width
+    def _set_right_padding_width(self, val):
+        self._validate_option("right_padding_width", val)
+        self._right_padding_width = val
+    right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
+
+    def _get_vertical_char(self):
+        """The charcter used when printing table borders to draw vertical lines
+
+        Arguments:
+
+        vertical_char - single character string used to draw vertical lines"""
+        return self._vertical_char
+    def _set_vertical_char(self, val):
+        self._validate_option("vertical_char", val)
+        self._vertical_char = val
+    vertical_char = property(_get_vertical_char, _set_vertical_char)
+
+    def _get_horizontal_char(self):
+        """The charcter used when printing table borders to draw horizontal lines
+
+        Arguments:
+
+        horizontal_char - single character string used to draw horizontal lines"""
+        return self._horizontal_char
+    def _set_horizontal_char(self, val):
+        self._validate_option("horizontal_char", val)
+        self._horizontal_char = val
+    horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
+
+    def _get_junction_char(self):
+        """The charcter used when printing table borders to draw line junctions
+
+        Arguments:
+
+        junction_char - single character string used to draw line junctions"""
+        return self._junction_char
+    def _set_junction_char(self, val):
+        self._validate_option("vertical_char", val)
+        self._junction_char = val
+    junction_char = property(_get_junction_char, _set_junction_char)
+
+    def _get_format(self):
+        """Controls whether or not HTML tables are formatted to match styling options
+
+        Arguments:
+
+        format - True or False"""
+        return self._format
+    def _set_format(self, val):
+        self._validate_option("format", val)
+        self._format = val
+    format = property(_get_format, _set_format)
+
+    def _get_attributes(self):
+        """A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML
+
+        Arguments:
+
+        attributes - dictionary of attributes"""
+        return self._attributes
+    def _set_attributes(self, val):
+        self.validate_option("attributes", val)
+        self._attributes = val
+    attributes = property(_get_attributes, _set_attributes)
+
+    ##############################
+    # OPTION MIXER               #
+    ##############################
+
+    def _get_options(self, kwargs):
+
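+        # Merge per-call keyword arguments with the stored option attributes:
+        # an option passed in kwargs is validated and used, anything else
+        # falls back to the corresponding "_<option>" instance value.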
+        options = {}
+        for option in self._options:
+            if option in kwargs:
+                self._validate_option(option, kwargs[option])
+                options[option] = kwargs[option]
+            else:
+                options[option] = getattr(self, "_"+option)
+        return options
+
+    ##############################
+    # PRESET STYLE LOGIC         #
+    ##############################
+
+    def set_style(self, style):
+
+        if style == DEFAULT:
+            self._set_default_style()
+        elif style == MSWORD_FRIENDLY:
+            self._set_msword_style()
+        elif style == PLAIN_COLUMNS:
+            self._set_columns_style()
+        elif style == RANDOM:
+            self._set_random_style()
+        else:
+            raise Exception("Invalid pre-set style!")
+
+    def _set_default_style(self):
+
+        self.header = True
+        self.border = True
+        self._hrules = FRAME
+        self.padding_width = 1
+        self.left_padding_width = 1
+        self.right_padding_width = 1
+        self.vertical_char = "|"
+        self.horizontal_char = "-"
+        self.junction_char = "+"
+
+    def _set_msword_style(self):
+
+        self.header = True
+        self.border = True
+        self._hrules = NONE
+        self.padding_width = 1
+        self.left_padding_width = 1
+        self.right_padding_width = 1
+        self.vertical_char = "|"
+
+    def _set_columns_style(self):
+
+        self.header = True
+        self.border = False
+        self.padding_width = 1
+        self.left_padding_width = 0
+        self.right_padding_width = 8
+
+    def _set_random_style(self):
+
+        # Just for fun!
+        self.header = random.choice((True, False))
+        self.border = random.choice((True, False))
+        self._hrules = random.choice((ALL, FRAME, NONE))
+        self.left_padding_width = random.randint(0,5)
+        self.right_padding_width = random.randint(0,5)
+        self.vertical_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
+        self.horizontal_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
+        self.junction_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
+
+    ##############################
+    # DATA INPUT METHODS         #
+    ##############################
+
+    def add_row(self, row):
+
+        """Add a row to the table
+
+        Arguments:
+
+        row - row of data, should be a list with as many elements as the table
+        has fields"""
+
+        if self._field_names and len(row) != len(self._field_names):
+            raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names)))
+        self._rows.append(list(row))
+
+    def del_row(self, row_index):
+
+        """Delete a row to the table
+
+        Arguments:
+
+        row_index - The index of the row you want to delete.  Indexing starts at 0."""
+
+        if row_index > len(self._rows)-1:
+            raise Exception("Cant delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
+        del self._rows[row_index]
+
+    def add_column(self, fieldname, column, align="c"):
+
+        """Add a column to the table.
+
+        Arguments:
+
+        fieldname - name of the field to contain the new column of data
+        column - column of data, should be a list with as many elements as the
+        table has rows
+        align - desired alignment for this column - "l" for left, "c" for centre and "r" for right"""
+
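+        # Accept the column only when the table is empty or the column length
+        # matches the current number of rows, growing the row list as needed.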
+        if len(self._rows) in (0, len(column)):
+            self._validate_align(align)
+            self._field_names.append(fieldname)
+            self._align[fieldname] = align
+            for i in range(0, len(column)):
+                if len(self._rows) < i+1:
+                    self._rows.append([])
+                self._rows[i].append(column[i])
+        else:
+            raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self._rows)))
+
+    def clear_rows(self):
+
+        """Delete all rows from the table but keep the current field names"""
+
+        self._rows = []
+
+    def clear(self):
+
+        """Delete all rows and field names from the table, maintaining nothing but styling options"""
+
+        self._rows = []
+        self._field_names = []
+        self._widths = []
+
+    ##############################
+    # MISC PUBLIC METHODS        #
+    ##############################
+
+    def copy(self):
+        return copy.deepcopy(self)
+
+    ##############################
+    # MISC PRIVATE METHODS       #
+    ##############################
+
+    def _format_value(self, field, value):
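+        # Apply the per-field printf-style spec, e.g. "04" -> "%04d" for ints
+        # and "6.1" -> "%6.1f" for floats; other values pass through unchanged.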
+        if isinstance(value, int) and field in self._int_format:
+            value = ("%%%sd" % self._int_format[field]) % value 
+        elif isinstance(value, float) and field in self._float_format:
+            value = ("%%%sf" % self._float_format[field]) % value 
+        return value
+
+    def _compute_widths(self, rows, options):
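+        # Start from the header widths (or zeros when no header is printed)
+        # and widen each column to fit its longest formatted value.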
+        if options["header"]:
+            widths = [_get_size(field)[0] for field in self._field_names]
+        else:
+            widths = len(self.field_names) * [0]
+        for row in rows:
+            for index, value in enumerate(row):
+                value = self._format_value(self.field_names[index], value)
+                widths[index] = max(widths[index], _get_size(_unicode(value))[0])
+        self._widths = widths
+
+    def _get_padding_widths(self, options):
+
+        if options["left_padding_width"] is not None:
+            lpad = options["left_padding_width"]
+        else:
+            lpad = options["padding_width"]
+        if options["right_padding_width"] is not None:
+            rpad = options["right_padding_width"]
+        else:
+            rpad = options["padding_width"]
+        return lpad, rpad
+
+    def _get_rows(self, options):
+        """Return only those data rows that should be printed, based on slicing and sorting.
+
+        Arguments:
+
+        options - dictionary of option settings."""
+
+        # Make a copy of only those rows in the slice range
+        rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
+        # Sort if necessary
+        if options["sortby"]:
+            sortindex = self._field_names.index(options["sortby"])
+            # Decorate
+            rows = [[row[sortindex]]+row for row in rows]
+            # Sort
+            rows.sort(reverse=options["reversesort"], key=options["sort_key"])
+            # Undecorate
+            rows = [row[1:] for row in rows]
+        return rows
+         
+    ##############################
+    # PLAIN TEXT STRING METHODS  #
+    ##############################
+
+    def get_string(self, **kwargs):
+
+        """Return string representation of table in current state.
+
+        Arguments:
+
+        start - index of first data row to include in output
+        end - index of last data row to include in output PLUS ONE (list slice style)
+        fields - names of fields (columns) to include
+        header - print a header showing field names (True or False)
+        border - print a border around the table (True or False)
+        hrules - controls printing of horizontal rules after rows.  Allowed values: FRAME, ALL, NONE
+        int_format - controls formatting of integer data
+        float_format - controls formatting of floating point data
+        padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
+        left_padding_width - number of spaces on left hand side of column data
+        right_padding_width - number of spaces on right hand side of column data
+        vertical_char - single character string used to draw vertical lines
+        horizontal_char - single character string used to draw horizontal lines
+        junction_char - single character string used to draw line junctions
+        sortby - name of field to sort rows by
+        sort_key - sorting key function, applied to data points before sorting
+        reversesort - True or False to sort in descending or ascending order"""
+
+        options = self._get_options(kwargs)
+
+        bits = []
+
+        # Don't think too hard about an empty table
+        if self.rowcount == 0:
+            return ""
+
+        rows = self._get_rows(options)
+        self._compute_widths(rows, options)
+
+        # Build rows
+        # (for now, this is done before building headers etc. because rowbits.append
+        # contains width-adjusting voodoo which has to be done first.  This is ugly
+        # and Wrong and will change soon)
+        rowbits = []
+        for row in rows:
+            rowbits.append(self._stringify_row(row, options))
+
+
+        # Add header or top of border
+        if options["header"]:
+            bits.append(self._stringify_header(options))
+        elif options["border"] and options["hrules"] != NONE:
+            bits.append(self._hrule)
+
+        # Add rows
+        bits.extend(rowbits)
+
+        # Add bottom of border
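+        # ("not hrules" is true only for FRAME, the one style whose closing
+        # rule has not already been emitted while printing the last row)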
+        if options["border"] and not options["hrules"]:
+            bits.append(self._hrule)
+        
+        string = "\n".join(bits)
+        self._nonunicode = string
+        return _unicode(string)
+
+    def _stringify_hrule(self, options):
+
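+        # Build a rule such as "+-----+-----+": one padded run of
+        # horizontal_char per visible column, joined by junction_char.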
+        if not options["border"]:
+            return ""
+        lpad, rpad = self._get_padding_widths(options)
+        bits = [options["junction_char"]]
+        for field, width in zip(self._field_names, self._widths):
+            if options["fields"] and field not in options["fields"]:
+                continue
+            bits.append((width+lpad+rpad)*options["horizontal_char"])
+            bits.append(options["junction_char"])
+        return "".join(bits)
+
+    def _stringify_header(self, options):
+
+        bits = []
+        lpad, rpad = self._get_padding_widths(options)
+        if options["border"]:
+            if options["hrules"] != NONE:
+                bits.append(self._hrule)
+                bits.append("\n")
+            bits.append(options["vertical_char"])
+        for field, width, in zip(self._field_names, self._widths):
+            if options["fields"] and field not in options["fields"]:
+                continue
+            if self._align[field] == "l":
+                bits.append(" " * lpad + _unicode(field).ljust(width) + " " * rpad)
+            elif self._align[field] == "r":
+                bits.append(" " * lpad + _unicode(field).rjust(width) + " " * rpad)
+            else:
+                bits.append(" " * lpad + _unicode(field).center(width) + " " * rpad)
+            if options["border"]:
+                bits.append(options["vertical_char"])
+        if options["border"] and options["hrules"] != NONE:
+            bits.append("\n")
+            bits.append(self._hrule)
+        return "".join(bits)
+
+    def _stringify_row(self, row, options):
+        
+        for index, value in enumerate(row):
+            row[index] = self._format_value(self.field_names[index], value)
+
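+        # Wrap any cell line that exceeds the max_width set for its field.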
+        for index, field, value, width, in zip(range(0,len(row)), self._field_names, row, self._widths):
+            # Enforce max widths
+            max_width = self._max_width.get(field, 0)
+            lines = _unicode(value).split("\n")
+            new_lines = []
+            for line in lines: 
+                if max_width and len(line) > max_width:
+                    line = textwrap.fill(line, max_width)
+                new_lines.append(line)
+            lines = new_lines
+            value = "\n".join(lines)
+            row[index] = value
+
+        #old_widths = self._widths[:]
+
+        for index, field in enumerate(self._field_names):
+            namewidth = len(field)
+            datawidth = min(self._widths[index], self._max_width.get(field, self._widths[index]))
+            if options["header"]:
+                self._widths[index] = max(namewidth, datawidth)
+            else:
+                self._widths[index] = datawidth
+        
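+        # A row is as tall as its tallest (multi-line) cell.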
+        row_height = 0
+        for c in row:
+            h = _get_size(c)[1]
+            if h > row_height:
+                row_height = h
+
+        bits = []
+        lpad, rpad = self._get_padding_widths(options)
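+        # One output buffer per text line of this row, each opening with the
+        # left border when borders are enabled.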
+        for y in range(0, row_height):
+            bits.append([])
+            if options["border"]:
+                bits[y].append(self.vertical_char)
+
+        for field, value, width, in zip(self._field_names, row, self._widths):
+
+            lines = _unicode(value).split("\n")
+            if len(lines) < row_height:
+                lines = lines + ([""] * (row_height-len(lines)))
+
+            y = 0
+            for l in lines:
+                if options["fields"] and field not in options["fields"]:
+                    continue
+
+                if self._align[field] == "l":
+                    bits[y].append(" " * lpad + _unicode(l).ljust(width) + " " * rpad)
+                elif self._align[field] == "r":
+                    bits[y].append(" " * lpad + _unicode(l).rjust(width) + " " * rpad)
+                else:
+                    bits[y].append(" " * lpad + _unicode(l).center(width) + " " * rpad)
+                if options["border"]:
+                    bits[y].append(self.vertical_char)
+
+                y += 1
+
+        self._hrule = self._stringify_hrule(options)
+        
+        if options["border"] and options["hrules"]== ALL:
+            bits[row_height-1].append("\n")
+            bits[row_height-1].append(self._hrule)
+
+        for y in range(0, row_height):
+            bits[y] = "".join(bits[y])
+
+        #self._widths = old_widths
+
+        return "\n".join(bits)
+
+    ##############################
+    # HTML STRING METHODS        #
+    ##############################
+
+    def get_html_string(self, **kwargs):
+
+        """Return string representation of HTML formatted version of table in current state.
+
+        Arguments:
+
+        start - index of first data row to include in output
+        end - index of last data row to include in output PLUS ONE (list slice style)
+        fields - names of fields (columns) to include
+        header - print a header showing field names (True or False)
+        border - print a border around the table (True or False)
+        hrules - controls printing of horizontal rules after rows.  Allowed values: FRAME, ALL, NONE
+        int_format - controls formatting of integer data
+        float_format - controls formatting of floating point data
+        padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
+        left_padding_width - number of spaces on left hand side of column data
+        right_padding_width - number of spaces on right hand side of column data
+        sortby - name of field to sort rows by
+        sort_key - sorting key function, applied to data points before sorting
+        attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag"""
+
+        options = self._get_options(kwargs)
+
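+        # "format" switches between a bare <table> and one that mirrors the
+        # text-mode padding and alignment settings via inline CSS.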
+        if options["format"]:
+            string = self._get_formatted_html_string(options)
+        else:
+            string = self._get_simple_html_string(options)
+
+        self._nonunicode = string
+        return _unicode(string)
+
+    def _get_simple_html_string(self, options):
+
+        bits = []
+        # Slow but works
+        table_tag = '<table'
+        if options["border"]:
+            table_tag += ' border="1"'
+        if options["attributes"]:
+            for attr_name in options["attributes"]:
+                table_tag += ' %s="%s"' % (attr_name, options["attributes"][attr_name])
+        table_tag += '>'
+        bits.append(table_tag)
+
+        # Headers
+        if options["header"]:
+            bits.append("    <tr>")
+            for field in self._field_names:
+                if options["fields"] and field not in options["fields"]:
+                    continue
+                bits.append("        <th>%s</th>" % escape(_unicode(field)).replace("\n", "<br />"))
+            bits.append("    </tr>")
+
+        # Data
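+        # (cell text is HTML-escaped and embedded newlines become <br /> tags)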
+        rows = self._get_rows(options)
+        for row in rows:
+            bits.append("    <tr>")
+            for field, datum in zip(self._field_names, row):
+                if options["fields"] and field not in options["fields"]:
+                    continue
+                bits.append("        <td>%s</td>" % escape(_unicode(datum)).replace("\n", "<br />"))
+            bits.append("    </tr>")
+
+        bits.append("</table>")
+        string = "\n".join(bits)
+
+        self._nonunicode = string
+        return _unicode(string)
+
+    def _get_formatted_html_string(self, options):
+
+        bits = []
+        lpad, rpad = self._get_padding_widths(options)
+        # Slow but works
+        table_tag = '<table'
+        if options["border"]:
+            table_tag += ' border="1"'
+        if options["hrules"] == NONE:
+            table_tag += ' frame="vsides" rules="cols"'
+        if options["attributes"]:
+            for attr_name in options["attributes"]:
+                table_tag += ' %s="%s"' % (attr_name, options["attributes"][attr_name])
+        table_tag += '>'
+        bits.append(table_tag)
+        # Headers
+        if options["header"]:
+            bits.append("    <tr>")
+            for field in self._field_names:
+                if options["fields"] and field not in options["fields"]:
+                    continue
+                bits.append("        <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (lpad, rpad, escape(_unicode(field)).replace("\n", "<br />")))
+            bits.append("    </tr>")
+        # Data
+        rows = self._get_rows(options)
+        for row in rows:
+            bits.append("    <tr>")
+            for field, datum in zip(self._field_names, row):
+                if options["fields"] and field not in options["fields"]:
+                    continue
+                if self._align[field] == "l":
+                    bits.append("        <td style=\"padding-left: %dem; padding-right: %dem; text-align: left\">%s</td>" % (lpad, rpad, escape(_unicode(datum)).replace("\n", "<br />")))
+                elif self._align[field] == "r":
+                    bits.append("        <td style=\"padding-left: %dem; padding-right: %dem; text-align: right\">%s</td>" % (lpad, rpad, escape(_unicode(datum)).replace("\n", "<br />")))
+                else:
+                    bits.append("        <td style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</td>" % (lpad, rpad, escape(_unicode(datum)).replace("\n", "<br />")))
+            bits.append("    </tr>")
+        bits.append("</table>")
+        string = "\n".join(bits)
+
+        self._nonunicode = string
+        return _unicode(string)
+
+def main():
+
+    x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
+    x.sortby = "Population"
+    x.reversesort = True
+    x.int_format["Area"] = "04"
+    x.float_format = "6.1"
+    x.align["City name"] = "l" # Left align city names
+    x.add_row(["Adelaide", 1295, 1158259, 600.5])
+    x.add_row(["Brisbane", 5905, 1857594, 1146.4])
+    x.add_row(["Darwin", 112, 120900, 1714.7])
+    x.add_row(["Hobart", 1357, 205556, 619.5])
+    x.add_row(["Sydney", 2058, 4336374, 1214.8])
+    x.add_row(["Melbourne", 1566, 3806092, 646.9])
+    x.add_row(["Perth", 5386, 1554769, 869.4])
+    print(x)
+    
+if __name__ == "__main__":
+    main()
diff --git a/debian/changelog b/debian/changelog
new file mode 100644 (file)
index 0000000..107d919
--- /dev/null
@@ -0,0 +1,11 @@
+cm-api (7.0~mos6.1) trusty; urgency=low
+
+  * Build python-cm-api 7.0 for Ubuntu 14.04
+
+ -- Ivan Berezovskiy <iberezovskiy@mirantis.com>  Fri, 20 Feb 2015 16:56:38 +0300
+
+cm-api (7.0) unstable; urgency=low
+
+  * Initial spec for Cloudera API Client
+
+ -- Denis Egorenko <degorenko@mirantis.com>  Fri, 19 Sep 2014 18:26:28 +0000
diff --git a/debian/compat b/debian/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/debian/control b/debian/control
new file mode 100644 (file)
index 0000000..97fd50d
--- /dev/null
@@ -0,0 +1,16 @@
+Source: cm-api
+Section: python
+Priority: extra
+Maintainer: MOS Sahara Team <mos-sahara@mirantis.com>
+Build-Depends: debhelper (>= 8.0.0),
+ python-setuptools,
+Standards-Version: 3.9.2
+Homepage: https://github.com/cloudera/cm_api
+
+Package: python-cm-api
+Architecture: all
+Depends: ${misc:Depends},
+ python,
+ ${python:Depends},
+Description: Cloudera Manager RESTful API Client.
+ Cloudera Manager is the market-leading management platform for CDH. As the industry’s first end-to-end management application for Apache Hadoop, Cloudera Manager sets the standard for enterprise deployment by delivering granular visibility into and control over every part of CDH – empowering operators to improve cluster performance, enhance quality of service, increase compliance and reduce administrative costs.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644 (file)
index 0000000..3ca7fb0
--- /dev/null
@@ -0,0 +1,27 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: cm-api
+Source:
+
+Files: debian/*
+Copyright: (c) 2015, MOS Sahara Team <mos-sahara@mirantis.com>
+License: Apache-2
+
+Files: *
+Copyright: (c) 2012, Cloudera Manager developers
+License: Apache-2
+
+License: Apache-2
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+    http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian-based systems the full text of the Apache version 2.0 license
+ can be found in /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/docs b/debian/docs
new file mode 100644 (file)
index 0000000..d019688
--- /dev/null
@@ -0,0 +1,2 @@
+README.md
+SHELL_README.md
diff --git a/debian/files b/debian/files
new file mode 100644 (file)
index 0000000..faf684c
--- /dev/null
@@ -0,0 +1 @@
+cm-api_6.0.2_all.deb python extra
diff --git a/debian/rules b/debian/rules
new file mode 100755 (executable)
index 0000000..fb50f2e
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+export DH_VERBOSE=1
+
+%:
+       dh $@ --buildsystem python_distutils --with python2.7
diff --git a/tests/runtests.sh b/tests/runtests.sh
new file mode 100644 (file)
index 0000000..d2a25fe
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash -x
+RES=0
+
+case $1 in
+    python-cm-api)
+      python -c 'import cm_api'
+      RES=$?
+      ;;
+    *)
+      echo "test not defined, skipping..."
+      ;;
+esac
+
+exit $RES